1 /* $NetBSD: bcopyinout.S,v 1.11 2003/10/13 21:22:40 scw Exp $ */
4 * Copyright (c) 2002 Wasabi Systems, Inc.
7 * Written by Allen Briggs for Wasabi Systems, Inc.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
41 #include <machine/asm.h>
42 #include <sys/errno.h>
45 .word _C_LABEL(_arm_memcpy)
47 .word _C_LABEL(_min_memcpy_size)
49 __FBSDID("$FreeBSD$");
51 #include <arm/arm/bcopyinout_xscale.S>
/*
 * GET_PCB(tmp): leave the address of the current thread's pcb pointer
 * field in 'tmp'.  CP15 c13/c0/4 is TPIDRPRW, the privileged software
 * thread-ID register — presumably holding curthread here (armv6+
 * convention); TD_PCB is presumably offsetof(struct thread, td_pcb) —
 * confirm both against genassym/full source.  Callers then load through
 * the resulting address to get the pcb itself.
 * (Comments kept outside the macro body: a comment after a '\'
 * continuation would break the #define.)
 */
58 #define GET_PCB(tmp) \
59 mrc p15, 0, tmp, c13, c0, 4; \
60 add tmp, tmp, #(TD_PCB)
63 .word _C_LABEL(__pcpu) + PC_CURPCB
65 #define GET_PCB(tmp) \
/*
 * Push/pop the callee-saved registers r4-r11 around the unrolled copy
 * loops (stmfd/ldmfd on sp! is the ARM full-descending push/pop idiom).
 */
70 #define SAVE_REGS stmfd sp!, {r4-r11}
71 #define RESTORE_REGS ldmfd sp!, {r4-r11}
73 #if defined(_ARM_ARCH_5E)
/*
 * PREFETCH(rx, o): hint-prefetch the cache line at [rx + o] using the
 * ARMv5E 'pld' instruction.  The offset is an immediate, written #(o);
 * the parentheses keep compound offset expressions intact through the
 * C preprocessor.  (A no-op fallback variant exists for pre-v5E CPUs.)
 */
75 #define PREFETCH(rx,o) pld [ rx , #(o) ]
77 #define PREFETCH(rx,o)
81 * r0 = user space address
82 * r1 = kernel space address
85 * Copies bytes from user space to kernel space
87 * We save/restore r4-r11:
91 /* Quick exit if length is zero */
100 ldr r3, .L_min_memcpy_size
104 stmfd sp!, {r0-r2, r4, lr}
108 mov r3, #2 /* SRC_IS_USER */
109 ldr r4, .L_arm_memcpy
113 ldmfd sp!, {r0-r2, r4, lr}
123 ldr r5, [r4, #PCB_ONFAULT]
125 str r3, [r4, #PCB_ONFAULT]
131 * If not too many bytes, take the slow path.
137 * Align destination to word boundary.
140 ldr pc, [pc, r6, lsl #2]
146 .Lial3: ldrbt r6, [r0], #1
149 .Lial2: ldrbt r7, [r0], #1
152 .Lial1: ldrbt r6, [r0], #1
158 * If few bytes left, finish slow.
164 * If source is not aligned, finish slow.
169 cmp r2, #0x60 /* Must be > 0x5f for unrolled cacheline */
173 * Align destination to cacheline boundary.
174 * If source and destination are nicely aligned, this can be a big
175 * win. If not, it's still cheaper to copy in groups of 32 even if
176 * we don't get the nice cacheline alignment.
189 .Lical28:ldrt r6, [r0], #4
192 .Lical24:ldrt r7, [r0], #4
195 .Lical20:ldrt r6, [r0], #4
198 .Lical16:ldrt r7, [r0], #4
201 .Lical12:ldrt r6, [r0], #4
204 .Lical8:ldrt r7, [r0], #4
207 .Lical4:ldrt r6, [r0], #4
212 * We start with > 0x40 bytes to copy (>= 0x60 got us into this
213 * part of the code, and we may have knocked that down by as much
214 * as 0x1c getting aligned).
216 * This loop basically works out to:
218 * prefetch-next-cacheline(s)
221 * } while (bytes >= 0x40);
231 /* Copy a cacheline */
248 /* Copy a cacheline */
273 * If we're done, bail.
280 ldr pc, [pc, r6, lsl #2]
286 .Lic4: ldrbt r6, [r0], #1
289 .Lic3: ldrbt r7, [r0], #1
292 .Lic2: ldrbt r6, [r0], #1
295 .Lic1: ldrbt r7, [r0], #1
304 str r5, [r4, #PCB_ONFAULT]
311 str r5, [r4, #PCB_ONFAULT]
318 * r0 = kernel space address
319 * r1 = user space address
322 * Copies bytes from kernel space to user space
324 * We save/restore r4-r11:
329 /* Quick exit if length is zero */
334 ldr r3, .L_arm_memcpy
338 ldr r3, .L_min_memcpy_size
342 stmfd sp!, {r0-r2, r4, lr}
346 mov r3, #1 /* DST_IS_USER */
347 ldr r4, .L_arm_memcpy
351 ldmfd sp!, {r0-r2, r4, lr}
360 ldr r5, [r4, #PCB_ONFAULT]
362 str r3, [r4, #PCB_ONFAULT]
368 * If not too many bytes, take the slow path.
374 * Align destination to word boundary.
377 ldr pc, [pc, r6, lsl #2]
383 .Lal3: ldrb r6, [r0], #1
386 .Lal2: ldrb r7, [r0], #1
389 .Lal1: ldrb r6, [r0], #1
395 * If few bytes left, finish slow.
401 * If source is not aligned, finish slow.
406 cmp r2, #0x60 /* Must be > 0x5f for unrolled cacheline */
410 * Align source & destination to cacheline boundary.
423 .Lcal28:ldr r6, [r0], #4
426 .Lcal24:ldr r7, [r0], #4
429 .Lcal20:ldr r6, [r0], #4
432 .Lcal16:ldr r7, [r0], #4
435 .Lcal12:ldr r6, [r0], #4
438 .Lcal8: ldr r7, [r0], #4
441 .Lcal4: ldr r6, [r0], #4
446 * We start with > 0x40 bytes to copy (>= 0x60 got us into this
447 * part of the code, and we may have knocked that down by as much
448 * as 0x1c getting aligned).
450 * This loop basically works out to:
452 * prefetch-next-cacheline(s)
455 * } while (bytes >= 0x40);
465 /* Copy a cacheline */
482 /* Copy a cacheline */
507 * If we're done, bail.
514 ldr pc, [pc, r6, lsl #2]
520 .Lc4: ldrb r6, [r0], #1
523 .Lc3: ldrb r7, [r0], #1
526 .Lc2: ldrb r6, [r0], #1
529 .Lc1: ldrb r7, [r0], #1
538 str r5, [r4, #PCB_ONFAULT]
546 * int badaddr_read_1(const uint8_t *src, uint8_t *dest)
548 * Copies a single 8-bit value from src to dest, returning 0 on success,
549 * else EFAULT if a page fault occurred.
551 ENTRY(badaddr_read_1)
/* Save the caller's onfault handler so it can be restored on exit. */
555 ldr ip, [r2, #PCB_ONFAULT]
/*
 * Install a temporary fault handler for the probe load.  r2 presumably
 * points at the current pcb and r3 at the local catch label — both are
 * set up off-view; confirm against the full source.
 */
557 str r3, [r2, #PCB_ONFAULT]
/* Probe load (off-view) did not fault: return success. */
566 mov r0, #0 /* No fault */
/* Common exit: restore the saved onfault handler (fault path jumps here too). */
567 1: str ip, [r2, #PCB_ONFAULT]
572 * int badaddr_read_2(const uint16_t *src, uint16_t *dest)
574 * Copies a single 16-bit value from src to dest, returning 0 on success,
575 * else EFAULT if a page fault occurred.
577 ENTRY(badaddr_read_2)
/* Save the caller's onfault handler so it can be restored on exit. */
581 ldr ip, [r2, #PCB_ONFAULT]
/*
 * Install a temporary fault handler for the 16-bit probe load.  r2
 * presumably points at the current pcb and r3 at the local catch
 * label — both are set up off-view; confirm against the full source.
 */
583 str r3, [r2, #PCB_ONFAULT]
/* Probe load (off-view) did not fault: return success. */
592 mov r0, #0 /* No fault */
/* Common exit: restore the saved onfault handler (fault path jumps here too). */
593 1: str ip, [r2, #PCB_ONFAULT]
598 * int badaddr_read_4(const uint32_t *src, uint32_t *dest)
600 * Copies a single 32-bit value from src to dest, returning 0 on success,
601 * else EFAULT if a page fault occurred.
603 ENTRY(badaddr_read_4)
/* Save the caller's onfault handler so it can be restored on exit. */
607 ldr ip, [r2, #PCB_ONFAULT]
/*
 * Install a temporary fault handler for the 32-bit probe load.  r2
 * presumably points at the current pcb and r3 at the local catch
 * label — both are set up off-view; confirm against the full source.
 */
609 str r3, [r2, #PCB_ONFAULT]
/* Probe load (off-view) did not fault: return success. */
618 mov r0, #0 /* No fault */
/* Common exit: restore the saved onfault handler (fault path jumps here too). */
619 1: str ip, [r2, #PCB_ONFAULT]