1 /* $NetBSD: memmove.S,v 1.4 2003/10/14 07:51:45 scw Exp $ */
4 * Copyright (c) 1997 The NetBSD Foundation, Inc.
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Neil A. Carson and Mark Brinicombe
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
32 #include <machine/asm.h>
33 __FBSDID("$FreeBSD$");
38 /* LINTSTUB: Func: void *memmove(void *, const void *, size_t) */
41 /* bcopy = memcpy/memmove with arguments reversed. */
42 /* LINTSTUB: Func: void bcopy(void *, void *, size_t) */
44 /* switch the source and destination registers */
49 /* Do the buffers overlap? */
51 RETeq /* Bail now if src/dst are the same */
52 subcc r3, r0, r1 /* if (dst > src) r3 = dst - src */
53 subcs r3, r1, r0 /* if (src > dst) r3 = src - dst */
54 cmp r3, r2 /* if (r3 < len) we have an overlap */
55 bcc PIC_SYM(_C_LABEL(memcpy), PLT)
57 /* Determine copy direction */
59 bcc .Lmemmove_backwards
61 moveq r0, #0 /* Quick abort for len=0 */
64 stmdb sp!, {r0, lr} /* memmove() returns dest addr */
66 blt .Lmemmove_fl4 /* less than 4 bytes */
68 bne .Lmemmove_fdestul /* oh unaligned destination addr */
70 bne .Lmemmove_fsrcul /* oh unaligned source addr */
73 /* We have aligned source and destination */
75 blt .Lmemmove_fl12 /* less than 12 bytes (4 from above) */
77 blt .Lmemmove_fl32 /* less than 32 bytes (12 from above) */
78 stmdb sp!, {r4} /* borrow r4 */
80 /* blat 32 bytes at a time */
81 /* XXX for really big copies perhaps we should use more registers */
83 ldmia r1!, {r3, r4, r12, lr}
84 stmia r0!, {r3, r4, r12, lr}
85 ldmia r1!, {r3, r4, r12, lr}
86 stmia r0!, {r3, r4, r12, lr}
91 ldmiage r1!, {r3, r4, r12, lr} /* blat a remaining 16 bytes */
92 stmiage r0!, {r3, r4, r12, lr}
94 ldmia sp!, {r4} /* return r4 */
99 /* blat 12 bytes at a time */
101 ldmiage r1!, {r3, r12, lr}
102 stmiage r0!, {r3, r12, lr}
104 bge .Lmemmove_floop12
113 ldmiage r1!, {r3, r12}
114 stmiage r0!, {r3, r12}
118 /* less than 4 bytes to go */
120 ldmiaeq sp!, {r0, pc} /* done */
122 /* copy the crud byte at a time */
132 /* erg - unaligned destination */
137 /* align destination with byte copies */
145 blt .Lmemmove_fl4 /* less than 4 bytes */
148 beq .Lmemmove_ft8 /* we have an aligned source */
150 /* erg - unaligned source */
151 /* This is where it gets nasty ... */
156 bgt .Lmemmove_fsrcul3
157 beq .Lmemmove_fsrcul2
159 blt .Lmemmove_fsrcul1loop4
163 .Lmemmove_fsrcul1loop16:
169 ldmia r1!, {r4, r5, r12, lr}
171 orr r3, r3, r4, lsr #24
173 orr r4, r4, r5, lsr #24
175 orr r5, r5, r12, lsr #24
177 orr r12, r12, lr, lsr #24
179 orr r3, r3, r4, lsl #24
181 orr r4, r4, r5, lsl #24
183 orr r5, r5, r12, lsl #24
185 orr r12, r12, lr, lsl #24
187 stmia r0!, {r3-r5, r12}
189 bge .Lmemmove_fsrcul1loop16
192 blt .Lmemmove_fsrcul1l4
194 .Lmemmove_fsrcul1loop4:
202 orr r12, r12, lr, lsr #24
204 orr r12, r12, lr, lsl #24
208 bge .Lmemmove_fsrcul1loop4
216 blt .Lmemmove_fsrcul2loop4
220 .Lmemmove_fsrcul2loop16:
226 ldmia r1!, {r4, r5, r12, lr}
228 orr r3, r3, r4, lsr #16
230 orr r4, r4, r5, lsr #16
232 orr r5, r5, r12, lsr #16
233 mov r12, r12, lsl #16
234 orr r12, r12, lr, lsr #16
236 orr r3, r3, r4, lsl #16
238 orr r4, r4, r5, lsl #16
240 orr r5, r5, r12, lsl #16
241 mov r12, r12, lsr #16
242 orr r12, r12, lr, lsl #16
244 stmia r0!, {r3-r5, r12}
246 bge .Lmemmove_fsrcul2loop16
249 blt .Lmemmove_fsrcul2l4
251 .Lmemmove_fsrcul2loop4:
259 orr r12, r12, lr, lsr #16
261 orr r12, r12, lr, lsl #16
265 bge .Lmemmove_fsrcul2loop4
273 blt .Lmemmove_fsrcul3loop4
277 .Lmemmove_fsrcul3loop16:
283 ldmia r1!, {r4, r5, r12, lr}
285 orr r3, r3, r4, lsr #8
287 orr r4, r4, r5, lsr #8
289 orr r5, r5, r12, lsr #8
290 mov r12, r12, lsl #24
291 orr r12, r12, lr, lsr #8
293 orr r3, r3, r4, lsl #8
295 orr r4, r4, r5, lsl #8
297 orr r5, r5, r12, lsl #8
298 mov r12, r12, lsr #24
299 orr r12, r12, lr, lsl #8
301 stmia r0!, {r3-r5, r12}
303 bge .Lmemmove_fsrcul3loop16
306 blt .Lmemmove_fsrcul3l4
308 .Lmemmove_fsrcul3loop4:
316 orr r12, r12, lr, lsr #8
318 orr r12, r12, lr, lsl #8
322 bge .Lmemmove_fsrcul3loop4
332 blt .Lmemmove_bl4 /* less than 4 bytes */
334 bne .Lmemmove_bdestul /* oh unaligned destination addr */
336 bne .Lmemmove_bsrcul /* oh unaligned source addr */
339 /* We have aligned source and destination */
341 blt .Lmemmove_bl12 /* less than 12 bytes (4 from above) */
343 subs r2, r2, #0x14 /* less than 32 bytes (12 from above) */
346 /* blat 32 bytes at a time */
347 /* XXX for really big copies perhaps we should use more registers */
349 ldmdb r1!, {r3, r4, r12, lr}
350 stmdb r0!, {r3, r4, r12, lr}
351 ldmdb r1!, {r3, r4, r12, lr}
352 stmdb r0!, {r3, r4, r12, lr}
354 bge .Lmemmove_bloop32
358 ldmdbge r1!, {r3, r4, r12, lr} /* blat a remaining 16 bytes */
359 stmdbge r0!, {r3, r4, r12, lr}
362 ldmdbge r1!, {r3, r12, lr} /* blat a remaining 12 bytes */
363 stmdbge r0!, {r3, r12, lr}
373 ldmdbge r1!, {r3, r12}
374 stmdbge r0!, {r3, r12}
378 /* less than 4 bytes to go */
382 /* copy the crud byte at a time */
386 ldrbge r3, [r1, #-1]!
387 strbge r3, [r0, #-1]!
388 ldrbgt r3, [r1, #-1]!
389 strbgt r3, [r0, #-1]!
392 /* erg - unaligned destination */
396 /* align destination with byte copies */
399 ldrbge r3, [r1, #-1]!
400 strbge r3, [r0, #-1]!
401 ldrbgt r3, [r1, #-1]!
402 strbgt r3, [r0, #-1]!
404 blt .Lmemmove_bl4 /* less than 4 bytes to go */
406 beq .Lmemmove_bt8 /* we have an aligned source */
408 /* erg - unaligned source */
409 /* This is where it gets nasty ... */
414 blt .Lmemmove_bsrcul1
415 beq .Lmemmove_bsrcul2
417 blt .Lmemmove_bsrcul3loop4
419 stmdb sp!, {r4, r5, lr}
421 .Lmemmove_bsrcul3loop16:
427 ldmdb r1!, {r3-r5, r12}
429 orr lr, lr, r12, lsl #24
431 orr r12, r12, r5, lsl #24
433 orr r5, r5, r4, lsl #24
435 orr r4, r4, r3, lsl #24
437 orr lr, lr, r12, lsr #24
439 orr r12, r12, r5, lsr #24
441 orr r5, r5, r4, lsr #24
443 orr r4, r4, r3, lsr #24
445 stmdb r0!, {r4, r5, r12, lr}
447 bge .Lmemmove_bsrcul3loop16
448 ldmia sp!, {r4, r5, lr}
450 blt .Lmemmove_bsrcul3l4
452 .Lmemmove_bsrcul3loop4:
460 orr r12, r12, r3, lsl #24
462 orr r12, r12, r3, lsr #24
466 bge .Lmemmove_bsrcul3loop4
474 blt .Lmemmove_bsrcul2loop4
476 stmdb sp!, {r4, r5, lr}
478 .Lmemmove_bsrcul2loop16:
484 ldmdb r1!, {r3-r5, r12}
486 orr lr, lr, r12, lsl #16
487 mov r12, r12, lsr #16
488 orr r12, r12, r5, lsl #16
490 orr r5, r5, r4, lsl #16
492 orr r4, r4, r3, lsl #16
494 orr lr, lr, r12, lsr #16
495 mov r12, r12, lsl #16
496 orr r12, r12, r5, lsr #16
498 orr r5, r5, r4, lsr #16
500 orr r4, r4, r3, lsr #16
502 stmdb r0!, {r4, r5, r12, lr}
504 bge .Lmemmove_bsrcul2loop16
505 ldmia sp!, {r4, r5, lr}
507 blt .Lmemmove_bsrcul2l4
509 .Lmemmove_bsrcul2loop4:
517 orr r12, r12, r3, lsl #16
519 orr r12, r12, r3, lsr #16
523 bge .Lmemmove_bsrcul2loop4
531 blt .Lmemmove_bsrcul1loop4
533 stmdb sp!, {r4, r5, lr}
535 .Lmemmove_bsrcul1loop32:
541 ldmdb r1!, {r3-r5, r12}
543 orr lr, lr, r12, lsl #8
544 mov r12, r12, lsr #24
545 orr r12, r12, r5, lsl #8
547 orr r5, r5, r4, lsl #8
549 orr r4, r4, r3, lsl #8
551 orr lr, lr, r12, lsr #8
552 mov r12, r12, lsl #24
553 orr r12, r12, r5, lsr #8
555 orr r5, r5, r4, lsr #8
557 orr r4, r4, r3, lsr #8
559 stmdb r0!, {r4, r5, r12, lr}
561 bge .Lmemmove_bsrcul1loop32
562 ldmia sp!, {r4, r5, lr}
564 blt .Lmemmove_bsrcul1l4
566 .Lmemmove_bsrcul1loop4:
574 orr r12, r12, r3, lsl #8
576 orr r12, r12, r3, lsr #8
580 bge .Lmemmove_bsrcul1loop4