2 * Copyright (c) 2004 Olivier Houchard
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Copyright 2003 Wasabi Systems, Inc.
28 * All rights reserved.
30 * Written by Steve C. Woodford for Wasabi Systems, Inc.
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed for the NetBSD Project by
43 * Wasabi Systems, Inc.
44 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
45 * or promote products derived from this software without specific prior
48 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
50 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
51 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
52 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
53 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
54 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
55 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
56 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
57 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
58 * POSSIBILITY OF SUCH DAMAGE.
61 * Copyright (c) 1997 The NetBSD Foundation, Inc.
62 * All rights reserved.
64 * This code is derived from software contributed to The NetBSD Foundation
65 * by Neil A. Carson and Mark Brinicombe
67 * Redistribution and use in source and binary forms, with or without
68 * modification, are permitted provided that the following conditions
70 * 1. Redistributions of source code must retain the above copyright
71 * notice, this list of conditions and the following disclaimer.
72 * 2. Redistributions in binary form must reproduce the above copyright
73 * notice, this list of conditions and the following disclaimer in the
74 * documentation and/or other materials provided with the distribution.
76 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
77 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
78 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
79 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
80 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
81 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
82 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
83 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
84 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
85 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
86 * POSSIBILITY OF SUCH DAMAGE.
89 #include <machine/asm.h>
90 __FBSDID("$FreeBSD$");
95 .word _C_LABEL(_arm_memcpy)
97 .word _C_LABEL(_arm_bzero)
99 .word _C_LABEL(_min_memcpy_size)
101 .word _C_LABEL(_min_bzero_size)
103 * memset: Sets a block of memory to the specified value
108 * r2 - number of bytes to write
113 /* LINTSTUB: Func: void bzero(void *, size_t) */
119 ldr r2, .L_min_bzero_size
123 stmfd sp!, {r0, r1, lr}
128 ldmfd sp!, {r0, r1, lr}
134 /* LINTSTUB: Func: void *memset(void *, int, size_t) */
136 and r3, r1, #0xff /* We deal with bytes */
139 cmp r1, #0x04 /* Do we have less than 4 bytes */
141 blt .Lmemset_lessthanfour
143 /* Ok first we will word align the address */
144 ands r2, ip, #0x03 /* Get the bottom two bits */
145 bne .Lmemset_wordunaligned /* The address is not word aligned */
147 /* We are now word aligned */
148 .Lmemset_wordaligned:
149 orr r3, r3, r3, lsl #8 /* Extend value to 16-bits */
151 tst ip, #0x04 /* Quad-align for armv5e */
155 orr r3, r3, r3, lsl #16 /* Extend value to 32-bits */
157 subne r1, r1, #0x04 /* Quad-align if necessary */
158 strne r3, [ip], #0x04
161 blt .Lmemset_loop4 /* If less than 16 then use words */
162 mov r2, r3 /* Duplicate data */
163 cmp r1, #0x80 /* If < 128 then skip the big loop */
166 /* Do 128 bytes at a time */
170 strged r2, [ip], #0x08
171 strged r2, [ip], #0x08
172 strged r2, [ip], #0x08
173 strged r2, [ip], #0x08
174 strged r2, [ip], #0x08
175 strged r2, [ip], #0x08
176 strged r2, [ip], #0x08
177 strged r2, [ip], #0x08
178 strged r2, [ip], #0x08
179 strged r2, [ip], #0x08
180 strged r2, [ip], #0x08
181 strged r2, [ip], #0x08
182 strged r2, [ip], #0x08
183 strged r2, [ip], #0x08
184 strged r2, [ip], #0x08
185 strged r2, [ip], #0x08
205 RETeq /* Zero length so just exit */
207 add r1, r1, #0x80 /* Adjust for extra sub */
209 /* Do 32 bytes at a time */
213 strged r2, [ip], #0x08
214 strged r2, [ip], #0x08
215 strged r2, [ip], #0x08
216 strged r2, [ip], #0x08
224 RETeq /* Zero length so just exit */
226 adds r1, r1, #0x10 /* Partially adjust for extra sub */
228 /* Deal with 16 bytes or more */
230 strged r2, [ip], #0x08
231 strged r2, [ip], #0x08
236 RETeq /* Zero length so just exit */
238 addlt r1, r1, #0x10 /* Possibly adjust for extra sub */
240 /* We have at least 4 bytes so copy as words */
243 strge r3, [ip], #0x04
245 RETeq /* Zero length so just exit */
248 /* Compensate for 64-bit alignment check */
256 strb r3, [ip], #0x01 /* Set 1 byte */
257 strgeb r3, [ip], #0x01 /* Set another byte */
258 strgtb r3, [ip] /* and a third */
261 .Lmemset_wordunaligned:
263 strb r3, [ip], #0x01 /* Set 1 byte */
265 strgeb r3, [ip], #0x01 /* Set another byte */
267 strgtb r3, [ip], #0x01 /* and a third */
268 cmp r1, #0x04 /* More than 4 bytes left? */
269 bge .Lmemset_wordaligned /* Yup */
271 .Lmemset_lessthanfour:
273 RETeq /* Zero length so exit */
274 strb r3, [ip], #0x01 /* Set 1 byte */
276 strgeb r3, [ip], #0x01 /* Set another byte */
277 strgtb r3, [ip] /* and a third */
287 /* Are both addresses aligned the same way? */
290 RETeq /* len == 0, or same addresses! */
293 bne .Lmemcmp_bytewise2 /* Badly aligned. Do it the slow way */
295 /* Word-align the addresses, if necessary */
298 add r3, r3, r3, lsl #1
299 addne pc, pc, r3, lsl #3
302 /* Compare up to 3 bytes */
310 /* Compare up to 2 bytes */
326 /* Compare 4 bytes at a time, if possible */
328 bcc .Lmemcmp_bytewise
329 .Lmemcmp_word_aligned:
334 beq .Lmemcmp_word_aligned
337 /* Correct for extra subtraction, and check if done */
339 cmpeq r0, #0x00 /* If done, did all bytes match? */
340 RETeq /* Yup. Just return */
342 /* Re-do the final word byte-wise */
353 beq .Lmemcmp_bytewise2
358 * 6 byte compares are very common, thanks to the network stack.
359 * This code is hand-scheduled to reduce the number of stalls for
360 * load results. Everything else being equal, this will be ~32%
361 * faster than a byte-wise memcmp.
365 ldrb r3, [r1, #0x00] /* r3 = b2#0 */
366 ldrb r0, [ip, #0x00] /* r0 = b1#0 */
367 ldrb r2, [r1, #0x01] /* r2 = b2#1 */
368 subs r0, r0, r3 /* r0 = b1#0 - b2#0 */
369 ldreqb r3, [ip, #0x01] /* r3 = b1#1 */
370 RETne /* Return if mismatch on #0 */
371 subs r0, r3, r2 /* r0 = b1#1 - b2#1 */
372 ldreqb r3, [r1, #0x02] /* r3 = b2#2 */
373 ldreqb r0, [ip, #0x02] /* r0 = b1#2 */
374 RETne /* Return if mismatch on #1 */
375 ldrb r2, [r1, #0x03] /* r2 = b2#3 */
376 subs r0, r0, r3 /* r0 = b1#2 - b2#2 */
377 ldreqb r3, [ip, #0x03] /* r3 = b1#3 */
378 RETne /* Return if mismatch on #2 */
379 subs r0, r3, r2 /* r0 = b1#3 - b2#3 */
380 ldreqb r3, [r1, #0x04] /* r3 = b2#4 */
381 ldreqb r0, [ip, #0x04] /* r0 = b1#4 */
382 RETne /* Return if mismatch on #3 */
383 ldrb r2, [r1, #0x05] /* r2 = b2#5 */
384 subs r0, r0, r3 /* r0 = b1#4 - b2#4 */
385 ldreqb r3, [ip, #0x05] /* r3 = b1#5 */
386 RETne /* Return if mismatch on #4 */
387 sub r0, r3, r2 /* r0 = b1#5 - b2#5 */
392 /* switch the source and destination registers */
397 /* Do the buffers overlap? */
399 RETeq /* Bail now if src/dst are the same */
400 subcc r3, r0, r1 /* if (dst > src) r3 = dst - src */
401 subcs r3, r1, r0 /* if (src > dst) r3 = src - dst */
402 cmp r3, r2 /* if (r3 < len) we have an overlap */
403 bcc PIC_SYM(_C_LABEL(memcpy), PLT)
405 /* Determine copy direction */
407 bcc .Lmemmove_backwards
409 moveq r0, #0 /* Quick abort for len=0 */
412 stmdb sp!, {r0, lr} /* memmove() returns dest addr */
414 blt .Lmemmove_fl4 /* less than 4 bytes */
416 bne .Lmemmove_fdestul /* oh unaligned destination addr */
418 bne .Lmemmove_fsrcul /* oh unaligned source addr */
421 /* We have aligned source and destination */
423 blt .Lmemmove_fl12 /* less than 12 bytes (4 from above) */
425 blt .Lmemmove_fl32 /* less than 32 bytes (12 from above) */
426 stmdb sp!, {r4} /* borrow r4 */
428 /* blat 32 bytes at a time */
429 /* XXX for really big copies perhaps we should use more registers */
431 ldmia r1!, {r3, r4, r12, lr}
432 stmia r0!, {r3, r4, r12, lr}
433 ldmia r1!, {r3, r4, r12, lr}
434 stmia r0!, {r3, r4, r12, lr}
436 bge .Lmemmove_floop32
439 ldmgeia r1!, {r3, r4, r12, lr} /* blat a remaining 16 bytes */
440 stmgeia r0!, {r3, r4, r12, lr}
442 ldmia sp!, {r4} /* return r4 */
447 /* blat 12 bytes at a time */
449 ldmgeia r1!, {r3, r12, lr}
450 stmgeia r0!, {r3, r12, lr}
452 bge .Lmemmove_floop12
461 ldmgeia r1!, {r3, r12}
462 stmgeia r0!, {r3, r12}
466 /* less than 4 bytes to go */
468 ldmeqia sp!, {r0, pc} /* done */
470 /* copy the crud byte at a time */
480 /* erg - unaligned destination */
485 /* align destination with byte copies */
493 blt .Lmemmove_fl4 /* less than 4 bytes */
496 beq .Lmemmove_ft8 /* we have an aligned source */
498 /* erg - unaligned source */
499 /* This is where it gets nasty ... */
504 bgt .Lmemmove_fsrcul3
505 beq .Lmemmove_fsrcul2
507 blt .Lmemmove_fsrcul1loop4
511 .Lmemmove_fsrcul1loop16:
517 ldmia r1!, {r4, r5, r12, lr}
519 orr r3, r3, r4, lsr #24
521 orr r4, r4, r5, lsr #24
523 orr r5, r5, r12, lsr #24
525 orr r12, r12, lr, lsr #24
527 orr r3, r3, r4, lsl #24
529 orr r4, r4, r5, lsl #24
531 orr r5, r5, r12, lsl #24
533 orr r12, r12, lr, lsl #24
535 stmia r0!, {r3-r5, r12}
537 bge .Lmemmove_fsrcul1loop16
540 blt .Lmemmove_fsrcul1l4
542 .Lmemmove_fsrcul1loop4:
550 orr r12, r12, lr, lsr #24
552 orr r12, r12, lr, lsl #24
556 bge .Lmemmove_fsrcul1loop4
564 blt .Lmemmove_fsrcul2loop4
568 .Lmemmove_fsrcul2loop16:
574 ldmia r1!, {r4, r5, r12, lr}
576 orr r3, r3, r4, lsr #16
578 orr r4, r4, r5, lsr #16
580 orr r5, r5, r12, lsr #16
581 mov r12, r12, lsl #16
582 orr r12, r12, lr, lsr #16
584 orr r3, r3, r4, lsl #16
586 orr r4, r4, r5, lsl #16
588 orr r5, r5, r12, lsl #16
589 mov r12, r12, lsr #16
590 orr r12, r12, lr, lsl #16
592 stmia r0!, {r3-r5, r12}
594 bge .Lmemmove_fsrcul2loop16
597 blt .Lmemmove_fsrcul2l4
599 .Lmemmove_fsrcul2loop4:
607 orr r12, r12, lr, lsr #16
609 orr r12, r12, lr, lsl #16
613 bge .Lmemmove_fsrcul2loop4
621 blt .Lmemmove_fsrcul3loop4
625 .Lmemmove_fsrcul3loop16:
631 ldmia r1!, {r4, r5, r12, lr}
633 orr r3, r3, r4, lsr #8
635 orr r4, r4, r5, lsr #8
637 orr r5, r5, r12, lsr #8
638 mov r12, r12, lsl #24
639 orr r12, r12, lr, lsr #8
641 orr r3, r3, r4, lsl #8
643 orr r4, r4, r5, lsl #8
645 orr r5, r5, r12, lsl #8
646 mov r12, r12, lsr #24
647 orr r12, r12, lr, lsl #8
649 stmia r0!, {r3-r5, r12}
651 bge .Lmemmove_fsrcul3loop16
654 blt .Lmemmove_fsrcul3l4
656 .Lmemmove_fsrcul3loop4:
664 orr r12, r12, lr, lsr #8
666 orr r12, r12, lr, lsl #8
670 bge .Lmemmove_fsrcul3loop4
680 blt .Lmemmove_bl4 /* less than 4 bytes */
682 bne .Lmemmove_bdestul /* oh unaligned destination addr */
684 bne .Lmemmove_bsrcul /* oh unaligned source addr */
687 /* We have aligned source and destination */
689 blt .Lmemmove_bl12 /* less than 12 bytes (4 from above) */
691 subs r2, r2, #0x14 /* less than 32 bytes (12 from above) */
694 /* blat 32 bytes at a time */
695 /* XXX for really big copies perhaps we should use more registers */
697 ldmdb r1!, {r3, r4, r12, lr}
698 stmdb r0!, {r3, r4, r12, lr}
699 ldmdb r1!, {r3, r4, r12, lr}
700 stmdb r0!, {r3, r4, r12, lr}
702 bge .Lmemmove_bloop32
706 ldmgedb r1!, {r3, r4, r12, lr} /* blat a remaining 16 bytes */
707 stmgedb r0!, {r3, r4, r12, lr}
710 ldmgedb r1!, {r3, r12, lr} /* blat a remaining 12 bytes */
711 stmgedb r0!, {r3, r12, lr}
721 ldmgedb r1!, {r3, r12}
722 stmgedb r0!, {r3, r12}
726 /* less than 4 bytes to go */
730 /* copy the crud byte at a time */
734 ldrgeb r3, [r1, #-1]!
735 strgeb r3, [r0, #-1]!
736 ldrgtb r3, [r1, #-1]!
737 strgtb r3, [r0, #-1]!
740 /* erg - unaligned destination */
744 /* align destination with byte copies */
747 ldrgeb r3, [r1, #-1]!
748 strgeb r3, [r0, #-1]!
749 ldrgtb r3, [r1, #-1]!
750 strgtb r3, [r0, #-1]!
752 blt .Lmemmove_bl4 /* less than 4 bytes to go */
754 beq .Lmemmove_bt8 /* we have an aligned source */
756 /* erg - unaligned source */
757 /* This is where it gets nasty ... */
762 blt .Lmemmove_bsrcul1
763 beq .Lmemmove_bsrcul2
765 blt .Lmemmove_bsrcul3loop4
767 stmdb sp!, {r4, r5, lr}
769 .Lmemmove_bsrcul3loop16:
775 ldmdb r1!, {r3-r5, r12}
777 orr lr, lr, r12, lsl #24
779 orr r12, r12, r5, lsl #24
781 orr r5, r5, r4, lsl #24
783 orr r4, r4, r3, lsl #24
785 orr lr, lr, r12, lsr #24
787 orr r12, r12, r5, lsr #24
789 orr r5, r5, r4, lsr #24
791 orr r4, r4, r3, lsr #24
793 stmdb r0!, {r4, r5, r12, lr}
795 bge .Lmemmove_bsrcul3loop16
796 ldmia sp!, {r4, r5, lr}
798 blt .Lmemmove_bsrcul3l4
800 .Lmemmove_bsrcul3loop4:
808 orr r12, r12, r3, lsl #24
810 orr r12, r12, r3, lsr #24
814 bge .Lmemmove_bsrcul3loop4
822 blt .Lmemmove_bsrcul2loop4
824 stmdb sp!, {r4, r5, lr}
826 .Lmemmove_bsrcul2loop16:
832 ldmdb r1!, {r3-r5, r12}
834 orr lr, lr, r12, lsl #16
835 mov r12, r12, lsr #16
836 orr r12, r12, r5, lsl #16
838 orr r5, r5, r4, lsl #16
840 orr r4, r4, r3, lsl #16
842 orr lr, lr, r12, lsr #16
843 mov r12, r12, lsl #16
844 orr r12, r12, r5, lsr #16
846 orr r5, r5, r4, lsr #16
848 orr r4, r4, r3, lsr #16
850 stmdb r0!, {r4, r5, r12, lr}
852 bge .Lmemmove_bsrcul2loop16
853 ldmia sp!, {r4, r5, lr}
855 blt .Lmemmove_bsrcul2l4
857 .Lmemmove_bsrcul2loop4:
865 orr r12, r12, r3, lsl #16
867 orr r12, r12, r3, lsr #16
871 bge .Lmemmove_bsrcul2loop4
879 blt .Lmemmove_bsrcul1loop4
881 stmdb sp!, {r4, r5, lr}
883 .Lmemmove_bsrcul1loop32:
889 ldmdb r1!, {r3-r5, r12}
891 orr lr, lr, r12, lsl #8
892 mov r12, r12, lsr #24
893 orr r12, r12, r5, lsl #8
895 orr r5, r5, r4, lsl #8
897 orr r4, r4, r3, lsl #8
899 orr lr, lr, r12, lsr #8
900 mov r12, r12, lsl #24
901 orr r12, r12, r5, lsr #8
903 orr r5, r5, r4, lsr #8
905 orr r4, r4, r3, lsr #8
907 stmdb r0!, {r4, r5, r12, lr}
909 bge .Lmemmove_bsrcul1loop32
910 ldmia sp!, {r4, r5, lr}
912 blt .Lmemmove_bsrcul1l4
914 .Lmemmove_bsrcul1loop4:
922 orr r12, r12, r3, lsl #8
924 orr r12, r12, r3, lsr #8
928 bge .Lmemmove_bsrcul1loop4
936 #if !defined(_ARM_ARCH_5E)
938 /* save leaf functions having to store this away */
939 /* Do not check arm_memcpy if we're running from flash */
940 #if defined(FLASHADDR) && defined(PHYSADDR)
941 #if FLASHADDR > PHYSADDR
951 ldr r3, .L_arm_memcpy
955 ldr r3, .L_min_memcpy_size
959 stmfd sp!, {r0-r2, r4, lr}
961 ldr r4, .L_arm_memcpy
965 ldmfd sp!, {r0-r2, r4, lr}
969 stmdb sp!, {r0, lr} /* memcpy() returns dest addr */
972 blt .Lmemcpy_l4 /* less than 4 bytes */
974 bne .Lmemcpy_destul /* oh unaligned destination addr */
976 bne .Lmemcpy_srcul /* oh unaligned source addr */
979 /* We have aligned source and destination */
981 blt .Lmemcpy_l12 /* less than 12 bytes (4 from above) */
983 blt .Lmemcpy_l32 /* less than 32 bytes (12 from above) */
984 stmdb sp!, {r4} /* borrow r4 */
986 /* blat 32 bytes at a time */
987 /* XXX for really big copies perhaps we should use more registers */
989 ldmia r1!, {r3, r4, r12, lr}
990 stmia r0!, {r3, r4, r12, lr}
991 ldmia r1!, {r3, r4, r12, lr}
992 stmia r0!, {r3, r4, r12, lr}
997 ldmgeia r1!, {r3, r4, r12, lr} /* blat a remaining 16 bytes */
998 stmgeia r0!, {r3, r4, r12, lr}
1000 ldmia sp!, {r4} /* return r4 */
1005 /* blat 12 bytes at a time */
1007 ldmgeia r1!, {r3, r12, lr}
1008 stmgeia r0!, {r3, r12, lr}
1009 subges r2, r2, #0x0c
1019 ldmgeia r1!, {r3, r12}
1020 stmgeia r0!, {r3, r12}
1024 /* less than 4 bytes to go */
1027 ldmeqia sp!, {r0, pc}^ /* done */
1029 ldmeqia sp!, {r0, pc} /* done */
1031 /* copy the crud byte at a time */
1041 /* erg - unaligned destination */
1046 /* align destination with byte copies */
1054 blt .Lmemcpy_l4 /* less than 4 bytes */
1057 beq .Lmemcpy_t8 /* we have an aligned source */
1059 /* erg - unaligned source */
1060 /* This is where it gets nasty ... */
1068 blt .Lmemcpy_srcul1loop4
1072 .Lmemcpy_srcul1loop16:
1074 ldmia r1!, {r4, r5, r12, lr}
1075 orr r3, r3, r4, lsl #24
1077 orr r4, r4, r5, lsl #24
1079 orr r5, r5, r12, lsl #24
1080 mov r12, r12, lsr #8
1081 orr r12, r12, lr, lsl #24
1082 stmia r0!, {r3-r5, r12}
1084 bge .Lmemcpy_srcul1loop16
1087 blt .Lmemcpy_srcul1l4
1089 .Lmemcpy_srcul1loop4:
1092 orr r12, r12, lr, lsl #24
1095 bge .Lmemcpy_srcul1loop4
1103 blt .Lmemcpy_srcul2loop4
1107 .Lmemcpy_srcul2loop16:
1109 ldmia r1!, {r4, r5, r12, lr}
1110 orr r3, r3, r4, lsl #16
1112 orr r4, r4, r5, lsl #16
1114 orr r5, r5, r12, lsl #16
1115 mov r12, r12, lsr #16
1116 orr r12, r12, lr, lsl #16
1117 stmia r0!, {r3-r5, r12}
1119 bge .Lmemcpy_srcul2loop16
1122 blt .Lmemcpy_srcul2l4
1124 .Lmemcpy_srcul2loop4:
1125 mov r12, lr, lsr #16
1127 orr r12, r12, lr, lsl #16
1130 bge .Lmemcpy_srcul2loop4
1138 blt .Lmemcpy_srcul3loop4
1142 .Lmemcpy_srcul3loop16:
1144 ldmia r1!, {r4, r5, r12, lr}
1145 orr r3, r3, r4, lsl #8
1147 orr r4, r4, r5, lsl #8
1149 orr r5, r5, r12, lsl #8
1150 mov r12, r12, lsr #24
1151 orr r12, r12, lr, lsl #8
1152 stmia r0!, {r3-r5, r12}
1154 bge .Lmemcpy_srcul3loop16
1157 blt .Lmemcpy_srcul3l4
1159 .Lmemcpy_srcul3loop4:
1160 mov r12, lr, lsr #24
1162 orr r12, r12, lr, lsl #8
1165 bge .Lmemcpy_srcul3loop4
1173 /* LINTSTUB: Func: void *memcpy(void *dst, const void *src, size_t len) */
1177 ble .Lmemcpy_short /* <= 12 bytes */
1179 #if FLASHADDR > PHYSADDR
1189 ldr r3, .L_arm_memcpy
1193 ldr r3, .L_min_memcpy_size
1197 stmfd sp!, {r0-r2, r4, lr}
1199 ldr r4, .L_arm_memcpy
1203 ldmfd sp!, {r0-r2, r4, lr}
1206 mov r3, r0 /* We must not clobber r0 */
1208 /* Word-align the destination buffer */
1209 ands ip, r3, #0x03 /* Already word aligned? */
1210 beq .Lmemcpy_wordaligned /* Yup */
1212 ldrb ip, [r1], #0x01
1214 strb ip, [r3], #0x01
1215 ldrleb ip, [r1], #0x01
1217 strleb ip, [r3], #0x01
1218 ldrltb ip, [r1], #0x01
1220 strltb ip, [r3], #0x01
1222 /* Destination buffer is now word aligned */
1223 .Lmemcpy_wordaligned:
1224 ands ip, r1, #0x03 /* Is src also word-aligned? */
1225 bne .Lmemcpy_bad_align /* Nope. Things just got bad */
1227 /* Quad-align the destination buffer */
1228 tst r3, #0x07 /* Already quad aligned? */
1229 ldrne ip, [r1], #0x04
1230 stmfd sp!, {r4-r9} /* Free up some registers */
1232 strne ip, [r3], #0x04
1234 /* Destination buffer quad aligned, source is at least word aligned */
1236 blt .Lmemcpy_w_lessthan128
1238 /* Copy 128 bytes at a time */
1240 ldr r4, [r1], #0x04 /* LD:00-03 */
1241 ldr r5, [r1], #0x04 /* LD:04-07 */
1242 pld [r1, #0x18] /* Prefetch 0x20 */
1243 ldr r6, [r1], #0x04 /* LD:08-0b */
1244 ldr r7, [r1], #0x04 /* LD:0c-0f */
1245 ldr r8, [r1], #0x04 /* LD:10-13 */
1246 ldr r9, [r1], #0x04 /* LD:14-17 */
1247 strd r4, [r3], #0x08 /* ST:00-07 */
1248 ldr r4, [r1], #0x04 /* LD:18-1b */
1249 ldr r5, [r1], #0x04 /* LD:1c-1f */
1250 strd r6, [r3], #0x08 /* ST:08-0f */
1251 ldr r6, [r1], #0x04 /* LD:20-23 */
1252 ldr r7, [r1], #0x04 /* LD:24-27 */
1253 pld [r1, #0x18] /* Prefetch 0x40 */
1254 strd r8, [r3], #0x08 /* ST:10-17 */
1255 ldr r8, [r1], #0x04 /* LD:28-2b */
1256 ldr r9, [r1], #0x04 /* LD:2c-2f */
1257 strd r4, [r3], #0x08 /* ST:18-1f */
1258 ldr r4, [r1], #0x04 /* LD:30-33 */
1259 ldr r5, [r1], #0x04 /* LD:34-37 */
1260 strd r6, [r3], #0x08 /* ST:20-27 */
1261 ldr r6, [r1], #0x04 /* LD:38-3b */
1262 ldr r7, [r1], #0x04 /* LD:3c-3f */
1263 strd r8, [r3], #0x08 /* ST:28-2f */
1264 ldr r8, [r1], #0x04 /* LD:40-43 */
1265 ldr r9, [r1], #0x04 /* LD:44-47 */
1266 pld [r1, #0x18] /* Prefetch 0x60 */
1267 strd r4, [r3], #0x08 /* ST:30-37 */
1268 ldr r4, [r1], #0x04 /* LD:48-4b */
1269 ldr r5, [r1], #0x04 /* LD:4c-4f */
1270 strd r6, [r3], #0x08 /* ST:38-3f */
1271 ldr r6, [r1], #0x04 /* LD:50-53 */
1272 ldr r7, [r1], #0x04 /* LD:54-57 */
1273 strd r8, [r3], #0x08 /* ST:40-47 */
1274 ldr r8, [r1], #0x04 /* LD:58-5b */
1275 ldr r9, [r1], #0x04 /* LD:5c-5f */
1276 strd r4, [r3], #0x08 /* ST:48-4f */
1277 ldr r4, [r1], #0x04 /* LD:60-63 */
1278 ldr r5, [r1], #0x04 /* LD:64-67 */
1279 pld [r1, #0x18] /* Prefetch 0x80 */
1280 strd r6, [r3], #0x08 /* ST:50-57 */
1281 ldr r6, [r1], #0x04 /* LD:68-6b */
1282 ldr r7, [r1], #0x04 /* LD:6c-6f */
1283 strd r8, [r3], #0x08 /* ST:58-5f */
1284 ldr r8, [r1], #0x04 /* LD:70-73 */
1285 ldr r9, [r1], #0x04 /* LD:74-77 */
1286 strd r4, [r3], #0x08 /* ST:60-67 */
1287 ldr r4, [r1], #0x04 /* LD:78-7b */
1288 ldr r5, [r1], #0x04 /* LD:7c-7f */
1289 strd r6, [r3], #0x08 /* ST:68-6f */
1290 strd r8, [r3], #0x08 /* ST:70-77 */
1292 strd r4, [r3], #0x08 /* ST:78-7f */
1293 bge .Lmemcpy_w_loop128
1295 .Lmemcpy_w_lessthan128:
1296 adds r2, r2, #0x80 /* Adjust for extra sub */
1297 ldmeqfd sp!, {r4-r9}
1298 RETeq /* Return now if done */
1300 blt .Lmemcpy_w_lessthan32
1302 /* Copy 32 bytes at a time */
1311 strd r4, [r3], #0x08
1314 strd r6, [r3], #0x08
1315 strd r8, [r3], #0x08
1317 strd r4, [r3], #0x08
1318 bge .Lmemcpy_w_loop32
1320 .Lmemcpy_w_lessthan32:
1321 adds r2, r2, #0x20 /* Adjust for extra sub */
1322 ldmeqfd sp!, {r4-r9}
1323 RETeq /* Return now if done */
1327 addne pc, pc, r4, lsl #1
1330 /* At least 24 bytes remaining */
1334 strd r4, [r3], #0x08
1336 /* At least 16 bytes remaining */
1340 strd r4, [r3], #0x08
1342 /* At least 8 bytes remaining */
1346 strd r4, [r3], #0x08
1348 /* Less than 8 bytes remaining */
1350 RETeq /* Return now if done */
1352 ldrge ip, [r1], #0x04
1353 strge ip, [r3], #0x04
1354 RETeq /* Return now if done */
1356 ldrb ip, [r1], #0x01
1358 ldrgeb r2, [r1], #0x01
1359 strb ip, [r3], #0x01
1361 strgeb r2, [r3], #0x01
1367 * At this point, it has not been possible to word align both buffers.
1368 * The destination buffer is word aligned, but the source buffer is not.
1379 .Lmemcpy_bad1_loop16:
1391 orr r4, r4, r5, lsr #24
1393 orr r5, r5, r6, lsr #24
1395 orr r6, r6, r7, lsr #24
1397 orr r7, r7, ip, lsr #24
1399 orr r4, r4, r5, lsl #24
1401 orr r5, r5, r6, lsl #24
1403 orr r6, r6, r7, lsl #24
1405 orr r7, r7, ip, lsl #24
1413 bge .Lmemcpy_bad1_loop16
1416 ldmeqfd sp!, {r4-r7}
1417 RETeq /* Return now if done */
1420 blt .Lmemcpy_bad_done
1422 .Lmemcpy_bad1_loop4:
1431 orr r4, r4, ip, lsr #24
1433 orr r4, r4, ip, lsl #24
1436 bge .Lmemcpy_bad1_loop4
1440 .Lmemcpy_bad2_loop16:
1452 orr r4, r4, r5, lsr #16
1454 orr r5, r5, r6, lsr #16
1456 orr r6, r6, r7, lsr #16
1458 orr r7, r7, ip, lsr #16
1460 orr r4, r4, r5, lsl #16
1462 orr r5, r5, r6, lsl #16
1464 orr r6, r6, r7, lsl #16
1466 orr r7, r7, ip, lsl #16
1474 bge .Lmemcpy_bad2_loop16
1477 ldmeqfd sp!, {r4-r7}
1478 RETeq /* Return now if done */
1481 blt .Lmemcpy_bad_done
1483 .Lmemcpy_bad2_loop4:
1492 orr r4, r4, ip, lsr #16
1494 orr r4, r4, ip, lsl #16
1497 bge .Lmemcpy_bad2_loop4
1501 .Lmemcpy_bad3_loop16:
1513 orr r4, r4, r5, lsr #8
1515 orr r5, r5, r6, lsr #8
1517 orr r6, r6, r7, lsr #8
1519 orr r7, r7, ip, lsr #8
1521 orr r4, r4, r5, lsl #8
1523 orr r5, r5, r6, lsl #8
1525 orr r6, r6, r7, lsl #8
1527 orr r7, r7, ip, lsl #8
1535 bge .Lmemcpy_bad3_loop16
1538 ldmeqfd sp!, {r4-r7}
1539 RETeq /* Return now if done */
1542 blt .Lmemcpy_bad_done
1544 .Lmemcpy_bad3_loop4:
1553 orr r4, r4, ip, lsr #8
1555 orr r4, r4, ip, lsl #8
1558 bge .Lmemcpy_bad3_loop4
1565 ldrb ip, [r1], #0x01
1567 ldrgeb r2, [r1], #0x01
1568 strb ip, [r3], #0x01
1570 strgeb r2, [r3], #0x01
1576 * Handle short copies (less than 16 bytes), possibly misaligned.
1577 * Some of these are *very* common, thanks to the network stack,
1578 * and so are handled specially.
1581 add pc, pc, r2, lsl #2
1584 b .Lmemcpy_bytewise /* 0x01 */
1585 b .Lmemcpy_bytewise /* 0x02 */
1586 b .Lmemcpy_bytewise /* 0x03 */
1587 b .Lmemcpy_4 /* 0x04 */
1588 b .Lmemcpy_bytewise /* 0x05 */
1589 b .Lmemcpy_6 /* 0x06 */
1590 b .Lmemcpy_bytewise /* 0x07 */
1591 b .Lmemcpy_8 /* 0x08 */
1592 b .Lmemcpy_bytewise /* 0x09 */
1593 b .Lmemcpy_bytewise /* 0x0a */
1594 b .Lmemcpy_bytewise /* 0x0b */
1595 b .Lmemcpy_c /* 0x0c */
1597 mov r3, r0 /* We must not clobber r0 */
1598 ldrb ip, [r1], #0x01
1599 1: subs r2, r2, #0x01
1600 strb ip, [r3], #0x01
1601 ldrneb ip, [r1], #0x01
1605 /******************************************************************************
1606 * Special case for 4 byte copies
1608 #define LMEMCPY_4_LOG2 6 /* 64 bytes */
1609 #define LMEMCPY_4_PAD .align LMEMCPY_4_LOG2
1613 orr r2, r2, r0, lsl #2
1616 addne pc, r3, r2, lsl #LMEMCPY_4_LOG2
1619 * 0000: dst is 32-bit aligned, src is 32-bit aligned
1627 * 0001: dst is 32-bit aligned, src is 8-bit aligned
1629 ldr r3, [r1, #-1] /* BE:r3 = x012 LE:r3 = 210x */
1630 ldr r2, [r1, #3] /* BE:r2 = 3xxx LE:r2 = xxx3 */
1632 mov r3, r3, lsl #8 /* r3 = 012. */
1633 orr r3, r3, r2, lsr #24 /* r3 = 0123 */
1635 mov r3, r3, lsr #8 /* r3 = .210 */
1636 orr r3, r3, r2, lsl #24 /* r3 = 3210 */
1643 * 0010: dst is 32-bit aligned, src is 16-bit aligned
1647 ldrh r2, [r1, #0x02]
1649 ldrh r3, [r1, #0x02]
1652 orr r3, r2, r3, lsl #16
1658 * 0011: dst is 32-bit aligned, src is 8-bit aligned
1660 ldr r3, [r1, #-3] /* BE:r3 = xxx0 LE:r3 = 0xxx */
1661 ldr r2, [r1, #1] /* BE:r2 = 123x LE:r2 = x321 */
1663 mov r3, r3, lsl #24 /* r3 = 0... */
1664 orr r3, r3, r2, lsr #8 /* r3 = 0123 */
1666 mov r3, r3, lsr #24 /* r3 = ...0 */
1667 orr r3, r3, r2, lsl #8 /* r3 = 3210 */
1674 * 0100: dst is 8-bit aligned, src is 32-bit aligned
1678 strb r2, [r0, #0x03]
1686 strb r1, [r0, #0x03]
1688 strh r3, [r0, #0x01]
1693 * 0101: dst is 8-bit aligned, src is 8-bit aligned
1696 ldrh r3, [r1, #0x01]
1697 ldrb r1, [r1, #0x03]
1699 strh r3, [r0, #0x01]
1700 strb r1, [r0, #0x03]
1705 * 0110: dst is 8-bit aligned, src is 16-bit aligned
1707 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
1708 ldrh r3, [r1, #0x02] /* BE:r3 = ..23 LE:r3 = ..32 */
1710 mov r1, r2, lsr #8 /* r1 = ...0 */
1712 mov r2, r2, lsl #8 /* r2 = .01. */
1713 orr r2, r2, r3, lsr #8 /* r2 = .012 */
1716 mov r2, r2, lsr #8 /* r2 = ...1 */
1717 orr r2, r2, r3, lsl #8 /* r2 = .321 */
1718 mov r3, r3, lsr #8 /* r3 = ...3 */
1720 strh r2, [r0, #0x01]
1721 strb r3, [r0, #0x03]
1726 * 0111: dst is 8-bit aligned, src is 8-bit aligned
1729 ldrh r3, [r1, #0x01]
1730 ldrb r1, [r1, #0x03]
1732 strh r3, [r0, #0x01]
1733 strb r1, [r0, #0x03]
1738 * 1000: dst is 16-bit aligned, src is 32-bit aligned
1742 strh r2, [r0, #0x02]
1748 strh r3, [r0, #0x02]
1754 * 1001: dst is 16-bit aligned, src is 8-bit aligned
1756 ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
1757 ldr r3, [r1, #3] /* BE:r3 = 3xxx LE:r3 = xxx3 */
1758 mov r1, r2, lsr #8 /* BE:r1 = .x01 LE:r1 = .210 */
1761 mov r2, r2, lsl #8 /* r2 = 012. */
1762 orr r2, r2, r3, lsr #24 /* r2 = 0123 */
1764 mov r2, r2, lsr #24 /* r2 = ...2 */
1765 orr r2, r2, r3, lsl #8 /* r2 = xx32 */
1767 strh r2, [r0, #0x02]
1772 * 1010: dst is 16-bit aligned, src is 16-bit aligned
1775 ldrh r3, [r1, #0x02]
1777 strh r3, [r0, #0x02]
1782 * 1011: dst is 16-bit aligned, src is 8-bit aligned
1784 ldr r3, [r1, #1] /* BE:r3 = 123x LE:r3 = x321 */
1785 ldr r2, [r1, #-3] /* BE:r2 = xxx0 LE:r2 = 0xxx */
1786 mov r1, r3, lsr #8 /* BE:r1 = .123 LE:r1 = .x32 */
1787 strh r1, [r0, #0x02]
1789 mov r3, r3, lsr #24 /* r3 = ...1 */
1790 orr r3, r3, r2, lsl #8 /* r3 = xx01 */
1792 mov r3, r3, lsl #8 /* r3 = 321. */
1793 orr r3, r3, r2, lsr #24 /* r3 = 3210 */
1800 * 1100: dst is 8-bit aligned, src is 32-bit aligned
1802 ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
1804 strb r2, [r0, #0x03]
1807 strh r3, [r0, #0x01]
1813 strh r3, [r0, #0x01]
1814 strb r1, [r0, #0x03]
1820 * 1101: dst is 8-bit aligned, src is 8-bit aligned
1823 ldrh r3, [r1, #0x01]
1824 ldrb r1, [r1, #0x03]
1826 strh r3, [r0, #0x01]
1827 strb r1, [r0, #0x03]
1832 * 1110: dst is 8-bit aligned, src is 16-bit aligned
1835 ldrh r3, [r1, #0x02] /* BE:r3 = ..23 LE:r3 = ..32 */
1836 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
1837 strb r3, [r0, #0x03]
1838 mov r3, r3, lsr #8 /* r3 = ...2 */
1839 orr r3, r3, r2, lsl #8 /* r3 = ..12 */
1840 strh r3, [r0, #0x01]
1841 mov r2, r2, lsr #8 /* r2 = ...0 */
1844 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
1845 ldrh r3, [r1, #0x02] /* BE:r3 = ..23 LE:r3 = ..32 */
1847 mov r2, r2, lsr #8 /* r2 = ...1 */
1848 orr r2, r2, r3, lsl #8 /* r2 = .321 */
1849 strh r2, [r0, #0x01]
1850 mov r3, r3, lsr #8 /* r3 = ...3 */
1851 strb r3, [r0, #0x03]
1857 * 1111: dst is 8-bit aligned, src is 8-bit aligned
1860 ldrh r3, [r1, #0x01]
1861 ldrb r1, [r1, #0x03]
1863 strh r3, [r0, #0x01]
1864 strb r1, [r0, #0x03]
1869 /******************************************************************************
1870 * Special case for 6 byte copies
1872 #define LMEMCPY_6_LOG2 6 /* 64 bytes */
1873 #define LMEMCPY_6_PAD .align LMEMCPY_6_LOG2
1877 orr r2, r2, r0, lsl #2
1880 addne pc, r3, r2, lsl #LMEMCPY_6_LOG2
1883 * 0000: dst is 32-bit aligned, src is 32-bit aligned
1886 ldrh r3, [r1, #0x04]
1888 strh r3, [r0, #0x04]
1893 * 0001: dst is 32-bit aligned, src is 8-bit aligned
1895 ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
1896 ldr r3, [r1, #0x03] /* BE:r3 = 345x LE:r3 = x543 */
1898 mov r2, r2, lsl #8 /* r2 = 012. */
1899 orr r2, r2, r3, lsr #24 /* r2 = 0123 */
1901 mov r2, r2, lsr #8 /* r2 = .210 */
1902 orr r2, r2, r3, lsl #24 /* r2 = 3210 */
1904 mov r3, r3, lsr #8 /* BE:r3 = .345 LE:r3 = .x54 */
1906 strh r3, [r0, #0x04]
1911 * 0010: dst is 32-bit aligned, src is 16-bit aligned
1913 ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
1914 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
1916 mov r1, r3, lsr #16 /* r1 = ..23 */
1917 orr r1, r1, r2, lsl #16 /* r1 = 0123 */
1919 strh r3, [r0, #0x04]
1921 mov r1, r3, lsr #16 /* r1 = ..54 */
1922 orr r2, r2, r3, lsl #16 /* r2 = 3210 */
1924 strh r1, [r0, #0x04]
1930 * 0011: dst is 32-bit aligned, src is 8-bit aligned
1932 ldr r2, [r1, #-3] /* BE:r2 = xxx0 LE:r2 = 0xxx */
1933 ldr r3, [r1, #1] /* BE:r3 = 1234 LE:r3 = 4321 */
1934 ldr r1, [r1, #5] /* BE:r1 = 5xxx LE:r1 = xxx5 */
1936 mov r2, r2, lsl #24 /* r2 = 0... */
1937 orr r2, r2, r3, lsr #8 /* r2 = 0123 */
1938 mov r3, r3, lsl #8 /* r3 = 234. */
1939 orr r1, r3, r1, lsr #24 /* r1 = 2345 */
1941 mov r2, r2, lsr #24 /* r2 = ...0 */
1942 orr r2, r2, r3, lsl #8 /* r2 = 3210 */
1943 mov r1, r1, lsl #8 /* r1 = xx5. */
1944 orr r1, r1, r3, lsr #24 /* r1 = xx54 */
1947 strh r1, [r0, #0x04]
1952 * 0100: dst is 8-bit aligned, src is 32-bit aligned
1954 ldr r3, [r1] /* BE:r3 = 0123 LE:r3 = 3210 */
1955 ldrh r2, [r1, #0x04] /* BE:r2 = ..45 LE:r2 = ..54 */
1956 mov r1, r3, lsr #8 /* BE:r1 = .012 LE:r1 = .321 */
1957 strh r1, [r0, #0x01]
1959 mov r1, r3, lsr #24 /* r1 = ...0 */
1961 mov r3, r3, lsl #8 /* r3 = 123. */
1962 orr r3, r3, r2, lsr #8 /* r3 = 1234 */
1965 mov r3, r3, lsr #24 /* r3 = ...3 */
1966 orr r3, r3, r2, lsl #8 /* r3 = .543 */
1967 mov r2, r2, lsr #8 /* r2 = ...5 */
1969 strh r3, [r0, #0x03]
1970 strb r2, [r0, #0x05]
1975 * 0101: dst is 8-bit aligned, src is 8-bit aligned
1978 ldrh r3, [r1, #0x01]
1979 ldrh ip, [r1, #0x03]
1980 ldrb r1, [r1, #0x05]
1982 strh r3, [r0, #0x01]
1983 strh ip, [r0, #0x03]
1984 strb r1, [r0, #0x05]
1989 * 0110: dst is 8-bit aligned, src is 16-bit aligned
1991 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
1992 ldr r1, [r1, #0x02] /* BE:r1 = 2345 LE:r1 = 5432 */
1994 mov r3, r2, lsr #8 /* r3 = ...0 */
1996 strb r1, [r0, #0x05]
1997 mov r3, r1, lsr #8 /* r3 = .234 */
1998 strh r3, [r0, #0x03]
1999 mov r3, r2, lsl #8 /* r3 = .01. */
2000 orr r3, r3, r1, lsr #24 /* r3 = .012 */
2001 strh r3, [r0, #0x01]
2005 strb r3, [r0, #0x05]
2006 mov r3, r1, lsr #8 /* r3 = .543 */
2007 strh r3, [r0, #0x03]
2008 mov r3, r2, lsr #8 /* r3 = ...1 */
2009 orr r3, r3, r1, lsl #8 /* r3 = 4321 */
2010 strh r3, [r0, #0x01]
2016 * 0111: dst is 8-bit aligned, src is 8-bit aligned
2019 ldrh r3, [r1, #0x01]
2020 ldrh ip, [r1, #0x03]
2021 ldrb r1, [r1, #0x05]
2023 strh r3, [r0, #0x01]
2024 strh ip, [r0, #0x03]
2025 strb r1, [r0, #0x05]
2030 * 1000: dst is 16-bit aligned, src is 32-bit aligned
2033 ldr r2, [r1] /* r2 = 0123 */
2034 ldrh r3, [r1, #0x04] /* r3 = ..45 */
2035 mov r1, r2, lsr #16 /* r1 = ..01 */
2036 orr r3, r3, r2, lsl#16 /* r3 = 2345 */
2040 ldrh r2, [r1, #0x04] /* r2 = ..54 */
2041 ldr r3, [r1] /* r3 = 3210 */
2042 mov r2, r2, lsl #16 /* r2 = 54.. */
2043 orr r2, r2, r3, lsr #16 /* r2 = 5432 */
2051 * 1001: dst is 16-bit aligned, src is 8-bit aligned
2053 ldr r3, [r1, #-1] /* BE:r3 = x012 LE:r3 = 210x */
2054 ldr r2, [r1, #3] /* BE:r2 = 345x LE:r2 = x543 */
2055 mov r1, r3, lsr #8 /* BE:r1 = .x01 LE:r1 = .210 */
2057 mov r2, r2, lsr #8 /* r2 = .345 */
2058 orr r2, r2, r3, lsl #24 /* r2 = 2345 */
2060 mov r2, r2, lsl #8 /* r2 = 543. */
2061 orr r2, r2, r3, lsr #24 /* r2 = 5432 */
2069 * 1010: dst is 16-bit aligned, src is 16-bit aligned
2079 * 1011: dst is 16-bit aligned, src is 8-bit aligned
2081 ldrb r3, [r1] /* r3 = ...0 */
2082 ldr r2, [r1, #0x01] /* BE:r2 = 1234 LE:r2 = 4321 */
2083 ldrb r1, [r1, #0x05] /* r1 = ...5 */
2085 mov r3, r3, lsl #8 /* r3 = ..0. */
2086 orr r3, r3, r2, lsr #24 /* r3 = ..01 */
2087 orr r1, r1, r2, lsl #8 /* r1 = 2345 */
2089 orr r3, r3, r2, lsl #8 /* r3 = 3210 */
2090 mov r1, r1, lsl #24 /* r1 = 5... */
2091 orr r1, r1, r2, lsr #8 /* r1 = 5432 */
2099 * 1100: dst is 8-bit aligned, src is 32-bit aligned
2101 ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
2102 ldrh r1, [r1, #0x04] /* BE:r1 = ..45 LE:r1 = ..54 */
2104 mov r3, r2, lsr #24 /* r3 = ...0 */
2106 mov r2, r2, lsl #8 /* r2 = 123. */
2107 orr r2, r2, r1, lsr #8 /* r2 = 1234 */
2110 mov r2, r2, lsr #8 /* r2 = .321 */
2111 orr r2, r2, r1, lsl #24 /* r2 = 4321 */
2112 mov r1, r1, lsr #8 /* r1 = ...5 */
2115 strb r1, [r0, #0x05]
2120 * 1101: dst is 8-bit aligned, src is 8-bit aligned
2123 ldrh r3, [r1, #0x01]
2124 ldrh ip, [r1, #0x03]
2125 ldrb r1, [r1, #0x05]
2127 strh r3, [r0, #0x01]
2128 strh ip, [r0, #0x03]
2129 strb r1, [r0, #0x05]
2134 * 1110: dst is 8-bit aligned, src is 16-bit aligned
2136 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
2137 ldr r1, [r1, #0x02] /* BE:r1 = 2345 LE:r1 = 5432 */
2139 mov r3, r2, lsr #8 /* r3 = ...0 */
2141 mov r2, r2, lsl #24 /* r2 = 1... */
2142 orr r2, r2, r1, lsr #8 /* r2 = 1234 */
2145 mov r2, r2, lsr #8 /* r2 = ...1 */
2146 orr r2, r2, r1, lsl #8 /* r2 = 4321 */
2147 mov r1, r1, lsr #24 /* r1 = ...5 */
2150 strb r1, [r0, #0x05]
2155 * 1111: dst is 8-bit aligned, src is 8-bit aligned
2159 ldrb r1, [r1, #0x05]
2162 strb r1, [r0, #0x05]
2167 /******************************************************************************
2168 * Special case for 8 byte copies
/*
 * NOTE(review): fragmentary view — BE/LE conditional selectors, labels
 * and some statements between the numbered lines are not visible here.
 * Byte-lane comments: digits are source bytes (0 = first), MSB..LSB,
 * '.' = don't care.  Each case wxyz = (dst,src) low address bits; case
 * bodies are padded to LMEMCPY_8_LOG2 bytes for the computed goto.
 */
2170 #define LMEMCPY_8_LOG2 6 /* 64 bytes */
2171 #define LMEMCPY_8_PAD .align LMEMCPY_8_LOG2
/* Build the 4-bit case selector from the low 2 bits of dst and src. */
2175 orr r2, r2, r0, lsl #2
/* Dispatch: pc = table base + case * 64 (computed goto). */
2178 addne pc, r3, r2, lsl #LMEMCPY_8_LOG2
2181 * 0000: dst is 32-bit aligned, src is 32-bit aligned
2191 * 0001: dst is 32-bit aligned, src is 8-bit aligned
2193 ldr r3, [r1, #-1] /* BE:r3 = x012 LE:r3 = 210x */
2194 ldr r2, [r1, #0x03] /* BE:r2 = 3456 LE:r2 = 6543 */
2195 ldrb r1, [r1, #0x07] /* r1 = ...7 */
2197 mov r3, r3, lsl #8 /* r3 = 012. */
2198 orr r3, r3, r2, lsr #24 /* r3 = 0123 */
2199 orr r2, r1, r2, lsl #8 /* r2 = 4567 */
2201 mov r3, r3, lsr #8 /* r3 = .210 */
2202 orr r3, r3, r2, lsl #24 /* r3 = 3210 */
2203 mov r1, r1, lsl #24 /* r1 = 7... */
2204 orr r2, r1, r2, lsr #8 /* r2 = 7654 */
2212 * 0010: dst is 32-bit aligned, src is 16-bit aligned
2214 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
2215 ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
2216 ldrh r1, [r1, #0x06] /* BE:r1 = ..67 LE:r1 = ..76 */
2218 mov r2, r2, lsl #16 /* r2 = 01.. */
2219 orr r2, r2, r3, lsr #16 /* r2 = 0123 */
2220 orr r3, r1, r3, lsl #16 /* r3 = 4567 */
2222 orr r2, r2, r3, lsl #16 /* r2 = 3210 */
2223 mov r3, r3, lsr #16 /* r3 = ..54 */
2224 orr r3, r3, r1, lsl #16 /* r3 = 7654 */
2232 * 0011: dst is 32-bit aligned, src is 8-bit aligned
2234 ldrb r3, [r1] /* r3 = ...0 */
2235 ldr r2, [r1, #0x01] /* BE:r2 = 1234 LE:r2 = 4321 */
2236 ldr r1, [r1, #0x05] /* BE:r1 = 567x LE:r1 = x765 */
2238 mov r3, r3, lsl #24 /* r3 = 0... */
2239 orr r3, r3, r2, lsr #8 /* r3 = 0123 */
2240 mov r2, r2, lsl #24 /* r2 = 4... */
2241 orr r2, r2, r1, lsr #8 /* r2 = 4567 */
2243 orr r3, r3, r2, lsl #8 /* r3 = 3210 */
2244 mov r2, r2, lsr #24 /* r2 = ...4 */
2245 orr r2, r2, r1, lsl #8 /* r2 = 7654 */
2253 * 0100: dst is 8-bit aligned, src is 32-bit aligned
2255 ldr r3, [r1] /* BE:r3 = 0123 LE:r3 = 3210 */
2256 ldr r2, [r1, #0x04] /* BE:r2 = 4567 LE:r2 = 7654 */
2258 mov r1, r3, lsr #24 /* r1 = ...0 */
2260 mov r1, r3, lsr #8 /* r1 = .012 */
2261 strb r2, [r0, #0x07]
2262 mov r3, r3, lsl #24 /* r3 = 3... */
2263 orr r3, r3, r2, lsr #8 /* r3 = 3456 */
2266 mov r1, r2, lsr #24 /* r1 = ...7 */
2267 strb r1, [r0, #0x07]
2268 mov r1, r3, lsr #8 /* r1 = .321 */
2269 mov r3, r3, lsr #24 /* r3 = ...3 */
2270 orr r3, r3, r2, lsl #8 /* r3 = 6543 */
2272 strh r1, [r0, #0x01]
2278 * 0101: dst is 8-bit aligned, src is 8-bit aligned
/* Same misalignment on both sides: straight halfword/byte copy. */
2281 ldrh r3, [r1, #0x01]
2283 ldrb r1, [r1, #0x07]
2285 strh r3, [r0, #0x01]
2287 strb r1, [r0, #0x07]
2292 * 0110: dst is 8-bit aligned, src is 16-bit aligned
2294 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
2295 ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
2296 ldrh r1, [r1, #0x06] /* BE:r1 = ..67 LE:r1 = ..76 */
2298 mov ip, r2, lsr #8 /* ip = ...0 */
2300 mov ip, r2, lsl #8 /* ip = .01. */
2301 orr ip, ip, r3, lsr #24 /* ip = .012 */
2302 strb r1, [r0, #0x07]
2303 mov r3, r3, lsl #8 /* r3 = 345. */
2304 orr r3, r3, r1, lsr #8 /* r3 = 3456 */
2306 strb r2, [r0] /* 0 */
2307 mov ip, r1, lsr #8 /* ip = ...7 */
2308 strb ip, [r0, #0x07] /* 7 */
2309 mov ip, r2, lsr #8 /* ip = ...1 */
2310 orr ip, ip, r3, lsl #8 /* ip = 4321 */
2311 mov r3, r3, lsr #8 /* r3 = .543 */
2312 orr r3, r3, r1, lsl #24 /* r3 = 6543 */
2314 strh ip, [r0, #0x01]
2320 * 0111: dst is 8-bit aligned, src is 8-bit aligned
2322 ldrb r3, [r1] /* r3 = ...0 */
2323 ldr ip, [r1, #0x01] /* BE:ip = 1234 LE:ip = 4321 */
2324 ldrh r2, [r1, #0x05] /* BE:r2 = ..56 LE:r2 = ..65 */
2325 ldrb r1, [r1, #0x07] /* r1 = ...7 */
2327 mov r3, ip, lsr #16 /* BE:r3 = ..12 LE:r3 = ..43 */
2329 strh r3, [r0, #0x01]
2330 orr r2, r2, ip, lsl #16 /* r2 = 3456 */
2332 strh ip, [r0, #0x01]
2333 orr r2, r3, r2, lsl #16 /* r2 = 6543 */
2336 strb r1, [r0, #0x07]
2341 * 1000: dst is 16-bit aligned, src is 32-bit aligned
2343 ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
2344 ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */
2345 mov r1, r2, lsr #16 /* BE:r1 = ..01 LE:r1 = ..32 */
2348 mov r1, r3, lsr #16 /* r1 = ..45 */
2349 orr r2, r1 ,r2, lsl #16 /* r2 = 2345 */
2352 orr r2, r1, r3, lsl #16 /* r2 = 5432 */
2353 mov r3, r3, lsr #16 /* r3 = ..76 */
2356 strh r3, [r0, #0x06]
2361 * 1001: dst is 16-bit aligned, src is 8-bit aligned
2363 ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
2364 ldr r3, [r1, #0x03] /* BE:r3 = 3456 LE:r3 = 6543 */
2365 ldrb ip, [r1, #0x07] /* ip = ...7 */
2366 mov r1, r2, lsr #8 /* BE:r1 = .x01 LE:r1 = .210 */
2369 mov r1, r2, lsl #24 /* r1 = 2... */
2370 orr r1, r1, r3, lsr #8 /* r1 = 2345 */
2371 orr r3, ip, r3, lsl #8 /* r3 = 4567 */
2373 mov r1, r2, lsr #24 /* r1 = ...2 */
2374 orr r1, r1, r3, lsl #8 /* r1 = 5432 */
2375 mov r3, r3, lsr #24 /* r3 = ...6 */
2376 orr r3, r3, ip, lsl #8 /* r3 = ..76 */
2379 strh r3, [r0, #0x06]
2384 * 1010: dst is 16-bit aligned, src is 16-bit aligned
2388 ldrh r3, [r1, #0x06]
2391 strh r3, [r0, #0x06]
2396 * 1011: dst is 16-bit aligned, src is 8-bit aligned
2398 ldr r3, [r1, #0x05] /* BE:r3 = 567x LE:r3 = x765 */
2399 ldr r2, [r1, #0x01] /* BE:r2 = 1234 LE:r2 = 4321 */
2400 ldrb ip, [r1] /* ip = ...0 */
2401 mov r1, r3, lsr #8 /* BE:r1 = .567 LE:r1 = .x76 */
2402 strh r1, [r0, #0x06]
2404 mov r3, r3, lsr #24 /* r3 = ...5 */
2405 orr r3, r3, r2, lsl #8 /* r3 = 2345 */
2406 mov r2, r2, lsr #24 /* r2 = ...1 */
2407 orr r2, r2, ip, lsl #8 /* r2 = ..01 */
2409 mov r3, r3, lsl #24 /* r3 = 5... */
2410 orr r3, r3, r2, lsr #8 /* r3 = 5432 */
2411 orr r2, ip, r2, lsl #8 /* r2 = 3210 */
2419 * 1100: dst is 8-bit aligned, src is 32-bit aligned
2421 ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */
2422 ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
2423 mov r1, r3, lsr #8 /* BE:r1 = .456 LE:r1 = .765 */
2424 strh r1, [r0, #0x05]
2426 strb r3, [r0, #0x07]
2427 mov r1, r2, lsr #24 /* r1 = ...0 */
2429 mov r2, r2, lsl #8 /* r2 = 123. */
2430 orr r2, r2, r3, lsr #24 /* r2 = 1234 */
2434 mov r1, r3, lsr #24 /* r1 = ...7 */
2435 strb r1, [r0, #0x07]
2436 mov r2, r2, lsr #8 /* r2 = .321 */
2437 orr r2, r2, r3, lsl #24 /* r2 = 4321 */
2444 * 1101: dst is 8-bit aligned, src is 8-bit aligned
2446 ldrb r3, [r1] /* r3 = ...0 */
2447 ldrh r2, [r1, #0x01] /* BE:r2 = ..12 LE:r2 = ..21 */
2448 ldr ip, [r1, #0x03] /* BE:ip = 3456 LE:ip = 6543 */
2449 ldrb r1, [r1, #0x07] /* r1 = ...7 */
2451 mov r3, ip, lsr #16 /* BE:r3 = ..34 LE:r3 = ..65 */
2453 strh ip, [r0, #0x05]
2454 orr r2, r3, r2, lsl #16 /* r2 = 1234 */
2456 strh r3, [r0, #0x05]
2457 orr r2, r2, ip, lsl #16 /* r2 = 4321 */
2460 strb r1, [r0, #0x07]
2465 * 1110: dst is 8-bit aligned, src is 16-bit aligned
2467 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
2468 ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
2469 ldrh r1, [r1, #0x06] /* BE:r1 = ..67 LE:r1 = ..76 */
2471 mov ip, r2, lsr #8 /* ip = ...0 */
2473 mov ip, r2, lsl #24 /* ip = 1... */
2474 orr ip, ip, r3, lsr #8 /* ip = 1234 */
2475 strb r1, [r0, #0x07]
2476 mov r1, r1, lsr #8 /* r1 = ...6 */
2477 orr r1, r1, r3, lsl #8 /* r1 = 3456 */
2480 mov ip, r2, lsr #8 /* ip = ...1 */
2481 orr ip, ip, r3, lsl #8 /* ip = 4321 */
2482 mov r2, r1, lsr #8 /* r2 = ...7 */
2483 strb r2, [r0, #0x07]
2484 mov r1, r1, lsl #8 /* r1 = .76. */
2485 orr r1, r1, r3, lsr #24 /* r1 = .765 */
2488 strh r1, [r0, #0x05]
2493 * 1111: dst is 8-bit aligned, src is 8-bit aligned
2497 ldrh r3, [r1, #0x05]
2498 ldrb r1, [r1, #0x07]
2501 strh r3, [r0, #0x05]
2502 strb r1, [r0, #0x07]
2506 /******************************************************************************
2507 * Special case for 12 byte copies
/*
 * NOTE(review): fragmentary view — BE/LE conditional selectors, labels
 * and some statements between the numbered lines are not visible here.
 * Byte-lane comments: hex digits 0-B are source bytes (0 = first),
 * MSB..LSB, '.' = don't care.  Case bodies are padded to
 * LMEMCPY_C_LOG2 bytes (128 — larger than the 6/8-byte tables because
 * 12-byte cases need more instructions) for the computed goto.
 */
2509 #define LMEMCPY_C_LOG2 7 /* 128 bytes */
2510 #define LMEMCPY_C_PAD .align LMEMCPY_C_LOG2
/* Build the 4-bit case selector from the low 2 bits of dst and src. */
2514 orr r2, r2, r0, lsl #2
/* Dispatch: pc = table base + case * 128 (computed goto). */
2517 addne pc, r3, r2, lsl #LMEMCPY_C_LOG2
2520 * 0000: dst is 32-bit aligned, src is 32-bit aligned
2532 * 0001: dst is 32-bit aligned, src is 8-bit aligned
2534 ldrb r2, [r1, #0xb] /* r2 = ...B */
2535 ldr ip, [r1, #0x07] /* BE:ip = 789A LE:ip = A987 */
2536 ldr r3, [r1, #0x03] /* BE:r3 = 3456 LE:r3 = 6543 */
2537 ldr r1, [r1, #-1] /* BE:r1 = x012 LE:r1 = 210x */
2539 orr r2, r2, ip, lsl #8 /* r2 = 89AB */
2541 mov r2, ip, lsr #24 /* r2 = ...7 */
2542 orr r2, r2, r3, lsl #8 /* r2 = 4567 */
2543 mov r1, r1, lsl #8 /* r1 = 012. */
2544 orr r1, r1, r3, lsr #24 /* r1 = 0123 */
2546 mov r2, r2, lsl #24 /* r2 = B... */
2547 orr r2, r2, ip, lsr #8 /* r2 = BA98 */
2549 mov r2, ip, lsl #24 /* r2 = 7... */
2550 orr r2, r2, r3, lsr #8 /* r2 = 7654 */
2551 mov r1, r1, lsr #8 /* r1 = .210 */
2552 orr r1, r1, r3, lsl #24 /* r1 = 3210 */
2560 * 0010: dst is 32-bit aligned, src is 16-bit aligned
2562 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
2563 ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
2564 ldr ip, [r1, #0x06] /* BE:ip = 6789 LE:ip = 9876 */
2565 ldrh r1, [r1, #0x0a] /* BE:r1 = ..AB LE:r1 = ..BA */
2567 mov r2, r2, lsl #16 /* r2 = 01.. */
2568 orr r2, r2, r3, lsr #16 /* r2 = 0123 */
2570 mov r3, r3, lsl #16 /* r3 = 45.. */
2571 orr r3, r3, ip, lsr #16 /* r3 = 4567 */
2572 orr r1, r1, ip, lsl #16 /* r1 = 89AB */
2574 orr r2, r2, r3, lsl #16 /* r2 = 3210 */
2576 mov r3, r3, lsr #16 /* r3 = ..54 */
2577 orr r3, r3, ip, lsl #16 /* r3 = 7654 */
2578 mov r1, r1, lsl #16 /* r1 = BA.. */
2579 orr r1, r1, ip, lsr #16 /* r1 = BA98 */
2587 * 0011: dst is 32-bit aligned, src is 8-bit aligned
2589 ldrb r2, [r1] /* r2 = ...0 */
2590 ldr r3, [r1, #0x01] /* BE:r3 = 1234 LE:r3 = 4321 */
2591 ldr ip, [r1, #0x05] /* BE:ip = 5678 LE:ip = 8765 */
2592 ldr r1, [r1, #0x09] /* BE:r1 = 9ABx LE:r1 = xBA9 */
2594 mov r2, r2, lsl #24 /* r2 = 0... */
2595 orr r2, r2, r3, lsr #8 /* r2 = 0123 */
2597 mov r3, r3, lsl #24 /* r3 = 4... */
2598 orr r3, r3, ip, lsr #8 /* r3 = 4567 */
2599 mov r1, r1, lsr #8 /* r1 = .9AB */
2600 orr r1, r1, ip, lsl #24 /* r1 = 89AB */
2602 orr r2, r2, r3, lsl #8 /* r2 = 3210 */
2604 mov r3, r3, lsr #24 /* r3 = ...4 */
2605 orr r3, r3, ip, lsl #8 /* r3 = 7654 */
2606 mov r1, r1, lsl #8 /* r1 = BA9. */
2607 orr r1, r1, ip, lsr #24 /* r1 = BA98 */
2615 * 0100: dst is 8-bit aligned (byte 1), src is 32-bit aligned
2617 ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
2618 ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */
2619 ldr ip, [r1, #0x08] /* BE:ip = 89AB LE:ip = BA98 */
2620 mov r1, r2, lsr #8 /* BE:r1 = .012 LE:r1 = .321 */
2621 strh r1, [r0, #0x01]
2623 mov r1, r2, lsr #24 /* r1 = ...0 */
2625 mov r1, r2, lsl #24 /* r1 = 3... */
2626 orr r2, r1, r3, lsr #8 /* r2 = 3456 */
2627 mov r1, r3, lsl #24 /* r1 = 7... */
2628 orr r1, r1, ip, lsr #8 /* r1 = 789A */
2631 mov r1, r2, lsr #24 /* r1 = ...3 */
2632 orr r2, r1, r3, lsl #8 /* r2 = 6543 */
2633 mov r1, r3, lsr #24 /* r1 = ...7 */
2634 orr r1, r1, ip, lsl #8 /* r1 = A987 */
2635 mov ip, ip, lsr #24 /* ip = ...B */
2639 strb ip, [r0, #0x0b]
2644 * 0101: dst is 8-bit aligned (byte 1), src is 8-bit aligned (byte 1)
/* Same misalignment on both sides: straight halfword/byte copy. */
2647 ldrh r3, [r1, #0x01]
2651 ldrb r1, [r1, #0x0b]
2652 strh r3, [r0, #0x01]
2655 strb r1, [r0, #0x0b]
2660 * 0110: dst is 8-bit aligned (byte 1), src is 16-bit aligned
2662 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
2663 ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
2664 ldr ip, [r1, #0x06] /* BE:ip = 6789 LE:ip = 9876 */
2665 ldrh r1, [r1, #0x0a] /* BE:r1 = ..AB LE:r1 = ..BA */
2667 mov r2, r2, ror #8 /* r2 = 1..0 */
2669 mov r2, r2, lsr #16 /* r2 = ..1. */
2670 orr r2, r2, r3, lsr #24 /* r2 = ..12 */
2671 strh r2, [r0, #0x01]
2672 mov r2, r3, lsl #8 /* r2 = 345. */
2673 orr r3, r2, ip, lsr #24 /* r3 = 3456 */
2674 mov r2, ip, lsl #8 /* r2 = 789. */
2675 orr r2, r2, r1, lsr #8 /* r2 = 789A */
2678 mov r2, r2, lsr #8 /* r2 = ...1 */
2679 orr r2, r2, r3, lsl #8 /* r2 = 4321 */
2680 strh r2, [r0, #0x01]
2681 mov r2, r3, lsr #8 /* r2 = .543 */
2682 orr r3, r2, ip, lsl #24 /* r3 = 6543 */
2683 mov r2, ip, lsr #8 /* r2 = .987 */
2684 orr r2, r2, r1, lsl #24 /* r2 = A987 */
2685 mov r1, r1, lsr #8 /* r1 = ...B */
2689 strb r1, [r0, #0x0b]
2694 * 0111: dst is 8-bit aligned (byte 1), src is 8-bit aligned (byte 3)
2697 ldr r3, [r1, #0x01] /* BE:r3 = 1234 LE:r3 = 4321 */
2698 ldr ip, [r1, #0x05] /* BE:ip = 5678 LE:ip = 8765 */
2699 ldr r1, [r1, #0x09] /* BE:r1 = 9ABx LE:r1 = xBA9 */
2702 mov r2, r3, lsr #16 /* r2 = ..12 */
2703 strh r2, [r0, #0x01]
2704 mov r3, r3, lsl #16 /* r3 = 34.. */
2705 orr r3, r3, ip, lsr #16 /* r3 = 3456 */
2706 mov ip, ip, lsl #16 /* ip = 78.. */
2707 orr ip, ip, r1, lsr #16 /* ip = 789A */
2708 mov r1, r1, lsr #8 /* r1 = .9AB */
2710 strh r3, [r0, #0x01]
2711 mov r3, r3, lsr #16 /* r3 = ..43 */
2712 orr r3, r3, ip, lsl #16 /* r3 = 6543 */
2713 mov ip, ip, lsr #16 /* ip = ..87 */
2714 orr ip, ip, r1, lsl #16 /* ip = A987 */
2715 mov r1, r1, lsr #16 /* r1 = ..xB */
2719 strb r1, [r0, #0x0b]
2724 * 1000: dst is 16-bit aligned, src is 32-bit aligned
2726 ldr ip, [r1] /* BE:ip = 0123 LE:ip = 3210 */
2727 ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */
2728 ldr r2, [r1, #0x08] /* BE:r2 = 89AB LE:r2 = BA98 */
2729 mov r1, ip, lsr #16 /* BE:r1 = ..01 LE:r1 = ..32 */
2732 mov r1, ip, lsl #16 /* r1 = 23.. */
2733 orr r1, r1, r3, lsr #16 /* r1 = 2345 */
2734 mov r3, r3, lsl #16 /* r3 = 67.. */
2735 orr r3, r3, r2, lsr #16 /* r3 = 6789 */
2738 orr r1, r1, r3, lsl #16 /* r1 = 5432 */
2739 mov r3, r3, lsr #16 /* r3 = ..76 */
2740 orr r3, r3, r2, lsl #16 /* r3 = 9876 */
2741 mov r2, r2, lsr #16 /* r2 = ..BA */
2745 strh r2, [r0, #0x0a]
2750 * 1001: dst is 16-bit aligned, src is 8-bit aligned (byte 1)
2752 ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
2753 ldr r3, [r1, #0x03] /* BE:r3 = 3456 LE:r3 = 6543 */
2754 mov ip, r2, lsr #8 /* BE:ip = .x01 LE:ip = .210 */
2756 ldr ip, [r1, #0x07] /* BE:ip = 789A LE:ip = A987 */
2757 ldrb r1, [r1, #0x0b] /* r1 = ...B */
2759 mov r2, r2, lsl #24 /* r2 = 2... */
2760 orr r2, r2, r3, lsr #8 /* r2 = 2345 */
2761 mov r3, r3, lsl #24 /* r3 = 6... */
2762 orr r3, r3, ip, lsr #8 /* r3 = 6789 */
2763 orr r1, r1, ip, lsl #8 /* r1 = 89AB */
2765 mov r2, r2, lsr #24 /* r2 = ...2 */
2766 orr r2, r2, r3, lsl #8 /* r2 = 5432 */
2767 mov r3, r3, lsr #24 /* r3 = ...6 */
2768 orr r3, r3, ip, lsl #8 /* r3 = 9876 */
2769 mov r1, r1, lsl #8 /* r1 = ..B. */
2770 orr r1, r1, ip, lsr #24 /* r1 = ..BA */
2774 strh r1, [r0, #0x0a]
2779 * 1010: dst is 16-bit aligned, src is 16-bit aligned
2784 ldrh r1, [r1, #0x0a]
2788 strh r1, [r0, #0x0a]
2793 * 1011: dst is 16-bit aligned, src is 8-bit aligned (byte 3)
2795 ldr r2, [r1, #0x09] /* BE:r2 = 9ABx LE:r2 = xBA9 */
2796 ldr r3, [r1, #0x05] /* BE:r3 = 5678 LE:r3 = 8765 */
2797 mov ip, r2, lsr #8 /* BE:ip = .9AB LE:ip = .xBA */
2798 strh ip, [r0, #0x0a]
2799 ldr ip, [r1, #0x01] /* BE:ip = 1234 LE:ip = 4321 */
2800 ldrb r1, [r1] /* r1 = ...0 */
2802 mov r2, r2, lsr #24 /* r2 = ...9 */
2803 orr r2, r2, r3, lsl #8 /* r2 = 6789 */
2804 mov r3, r3, lsr #24 /* r3 = ...5 */
2805 orr r3, r3, ip, lsl #8 /* r3 = 2345 */
2806 mov r1, r1, lsl #8 /* r1 = ..0. */
2807 orr r1, r1, ip, lsr #24 /* r1 = ..01 */
2809 mov r2, r2, lsl #24 /* r2 = 9... */
2810 orr r2, r2, r3, lsr #8 /* r2 = 9876 */
2811 mov r3, r3, lsl #24 /* r3 = 5... */
2812 orr r3, r3, ip, lsr #8 /* r3 = 5432 */
2813 orr r1, r1, ip, lsl #8 /* r1 = 3210 */
2822 * 1100: dst is 8-bit aligned (byte 3), src is 32-bit aligned
2824 ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
2825 ldr ip, [r1, #0x04] /* BE:ip = 4567 LE:ip = 7654 */
2826 ldr r1, [r1, #0x08] /* BE:r1 = 89AB LE:r1 = BA98 */
2828 mov r3, r2, lsr #24 /* r3 = ...0 */
2830 mov r2, r2, lsl #8 /* r2 = 123. */
2831 orr r2, r2, ip, lsr #24 /* r2 = 1234 */
2833 mov r2, ip, lsl #8 /* r2 = 567. */
2834 orr r2, r2, r1, lsr #24 /* r2 = 5678 */
2836 mov r2, r1, lsr #8 /* r2 = ..9A */
2837 strh r2, [r0, #0x09]
2838 strb r1, [r0, #0x0b]
2841 mov r3, r2, lsr #8 /* r3 = .321 */
2842 orr r3, r3, ip, lsl #24 /* r3 = 4321 */
2844 mov r3, ip, lsr #8 /* r3 = .765 */
2845 orr r3, r3, r1, lsl #24 /* r3 = 8765 */
2847 mov r1, r1, lsr #8 /* r1 = .BA9 */
2848 strh r1, [r0, #0x09]
2849 mov r1, r1, lsr #16 /* r1 = ...B */
2850 strb r1, [r0, #0x0b]
2856 * 1101: dst is 8-bit aligned (byte 3), src is 8-bit aligned (byte 1)
2858 ldrb r2, [r1, #0x0b] /* r2 = ...B */
2859 ldr r3, [r1, #0x07] /* BE:r3 = 789A LE:r3 = A987 */
2860 ldr ip, [r1, #0x03] /* BE:ip = 3456 LE:ip = 6543 */
2861 ldr r1, [r1, #-1] /* BE:r1 = x012 LE:r1 = 210x */
2862 strb r2, [r0, #0x0b]
2864 strh r3, [r0, #0x09]
2865 mov r3, r3, lsr #16 /* r3 = ..78 */
2866 orr r3, r3, ip, lsl #16 /* r3 = 5678 */
2867 mov ip, ip, lsr #16 /* ip = ..34 */
2868 orr ip, ip, r1, lsl #16 /* ip = 1234 */
2869 mov r1, r1, lsr #16 /* r1 = ..x0 */
2871 mov r2, r3, lsr #16 /* r2 = ..A9 */
2872 strh r2, [r0, #0x09]
2873 mov r3, r3, lsl #16 /* r3 = 87.. */
2874 orr r3, r3, ip, lsr #16 /* r3 = 8765 */
2875 mov ip, ip, lsl #16 /* ip = 43.. */
2876 orr ip, ip, r1, lsr #16 /* ip = 4321 */
2877 mov r1, r1, lsr #8 /* r1 = .210 */
2886 * 1110: dst is 8-bit aligned (byte 3), src is 16-bit aligned
2889 ldrh r2, [r1, #0x0a] /* r2 = ..AB */
2890 ldr ip, [r1, #0x06] /* ip = 6789 */
2891 ldr r3, [r1, #0x02] /* r3 = 2345 */
2892 ldrh r1, [r1] /* r1 = ..01 */
2893 strb r2, [r0, #0x0b]
2894 mov r2, r2, lsr #8 /* r2 = ...A */
2895 orr r2, r2, ip, lsl #8 /* r2 = 789A */
2896 mov ip, ip, lsr #8 /* ip = .678 */
2897 orr ip, ip, r3, lsl #24 /* ip = 5678 */
2898 mov r3, r3, lsr #8 /* r3 = .234 */
2899 orr r3, r3, r1, lsl #24 /* r3 = 1234 */
2900 mov r1, r1, lsr #8 /* r1 = ...0 */
2904 strh r2, [r0, #0x09]
2906 ldrh r2, [r1] /* r2 = ..10 */
2907 ldr r3, [r1, #0x02] /* r3 = 5432 */
2908 ldr ip, [r1, #0x06] /* ip = 9876 */
2909 ldrh r1, [r1, #0x0a] /* r1 = ..BA */
2911 mov r2, r2, lsr #8 /* r2 = ...1 */
2912 orr r2, r2, r3, lsl #8 /* r2 = 4321 */
2913 mov r3, r3, lsr #24 /* r3 = ...5 */
2914 orr r3, r3, ip, lsl #8 /* r3 = 8765 */
2915 mov ip, ip, lsr #24 /* ip = ...9 */
2916 orr ip, ip, r1, lsl #8 /* ip = .BA9 */
2917 mov r1, r1, lsr #8 /* r1 = ...B */
2920 strh ip, [r0, #0x09]
2921 strb r1, [r0, #0x0b]
2927 * 1111: dst is 8-bit aligned (byte 3), src is 8-bit aligned (byte 3)
2933 ldrh r2, [r1, #0x09]
2934 ldrb r1, [r1, #0x0b]
2937 strh r2, [r0, #0x09]
2938 strb r1, [r0, #0x0b]
2941 #endif /* _ARM_ARCH_5E */