 * Copyright (c) 2004 Olivier Houchard
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 * Copyright (c) 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 * This code is derived from software contributed to The NetBSD Foundation
 * by Neil A. Carson and Mark Brinicombe
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the NetBSD
 *      Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
#include <machine/asm.h>
#include <machine/asmacros.h>
__FBSDID("$FreeBSD$");
 * memset: Sets a block of memory to the specified value
 * r2 - number of bytes to write
/* LINTSTUB: Func: void bzero(void *, size_t) */
/* LINTSTUB: Func: void *memset(void *, int, size_t) */
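/*
 * Illustrative C sketch of the strategy used below (not part of the
 * build): replicate the fill byte across a word, store a few bytes to
 * word-align the destination, blat words (and larger runs) from the
 * aligned loops, then mop up a 1-3 byte tail.
 *
 *	void *
 *	memset(void *dst, int c, size_t len)
 *	{
 *		unsigned char *p = dst;
 *		unsigned int v = (unsigned char)c;
 *
 *		v |= v << 8;				// byte -> halfword
 *		v |= v << 16;				// halfword -> word
 *		for (; ((unsigned long)p & 3) != 0 && len != 0; len--)
 *			*p++ = (unsigned char)c;	// align destination
 *		for (; len >= 4; len -= 4, p += 4)
 *			*(unsigned int *)p = v;		// bulk word stores
 *		while (len-- != 0)
 *			*p++ = (unsigned char)c;	// 1-3 byte tail
 *		return (dst);
 *	}
 */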
	and	r3, r1, #0xff		/* We deal with bytes */
	cmp	r1, #0x04		/* Do we have less than 4 bytes */
	blt	.Lmemset_lessthanfour
	/* Ok first we will word align the address */
	ands	r2, ip, #0x03		/* Get the bottom two bits */
	bne	.Lmemset_wordunaligned	/* The address is not word aligned */
	/* We are now word aligned */
.Lmemset_wordaligned:
	orr	r3, r3, r3, lsl #8	/* Extend value to 16 bits */
	tst	ip, #0x04		/* Quad-align for XScale */
	orr	r3, r3, r3, lsl #16	/* Extend value to 32 bits */
	subne	r1, r1, #0x04		/* Quad-align if necessary */
	strne	r3, [ip], #0x04
	blt	.Lmemset_loop4		/* If less than 16 then use words */
	mov	r2, r3			/* Duplicate data */
	cmp	r1, #0x80		/* If < 128 then skip the big loop */
	/* Do 128 bytes at a time */
	strged	r2, [ip], #0x08
	strged	r2, [ip], #0x08
	strged	r2, [ip], #0x08
	strged	r2, [ip], #0x08
	strged	r2, [ip], #0x08
	strged	r2, [ip], #0x08
	strged	r2, [ip], #0x08
	strged	r2, [ip], #0x08
	strged	r2, [ip], #0x08
	strged	r2, [ip], #0x08
	strged	r2, [ip], #0x08
	strged	r2, [ip], #0x08
	strged	r2, [ip], #0x08
	strged	r2, [ip], #0x08
	strged	r2, [ip], #0x08
	strged	r2, [ip], #0x08
	RETeq				/* Zero length so just exit */
	add	r1, r1, #0x80		/* Adjust for extra sub */
	/* Do 32 bytes at a time */
	strged	r2, [ip], #0x08
	strged	r2, [ip], #0x08
	strged	r2, [ip], #0x08
	strged	r2, [ip], #0x08
	RETeq				/* Zero length so just exit */
	adds	r1, r1, #0x10		/* Partially adjust for extra sub */
	/* Deal with 16 bytes or more */
	strged	r2, [ip], #0x08
	strged	r2, [ip], #0x08
	RETeq				/* Zero length so just exit */
	addlt	r1, r1, #0x10		/* Possibly adjust for extra sub */
	/* We have at least 4 bytes so copy as words */
	strge	r3, [ip], #0x04
	RETeq				/* Zero length so just exit */
	/* Compensate for 64-bit alignment check */
	strb	r3, [ip], #0x01		/* Set 1 byte */
	strgeb	r3, [ip], #0x01		/* Set another byte */
	strgtb	r3, [ip]		/* and a third */
.Lmemset_wordunaligned:
	strb	r3, [ip], #0x01		/* Set 1 byte */
	strgeb	r3, [ip], #0x01		/* Set another byte */
	strgtb	r3, [ip], #0x01		/* and a third */
	cmp	r1, #0x04		/* 4 or more bytes left? */
	bge	.Lmemset_wordaligned	/* Yup */
.Lmemset_lessthanfour:
	RETeq				/* Zero length so exit */
	strb	r3, [ip], #0x01		/* Set 1 byte */
	strgeb	r3, [ip], #0x01		/* Set another byte */
	strgtb	r3, [ip]		/* and a third */
	/* Are both addresses aligned the same way? */
	RETeq				/* len == 0, or same addresses! */
	bne	.Lmemcmp_bytewise2	/* Badly aligned. Do it the slow way */
	/* Word-align the addresses, if necessary */
	add	r3, r3, r3, lsl #1
	addne	pc, pc, r3, lsl #3
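	/*
	 * Illustrative note: the two instructions above form a computed
	 * goto. r3 is first tripled (r3 + (r3 << 1)) and the addne then
	 * scales it by a further 8, so the jump advances 24 bytes (six
	 * 4-byte instructions) per alignment case into the fix-up code
	 * below.
	 */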
	/* Compare up to 3 bytes */
	/* Compare up to 2 bytes */
	/* Compare 4 bytes at a time, if possible */
	bcc	.Lmemcmp_bytewise
.Lmemcmp_word_aligned:
	beq	.Lmemcmp_word_aligned
	/* Correct for extra subtraction, and check if done */
	cmpeq	r0, #0x00		/* If done, did all bytes match? */
	RETeq				/* Yup. Just return */
	/* Re-do the final word byte-wise */
	beq	.Lmemcmp_bytewise2
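/*
 * Illustrative C equivalent of the hand-scheduled 6-byte compare
 * below (not part of the build); the assembly interleaves the loads
 * of one byte pair with the subtract of the previous pair so that no
 * load result is consumed by the very next instruction:
 *
 *	int
 *	memcmp6(const unsigned char *b1, const unsigned char *b2)
 *	{
 *		int i, d;
 *
 *		for (i = 0; i < 6; i++)
 *			if ((d = b1[i] - b2[i]) != 0)
 *				return (d);
 *		return (0);
 *	}
 */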
 * 6 byte compares are very common, thanks to the network stack.
 * This code is hand-scheduled to reduce the number of stalls for
 * load results. Everything else being equal, this will be ~32%
 * faster than a byte-wise memcmp.
	ldrb	r3, [r1, #0x00]		/* r3 = b2#0 */
	ldrb	r0, [ip, #0x00]		/* r0 = b1#0 */
	ldrb	r2, [r1, #0x01]		/* r2 = b2#1 */
	subs	r0, r0, r3		/* r0 = b1#0 - b2#0 */
	ldreqb	r3, [ip, #0x01]		/* r3 = b1#1 */
	RETne				/* Return if mismatch on #0 */
	subs	r0, r3, r2		/* r0 = b1#1 - b2#1 */
	ldreqb	r3, [r1, #0x02]		/* r3 = b2#2 */
	ldreqb	r0, [ip, #0x02]		/* r0 = b1#2 */
	RETne				/* Return if mismatch on #1 */
	ldrb	r2, [r1, #0x03]		/* r2 = b2#3 */
	subs	r0, r0, r3		/* r0 = b1#2 - b2#2 */
	ldreqb	r3, [ip, #0x03]		/* r3 = b1#3 */
	RETne				/* Return if mismatch on #2 */
	subs	r0, r3, r2		/* r0 = b1#3 - b2#3 */
	ldreqb	r3, [r1, #0x04]		/* r3 = b2#4 */
	ldreqb	r0, [ip, #0x04]		/* r0 = b1#4 */
	RETne				/* Return if mismatch on #3 */
	ldrb	r2, [r1, #0x05]		/* r2 = b2#5 */
	subs	r0, r0, r3		/* r0 = b1#4 - b2#4 */
	ldreqb	r3, [ip, #0x05]		/* r3 = b1#5 */
	RETne				/* Return if mismatch on #4 */
	sub	r0, r3, r2		/* r0 = b1#5 - b2#5 */
	/* switch the source and destination registers */
	/* Do the buffers overlap? */
	RETeq				/* Bail now if src/dst are the same */
	subcc	r3, r0, r1		/* if (dst > src) r3 = dst - src */
	subcs	r3, r1, r0		/* if (src > dst) r3 = src - dst */
	cmp	r3, r2			/* if (r3 < len) we have an overlap */
	bcc	PIC_SYM(_C_LABEL(memcpy), PLT)
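	/*
	 * Illustrative C for the overall shape of memmove (not part of
	 * the build): the distance test above lets disjoint buffers be
	 * handed straight to memcpy; otherwise the copy direction is
	 * chosen so overlapping bytes are read before being overwritten:
	 *
	 *	void *
	 *	memmove(void *dst0, const void *src0, size_t len)
	 *	{
	 *		unsigned char *dst = dst0;
	 *		const unsigned char *src = src0;
	 *
	 *		if (dst < src)
	 *			while (len--)		// forward copy
	 *				*dst++ = *src++;
	 *		else if (dst > src)
	 *			while (len--)		// backward copy
	 *				dst[len] = src[len];
	 *		return (dst0);
	 *	}
	 */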
	/* Determine copy direction */
	bcc	.Lmemmove_backwards
	moveq	r0, #0			/* Quick abort for len=0 */
	stmdb	sp!, {r0, lr}		/* memmove() returns dest addr */
	blt	.Lmemmove_fl4		/* less than 4 bytes */
	bne	.Lmemmove_fdestul	/* oh unaligned destination addr */
	bne	.Lmemmove_fsrcul	/* oh unaligned source addr */
	/* We have aligned source and destination */
	blt	.Lmemmove_fl12		/* less than 12 bytes (4 from above) */
	blt	.Lmemmove_fl32		/* less than 32 bytes (12 from above) */
	stmdb	sp!, {r4}		/* borrow r4 */
	/* blat 32 bytes at a time */
	/* XXX for really big copies perhaps we should use more registers */
	ldmia	r1!, {r3, r4, r12, lr}
	stmia	r0!, {r3, r4, r12, lr}
	ldmia	r1!, {r3, r4, r12, lr}
	stmia	r0!, {r3, r4, r12, lr}
	bge	.Lmemmove_floop32
	ldmgeia	r1!, {r3, r4, r12, lr}	/* blat a remaining 16 bytes */
	stmgeia	r0!, {r3, r4, r12, lr}
	ldmia	sp!, {r4}		/* return r4 */
	/* blat 12 bytes at a time */
	ldmgeia	r1!, {r3, r12, lr}
	stmgeia	r0!, {r3, r12, lr}
	bge	.Lmemmove_floop12
	ldmgeia	r1!, {r3, r12}
	stmgeia	r0!, {r3, r12}
	/* less than 4 bytes to go */
	ldmeqia	sp!, {r0, pc}		/* done */
	/* copy the crud byte at a time */
	/* erg - unaligned destination */
	/* align destination with byte copies */
	blt	.Lmemmove_fl4		/* less than 4 bytes */
	beq	.Lmemmove_ft8		/* we have an aligned source */
	/* erg - unaligned source */
	/* This is where it gets nasty ... */
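	/*
	 * A sketch of what the fix-up paths below do, assuming
	 * little-endian byte order (the big-endian variants reverse the
	 * shifts): with the source k bytes past a word boundary, each
	 * aligned destination word is built from two adjacent aligned
	 * source words:
	 *
	 *	dst_word = (cur >> (8 * k)) | (next << (32 - 8 * k));
	 *
	 * .Lmemmove_fsrcul1/2/3 below hard-code k = 1, 2 and 3 so the
	 * shift amounts become immediates.
	 */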
	bgt	.Lmemmove_fsrcul3
	beq	.Lmemmove_fsrcul2
	blt	.Lmemmove_fsrcul1loop4
.Lmemmove_fsrcul1loop16:
	ldmia	r1!, {r4, r5, r12, lr}
	orr	r3, r3, r4, lsr #24
	orr	r4, r4, r5, lsr #24
	orr	r5, r5, r12, lsr #24
	orr	r12, r12, lr, lsr #24
	orr	r3, r3, r4, lsl #24
	orr	r4, r4, r5, lsl #24
	orr	r5, r5, r12, lsl #24
	orr	r12, r12, lr, lsl #24
	stmia	r0!, {r3-r5, r12}
	bge	.Lmemmove_fsrcul1loop16
	blt	.Lmemmove_fsrcul1l4
.Lmemmove_fsrcul1loop4:
	orr	r12, r12, lr, lsr #24
	orr	r12, r12, lr, lsl #24
	bge	.Lmemmove_fsrcul1loop4
	blt	.Lmemmove_fsrcul2loop4
.Lmemmove_fsrcul2loop16:
	ldmia	r1!, {r4, r5, r12, lr}
	orr	r3, r3, r4, lsr #16
	orr	r4, r4, r5, lsr #16
	orr	r5, r5, r12, lsr #16
	mov	r12, r12, lsl #16
	orr	r12, r12, lr, lsr #16
	orr	r3, r3, r4, lsl #16
	orr	r4, r4, r5, lsl #16
	orr	r5, r5, r12, lsl #16
	mov	r12, r12, lsr #16
	orr	r12, r12, lr, lsl #16
	stmia	r0!, {r3-r5, r12}
	bge	.Lmemmove_fsrcul2loop16
	blt	.Lmemmove_fsrcul2l4
.Lmemmove_fsrcul2loop4:
	orr	r12, r12, lr, lsr #16
	orr	r12, r12, lr, lsl #16
	bge	.Lmemmove_fsrcul2loop4
	blt	.Lmemmove_fsrcul3loop4
.Lmemmove_fsrcul3loop16:
	ldmia	r1!, {r4, r5, r12, lr}
	orr	r3, r3, r4, lsr #8
	orr	r4, r4, r5, lsr #8
	orr	r5, r5, r12, lsr #8
	mov	r12, r12, lsl #24
	orr	r12, r12, lr, lsr #8
	orr	r3, r3, r4, lsl #8
	orr	r4, r4, r5, lsl #8
	orr	r5, r5, r12, lsl #8
	mov	r12, r12, lsr #24
	orr	r12, r12, lr, lsl #8
	stmia	r0!, {r3-r5, r12}
	bge	.Lmemmove_fsrcul3loop16
	blt	.Lmemmove_fsrcul3l4
.Lmemmove_fsrcul3loop4:
	orr	r12, r12, lr, lsr #8
	orr	r12, r12, lr, lsl #8
	bge	.Lmemmove_fsrcul3loop4
	blt	.Lmemmove_bl4		/* less than 4 bytes */
	bne	.Lmemmove_bdestul	/* oh unaligned destination addr */
	bne	.Lmemmove_bsrcul	/* oh unaligned source addr */
	/* We have aligned source and destination */
	blt	.Lmemmove_bl12		/* less than 12 bytes (4 from above) */
	subs	r2, r2, #0x14		/* less than 32 bytes (12 from above) */
	/* blat 32 bytes at a time */
	/* XXX for really big copies perhaps we should use more registers */
	ldmdb	r1!, {r3, r4, r12, lr}
	stmdb	r0!, {r3, r4, r12, lr}
	ldmdb	r1!, {r3, r4, r12, lr}
	stmdb	r0!, {r3, r4, r12, lr}
	bge	.Lmemmove_bloop32
	ldmgedb	r1!, {r3, r4, r12, lr}	/* blat a remaining 16 bytes */
	stmgedb	r0!, {r3, r4, r12, lr}
	ldmgedb	r1!, {r3, r12, lr}	/* blat a remaining 12 bytes */
	stmgedb	r0!, {r3, r12, lr}
	ldmgedb	r1!, {r3, r12}
	stmgedb	r0!, {r3, r12}
	/* less than 4 bytes to go */
	/* copy the crud byte at a time */
	ldrgeb	r3, [r1, #-1]!
	strgeb	r3, [r0, #-1]!
	ldrgtb	r3, [r1, #-1]!
	strgtb	r3, [r0, #-1]!
	/* erg - unaligned destination */
	/* align destination with byte copies */
	ldrgeb	r3, [r1, #-1]!
	strgeb	r3, [r0, #-1]!
	ldrgtb	r3, [r1, #-1]!
	strgtb	r3, [r0, #-1]!
	blt	.Lmemmove_bl4		/* less than 4 bytes to go */
	beq	.Lmemmove_bt8		/* we have an aligned source */
	/* erg - unaligned source */
	/* This is where it gets nasty ... */
	blt	.Lmemmove_bsrcul1
	beq	.Lmemmove_bsrcul2
	blt	.Lmemmove_bsrcul3loop4
	stmdb	sp!, {r4, r5, lr}
.Lmemmove_bsrcul3loop16:
	ldmdb	r1!, {r3-r5, r12}
	orr	lr, lr, r12, lsl #24
	orr	r12, r12, r5, lsl #24
	orr	r5, r5, r4, lsl #24
	orr	r4, r4, r3, lsl #24
	orr	lr, lr, r12, lsr #24
	orr	r12, r12, r5, lsr #24
	orr	r5, r5, r4, lsr #24
	orr	r4, r4, r3, lsr #24
	stmdb	r0!, {r4, r5, r12, lr}
	bge	.Lmemmove_bsrcul3loop16
	ldmia	sp!, {r4, r5, lr}
	blt	.Lmemmove_bsrcul3l4
.Lmemmove_bsrcul3loop4:
	orr	r12, r12, r3, lsl #24
	orr	r12, r12, r3, lsr #24
	bge	.Lmemmove_bsrcul3loop4
	blt	.Lmemmove_bsrcul2loop4
	stmdb	sp!, {r4, r5, lr}
.Lmemmove_bsrcul2loop16:
	ldmdb	r1!, {r3-r5, r12}
	orr	lr, lr, r12, lsl #16
	mov	r12, r12, lsr #16
	orr	r12, r12, r5, lsl #16
	orr	r5, r5, r4, lsl #16
	orr	r4, r4, r3, lsl #16
	orr	lr, lr, r12, lsr #16
	mov	r12, r12, lsl #16
	orr	r12, r12, r5, lsr #16
	orr	r5, r5, r4, lsr #16
	orr	r4, r4, r3, lsr #16
	stmdb	r0!, {r4, r5, r12, lr}
	bge	.Lmemmove_bsrcul2loop16
	ldmia	sp!, {r4, r5, lr}
	blt	.Lmemmove_bsrcul2l4
.Lmemmove_bsrcul2loop4:
	orr	r12, r12, r3, lsl #16
	orr	r12, r12, r3, lsr #16
	bge	.Lmemmove_bsrcul2loop4
	blt	.Lmemmove_bsrcul1loop4
	stmdb	sp!, {r4, r5, lr}
.Lmemmove_bsrcul1loop32:
	ldmdb	r1!, {r3-r5, r12}
	orr	lr, lr, r12, lsl #8
	mov	r12, r12, lsr #24
	orr	r12, r12, r5, lsl #8
	orr	r5, r5, r4, lsl #8
	orr	r4, r4, r3, lsl #8
	orr	lr, lr, r12, lsr #8
	mov	r12, r12, lsl #24
	orr	r12, r12, r5, lsr #8
	orr	r5, r5, r4, lsr #8
	orr	r4, r4, r3, lsr #8
	stmdb	r0!, {r4, r5, r12, lr}
	bge	.Lmemmove_bsrcul1loop32
	ldmia	sp!, {r4, r5, lr}
	blt	.Lmemmove_bsrcul1l4
.Lmemmove_bsrcul1loop4:
	orr	r12, r12, r3, lsl #8
	orr	r12, r12, r3, lsr #8
	bge	.Lmemmove_bsrcul1loop4
#if !defined(__XSCALE__)
	/* save leaf functions having to store this away */
	stmdb	sp!, {r0, lr}		/* memcpy() returns dest addr */
	blt	.Lmemcpy_l4		/* less than 4 bytes */
	bne	.Lmemcpy_destul		/* oh unaligned destination addr */
	bne	.Lmemcpy_srcul		/* oh unaligned source addr */
	/* We have aligned source and destination */
	blt	.Lmemcpy_l12		/* less than 12 bytes (4 from above) */
	blt	.Lmemcpy_l32		/* less than 32 bytes (12 from above) */
	stmdb	sp!, {r4}		/* borrow r4 */
	/* blat 32 bytes at a time */
	/* XXX for really big copies perhaps we should use more registers */
	ldmia	r1!, {r3, r4, r12, lr}
	stmia	r0!, {r3, r4, r12, lr}
	ldmia	r1!, {r3, r4, r12, lr}
	stmia	r0!, {r3, r4, r12, lr}
	ldmgeia	r1!, {r3, r4, r12, lr}	/* blat a remaining 16 bytes */
	stmgeia	r0!, {r3, r4, r12, lr}
	ldmia	sp!, {r4}		/* return r4 */
	/* blat 12 bytes at a time */
	ldmgeia	r1!, {r3, r12, lr}
	stmgeia	r0!, {r3, r12, lr}
	ldmgeia	r1!, {r3, r12}
	stmgeia	r0!, {r3, r12}
	/* less than 4 bytes to go */
	ldmeqia	sp!, {r0, pc}^		/* done */
	ldmeqia	sp!, {r0, pc}		/* done */
	/* copy the crud byte at a time */
	/* erg - unaligned destination */
	/* align destination with byte copies */
	blt	.Lmemcpy_l4		/* less than 4 bytes */
	beq	.Lmemcpy_t8		/* we have an aligned source */
	/* erg - unaligned source */
	/* This is where it gets nasty ... */
	blt	.Lmemcpy_srcul1loop4
.Lmemcpy_srcul1loop16:
	ldmia	r1!, {r4, r5, r12, lr}
	orr	r3, r3, r4, lsl #24
	orr	r4, r4, r5, lsl #24
	orr	r5, r5, r12, lsl #24
	mov	r12, r12, lsr #8
	orr	r12, r12, lr, lsl #24
	stmia	r0!, {r3-r5, r12}
	bge	.Lmemcpy_srcul1loop16
	blt	.Lmemcpy_srcul1l4
.Lmemcpy_srcul1loop4:
	orr	r12, r12, lr, lsl #24
	bge	.Lmemcpy_srcul1loop4
	blt	.Lmemcpy_srcul2loop4
.Lmemcpy_srcul2loop16:
	ldmia	r1!, {r4, r5, r12, lr}
	orr	r3, r3, r4, lsl #16
	orr	r4, r4, r5, lsl #16
	orr	r5, r5, r12, lsl #16
	mov	r12, r12, lsr #16
	orr	r12, r12, lr, lsl #16
	stmia	r0!, {r3-r5, r12}
	bge	.Lmemcpy_srcul2loop16
	blt	.Lmemcpy_srcul2l4
.Lmemcpy_srcul2loop4:
	mov	r12, lr, lsr #16
	orr	r12, r12, lr, lsl #16
	bge	.Lmemcpy_srcul2loop4
	blt	.Lmemcpy_srcul3loop4
.Lmemcpy_srcul3loop16:
	ldmia	r1!, {r4, r5, r12, lr}
	orr	r3, r3, r4, lsl #8
	orr	r4, r4, r5, lsl #8
	orr	r5, r5, r12, lsl #8
	mov	r12, r12, lsr #24
	orr	r12, r12, lr, lsl #8
	stmia	r0!, {r3-r5, r12}
	bge	.Lmemcpy_srcul3loop16
	blt	.Lmemcpy_srcul3l4
.Lmemcpy_srcul3loop4:
	mov	r12, lr, lsr #24
	orr	r12, r12, lr, lsl #8
	bge	.Lmemcpy_srcul3loop4
/* LINTSTUB: Func: void *memcpy(void *dst, const void *src, size_t len) */
	ble	.Lmemcpy_short		/* <= 12 bytes */
	mov	r3, r0			/* We must not clobber r0 */
	/* Word-align the destination buffer */
	ands	ip, r3, #0x03		/* Already word aligned? */
	beq	.Lmemcpy_wordaligned	/* Yup */
	ldrb	ip, [r1], #0x01
	strb	ip, [r3], #0x01
	ldrleb	ip, [r1], #0x01
	strleb	ip, [r3], #0x01
	ldrltb	ip, [r1], #0x01
	strltb	ip, [r3], #0x01
	/* Destination buffer is now word aligned */
.Lmemcpy_wordaligned:
	ands	ip, r1, #0x03		/* Is src also word-aligned? */
	bne	.Lmemcpy_bad_align	/* Nope. Things just got bad */
	/* Quad-align the destination buffer */
	tst	r3, #0x07		/* Already quad aligned? */
	ldrne	ip, [r1], #0x04
	stmfd	sp!, {r4-r9}		/* Free up some registers */
	strne	ip, [r3], #0x04
	/* Destination buffer quad aligned, source is at least word aligned */
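	/*
	 * Note (illustrative): ldrd/strd on ARMv5TE parts such as
	 * XScale need a 64-bit aligned address, hence the quad-alignment
	 * of the destination above. The source is only ever read with
	 * plain ldr here, so word alignment suffices on that side.
	 */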
	blt	.Lmemcpy_w_lessthan128
	/* Copy 128 bytes at a time */
	ldr	r4, [r1], #0x04		/* LD:00-03 */
	ldr	r5, [r1], #0x04		/* LD:04-07 */
	pld	[r1, #0x18]		/* Prefetch 0x20 */
	ldr	r6, [r1], #0x04		/* LD:08-0b */
	ldr	r7, [r1], #0x04		/* LD:0c-0f */
	ldr	r8, [r1], #0x04		/* LD:10-13 */
	ldr	r9, [r1], #0x04		/* LD:14-17 */
	strd	r4, [r3], #0x08		/* ST:00-07 */
	ldr	r4, [r1], #0x04		/* LD:18-1b */
	ldr	r5, [r1], #0x04		/* LD:1c-1f */
	strd	r6, [r3], #0x08		/* ST:08-0f */
	ldr	r6, [r1], #0x04		/* LD:20-23 */
	ldr	r7, [r1], #0x04		/* LD:24-27 */
	pld	[r1, #0x18]		/* Prefetch 0x40 */
	strd	r8, [r3], #0x08		/* ST:10-17 */
	ldr	r8, [r1], #0x04		/* LD:28-2b */
	ldr	r9, [r1], #0x04		/* LD:2c-2f */
	strd	r4, [r3], #0x08		/* ST:18-1f */
	ldr	r4, [r1], #0x04		/* LD:30-33 */
	ldr	r5, [r1], #0x04		/* LD:34-37 */
	strd	r6, [r3], #0x08		/* ST:20-27 */
	ldr	r6, [r1], #0x04		/* LD:38-3b */
	ldr	r7, [r1], #0x04		/* LD:3c-3f */
	strd	r8, [r3], #0x08		/* ST:28-2f */
	ldr	r8, [r1], #0x04		/* LD:40-43 */
	ldr	r9, [r1], #0x04		/* LD:44-47 */
	pld	[r1, #0x18]		/* Prefetch 0x60 */
	strd	r4, [r3], #0x08		/* ST:30-37 */
	ldr	r4, [r1], #0x04		/* LD:48-4b */
	ldr	r5, [r1], #0x04		/* LD:4c-4f */
	strd	r6, [r3], #0x08		/* ST:38-3f */
	ldr	r6, [r1], #0x04		/* LD:50-53 */
	ldr	r7, [r1], #0x04		/* LD:54-57 */
	strd	r8, [r3], #0x08		/* ST:40-47 */
	ldr	r8, [r1], #0x04		/* LD:58-5b */
	ldr	r9, [r1], #0x04		/* LD:5c-5f */
	strd	r4, [r3], #0x08		/* ST:48-4f */
	ldr	r4, [r1], #0x04		/* LD:60-63 */
	ldr	r5, [r1], #0x04		/* LD:64-67 */
	pld	[r1, #0x18]		/* Prefetch 0x80 */
	strd	r6, [r3], #0x08		/* ST:50-57 */
	ldr	r6, [r1], #0x04		/* LD:68-6b */
	ldr	r7, [r1], #0x04		/* LD:6c-6f */
	strd	r8, [r3], #0x08		/* ST:58-5f */
	ldr	r8, [r1], #0x04		/* LD:70-73 */
	ldr	r9, [r1], #0x04		/* LD:74-77 */
	strd	r4, [r3], #0x08		/* ST:60-67 */
	ldr	r4, [r1], #0x04		/* LD:78-7b */
	ldr	r5, [r1], #0x04		/* LD:7c-7f */
	strd	r6, [r3], #0x08		/* ST:68-6f */
	strd	r8, [r3], #0x08		/* ST:70-77 */
	strd	r4, [r3], #0x08		/* ST:78-7f */
	bge	.Lmemcpy_w_loop128
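	/*
	 * Rough C analogue of the loop above (illustrative only; the
	 * real loop additionally schedules each strd to use registers
	 * loaded well before they are stored, hiding load-use latency):
	 *
	 *	uint32_t *d = dst;
	 *	const uint32_t *s = src;
	 *
	 *	while (len >= 128) {
	 *		int i, j;
	 *
	 *		for (i = 0; i < 32; i += 8) {
	 *			__builtin_prefetch(&s[i + 8]);	// run ahead
	 *			for (j = 0; j < 8; j++)
	 *				d[i + j] = s[i + j];
	 *		}
	 *		d += 32; s += 32; len -= 128;
	 *	}
	 */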
.Lmemcpy_w_lessthan128:
	adds	r2, r2, #0x80		/* Adjust for extra sub */
	ldmeqfd	sp!, {r4-r9}
	RETeq				/* Return now if done */
	blt	.Lmemcpy_w_lessthan32
	/* Copy 32 bytes at a time */
	strd	r4, [r3], #0x08
	strd	r6, [r3], #0x08
	strd	r8, [r3], #0x08
	strd	r4, [r3], #0x08
	bge	.Lmemcpy_w_loop32
.Lmemcpy_w_lessthan32:
	adds	r2, r2, #0x20		/* Adjust for extra sub */
	ldmeqfd	sp!, {r4-r9}
	RETeq				/* Return now if done */
	addne	pc, pc, r4, lsl #1
	/* At least 24 bytes remaining */
	strd	r4, [r3], #0x08
	/* At least 16 bytes remaining */
	strd	r4, [r3], #0x08
	/* At least 8 bytes remaining */
	strd	r4, [r3], #0x08
	/* Less than 8 bytes remaining */
	RETeq				/* Return now if done */
	ldrge	ip, [r1], #0x04
	strge	ip, [r3], #0x04
	RETeq				/* Return now if done */
	ldrb	ip, [r1], #0x01
	ldrgeb	r2, [r1], #0x01
	strb	ip, [r3], #0x01
	strgeb	r2, [r3], #0x01
 * At this point, it has not been possible to word align both buffers.
 * The destination buffer is word aligned, but the source buffer is not.
.Lmemcpy_bad1_loop16:
	orr	r4, r4, r5, lsr #24
	orr	r5, r5, r6, lsr #24
	orr	r6, r6, r7, lsr #24
	orr	r7, r7, ip, lsr #24
	orr	r4, r4, r5, lsl #24
	orr	r5, r5, r6, lsl #24
	orr	r6, r6, r7, lsl #24
	orr	r7, r7, ip, lsl #24
	bge	.Lmemcpy_bad1_loop16
	ldmeqfd	sp!, {r4-r7}
	RETeq				/* Return now if done */
	blt	.Lmemcpy_bad_done
.Lmemcpy_bad1_loop4:
	orr	r4, r4, ip, lsr #24
	orr	r4, r4, ip, lsl #24
	bge	.Lmemcpy_bad1_loop4
.Lmemcpy_bad2_loop16:
	orr	r4, r4, r5, lsr #16
	orr	r5, r5, r6, lsr #16
	orr	r6, r6, r7, lsr #16
	orr	r7, r7, ip, lsr #16
	orr	r4, r4, r5, lsl #16
	orr	r5, r5, r6, lsl #16
	orr	r6, r6, r7, lsl #16
	orr	r7, r7, ip, lsl #16
	bge	.Lmemcpy_bad2_loop16
	ldmeqfd	sp!, {r4-r7}
	RETeq				/* Return now if done */
	blt	.Lmemcpy_bad_done
.Lmemcpy_bad2_loop4:
	orr	r4, r4, ip, lsr #16
	orr	r4, r4, ip, lsl #16
	bge	.Lmemcpy_bad2_loop4
.Lmemcpy_bad3_loop16:
	orr	r4, r4, r5, lsr #8
	orr	r5, r5, r6, lsr #8
	orr	r6, r6, r7, lsr #8
	orr	r7, r7, ip, lsr #8
	orr	r4, r4, r5, lsl #8
	orr	r5, r5, r6, lsl #8
	orr	r6, r6, r7, lsl #8
	orr	r7, r7, ip, lsl #8
	bge	.Lmemcpy_bad3_loop16
	ldmeqfd	sp!, {r4-r7}
	RETeq				/* Return now if done */
	blt	.Lmemcpy_bad_done
.Lmemcpy_bad3_loop4:
	orr	r4, r4, ip, lsr #8
	orr	r4, r4, ip, lsl #8
	bge	.Lmemcpy_bad3_loop4
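	/*
	 * Illustrative generic form of the three bad-alignment paths
	 * above (little-endian shown; the big-endian variants swap the
	 * shift directions): for a source offset of k bytes (k = 1, 2,
	 * 3 for bad1, bad2, bad3), each destination word is
	 *
	 *	dst_word = (cur >> (8 * k)) | (next << (32 - 8 * k));
	 *
	 * with the leftover bytes carried in a register between
	 * iterations, so every source word is loaded exactly once.
	 */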
	ldrb	ip, [r1], #0x01
	ldrgeb	r2, [r1], #0x01
	strb	ip, [r3], #0x01
	strgeb	r2, [r3], #0x01
 * Handle short copies (less than 16 bytes), possibly misaligned.
 * Some of these are *very* common, thanks to the network stack,
 * and so are handled specially.
	add	pc, pc, r2, lsl #2
	b	.Lmemcpy_bytewise	/* 0x01 */
	b	.Lmemcpy_bytewise	/* 0x02 */
	b	.Lmemcpy_bytewise	/* 0x03 */
	b	.Lmemcpy_4		/* 0x04 */
	b	.Lmemcpy_bytewise	/* 0x05 */
	b	.Lmemcpy_6		/* 0x06 */
	b	.Lmemcpy_bytewise	/* 0x07 */
	b	.Lmemcpy_8		/* 0x08 */
	b	.Lmemcpy_bytewise	/* 0x09 */
	b	.Lmemcpy_bytewise	/* 0x0a */
	b	.Lmemcpy_bytewise	/* 0x0b */
	b	.Lmemcpy_c		/* 0x0c */
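/*
 * Illustrative sketch of the two-level dispatch (not part of the
 * build): the "add pc, pc, r2, lsl #2" above is a computed goto into
 * the branch table, one 4-byte slot per byte count, and each
 * LMEMCPY_{4,6,8,C} handler below then switches again on the low
 * alignment bits of the two pointers:
 *
 *	switch (len) {
 *	case 4:	 goto lmemcpy_4[((dst & 3) << 2) | (src & 3)];
 *	case 6:	 goto lmemcpy_6[((dst & 3) << 2) | (src & 3)];
 *	case 8:	 goto lmemcpy_8[((dst & 3) << 2) | (src & 3)];
 *	case 12: goto lmemcpy_c[((dst & 3) << 2) | (src & 3)];
 *	default: goto bytewise;
 *	}
 *
 * (lmemcpy_* and bytewise are hypothetical names, for illustration
 * only.)
 */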
	mov	r3, r0			/* We must not clobber r0 */
	ldrb	ip, [r1], #0x01
1:	subs	r2, r2, #0x01
	strb	ip, [r3], #0x01
	ldrneb	ip, [r1], #0x01
/******************************************************************************
 * Special case for 4 byte copies
#define	LMEMCPY_4_LOG2	6	/* 64 bytes */
#define	LMEMCPY_4_PAD	.align LMEMCPY_4_LOG2
	orr	r2, r2, r0, lsl #2
	addne	pc, r3, r2, lsl #LMEMCPY_4_LOG2
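/*
 * Illustrative note: at the addne above, r2 holds the combined
 * alignment index ((dst & 3) << 2) | (src & 3). Each of the 16 cases
 * below is padded by LMEMCPY_4_PAD to exactly 2^LMEMCPY_4_LOG2 = 64
 * bytes, and case 0000 (both pointers word aligned) simply falls
 * through. The 6-, 8- and 12-byte handlers later in the file use the
 * same scheme.
 */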
 * 0000: dst is 32-bit aligned, src is 32-bit aligned
 * 0001: dst is 32-bit aligned, src is 8-bit aligned
	ldr	r3, [r1, #-1]		/* BE:r3 = x012 LE:r3 = 210x */
	ldr	r2, [r1, #3]		/* BE:r2 = 3xxx LE:r2 = xxx3 */
	mov	r3, r3, lsl #8		/* r3 = 012. */
	orr	r3, r3, r2, lsr #24	/* r3 = 0123 */
	mov	r3, r3, lsr #8		/* r3 = .210 */
	orr	r3, r3, r2, lsl #24	/* r3 = 3210 */
 * 0010: dst is 32-bit aligned, src is 16-bit aligned
	ldrh	r2, [r1, #0x02]
	ldrh	r3, [r1, #0x02]
	orr	r3, r2, r3, lsl #16
 * 0011: dst is 32-bit aligned, src is 8-bit aligned
	ldr	r3, [r1, #-3]		/* BE:r3 = xxx0 LE:r3 = 0xxx */
	ldr	r2, [r1, #1]		/* BE:r2 = 123x LE:r2 = x321 */
	mov	r3, r3, lsl #24		/* r3 = 0... */
	orr	r3, r3, r2, lsr #8	/* r3 = 0123 */
	mov	r3, r3, lsr #24		/* r3 = ...0 */
	orr	r3, r3, r2, lsl #8	/* r3 = 3210 */
 * 0100: dst is 8-bit aligned, src is 32-bit aligned
	strb	r2, [r0, #0x03]
	strb	r1, [r0, #0x03]
	strh	r3, [r0, #0x01]
 * 0101: dst is 8-bit aligned, src is 8-bit aligned
	ldrh	r3, [r1, #0x01]
	ldrb	r1, [r1, #0x03]
	strh	r3, [r0, #0x01]
	strb	r1, [r0, #0x03]
 * 0110: dst is 8-bit aligned, src is 16-bit aligned
	ldrh	r2, [r1]		/* BE:r2 = ..01 LE:r2 = ..10 */
	ldrh	r3, [r1, #0x02]		/* BE:r3 = ..23 LE:r3 = ..32 */
	mov	r1, r2, lsr #8		/* r1 = ...0 */
	mov	r2, r2, lsl #8		/* r2 = .01. */
	orr	r2, r2, r3, lsr #8	/* r2 = .012 */
	mov	r2, r2, lsr #8		/* r2 = ...1 */
	orr	r2, r2, r3, lsl #8	/* r2 = .321 */
	mov	r3, r3, lsr #8		/* r3 = ...3 */
	strh	r2, [r0, #0x01]
	strb	r3, [r0, #0x03]
 * 0111: dst is 8-bit aligned, src is 8-bit aligned
	ldrh	r3, [r1, #0x01]
	ldrb	r1, [r1, #0x03]
	strh	r3, [r0, #0x01]
	strb	r1, [r0, #0x03]
 * 1000: dst is 16-bit aligned, src is 32-bit aligned
	strh	r2, [r0, #0x02]
	strh	r3, [r0, #0x02]
 * 1001: dst is 16-bit aligned, src is 8-bit aligned
	ldr	r2, [r1, #-1]		/* BE:r2 = x012 LE:r2 = 210x */
	ldr	r3, [r1, #3]		/* BE:r3 = 3xxx LE:r3 = xxx3 */
	mov	r1, r2, lsr #8		/* BE:r1 = .x01 LE:r1 = .210 */
	mov	r2, r2, lsl #8		/* r2 = 012. */
	orr	r2, r2, r3, lsr #24	/* r2 = 0123 */
	mov	r2, r2, lsr #24		/* r2 = ...2 */
	orr	r2, r2, r3, lsl #8	/* r2 = xx32 */
	strh	r2, [r0, #0x02]
 * 1010: dst is 16-bit aligned, src is 16-bit aligned
	ldrh	r3, [r1, #0x02]
	strh	r3, [r0, #0x02]
 * 1011: dst is 16-bit aligned, src is 8-bit aligned
	ldr	r3, [r1, #1]		/* BE:r3 = 123x LE:r3 = x321 */
	ldr	r2, [r1, #-3]		/* BE:r2 = xxx0 LE:r2 = 0xxx */
	mov	r1, r3, lsr #8		/* BE:r1 = .123 LE:r1 = .x32 */
	strh	r1, [r0, #0x02]
	mov	r3, r3, lsr #24		/* r3 = ...1 */
	orr	r3, r3, r2, lsl #8	/* r3 = xx01 */
	mov	r3, r3, lsl #8		/* r3 = 321. */
	orr	r3, r3, r2, lsr #24	/* r3 = 3210 */
 * 1100: dst is 8-bit aligned, src is 32-bit aligned
	ldr	r2, [r1]		/* BE:r2 = 0123 LE:r2 = 3210 */
	strb	r2, [r0, #0x03]
	strh	r3, [r0, #0x01]
	strh	r3, [r0, #0x01]
	strb	r1, [r0, #0x03]
 * 1101: dst is 8-bit aligned, src is 8-bit aligned
	ldrh	r3, [r1, #0x01]
	ldrb	r1, [r1, #0x03]
	strh	r3, [r0, #0x01]
	strb	r1, [r0, #0x03]
 * 1110: dst is 8-bit aligned, src is 16-bit aligned
	ldrh	r3, [r1, #0x02]		/* BE:r3 = ..23 LE:r3 = ..32 */
	ldrh	r2, [r1]		/* BE:r2 = ..01 LE:r2 = ..10 */
	strb	r3, [r0, #0x03]
	mov	r3, r3, lsr #8		/* r3 = ...2 */
	orr	r3, r3, r2, lsl #8	/* r3 = ..12 */
	strh	r3, [r0, #0x01]
	mov	r2, r2, lsr #8		/* r2 = ...0 */
	ldrh	r2, [r1]		/* BE:r2 = ..01 LE:r2 = ..10 */
	ldrh	r3, [r1, #0x02]		/* BE:r3 = ..23 LE:r3 = ..32 */
	mov	r2, r2, lsr #8		/* r2 = ...1 */
	orr	r2, r2, r3, lsl #8	/* r2 = .321 */
	strh	r2, [r0, #0x01]
	mov	r3, r3, lsr #8		/* r3 = ...3 */
	strb	r3, [r0, #0x03]
 * 1111: dst is 8-bit aligned, src is 8-bit aligned
	ldrh	r3, [r1, #0x01]
	ldrb	r1, [r1, #0x03]
	strh	r3, [r0, #0x01]
	strb	r1, [r0, #0x03]
/******************************************************************************
 * Special case for 6 byte copies
#define	LMEMCPY_6_LOG2	6	/* 64 bytes */
#define	LMEMCPY_6_PAD	.align LMEMCPY_6_LOG2
	orr	r2, r2, r0, lsl #2
	addne	pc, r3, r2, lsl #LMEMCPY_6_LOG2
 * 0000: dst is 32-bit aligned, src is 32-bit aligned
	ldrh	r3, [r1, #0x04]
	strh	r3, [r0, #0x04]
 * 0001: dst is 32-bit aligned, src is 8-bit aligned
	ldr	r2, [r1, #-1]		/* BE:r2 = x012 LE:r2 = 210x */
	ldr	r3, [r1, #0x03]		/* BE:r3 = 345x LE:r3 = x543 */
	mov	r2, r2, lsl #8		/* r2 = 012. */
	orr	r2, r2, r3, lsr #24	/* r2 = 0123 */
	mov	r2, r2, lsr #8		/* r2 = .210 */
	orr	r2, r2, r3, lsl #24	/* r2 = 3210 */
	mov	r3, r3, lsr #8		/* BE:r3 = .345 LE:r3 = .x54 */
	strh	r3, [r0, #0x04]
 * 0010: dst is 32-bit aligned, src is 16-bit aligned
	ldr	r3, [r1, #0x02]		/* BE:r3 = 2345 LE:r3 = 5432 */
	ldrh	r2, [r1]		/* BE:r2 = ..01 LE:r2 = ..10 */
	mov	r1, r3, lsr #16		/* r1 = ..23 */
	orr	r1, r1, r2, lsl #16	/* r1 = 0123 */
	strh	r3, [r0, #0x04]
	mov	r1, r3, lsr #16		/* r1 = ..54 */
	orr	r2, r2, r3, lsl #16	/* r2 = 3210 */
	strh	r1, [r0, #0x04]
 * 0011: dst is 32-bit aligned, src is 8-bit aligned
	ldr	r2, [r1, #-3]		/* BE:r2 = xxx0 LE:r2 = 0xxx */
	ldr	r3, [r1, #1]		/* BE:r3 = 1234 LE:r3 = 4321 */
	ldr	r1, [r1, #5]		/* BE:r1 = 5xxx LE:r1 = xxx5 */
	mov	r2, r2, lsl #24		/* r2 = 0... */
	orr	r2, r2, r3, lsr #8	/* r2 = 0123 */
	mov	r3, r3, lsl #8		/* r3 = 234. */
	orr	r1, r3, r1, lsr #24	/* r1 = 2345 */
	mov	r2, r2, lsr #24		/* r2 = ...0 */
	orr	r2, r2, r3, lsl #8	/* r2 = 3210 */
	mov	r1, r1, lsl #8		/* r1 = xx5. */
	orr	r1, r1, r3, lsr #24	/* r1 = xx54 */
	strh	r1, [r0, #0x04]
 * 0100: dst is 8-bit aligned, src is 32-bit aligned
	ldr	r3, [r1]		/* BE:r3 = 0123 LE:r3 = 3210 */
	ldrh	r2, [r1, #0x04]		/* BE:r2 = ..45 LE:r2 = ..54 */
	mov	r1, r3, lsr #8		/* BE:r1 = .012 LE:r1 = .321 */
	strh	r1, [r0, #0x01]
	mov	r1, r3, lsr #24		/* r1 = ...0 */
	mov	r3, r3, lsl #8		/* r3 = 123. */
	orr	r3, r3, r2, lsr #8	/* r3 = 1234 */
	mov	r3, r3, lsr #24		/* r3 = ...3 */
	orr	r3, r3, r2, lsl #8	/* r3 = .543 */
	mov	r2, r2, lsr #8		/* r2 = ...5 */
	strh	r3, [r0, #0x03]
	strb	r2, [r0, #0x05]
 * 0101: dst is 8-bit aligned, src is 8-bit aligned
	ldrh	r3, [r1, #0x01]
	ldrh	ip, [r1, #0x03]
	ldrb	r1, [r1, #0x05]
	strh	r3, [r0, #0x01]
	strh	ip, [r0, #0x03]
	strb	r1, [r0, #0x05]
 * 0110: dst is 8-bit aligned, src is 16-bit aligned
	ldrh	r2, [r1]		/* BE:r2 = ..01 LE:r2 = ..10 */
	ldr	r1, [r1, #0x02]		/* BE:r1 = 2345 LE:r1 = 5432 */
	mov	r3, r2, lsr #8		/* r3 = ...0 */
	strb	r1, [r0, #0x05]
	mov	r3, r1, lsr #8		/* r3 = .234 */
	strh	r3, [r0, #0x03]
	mov	r3, r2, lsl #8		/* r3 = .01. */
	orr	r3, r3, r1, lsr #24	/* r3 = .012 */
	strh	r3, [r0, #0x01]
	strb	r3, [r0, #0x05]
	mov	r3, r1, lsr #8		/* r3 = .543 */
	strh	r3, [r0, #0x03]
	mov	r3, r2, lsr #8		/* r3 = ...1 */
	orr	r3, r3, r1, lsl #8	/* r3 = 4321 */
	strh	r3, [r0, #0x01]
 * 0111: dst is 8-bit aligned, src is 8-bit aligned
	ldrh	r3, [r1, #0x01]
	ldrh	ip, [r1, #0x03]
	ldrb	r1, [r1, #0x05]
	strh	r3, [r0, #0x01]
	strh	ip, [r0, #0x03]
	strb	r1, [r0, #0x05]
 * 1000: dst is 16-bit aligned, src is 32-bit aligned
	ldr	r2, [r1]		/* r2 = 0123 */
	ldrh	r3, [r1, #0x04]		/* r3 = ..45 */
	mov	r1, r2, lsr #16		/* r1 = ..01 */
	orr	r3, r3, r2, lsl #16	/* r3 = 2345 */
	ldrh	r2, [r1, #0x04]		/* r2 = ..54 */
	ldr	r3, [r1]		/* r3 = 3210 */
	mov	r2, r2, lsl #16		/* r2 = 54.. */
	orr	r2, r2, r3, lsr #16	/* r2 = 5432 */
 * 1001: dst is 16-bit aligned, src is 8-bit aligned
	ldr	r3, [r1, #-1]		/* BE:r3 = x012 LE:r3 = 210x */
	ldr	r2, [r1, #3]		/* BE:r2 = 345x LE:r2 = x543 */
	mov	r1, r3, lsr #8		/* BE:r1 = .x01 LE:r1 = .210 */
	mov	r2, r2, lsr #8		/* r2 = .345 */
	orr	r2, r2, r3, lsl #24	/* r2 = 2345 */
	mov	r2, r2, lsl #8		/* r2 = 543. */
	orr	r2, r2, r3, lsr #24	/* r2 = 5432 */
 * 1010: dst is 16-bit aligned, src is 16-bit aligned
 * 1011: dst is 16-bit aligned, src is 8-bit aligned
	ldrb	r3, [r1]		/* r3 = ...0 */
	ldr	r2, [r1, #0x01]		/* BE:r2 = 1234 LE:r2 = 4321 */
	ldrb	r1, [r1, #0x05]		/* r1 = ...5 */
	mov	r3, r3, lsl #8		/* r3 = ..0. */
	orr	r3, r3, r2, lsr #24	/* r3 = ..01 */
	orr	r1, r1, r2, lsl #8	/* r1 = 2345 */
	orr	r3, r3, r2, lsl #8	/* r3 = 3210 */
	mov	r1, r1, lsl #24		/* r1 = 5... */
	orr	r1, r1, r2, lsr #8	/* r1 = 5432 */
 * 1100: dst is 8-bit aligned, src is 32-bit aligned
	ldr	r2, [r1]		/* BE:r2 = 0123 LE:r2 = 3210 */
	ldrh	r1, [r1, #0x04]		/* BE:r1 = ..45 LE:r1 = ..54 */
	mov	r3, r2, lsr #24		/* r3 = ...0 */
	mov	r2, r2, lsl #8		/* r2 = 123. */
	orr	r2, r2, r1, lsr #8	/* r2 = 1234 */
	mov	r2, r2, lsr #8		/* r2 = .321 */
	orr	r2, r2, r1, lsl #24	/* r2 = 4321 */
	mov	r1, r1, lsr #8		/* r1 = ...5 */
	strb	r1, [r0, #0x05]
 * 1101: dst is 8-bit aligned, src is 8-bit aligned
	ldrh	r3, [r1, #0x01]
	ldrh	ip, [r1, #0x03]
	ldrb	r1, [r1, #0x05]
	strh	r3, [r0, #0x01]
	strh	ip, [r0, #0x03]
	strb	r1, [r0, #0x05]
 * 1110: dst is 8-bit aligned, src is 16-bit aligned
	ldrh	r2, [r1]		/* BE:r2 = ..01 LE:r2 = ..10 */
	ldr	r1, [r1, #0x02]		/* BE:r1 = 2345 LE:r1 = 5432 */
	mov	r3, r2, lsr #8		/* r3 = ...0 */
	mov	r2, r2, lsl #24		/* r2 = 1... */
	orr	r2, r2, r1, lsr #8	/* r2 = 1234 */
	mov	r2, r2, lsr #8		/* r2 = ...1 */
	orr	r2, r2, r1, lsl #8	/* r2 = 4321 */
	mov	r1, r1, lsr #24		/* r1 = ...5 */
	strb	r1, [r0, #0x05]
 * 1111: dst is 8-bit aligned, src is 8-bit aligned
	ldrb	r1, [r1, #0x05]
	strb	r1, [r0, #0x05]
/******************************************************************************
 * Special case for 8 byte copies
#define	LMEMCPY_8_LOG2	6	/* 64 bytes */
#define	LMEMCPY_8_PAD	.align LMEMCPY_8_LOG2
	orr	r2, r2, r0, lsl #2
	addne	pc, r3, r2, lsl #LMEMCPY_8_LOG2
 * 0000: dst is 32-bit aligned, src is 32-bit aligned
 * 0001: dst is 32-bit aligned, src is 8-bit aligned
	ldr	r3, [r1, #-1]		/* BE:r3 = x012 LE:r3 = 210x */
	ldr	r2, [r1, #0x03]		/* BE:r2 = 3456 LE:r2 = 6543 */
	ldrb	r1, [r1, #0x07]		/* r1 = ...7 */
	mov	r3, r3, lsl #8		/* r3 = 012. */
	orr	r3, r3, r2, lsr #24	/* r3 = 0123 */
	orr	r2, r1, r2, lsl #8	/* r2 = 4567 */
	mov	r3, r3, lsr #8		/* r3 = .210 */
	orr	r3, r3, r2, lsl #24	/* r3 = 3210 */
	mov	r1, r1, lsl #24		/* r1 = 7... */
	orr	r2, r1, r2, lsr #8	/* r2 = 7654 */
 * 0010: dst is 32-bit aligned, src is 16-bit aligned
	ldrh	r2, [r1]		/* BE:r2 = ..01 LE:r2 = ..10 */
	ldr	r3, [r1, #0x02]		/* BE:r3 = 2345 LE:r3 = 5432 */
	ldrh	r1, [r1, #0x06]		/* BE:r1 = ..67 LE:r1 = ..76 */
	mov	r2, r2, lsl #16		/* r2 = 01.. */
	orr	r2, r2, r3, lsr #16	/* r2 = 0123 */
	orr	r3, r1, r3, lsl #16	/* r3 = 4567 */
	orr	r2, r2, r3, lsl #16	/* r2 = 3210 */
	mov	r3, r3, lsr #16		/* r3 = ..54 */
	orr	r3, r3, r1, lsl #16	/* r3 = 7654 */
 * 0011: dst is 32-bit aligned, src is 8-bit aligned
	ldrb	r3, [r1]		/* r3 = ...0 */
	ldr	r2, [r1, #0x01]		/* BE:r2 = 1234 LE:r2 = 4321 */
	ldr	r1, [r1, #0x05]		/* BE:r1 = 567x LE:r1 = x765 */
	mov	r3, r3, lsl #24		/* r3 = 0... */
	orr	r3, r3, r2, lsr #8	/* r3 = 0123 */
	mov	r2, r2, lsl #24		/* r2 = 4... */
	orr	r2, r2, r1, lsr #8	/* r2 = 4567 */
	orr	r3, r3, r2, lsl #8	/* r3 = 3210 */
	mov	r2, r2, lsr #24		/* r2 = ...4 */
	orr	r2, r2, r1, lsl #8	/* r2 = 7654 */
 * 0100: dst is 8-bit aligned, src is 32-bit aligned
	ldr	r3, [r1]		/* BE:r3 = 0123 LE:r3 = 3210 */
	ldr	r2, [r1, #0x04]		/* BE:r2 = 4567 LE:r2 = 7654 */
	mov	r1, r3, lsr #24		/* r1 = ...0 */
	mov	r1, r3, lsr #8		/* r1 = .012 */
	strb	r2, [r0, #0x07]
	mov	r3, r3, lsl #24		/* r3 = 3... */
	orr	r3, r3, r2, lsr #8	/* r3 = 3456 */
	mov	r1, r2, lsr #24		/* r1 = ...7 */
	strb	r1, [r0, #0x07]
	mov	r1, r3, lsr #8		/* r1 = .321 */
	mov	r3, r3, lsr #24		/* r3 = ...3 */
	orr	r3, r3, r2, lsl #8	/* r3 = 6543 */
	strh	r1, [r0, #0x01]
 * 0101: dst is 8-bit aligned, src is 8-bit aligned
	ldrh	r3, [r1, #0x01]
	ldrb	r1, [r1, #0x07]
	strh	r3, [r0, #0x01]
	strb	r1, [r0, #0x07]
 * 0110: dst is 8-bit aligned, src is 16-bit aligned
	ldrh	r2, [r1]		/* BE:r2 = ..01 LE:r2 = ..10 */
	ldr	r3, [r1, #0x02]		/* BE:r3 = 2345 LE:r3 = 5432 */
	ldrh	r1, [r1, #0x06]		/* BE:r1 = ..67 LE:r1 = ..76 */
	mov	ip, r2, lsr #8		/* ip = ...0 */
	mov	ip, r2, lsl #8		/* ip = .01. */
	orr	ip, ip, r3, lsr #24	/* ip = .012 */
	strb	r1, [r0, #0x07]
	mov	r3, r3, lsl #8		/* r3 = 345. */
	orr	r3, r3, r1, lsr #8	/* r3 = 3456 */
	strb	r2, [r0]		/* 0 */
	mov	ip, r1, lsr #8		/* ip = ...7 */
	strb	ip, [r0, #0x07]		/* 7 */
	mov	ip, r2, lsr #8		/* ip = ...1 */
	orr	ip, ip, r3, lsl #8	/* ip = 4321 */
	mov	r3, r3, lsr #8		/* r3 = .543 */
	orr	r3, r3, r1, lsl #24	/* r3 = 6543 */
	strh	ip, [r0, #0x01]
 * 0111: dst is 8-bit aligned, src is 8-bit aligned
	ldrb	r3, [r1]		/* r3 = ...0 */
	ldr	ip, [r1, #0x01]		/* BE:ip = 1234 LE:ip = 4321 */
	ldrh	r2, [r1, #0x05]		/* BE:r2 = ..56 LE:r2 = ..65 */
	ldrb	r1, [r1, #0x07]		/* r1 = ...7 */
	mov	r3, ip, lsr #16		/* BE:r3 = ..12 LE:r3 = ..43 */
	strh	r3, [r0, #0x01]
	orr	r2, r2, ip, lsl #16	/* r2 = 3456 */
	strh	ip, [r0, #0x01]
	orr	r2, r3, r2, lsl #16	/* r2 = 6543 */
	strb	r1, [r0, #0x07]
 * 1000: dst is 16-bit aligned, src is 32-bit aligned
	ldr	r2, [r1]		/* BE:r2 = 0123 LE:r2 = 3210 */
	ldr	r3, [r1, #0x04]		/* BE:r3 = 4567 LE:r3 = 7654 */
	mov	r1, r2, lsr #16		/* BE:r1 = ..01 LE:r1 = ..32 */
	mov	r1, r3, lsr #16		/* r1 = ..45 */
	orr	r2, r1, r2, lsl #16	/* r2 = 2345 */
	orr	r2, r1, r3, lsl #16	/* r2 = 5432 */
	mov	r3, r3, lsr #16		/* r3 = ..76 */
	strh	r3, [r0, #0x06]
 * 1001: dst is 16-bit aligned, src is 8-bit aligned
	ldr	r2, [r1, #-1]		/* BE:r2 = x012 LE:r2 = 210x */
	ldr	r3, [r1, #0x03]		/* BE:r3 = 3456 LE:r3 = 6543 */
	ldrb	ip, [r1, #0x07]		/* ip = ...7 */
	mov	r1, r2, lsr #8		/* BE:r1 = .x01 LE:r1 = .210 */
	mov	r1, r2, lsl #24		/* r1 = 2... */
	orr	r1, r1, r3, lsr #8	/* r1 = 2345 */
	orr	r3, ip, r3, lsl #8	/* r3 = 4567 */
	mov	r1, r2, lsr #24		/* r1 = ...2 */
	orr	r1, r1, r3, lsl #8	/* r1 = 5432 */
	mov	r3, r3, lsr #24		/* r3 = ...6 */
	orr	r3, r3, ip, lsl #8	/* r3 = ..76 */
	strh	r3, [r0, #0x06]
 * 1010: dst is 16-bit aligned, src is 16-bit aligned
	ldrh	r3, [r1, #0x06]
	strh	r3, [r0, #0x06]
 * 1011: dst is 16-bit aligned, src is 8-bit aligned
	ldr	r3, [r1, #0x05]		/* BE:r3 = 567x LE:r3 = x765 */
	ldr	r2, [r1, #0x01]		/* BE:r2 = 1234 LE:r2 = 4321 */
	ldrb	ip, [r1]		/* ip = ...0 */
	mov	r1, r3, lsr #8		/* BE:r1 = .567 LE:r1 = .x76 */
	strh	r1, [r0, #0x06]
	mov	r3, r3, lsr #24		/* r3 = ...5 */
	orr	r3, r3, r2, lsl #8	/* r3 = 2345 */
	mov	r2, r2, lsr #24		/* r2 = ...1 */
	orr	r2, r2, ip, lsl #8	/* r2 = ..01 */
	mov	r3, r3, lsl #24		/* r3 = 5... */
	orr	r3, r3, r2, lsr #8	/* r3 = 5432 */
	orr	r2, ip, r2, lsl #8	/* r2 = 3210 */
 * 1100: dst is 8-bit aligned, src is 32-bit aligned
	ldr	r3, [r1, #0x04]		/* BE:r3 = 4567 LE:r3 = 7654 */
	ldr	r2, [r1]		/* BE:r2 = 0123 LE:r2 = 3210 */
	mov	r1, r3, lsr #8		/* BE:r1 = .456 LE:r1 = .765 */
	strh	r1, [r0, #0x05]
	strb	r3, [r0, #0x07]
	mov	r1, r2, lsr #24		/* r1 = ...0 */
	mov	r2, r2, lsl #8		/* r2 = 123. */
	orr	r2, r2, r3, lsr #24	/* r2 = 1234 */
	mov	r1, r3, lsr #24		/* r1 = ...7 */
	strb	r1, [r0, #0x07]
	mov	r2, r2, lsr #8		/* r2 = .321 */
	orr	r2, r2, r3, lsl #24	/* r2 = 4321 */
 * 1101: dst is 8-bit aligned, src is 8-bit aligned
	ldrb	r3, [r1]		/* r3 = ...0 */
	ldrh	r2, [r1, #0x01]		/* BE:r2 = ..12 LE:r2 = ..21 */
	ldr	ip, [r1, #0x03]		/* BE:ip = 3456 LE:ip = 6543 */
	ldrb	r1, [r1, #0x07]		/* r1 = ...7 */
	mov	r3, ip, lsr #16		/* BE:r3 = ..34 LE:r3 = ..65 */
	strh	ip, [r0, #0x05]
	orr	r2, r3, r2, lsl #16	/* r2 = 1234 */
	strh	r3, [r0, #0x05]
	orr	r2, r2, ip, lsl #16	/* r2 = 4321 */
	strb	r1, [r0, #0x07]
 * 1110: dst is 8-bit aligned, src is 16-bit aligned
	ldrh	r2, [r1]		/* BE:r2 = ..01 LE:r2 = ..10 */
	ldr	r3, [r1, #0x02]		/* BE:r3 = 2345 LE:r3 = 5432 */
	ldrh	r1, [r1, #0x06]		/* BE:r1 = ..67 LE:r1 = ..76 */
	mov	ip, r2, lsr #8		/* ip = ...0 */
	mov	ip, r2, lsl #24		/* ip = 1... */
	orr	ip, ip, r3, lsr #8	/* ip = 1234 */
	strb	r1, [r0, #0x07]
	mov	r1, r1, lsr #8		/* r1 = ...6 */
	orr	r1, r1, r3, lsl #8	/* r1 = 3456 */
	mov	ip, r2, lsr #8		/* ip = ...1 */
	orr	ip, ip, r3, lsl #8	/* ip = 4321 */
	mov	r2, r1, lsr #8		/* r2 = ...7 */
	strb	r2, [r0, #0x07]
	mov	r1, r1, lsl #8		/* r1 = .76. */
	orr	r1, r1, r3, lsr #24	/* r1 = .765 */
	strh	r1, [r0, #0x05]
 * 1111: dst is 8-bit aligned, src is 8-bit aligned
	ldrh	r3, [r1, #0x05]
	ldrb	r1, [r1, #0x07]
	strh	r3, [r0, #0x05]
	strb	r1, [r0, #0x07]
/******************************************************************************
 * Special case for 12 byte copies
#define	LMEMCPY_C_LOG2	7	/* 128 bytes */
#define	LMEMCPY_C_PAD	.align LMEMCPY_C_LOG2
	orr	r2, r2, r0, lsl #2
	addne	pc, r3, r2, lsl #LMEMCPY_C_LOG2
 * 0000: dst is 32-bit aligned, src is 32-bit aligned
 * 0001: dst is 32-bit aligned, src is 8-bit aligned
	ldrb	r2, [r1, #0xb]		/* r2 = ...B */
	ldr	ip, [r1, #0x07]		/* BE:ip = 789A LE:ip = A987 */
	ldr	r3, [r1, #0x03]		/* BE:r3 = 3456 LE:r3 = 6543 */
	ldr	r1, [r1, #-1]		/* BE:r1 = x012 LE:r1 = 210x */
	orr	r2, r2, ip, lsl #8	/* r2 = 89AB */
	mov	r2, ip, lsr #24		/* r2 = ...7 */
	orr	r2, r2, r3, lsl #8	/* r2 = 4567 */
	mov	r1, r1, lsl #8		/* r1 = 012. */
	orr	r1, r1, r3, lsr #24	/* r1 = 0123 */
	mov	r2, r2, lsl #24		/* r2 = B... */
	orr	r2, r2, ip, lsr #8	/* r2 = BA98 */
	mov	r2, ip, lsl #24		/* r2 = 7... */
	orr	r2, r2, r3, lsr #8	/* r2 = 7654 */
	mov	r1, r1, lsr #8		/* r1 = .210 */
	orr	r1, r1, r3, lsl #24	/* r1 = 3210 */
 * 0010: dst is 32-bit aligned, src is 16-bit aligned
	ldrh	r2, [r1]		/* BE:r2 = ..01 LE:r2 = ..10 */
	ldr	r3, [r1, #0x02]		/* BE:r3 = 2345 LE:r3 = 5432 */
	ldr	ip, [r1, #0x06]		/* BE:ip = 6789 LE:ip = 9876 */
	ldrh	r1, [r1, #0x0a]		/* BE:r1 = ..AB LE:r1 = ..BA */
	mov	r2, r2, lsl #16		/* r2 = 01.. */
	orr	r2, r2, r3, lsr #16	/* r2 = 0123 */
	mov	r3, r3, lsl #16		/* r3 = 45.. */
	orr	r3, r3, ip, lsr #16	/* r3 = 4567 */
	orr	r1, r1, ip, lsl #16	/* r1 = 89AB */
	orr	r2, r2, r3, lsl #16	/* r2 = 3210 */
	mov	r3, r3, lsr #16		/* r3 = ..54 */
	orr	r3, r3, ip, lsl #16	/* r3 = 7654 */
	mov	r1, r1, lsl #16		/* r1 = BA.. */
	orr	r1, r1, ip, lsr #16	/* r1 = BA98 */
 * 0011: dst is 32-bit aligned, src is 8-bit aligned
	ldrb	r2, [r1]		/* r2 = ...0 */
	ldr	r3, [r1, #0x01]		/* BE:r3 = 1234 LE:r3 = 4321 */
	ldr	ip, [r1, #0x05]		/* BE:ip = 5678 LE:ip = 8765 */
	ldr	r1, [r1, #0x09]		/* BE:r1 = 9ABx LE:r1 = xBA9 */
	mov	r2, r2, lsl #24		/* r2 = 0... */
	orr	r2, r2, r3, lsr #8	/* r2 = 0123 */
	mov	r3, r3, lsl #24		/* r3 = 4... */
	orr	r3, r3, ip, lsr #8	/* r3 = 4567 */
	mov	r1, r1, lsr #8		/* r1 = .9AB */
	orr	r1, r1, ip, lsl #24	/* r1 = 89AB */
	orr	r2, r2, r3, lsl #8	/* r2 = 3210 */
	mov	r3, r3, lsr #24		/* r3 = ...4 */
	orr	r3, r3, ip, lsl #8	/* r3 = 7654 */
	mov	r1, r1, lsl #8		/* r1 = BA9. */
	orr	r1, r1, ip, lsr #24	/* r1 = BA98 */
 * 0100: dst is 8-bit aligned (byte 1), src is 32-bit aligned
	ldr	r2, [r1]		/* BE:r2 = 0123 LE:r2 = 3210 */
	ldr	r3, [r1, #0x04]		/* BE:r3 = 4567 LE:r3 = 7654 */
	ldr	ip, [r1, #0x08]		/* BE:ip = 89AB LE:ip = BA98 */
	mov	r1, r2, lsr #8		/* BE:r1 = .012 LE:r1 = .321 */
	strh	r1, [r0, #0x01]
	mov	r1, r2, lsr #24		/* r1 = ...0 */
	mov	r1, r2, lsl #24		/* r1 = 3... */
	orr	r2, r1, r3, lsr #8	/* r2 = 3456 */
	mov	r1, r3, lsl #24		/* r1 = 7... */
	orr	r1, r1, ip, lsr #8	/* r1 = 789A */
	mov	r1, r2, lsr #24		/* r1 = ...3 */
	orr	r2, r1, r3, lsl #8	/* r2 = 6543 */
	mov	r1, r3, lsr #24		/* r1 = ...7 */
	orr	r1, r1, ip, lsl #8	/* r1 = A987 */
	mov	ip, ip, lsr #24		/* ip = ...B */
	strb	ip, [r0, #0x0b]
 * 0101: dst is 8-bit aligned (byte 1), src is 8-bit aligned (byte 1)
	ldrh	r3, [r1, #0x01]
	ldrb	r1, [r1, #0x0b]
	strh	r3, [r0, #0x01]
	strb	r1, [r0, #0x0b]
 * 0110: dst is 8-bit aligned (byte 1), src is 16-bit aligned
	ldrh	r2, [r1]		/* BE:r2 = ..01 LE:r2 = ..10 */
	ldr	r3, [r1, #0x02]		/* BE:r3 = 2345 LE:r3 = 5432 */
	ldr	ip, [r1, #0x06]		/* BE:ip = 6789 LE:ip = 9876 */
	ldrh	r1, [r1, #0x0a]		/* BE:r1 = ..AB LE:r1 = ..BA */
	mov	r2, r2, ror #8		/* r2 = 1..0 */
	mov	r2, r2, lsr #16		/* r2 = ..1. */
	orr	r2, r2, r3, lsr #24	/* r2 = ..12 */
	strh	r2, [r0, #0x01]
	mov	r2, r3, lsl #8		/* r2 = 345. */
	orr	r3, r2, ip, lsr #24	/* r3 = 3456 */
	mov	r2, ip, lsl #8		/* r2 = 789. */
	orr	r2, r2, r1, lsr #8	/* r2 = 789A */
	mov	r2, r2, lsr #8		/* r2 = ...1 */
	orr	r2, r2, r3, lsl #8	/* r2 = 4321 */
	strh	r2, [r0, #0x01]
	mov	r2, r3, lsr #8		/* r2 = .543 */
	orr	r3, r2, ip, lsl #24	/* r3 = 6543 */
	mov	r2, ip, lsr #8		/* r2 = .987 */
	orr	r2, r2, r1, lsl #24	/* r2 = A987 */
	mov	r1, r1, lsr #8		/* r1 = ...B */
	strb	r1, [r0, #0x0b]
 * 0111: dst is 8-bit aligned (byte 1), src is 8-bit aligned (byte 3)
	ldr	r3, [r1, #0x01]		/* BE:r3 = 1234 LE:r3 = 4321 */
	ldr	ip, [r1, #0x05]		/* BE:ip = 5678 LE:ip = 8765 */
	ldr	r1, [r1, #0x09]		/* BE:r1 = 9ABx LE:r1 = xBA9 */
	mov	r2, r3, lsr #16		/* r2 = ..12 */
	strh	r2, [r0, #0x01]
	mov	r3, r3, lsl #16		/* r3 = 34.. */
	orr	r3, r3, ip, lsr #16	/* r3 = 3456 */
	mov	ip, ip, lsl #16		/* ip = 78.. */
	orr	ip, ip, r1, lsr #16	/* ip = 789A */
	mov	r1, r1, lsr #8		/* r1 = .9AB */
	strh	r3, [r0, #0x01]
	mov	r3, r3, lsr #16		/* r3 = ..43 */
	orr	r3, r3, ip, lsl #16	/* r3 = 6543 */
	mov	ip, ip, lsr #16		/* ip = ..87 */
	orr	ip, ip, r1, lsl #16	/* ip = A987 */
	mov	r1, r1, lsr #16		/* r1 = ..xB */
	strb	r1, [r0, #0x0b]
 * 1000: dst is 16-bit aligned, src is 32-bit aligned
	ldr	ip, [r1]		/* BE:ip = 0123 LE:ip = 3210 */
	ldr	r3, [r1, #0x04]		/* BE:r3 = 4567 LE:r3 = 7654 */
	ldr	r2, [r1, #0x08]		/* BE:r2 = 89AB LE:r2 = BA98 */
	mov	r1, ip, lsr #16		/* BE:r1 = ..01 LE:r1 = ..32 */
	mov	r1, ip, lsl #16		/* r1 = 23.. */
	orr	r1, r1, r3, lsr #16	/* r1 = 2345 */
	mov	r3, r3, lsl #16		/* r3 = 67.. */
	orr	r3, r3, r2, lsr #16	/* r3 = 6789 */
	orr	r1, r1, r3, lsl #16	/* r1 = 5432 */
	mov	r3, r3, lsr #16		/* r3 = ..76 */
	orr	r3, r3, r2, lsl #16	/* r3 = 9876 */
	mov	r2, r2, lsr #16		/* r2 = ..BA */
	strh	r2, [r0, #0x0a]
 * 1001: dst is 16-bit aligned, src is 8-bit aligned (byte 1)
	ldr	r2, [r1, #-1]		/* BE:r2 = x012 LE:r2 = 210x */
	ldr	r3, [r1, #0x03]		/* BE:r3 = 3456 LE:r3 = 6543 */
	mov	ip, r2, lsr #8		/* BE:ip = .x01 LE:ip = .210 */
	ldr	ip, [r1, #0x07]		/* BE:ip = 789A LE:ip = A987 */
	ldrb	r1, [r1, #0x0b]		/* r1 = ...B */
	mov	r2, r2, lsl #24		/* r2 = 2... */
	orr	r2, r2, r3, lsr #8	/* r2 = 2345 */
	mov	r3, r3, lsl #24		/* r3 = 6... */
	orr	r3, r3, ip, lsr #8	/* r3 = 6789 */
	orr	r1, r1, ip, lsl #8	/* r1 = 89AB */
	mov	r2, r2, lsr #24		/* r2 = ...2 */
	orr	r2, r2, r3, lsl #8	/* r2 = 5432 */
	mov	r3, r3, lsr #24		/* r3 = ...6 */
	orr	r3, r3, ip, lsl #8	/* r3 = 9876 */
	mov	r1, r1, lsl #8		/* r1 = ..B. */
	orr	r1, r1, ip, lsr #24	/* r1 = ..BA */
	strh	r1, [r0, #0x0a]
 * 1010: dst is 16-bit aligned, src is 16-bit aligned
	ldrh	r1, [r1, #0x0a]
	strh	r1, [r0, #0x0a]
 * 1011: dst is 16-bit aligned, src is 8-bit aligned (byte 3)
	ldr	r2, [r1, #0x09]		/* BE:r2 = 9ABx LE:r2 = xBA9 */
	ldr	r3, [r1, #0x05]		/* BE:r3 = 5678 LE:r3 = 8765 */
	mov	ip, r2, lsr #8		/* BE:ip = .9AB LE:ip = .xBA */
	strh	ip, [r0, #0x0a]
	ldr	ip, [r1, #0x01]		/* BE:ip = 1234 LE:ip = 4321 */
	ldrb	r1, [r1]		/* r1 = ...0 */
	mov	r2, r2, lsr #24		/* r2 = ...9 */
	orr	r2, r2, r3, lsl #8	/* r2 = 6789 */
	mov	r3, r3, lsr #24		/* r3 = ...5 */
	orr	r3, r3, ip, lsl #8	/* r3 = 2345 */
	mov	r1, r1, lsl #8		/* r1 = ..0. */
	orr	r1, r1, ip, lsr #24	/* r1 = ..01 */
	mov	r2, r2, lsl #24		/* r2 = 9... */
	orr	r2, r2, r3, lsr #8	/* r2 = 9876 */
	mov	r3, r3, lsl #24		/* r3 = 5... */
	orr	r3, r3, ip, lsr #8	/* r3 = 5432 */
	orr	r1, r1, ip, lsl #8	/* r1 = 3210 */
 * 1100: dst is 8-bit aligned (byte 3), src is 32-bit aligned
	ldr	r2, [r1]		/* BE:r2 = 0123 LE:r2 = 3210 */
	ldr	ip, [r1, #0x04]		/* BE:ip = 4567 LE:ip = 7654 */
	ldr	r1, [r1, #0x08]		/* BE:r1 = 89AB LE:r1 = BA98 */
	mov	r3, r2, lsr #24		/* r3 = ...0 */
	mov	r2, r2, lsl #8		/* r2 = 123. */
	orr	r2, r2, ip, lsr #24	/* r2 = 1234 */
	mov	r2, ip, lsl #8		/* r2 = 567. */
	orr	r2, r2, r1, lsr #24	/* r2 = 5678 */
	mov	r2, r1, lsr #8		/* r2 = ..9A */
	strh	r2, [r0, #0x09]
	strb	r1, [r0, #0x0b]
	mov	r3, r2, lsr #8		/* r3 = .321 */
	orr	r3, r3, ip, lsl #24	/* r3 = 4321 */
	mov	r3, ip, lsr #8		/* r3 = .765 */
	orr	r3, r3, r1, lsl #24	/* r3 = 8765 */
	mov	r1, r1, lsr #8		/* r1 = .BA9 */
	strh	r1, [r0, #0x09]
	mov	r1, r1, lsr #16		/* r1 = ...B */
	strb	r1, [r0, #0x0b]
 * 1101: dst is 8-bit aligned (byte 3), src is 8-bit aligned (byte 1)
	ldrb	r2, [r1, #0x0b]		/* r2 = ...B */
	ldr	r3, [r1, #0x07]		/* BE:r3 = 789A LE:r3 = A987 */
	ldr	ip, [r1, #0x03]		/* BE:ip = 3456 LE:ip = 6543 */
	ldr	r1, [r1, #-1]		/* BE:r1 = x012 LE:r1 = 210x */
	strb	r2, [r0, #0x0b]
	strh	r3, [r0, #0x09]
	mov	r3, r3, lsr #16		/* r3 = ..78 */
	orr	r3, r3, ip, lsl #16	/* r3 = 5678 */
	mov	ip, ip, lsr #16		/* ip = ..34 */
	orr	ip, ip, r1, lsl #16	/* ip = 1234 */
	mov	r1, r1, lsr #16		/* r1 = ..x0 */
	mov	r2, r3, lsr #16		/* r2 = ..A9 */
	strh	r2, [r0, #0x09]
	mov	r3, r3, lsl #16		/* r3 = 87.. */
	orr	r3, r3, ip, lsr #16	/* r3 = 8765 */
	mov	ip, ip, lsl #16		/* ip = 43.. */
	orr	ip, ip, r1, lsr #16	/* ip = 4321 */
	mov	r1, r1, lsr #8		/* r1 = .210 */
 * 1110: dst is 8-bit aligned (byte 3), src is 16-bit aligned
	ldrh	r2, [r1, #0x0a]		/* r2 = ..AB */
	ldr	ip, [r1, #0x06]		/* ip = 6789 */
	ldr	r3, [r1, #0x02]		/* r3 = 2345 */
	ldrh	r1, [r1]		/* r1 = ..01 */
	strb	r2, [r0, #0x0b]
	mov	r2, r2, lsr #8		/* r2 = ...A */
	orr	r2, r2, ip, lsl #8	/* r2 = 789A */
	mov	ip, ip, lsr #8		/* ip = .678 */
	orr	ip, ip, r3, lsl #24	/* ip = 5678 */
	mov	r3, r3, lsr #8		/* r3 = .234 */
	orr	r3, r3, r1, lsl #24	/* r3 = 1234 */
	mov	r1, r1, lsr #8		/* r1 = ...0 */
	strh	r2, [r0, #0x09]
	ldrh	r2, [r1]		/* r2 = ..10 */
	ldr	r3, [r1, #0x02]		/* r3 = 5432 */
	ldr	ip, [r1, #0x06]		/* ip = 9876 */
	ldrh	r1, [r1, #0x0a]		/* r1 = ..BA */
	mov	r2, r2, lsr #8		/* r2 = ...1 */
	orr	r2, r2, r3, lsl #8	/* r2 = 4321 */
	mov	r3, r3, lsr #24		/* r3 = ...5 */
	orr	r3, r3, ip, lsl #8	/* r3 = 8765 */
	mov	ip, ip, lsr #24		/* ip = ...9 */
	orr	ip, ip, r1, lsl #8	/* ip = .BA9 */
	mov	r1, r1, lsr #8		/* r1 = ...B */
	strh	ip, [r0, #0x09]
	strb	r1, [r0, #0x0b]
 * 1111: dst is 8-bit aligned (byte 3), src is 8-bit aligned (byte 3)
	ldrh	r2, [r1, #0x09]
	ldrb	r1, [r1, #0x0b]
	strh	r2, [r0, #0x09]
	strb	r1, [r0, #0x0b]
#endif /* __XSCALE__ */