1 /* $NetBSD: memcpy_xscale.S,v 1.1 2003/10/14 07:51:45 scw Exp $ */
4 * Copyright 2003 Wasabi Systems, Inc.
7 * Written by Steve C. Woodford for Wasabi Systems, Inc.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
38 #include <machine/asm.h>
39 __FBSDID("$FreeBSD$");
43 /* LINTSTUB: Func: void *memcpy(void *dst, const void *src, size_t len) */
47 ble .Lmemcpy_short /* <= 12 bytes */
48 mov r3, r0 /* We must not clobber r0 */
50 /* Word-align the destination buffer */
51 ands ip, r3, #0x03 /* Already word aligned? */
52 beq .Lmemcpy_wordaligned /* Yup */
57 ldrble ip, [r1], #0x01
59 strble ip, [r3], #0x01
60 ldrblt ip, [r1], #0x01
62 strblt ip, [r3], #0x01
64 /* Destination buffer is now word aligned */
66 ands ip, r1, #0x03 /* Is src also word-aligned? */
67 bne .Lmemcpy_bad_align /* Nope. Things just got bad */
69 /* Quad-align the destination buffer */
70 tst r3, #0x07 /* Already quad aligned? */
72 stmfd sp!, {r4-r9} /* Free up some registers */
76 /* Destination buffer quad aligned, source is at least word aligned */
78 blt .Lmemcpy_w_lessthan128
80 /* Copy 128 bytes at a time */
82 ldr r4, [r1], #0x04 /* LD:00-03 */
83 ldr r5, [r1], #0x04 /* LD:04-07 */
84 pld [r1, #0x18] /* Prefetch 0x20 */
85 ldr r6, [r1], #0x04 /* LD:08-0b */
86 ldr r7, [r1], #0x04 /* LD:0c-0f */
87 ldr r8, [r1], #0x04 /* LD:10-13 */
88 ldr r9, [r1], #0x04 /* LD:14-17 */
89 strd r4, [r3], #0x08 /* ST:00-07 */
90 ldr r4, [r1], #0x04 /* LD:18-1b */
91 ldr r5, [r1], #0x04 /* LD:1c-1f */
92 strd r6, [r3], #0x08 /* ST:08-0f */
93 ldr r6, [r1], #0x04 /* LD:20-23 */
94 ldr r7, [r1], #0x04 /* LD:24-27 */
95 pld [r1, #0x18] /* Prefetch 0x40 */
96 strd r8, [r3], #0x08 /* ST:10-17 */
97 ldr r8, [r1], #0x04 /* LD:28-2b */
98 ldr r9, [r1], #0x04 /* LD:2c-2f */
99 strd r4, [r3], #0x08 /* ST:18-1f */
100 ldr r4, [r1], #0x04 /* LD:30-33 */
101 ldr r5, [r1], #0x04 /* LD:34-37 */
102 strd r6, [r3], #0x08 /* ST:20-27 */
103 ldr r6, [r1], #0x04 /* LD:38-3b */
104 ldr r7, [r1], #0x04 /* LD:3c-3f */
105 strd r8, [r3], #0x08 /* ST:28-2f */
106 ldr r8, [r1], #0x04 /* LD:40-43 */
107 ldr r9, [r1], #0x04 /* LD:44-47 */
108 pld [r1, #0x18] /* Prefetch 0x60 */
109 strd r4, [r3], #0x08 /* ST:30-37 */
110 ldr r4, [r1], #0x04 /* LD:48-4b */
111 ldr r5, [r1], #0x04 /* LD:4c-4f */
112 strd r6, [r3], #0x08 /* ST:38-3f */
113 ldr r6, [r1], #0x04 /* LD:50-53 */
114 ldr r7, [r1], #0x04 /* LD:54-57 */
115 strd r8, [r3], #0x08 /* ST:40-47 */
116 ldr r8, [r1], #0x04 /* LD:58-5b */
117 ldr r9, [r1], #0x04 /* LD:5c-5f */
118 strd r4, [r3], #0x08 /* ST:48-4f */
119 ldr r4, [r1], #0x04 /* LD:60-63 */
120 ldr r5, [r1], #0x04 /* LD:64-67 */
121 pld [r1, #0x18] /* Prefetch 0x80 */
122 strd r6, [r3], #0x08 /* ST:50-57 */
123 ldr r6, [r1], #0x04 /* LD:68-6b */
124 ldr r7, [r1], #0x04 /* LD:6c-6f */
125 strd r8, [r3], #0x08 /* ST:58-5f */
126 ldr r8, [r1], #0x04 /* LD:70-73 */
127 ldr r9, [r1], #0x04 /* LD:74-77 */
128 strd r4, [r3], #0x08 /* ST:60-67 */
129 ldr r4, [r1], #0x04 /* LD:78-7b */
130 ldr r5, [r1], #0x04 /* LD:7c-7f */
131 strd r6, [r3], #0x08 /* ST:68-6f */
132 strd r8, [r3], #0x08 /* ST:70-77 */
134 strd r4, [r3], #0x08 /* ST:78-7f */
135 bge .Lmemcpy_w_loop128
137 .Lmemcpy_w_lessthan128:
138 adds r2, r2, #0x80 /* Adjust for extra sub */
140 bxeq lr /* Return now if done */
142 blt .Lmemcpy_w_lessthan32
144 /* Copy 32 bytes at a time */
160 bge .Lmemcpy_w_loop32
162 .Lmemcpy_w_lessthan32:
163 adds r2, r2, #0x20 /* Adjust for extra sub */
165 bxeq lr /* Return now if done */
169 addne pc, pc, r4, lsl #1
172 /* At least 24 bytes remaining */
178 /* At least 16 bytes remaining */
184 /* At least 8 bytes remaining */
190 /* Less than 8 bytes remaining */
192 bxeq lr /* Return now if done */
194 ldrge ip, [r1], #0x04
195 strge ip, [r3], #0x04
196 bxeq lr /* Return now if done */
200 ldrbge r2, [r1], #0x01
203 strbge r2, [r3], #0x01
209 * At this point, it has not been possible to word align both buffers.
210 * The destination buffer is word aligned, but the source buffer is not.
221 .Lmemcpy_bad1_loop16:
233 orr r4, r4, r5, lsr #24
235 orr r5, r5, r6, lsr #24
237 orr r6, r6, r7, lsr #24
239 orr r7, r7, ip, lsr #24
241 orr r4, r4, r5, lsl #24
243 orr r5, r5, r6, lsl #24
245 orr r6, r6, r7, lsl #24
247 orr r7, r7, ip, lsl #24
255 bge .Lmemcpy_bad1_loop16
259 bxeq lr /* Return now if done */
262 blt .Lmemcpy_bad_done
273 orr r4, r4, ip, lsr #24
275 orr r4, r4, ip, lsl #24
278 bge .Lmemcpy_bad1_loop4
282 .Lmemcpy_bad2_loop16:
294 orr r4, r4, r5, lsr #16
296 orr r5, r5, r6, lsr #16
298 orr r6, r6, r7, lsr #16
300 orr r7, r7, ip, lsr #16
302 orr r4, r4, r5, lsl #16
304 orr r5, r5, r6, lsl #16
306 orr r6, r6, r7, lsl #16
308 orr r7, r7, ip, lsl #16
316 bge .Lmemcpy_bad2_loop16
320 bxeq lr /* Return now if done */
323 blt .Lmemcpy_bad_done
334 orr r4, r4, ip, lsr #16
336 orr r4, r4, ip, lsl #16
339 bge .Lmemcpy_bad2_loop4
343 .Lmemcpy_bad3_loop16:
355 orr r4, r4, r5, lsr #8
357 orr r5, r5, r6, lsr #8
359 orr r6, r6, r7, lsr #8
361 orr r7, r7, ip, lsr #8
363 orr r4, r4, r5, lsl #8
365 orr r5, r5, r6, lsl #8
367 orr r6, r6, r7, lsl #8
369 orr r7, r7, ip, lsl #8
377 bge .Lmemcpy_bad3_loop16
381 bxeq lr /* Return now if done */
384 blt .Lmemcpy_bad_done
395 orr r4, r4, ip, lsr #8
397 orr r4, r4, ip, lsl #8
400 bge .Lmemcpy_bad3_loop4
409 ldrbge r2, [r1], #0x01
412 strbge r2, [r3], #0x01
418 * Handle short copies (less than 16 bytes), possibly misaligned.
419 * Some of these are *very* common, thanks to the network stack,
420 * and so are handled specially.
424 add pc, pc, r2, lsl #2
427 b .Lmemcpy_bytewise /* 0x01 */
428 b .Lmemcpy_bytewise /* 0x02 */
429 b .Lmemcpy_bytewise /* 0x03 */
430 b .Lmemcpy_4 /* 0x04 */
431 b .Lmemcpy_bytewise /* 0x05 */
432 b .Lmemcpy_6 /* 0x06 */
433 b .Lmemcpy_bytewise /* 0x07 */
434 b .Lmemcpy_8 /* 0x08 */
435 b .Lmemcpy_bytewise /* 0x09 */
436 b .Lmemcpy_bytewise /* 0x0a */
437 b .Lmemcpy_bytewise /* 0x0b */
438 b .Lmemcpy_c /* 0x0c */
441 mov r3, r0 /* We must not clobber r0 */
443 1: subs r2, r2, #0x01
445 ldrbne ip, [r1], #0x01
450 /******************************************************************************
451 * Special case for 4 byte copies
453 #define LMEMCPY_4_LOG2 6 /* 64 bytes */
454 #define LMEMCPY_4_PAD .align LMEMCPY_4_LOG2
458 orr r2, r2, r0, lsl #2
461 addne pc, r3, r2, lsl #LMEMCPY_4_LOG2
464 * 0000: dst is 32-bit aligned, src is 32-bit aligned
472 * 0001: dst is 32-bit aligned, src is 8-bit aligned
474 ldr r3, [r1, #-1] /* BE:r3 = x012 LE:r3 = 210x */
475 ldr r2, [r1, #3] /* BE:r2 = 3xxx LE:r2 = xxx3 */
477 mov r3, r3, lsl #8 /* r3 = 012. */
478 orr r3, r3, r2, lsr #24 /* r3 = 0123 */
480 mov r3, r3, lsr #8 /* r3 = .210 */
481 orr r3, r3, r2, lsl #24 /* r3 = 3210 */
488 * 0010: dst is 32-bit aligned, src is 16-bit aligned
497 orr r3, r2, r3, lsl #16
503 * 0011: dst is 32-bit aligned, src is 8-bit aligned
505 ldr r3, [r1, #-3] /* BE:r3 = xxx0 LE:r3 = 0xxx */
506 ldr r2, [r1, #1] /* BE:r2 = 123x LE:r2 = x321 */
508 mov r3, r3, lsl #24 /* r3 = 0... */
509 orr r3, r3, r2, lsr #8 /* r3 = 0123 */
511 mov r3, r3, lsr #24 /* r3 = ...0 */
512 orr r3, r3, r2, lsl #8 /* r3 = 3210 */
519 * 0100: dst is 8-bit aligned, src is 32-bit aligned
538 * 0101: dst is 8-bit aligned, src is 8-bit aligned
550 * 0110: dst is 8-bit aligned, src is 16-bit aligned
552 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
553 ldrh r3, [r1, #0x02] /* BE:r3 = ..23 LE:r3 = ..32 */
555 mov r1, r2, lsr #8 /* r1 = ...0 */
557 mov r2, r2, lsl #8 /* r2 = .01. */
558 orr r2, r2, r3, lsr #8 /* r2 = .012 */
561 mov r2, r2, lsr #8 /* r2 = ...1 */
562 orr r2, r2, r3, lsl #8 /* r2 = .321 */
563 mov r3, r3, lsr #8 /* r3 = ...3 */
571 * 0111: dst is 8-bit aligned, src is 8-bit aligned
583 * 1000: dst is 16-bit aligned, src is 32-bit aligned
599 * 1001: dst is 16-bit aligned, src is 8-bit aligned
601 ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
602 ldr r3, [r1, #3] /* BE:r3 = 3xxx LE:r3 = xxx3 */
603 mov r1, r2, lsr #8 /* BE:r1 = .x01 LE:r1 = .210 */
606 mov r2, r2, lsl #8 /* r2 = 012. */
607 orr r2, r2, r3, lsr #24 /* r2 = 0123 */
609 mov r2, r2, lsr #24 /* r2 = ...2 */
610 orr r2, r2, r3, lsl #8 /* r2 = xx32 */
617 * 1010: dst is 16-bit aligned, src is 16-bit aligned
627 * 1011: dst is 16-bit aligned, src is 8-bit aligned
629 ldr r3, [r1, #1] /* BE:r3 = 123x LE:r3 = x321 */
630 ldr r2, [r1, #-3] /* BE:r2 = xxx0 LE:r2 = 0xxx */
631 mov r1, r3, lsr #8 /* BE:r1 = .123 LE:r1 = .x32 */
634 mov r3, r3, lsr #24 /* r3 = ...1 */
635 orr r3, r3, r2, lsl #8 /* r3 = xx01 */
637 mov r3, r3, lsl #8 /* r3 = 321. */
638 orr r3, r3, r2, lsr #24 /* r3 = 3210 */
645 * 1100: dst is 8-bit aligned, src is 32-bit aligned
647 ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
665 * 1101: dst is 8-bit aligned, src is 8-bit aligned
677 * 1110: dst is 8-bit aligned, src is 16-bit aligned
680 ldrh r3, [r1, #0x02] /* BE:r3 = ..23 LE:r3 = ..32 */
681 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
683 mov r3, r3, lsr #8 /* r3 = ...2 */
684 orr r3, r3, r2, lsl #8 /* r3 = ..12 */
686 mov r2, r2, lsr #8 /* r2 = ...0 */
689 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
690 ldrh r3, [r1, #0x02] /* BE:r3 = ..23 LE:r3 = ..32 */
692 mov r2, r2, lsr #8 /* r2 = ...1 */
693 orr r2, r2, r3, lsl #8 /* r2 = .321 */
695 mov r3, r3, lsr #8 /* r3 = ...3 */
702 * 1111: dst is 8-bit aligned, src is 8-bit aligned
714 /******************************************************************************
715 * Special case for 6 byte copies
717 #define LMEMCPY_6_LOG2 6 /* 64 bytes */
718 #define LMEMCPY_6_PAD .align LMEMCPY_6_LOG2
722 orr r2, r2, r0, lsl #2
725 addne pc, r3, r2, lsl #LMEMCPY_6_LOG2
728 * 0000: dst is 32-bit aligned, src is 32-bit aligned
738 * 0001: dst is 32-bit aligned, src is 8-bit aligned
740 ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
741 ldr r3, [r1, #0x03] /* BE:r3 = 345x LE:r3 = x543 */
743 mov r2, r2, lsl #8 /* r2 = 012. */
744 orr r2, r2, r3, lsr #24 /* r2 = 0123 */
746 mov r2, r2, lsr #8 /* r2 = .210 */
747 orr r2, r2, r3, lsl #24 /* r2 = 3210 */
749 mov r3, r3, lsr #8 /* BE:r3 = .345 LE:r3 = .x54 */
756 * 0010: dst is 32-bit aligned, src is 16-bit aligned
758 ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
759 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
761 mov r1, r3, lsr #16 /* r1 = ..23 */
762 orr r1, r1, r2, lsl #16 /* r1 = 0123 */
766 mov r1, r3, lsr #16 /* r1 = ..54 */
767 orr r2, r2, r3, lsl #16 /* r2 = 3210 */
775 * 0011: dst is 32-bit aligned, src is 8-bit aligned
777 ldr r2, [r1, #-3] /* BE:r2 = xxx0 LE:r2 = 0xxx */
778 ldr r3, [r1, #1] /* BE:r3 = 1234 LE:r3 = 4321 */
779 ldr r1, [r1, #5] /* BE:r1 = 5xxx LE:r1 = xxx5 */
781 mov r2, r2, lsl #24 /* r2 = 0... */
782 orr r2, r2, r3, lsr #8 /* r2 = 0123 */
783 mov r3, r3, lsl #8 /* r3 = 234. */
784 orr r1, r3, r1, lsr #24 /* r1 = 2345 */
786 mov r2, r2, lsr #24 /* r2 = ...0 */
787 orr r2, r2, r3, lsl #8 /* r2 = 3210 */
788 mov r1, r1, lsl #8 /* r1 = xx5. */
789 orr r1, r1, r3, lsr #24 /* r1 = xx54 */
797 * 0100: dst is 8-bit aligned, src is 32-bit aligned
799 ldr r3, [r1] /* BE:r3 = 0123 LE:r3 = 3210 */
800 ldrh r2, [r1, #0x04] /* BE:r2 = ..45 LE:r2 = ..54 */
801 mov r1, r3, lsr #8 /* BE:r1 = .012 LE:r1 = .321 */
804 mov r1, r3, lsr #24 /* r1 = ...0 */
806 mov r3, r3, lsl #8 /* r3 = 123. */
807 orr r3, r3, r2, lsr #8 /* r3 = 1234 */
810 mov r3, r3, lsr #24 /* r3 = ...3 */
811 orr r3, r3, r2, lsl #8 /* r3 = .543 */
812 mov r2, r2, lsr #8 /* r2 = ...5 */
820 * 0101: dst is 8-bit aligned, src is 8-bit aligned
834 * 0110: dst is 8-bit aligned, src is 16-bit aligned
836 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
837 ldr r1, [r1, #0x02] /* BE:r1 = 2345 LE:r1 = 5432 */
839 mov r3, r2, lsr #8 /* r3 = ...0 */
842 mov r3, r1, lsr #8 /* r3 = .234 */
844 mov r3, r2, lsl #8 /* r3 = .01. */
845 orr r3, r3, r1, lsr #24 /* r3 = .012 */
851 mov r3, r1, lsr #8 /* r3 = .543 */
853 mov r3, r2, lsr #8 /* r3 = ...1 */
854 orr r3, r3, r1, lsl #8 /* r3 = 4321 */
861 * 0111: dst is 8-bit aligned, src is 8-bit aligned
875 * 1000: dst is 16-bit aligned, src is 32-bit aligned
878 ldr r2, [r1] /* r2 = 0123 */
879 ldrh r3, [r1, #0x04] /* r3 = ..45 */
880 mov r1, r2, lsr #16 /* r1 = ..01 */
881 orr r3, r3, r2, lsl#16 /* r3 = 2345 */
885 ldrh r2, [r1, #0x04] /* r2 = ..54 */
886 ldr r3, [r1] /* r3 = 3210 */
887 mov r2, r2, lsl #16 /* r2 = 54.. */
888 orr r2, r2, r3, lsr #16 /* r2 = 5432 */
896 * 1001: dst is 16-bit aligned, src is 8-bit aligned
898 ldr r3, [r1, #-1] /* BE:r3 = x012 LE:r3 = 210x */
899 ldr r2, [r1, #3] /* BE:r2 = 345x LE:r2 = x543 */
900 mov r1, r3, lsr #8 /* BE:r1 = .x01 LE:r1 = .210 */
902 mov r2, r2, lsr #8 /* r2 = .345 */
903 orr r2, r2, r3, lsl #24 /* r2 = 2345 */
905 mov r2, r2, lsl #8 /* r2 = 543. */
906 orr r2, r2, r3, lsr #24 /* r2 = 5432 */
914 * 1010: dst is 16-bit aligned, src is 16-bit aligned
924 * 1011: dst is 16-bit aligned, src is 8-bit aligned
926 ldrb r3, [r1] /* r3 = ...0 */
927 ldr r2, [r1, #0x01] /* BE:r2 = 1234 LE:r2 = 4321 */
928 ldrb r1, [r1, #0x05] /* r1 = ...5 */
930 mov r3, r3, lsl #8 /* r3 = ..0. */
931 orr r3, r3, r2, lsr #24 /* r3 = ..01 */
932 orr r1, r1, r2, lsl #8 /* r1 = 2345 */
934 orr r3, r3, r2, lsl #8 /* r3 = 3210 */
935 mov r1, r1, lsl #24 /* r1 = 5... */
936 orr r1, r1, r2, lsr #8 /* r1 = 5432 */
944 * 1100: dst is 8-bit aligned, src is 32-bit aligned
946 ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
947 ldrh r1, [r1, #0x04] /* BE:r1 = ..45 LE:r1 = ..54 */
949 mov r3, r2, lsr #24 /* r3 = ...0 */
951 mov r2, r2, lsl #8 /* r2 = 123. */
952 orr r2, r2, r1, lsr #8 /* r2 = 1234 */
955 mov r2, r2, lsr #8 /* r2 = .321 */
956 orr r2, r2, r1, lsl #24 /* r2 = 4321 */
957 mov r1, r1, lsr #8 /* r1 = ...5 */
965 * 1101: dst is 8-bit aligned, src is 8-bit aligned
979 * 1110: dst is 8-bit aligned, src is 16-bit aligned
981 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
982 ldr r1, [r1, #0x02] /* BE:r1 = 2345 LE:r1 = 5432 */
984 mov r3, r2, lsr #8 /* r3 = ...0 */
986 mov r2, r2, lsl #24 /* r2 = 1... */
987 orr r2, r2, r1, lsr #8 /* r2 = 1234 */
990 mov r2, r2, lsr #8 /* r2 = ...1 */
991 orr r2, r2, r1, lsl #8 /* r2 = 4321 */
992 mov r1, r1, lsr #24 /* r1 = ...5 */
1000 * 1111: dst is 8-bit aligned, src is 8-bit aligned
1004 ldrb r1, [r1, #0x05]
1007 strb r1, [r0, #0x05]
1012 /******************************************************************************
1013 * Special case for 8 byte copies
1015 #define LMEMCPY_8_LOG2 6 /* 64 bytes */
1016 #define LMEMCPY_8_PAD .align LMEMCPY_8_LOG2
1020 orr r2, r2, r0, lsl #2
1023 addne pc, r3, r2, lsl #LMEMCPY_8_LOG2
1026 * 0000: dst is 32-bit aligned, src is 32-bit aligned
1036 * 0001: dst is 32-bit aligned, src is 8-bit aligned
1038 ldr r3, [r1, #-1] /* BE:r3 = x012 LE:r3 = 210x */
1039 ldr r2, [r1, #0x03] /* BE:r2 = 3456 LE:r2 = 6543 */
1040 ldrb r1, [r1, #0x07] /* r1 = ...7 */
1042 mov r3, r3, lsl #8 /* r3 = 012. */
1043 orr r3, r3, r2, lsr #24 /* r3 = 0123 */
1044 orr r2, r1, r2, lsl #8 /* r2 = 4567 */
1046 mov r3, r3, lsr #8 /* r3 = .210 */
1047 orr r3, r3, r2, lsl #24 /* r3 = 3210 */
1048 mov r1, r1, lsl #24 /* r1 = 7... */
1049 orr r2, r1, r2, lsr #8 /* r2 = 7654 */
1057 * 0010: dst is 32-bit aligned, src is 16-bit aligned
1059 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
1060 ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
1061 ldrh r1, [r1, #0x06] /* BE:r1 = ..67 LE:r1 = ..76 */
1063 mov r2, r2, lsl #16 /* r2 = 01.. */
1064 orr r2, r2, r3, lsr #16 /* r2 = 0123 */
1065 orr r3, r1, r3, lsl #16 /* r3 = 4567 */
1067 orr r2, r2, r3, lsl #16 /* r2 = 3210 */
1068 mov r3, r3, lsr #16 /* r3 = ..54 */
1069 orr r3, r3, r1, lsl #16 /* r3 = 7654 */
1077 * 0011: dst is 32-bit aligned, src is 8-bit aligned
1079 ldrb r3, [r1] /* r3 = ...0 */
1080 ldr r2, [r1, #0x01] /* BE:r2 = 1234 LE:r2 = 4321 */
1081 ldr r1, [r1, #0x05] /* BE:r1 = 567x LE:r1 = x765 */
1083 mov r3, r3, lsl #24 /* r3 = 0... */
1084 orr r3, r3, r2, lsr #8 /* r3 = 0123 */
1085 mov r2, r2, lsl #24 /* r2 = 4... */
1086 orr r2, r2, r1, lsr #8 /* r2 = 4567 */
1088 orr r3, r3, r2, lsl #8 /* r3 = 3210 */
1089 mov r2, r2, lsr #24 /* r2 = ...4 */
1090 orr r2, r2, r1, lsl #8 /* r2 = 7654 */
1098 * 0100: dst is 8-bit aligned, src is 32-bit aligned
1100 ldr r3, [r1] /* BE:r3 = 0123 LE:r3 = 3210 */
1101 ldr r2, [r1, #0x04] /* BE:r2 = 4567 LE:r2 = 7654 */
1103 mov r1, r3, lsr #24 /* r1 = ...0 */
1105 mov r1, r3, lsr #8 /* r1 = .012 */
1106 strb r2, [r0, #0x07]
1107 mov r3, r3, lsl #24 /* r3 = 3... */
1108 orr r3, r3, r2, lsr #8 /* r3 = 3456 */
1111 mov r1, r2, lsr #24 /* r1 = ...7 */
1112 strb r1, [r0, #0x07]
1113 mov r1, r3, lsr #8 /* r1 = .321 */
1114 mov r3, r3, lsr #24 /* r3 = ...3 */
1115 orr r3, r3, r2, lsl #8 /* r3 = 6543 */
1117 strh r1, [r0, #0x01]
1123 * 0101: dst is 8-bit aligned, src is 8-bit aligned
1126 ldrh r3, [r1, #0x01]
1128 ldrb r1, [r1, #0x07]
1130 strh r3, [r0, #0x01]
1132 strb r1, [r0, #0x07]
1137 * 0110: dst is 8-bit aligned, src is 16-bit aligned
1139 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
1140 ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
1141 ldrh r1, [r1, #0x06] /* BE:r1 = ..67 LE:r1 = ..76 */
1143 mov ip, r2, lsr #8 /* ip = ...0 */
1145 mov ip, r2, lsl #8 /* ip = .01. */
1146 orr ip, ip, r3, lsr #24 /* ip = .012 */
1147 strb r1, [r0, #0x07]
1148 mov r3, r3, lsl #8 /* r3 = 345. */
1149 orr r3, r3, r1, lsr #8 /* r3 = 3456 */
1151 strb r2, [r0] /* 0 */
1152 mov ip, r1, lsr #8 /* ip = ...7 */
1153 strb ip, [r0, #0x07] /* 7 */
1154 mov ip, r2, lsr #8 /* ip = ...1 */
1155 orr ip, ip, r3, lsl #8 /* ip = 4321 */
1156 mov r3, r3, lsr #8 /* r3 = .543 */
1157 orr r3, r3, r1, lsl #24 /* r3 = 6543 */
1159 strh ip, [r0, #0x01]
1165 * 0111: dst is 8-bit aligned, src is 8-bit aligned
1167 ldrb r3, [r1] /* r3 = ...0 */
1168 ldr ip, [r1, #0x01] /* BE:ip = 1234 LE:ip = 4321 */
1169 ldrh r2, [r1, #0x05] /* BE:r2 = ..56 LE:r2 = ..65 */
1170 ldrb r1, [r1, #0x07] /* r1 = ...7 */
1172 mov r3, ip, lsr #16 /* BE:r3 = ..12 LE:r3 = ..43 */
1174 strh r3, [r0, #0x01]
1175 orr r2, r2, ip, lsl #16 /* r2 = 3456 */
1177 strh ip, [r0, #0x01]
1178 orr r2, r3, r2, lsl #16 /* r2 = 6543 */
1181 strb r1, [r0, #0x07]
1186 * 1000: dst is 16-bit aligned, src is 32-bit aligned
1188 ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
1189 ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */
1190 mov r1, r2, lsr #16 /* BE:r1 = ..01 LE:r1 = ..32 */
1193 mov r1, r3, lsr #16 /* r1 = ..45 */
1194 orr r2, r1 ,r2, lsl #16 /* r2 = 2345 */
1197 orr r2, r1, r3, lsl #16 /* r2 = 5432 */
1198 mov r3, r3, lsr #16 /* r3 = ..76 */
1201 strh r3, [r0, #0x06]
1206 * 1001: dst is 16-bit aligned, src is 8-bit aligned
1208 ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
1209 ldr r3, [r1, #0x03] /* BE:r3 = 3456 LE:r3 = 6543 */
1210 ldrb ip, [r1, #0x07] /* ip = ...7 */
1211 mov r1, r2, lsr #8 /* BE:r1 = .x01 LE:r1 = .210 */
1214 mov r1, r2, lsl #24 /* r1 = 2... */
1215 orr r1, r1, r3, lsr #8 /* r1 = 2345 */
1216 orr r3, ip, r3, lsl #8 /* r3 = 4567 */
1218 mov r1, r2, lsr #24 /* r1 = ...2 */
1219 orr r1, r1, r3, lsl #8 /* r1 = 5432 */
1220 mov r3, r3, lsr #24 /* r3 = ...6 */
1221 orr r3, r3, ip, lsl #8 /* r3 = ..76 */
1224 strh r3, [r0, #0x06]
1229 * 1010: dst is 16-bit aligned, src is 16-bit aligned
1233 ldrh r3, [r1, #0x06]
1236 strh r3, [r0, #0x06]
1241 * 1011: dst is 16-bit aligned, src is 8-bit aligned
1243 ldr r3, [r1, #0x05] /* BE:r3 = 567x LE:r3 = x765 */
1244 ldr r2, [r1, #0x01] /* BE:r2 = 1234 LE:r2 = 4321 */
1245 ldrb ip, [r1] /* ip = ...0 */
1246 mov r1, r3, lsr #8 /* BE:r1 = .567 LE:r1 = .x76 */
1247 strh r1, [r0, #0x06]
1249 mov r3, r3, lsr #24 /* r3 = ...5 */
1250 orr r3, r3, r2, lsl #8 /* r3 = 2345 */
1251 mov r2, r2, lsr #24 /* r2 = ...1 */
1252 orr r2, r2, ip, lsl #8 /* r2 = ..01 */
1254 mov r3, r3, lsl #24 /* r3 = 5... */
1255 orr r3, r3, r2, lsr #8 /* r3 = 5432 */
1256 orr r2, ip, r2, lsl #8 /* r2 = 3210 */
1264 * 1100: dst is 8-bit aligned, src is 32-bit aligned
1266 ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */
1267 ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
1268 mov r1, r3, lsr #8 /* BE:r1 = .456 LE:r1 = .765 */
1269 strh r1, [r0, #0x05]
1271 strb r3, [r0, #0x07]
1272 mov r1, r2, lsr #24 /* r1 = ...0 */
1274 mov r2, r2, lsl #8 /* r2 = 123. */
1275 orr r2, r2, r3, lsr #24 /* r2 = 1234 */
1279 mov r1, r3, lsr #24 /* r1 = ...7 */
1280 strb r1, [r0, #0x07]
1281 mov r2, r2, lsr #8 /* r2 = .321 */
1282 orr r2, r2, r3, lsl #24 /* r2 = 4321 */
1289 * 1101: dst is 8-bit aligned, src is 8-bit aligned
1291 ldrb r3, [r1] /* r3 = ...0 */
1292 ldrh r2, [r1, #0x01] /* BE:r2 = ..12 LE:r2 = ..21 */
1293 ldr ip, [r1, #0x03] /* BE:ip = 3456 LE:ip = 6543 */
1294 ldrb r1, [r1, #0x07] /* r1 = ...7 */
1296 mov r3, ip, lsr #16 /* BE:r3 = ..34 LE:r3 = ..65 */
1298 strh ip, [r0, #0x05]
1299 orr r2, r3, r2, lsl #16 /* r2 = 1234 */
1301 strh r3, [r0, #0x05]
1302 orr r2, r2, ip, lsl #16 /* r2 = 4321 */
1305 strb r1, [r0, #0x07]
1310 * 1110: dst is 8-bit aligned, src is 16-bit aligned
1312 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
1313 ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
1314 ldrh r1, [r1, #0x06] /* BE:r1 = ..67 LE:r1 = ..76 */
1316 mov ip, r2, lsr #8 /* ip = ...0 */
1318 mov ip, r2, lsl #24 /* ip = 1... */
1319 orr ip, ip, r3, lsr #8 /* ip = 1234 */
1320 strb r1, [r0, #0x07]
1321 mov r1, r1, lsr #8 /* r1 = ...6 */
1322 orr r1, r1, r3, lsl #8 /* r1 = 3456 */
1325 mov ip, r2, lsr #8 /* ip = ...1 */
1326 orr ip, ip, r3, lsl #8 /* ip = 4321 */
1327 mov r2, r1, lsr #8 /* r2 = ...7 */
1328 strb r2, [r0, #0x07]
1329 mov r1, r1, lsl #8 /* r1 = .76. */
1330 orr r1, r1, r3, lsr #24 /* r1 = .765 */
1333 strh r1, [r0, #0x05]
1338 * 1111: dst is 8-bit aligned, src is 8-bit aligned
1342 ldrh r3, [r1, #0x05]
1343 ldrb r1, [r1, #0x07]
1346 strh r3, [r0, #0x05]
1347 strb r1, [r0, #0x07]
1351 /******************************************************************************
1352 * Special case for 12 byte copies
1354 #define LMEMCPY_C_LOG2 7 /* 128 bytes */
1355 #define LMEMCPY_C_PAD .align LMEMCPY_C_LOG2
1359 orr r2, r2, r0, lsl #2
1362 addne pc, r3, r2, lsl #LMEMCPY_C_LOG2
1365 * 0000: dst is 32-bit aligned, src is 32-bit aligned
1377 * 0001: dst is 32-bit aligned, src is 8-bit aligned
1379 ldrb r2, [r1, #0xb] /* r2 = ...B */
1380 ldr ip, [r1, #0x07] /* BE:ip = 789A LE:ip = A987 */
1381 ldr r3, [r1, #0x03] /* BE:r3 = 3456 LE:r3 = 6543 */
1382 ldr r1, [r1, #-1] /* BE:r1 = x012 LE:r1 = 210x */
1384 orr r2, r2, ip, lsl #8 /* r2 = 89AB */
1386 mov r2, ip, lsr #24 /* r2 = ...7 */
1387 orr r2, r2, r3, lsl #8 /* r2 = 4567 */
1388 mov r1, r1, lsl #8 /* r1 = 012. */
1389 orr r1, r1, r3, lsr #24 /* r1 = 0123 */
1391 mov r2, r2, lsl #24 /* r2 = B... */
1392 orr r2, r2, ip, lsr #8 /* r2 = BA98 */
1394 mov r2, ip, lsl #24 /* r2 = 7... */
1395 orr r2, r2, r3, lsr #8 /* r2 = 7654 */
1396 mov r1, r1, lsr #8 /* r1 = .210 */
1397 orr r1, r1, r3, lsl #24 /* r1 = 3210 */
1405 * 0010: dst is 32-bit aligned, src is 16-bit aligned
1407 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
1408 ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
1409 ldr ip, [r1, #0x06] /* BE:ip = 6789 LE:ip = 9876 */
1410 ldrh r1, [r1, #0x0a] /* BE:r1 = ..AB LE:r1 = ..BA */
1412 mov r2, r2, lsl #16 /* r2 = 01.. */
1413 orr r2, r2, r3, lsr #16 /* r2 = 0123 */
1415 mov r3, r3, lsl #16 /* r3 = 45.. */
1416 orr r3, r3, ip, lsr #16 /* r3 = 4567 */
1417 orr r1, r1, ip, lsl #16 /* r1 = 89AB */
1419 orr r2, r2, r3, lsl #16 /* r2 = 3210 */
1421 mov r3, r3, lsr #16 /* r3 = ..54 */
1422 orr r3, r3, ip, lsl #16 /* r3 = 7654 */
1423 mov r1, r1, lsl #16 /* r1 = BA.. */
1424 orr r1, r1, ip, lsr #16 /* r1 = BA98 */
1432 * 0011: dst is 32-bit aligned, src is 8-bit aligned
1434 ldrb r2, [r1] /* r2 = ...0 */
1435 ldr r3, [r1, #0x01] /* BE:r3 = 1234 LE:r3 = 4321 */
1436 ldr ip, [r1, #0x05] /* BE:ip = 5678 LE:ip = 8765 */
1437 ldr r1, [r1, #0x09] /* BE:r1 = 9ABx LE:r1 = xBA9 */
1439 mov r2, r2, lsl #24 /* r2 = 0... */
1440 orr r2, r2, r3, lsr #8 /* r2 = 0123 */
1442 mov r3, r3, lsl #24 /* r3 = 4... */
1443 orr r3, r3, ip, lsr #8 /* r3 = 4567 */
1444 mov r1, r1, lsr #8 /* r1 = .9AB */
1445 orr r1, r1, ip, lsl #24 /* r1 = 89AB */
1447 orr r2, r2, r3, lsl #8 /* r2 = 3210 */
1449 mov r3, r3, lsr #24 /* r3 = ...4 */
1450 orr r3, r3, ip, lsl #8 /* r3 = 7654 */
1451 mov r1, r1, lsl #8 /* r1 = BA9. */
1452 orr r1, r1, ip, lsr #24 /* r1 = BA98 */
1460 * 0100: dst is 8-bit aligned (byte 1), src is 32-bit aligned
1462 ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
1463 ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */
1464 ldr ip, [r1, #0x08] /* BE:ip = 89AB LE:ip = BA98 */
1465 mov r1, r2, lsr #8 /* BE:r1 = .012 LE:r1 = .321 */
1466 strh r1, [r0, #0x01]
1468 mov r1, r2, lsr #24 /* r1 = ...0 */
1470 mov r1, r2, lsl #24 /* r1 = 3... */
1471 orr r2, r1, r3, lsr #8 /* r2 = 3456 */
1472 mov r1, r3, lsl #24 /* r1 = 7... */
1473 orr r1, r1, ip, lsr #8 /* r1 = 789A */
1476 mov r1, r2, lsr #24 /* r1 = ...3 */
1477 orr r2, r1, r3, lsl #8 /* r2 = 6543 */
1478 mov r1, r3, lsr #24 /* r1 = ...7 */
1479 orr r1, r1, ip, lsl #8 /* r1 = A987 */
1480 mov ip, ip, lsr #24 /* ip = ...B */
1484 strb ip, [r0, #0x0b]
1489 * 0101: dst is 8-bit aligned (byte 1), src is 8-bit aligned (byte 1)
1492 ldrh r3, [r1, #0x01]
1496 ldrb r1, [r1, #0x0b]
1497 strh r3, [r0, #0x01]
1500 strb r1, [r0, #0x0b]
1505 * 0110: dst is 8-bit aligned (byte 1), src is 16-bit aligned
1507 ldrh r2, [r1] /* BE:r2 = ..01 LE:r2 = ..10 */
1508 ldr r3, [r1, #0x02] /* BE:r3 = 2345 LE:r3 = 5432 */
1509 ldr ip, [r1, #0x06] /* BE:ip = 6789 LE:ip = 9876 */
1510 ldrh r1, [r1, #0x0a] /* BE:r1 = ..AB LE:r1 = ..BA */
1512 mov r2, r2, ror #8 /* r2 = 1..0 */
1514 mov r2, r2, lsr #16 /* r2 = ..1. */
1515 orr r2, r2, r3, lsr #24 /* r2 = ..12 */
1516 strh r2, [r0, #0x01]
1517 mov r2, r3, lsl #8 /* r2 = 345. */
1518 orr r3, r2, ip, lsr #24 /* r3 = 3456 */
1519 mov r2, ip, lsl #8 /* r2 = 789. */
1520 orr r2, r2, r1, lsr #8 /* r2 = 789A */
1523 mov r2, r2, lsr #8 /* r2 = ...1 */
1524 orr r2, r2, r3, lsl #8 /* r2 = 4321 */
1525 strh r2, [r0, #0x01]
1526 mov r2, r3, lsr #8 /* r2 = .543 */
1527 orr r3, r2, ip, lsl #24 /* r3 = 6543 */
1528 mov r2, ip, lsr #8 /* r2 = .987 */
1529 orr r2, r2, r1, lsl #24 /* r2 = A987 */
1530 mov r1, r1, lsr #8 /* r1 = ...B */
1534 strb r1, [r0, #0x0b]
1539 * 0111: dst is 8-bit aligned (byte 1), src is 8-bit aligned (byte 3)
1542 ldr r3, [r1, #0x01] /* BE:r3 = 1234 LE:r3 = 4321 */
1543 ldr ip, [r1, #0x05] /* BE:ip = 5678 LE:ip = 8765 */
1544 ldr r1, [r1, #0x09] /* BE:r1 = 9ABx LE:r1 = xBA9 */
1547 mov r2, r3, lsr #16 /* r2 = ..12 */
1548 strh r2, [r0, #0x01]
1549 mov r3, r3, lsl #16 /* r3 = 34.. */
1550 orr r3, r3, ip, lsr #16 /* r3 = 3456 */
1551 mov ip, ip, lsl #16 /* ip = 78.. */
1552 orr ip, ip, r1, lsr #16 /* ip = 789A */
1553 mov r1, r1, lsr #8 /* r1 = .9AB */
1555 strh r3, [r0, #0x01]
1556 mov r3, r3, lsr #16 /* r3 = ..43 */
1557 orr r3, r3, ip, lsl #16 /* r3 = 6543 */
1558 mov ip, ip, lsr #16 /* ip = ..87 */
1559 orr ip, ip, r1, lsl #16 /* ip = A987 */
1560 mov r1, r1, lsr #16 /* r1 = ..xB */
1564 strb r1, [r0, #0x0b]
1569 * 1000: dst is 16-bit aligned, src is 32-bit aligned
1571 ldr ip, [r1] /* BE:ip = 0123 LE:ip = 3210 */
1572 ldr r3, [r1, #0x04] /* BE:r3 = 4567 LE:r3 = 7654 */
1573 ldr r2, [r1, #0x08] /* BE:r2 = 89AB LE:r2 = BA98 */
1574 mov r1, ip, lsr #16 /* BE:r1 = ..01 LE:r1 = ..32 */
1577 mov r1, ip, lsl #16 /* r1 = 23.. */
1578 orr r1, r1, r3, lsr #16 /* r1 = 2345 */
1579 mov r3, r3, lsl #16 /* r3 = 67.. */
1580 orr r3, r3, r2, lsr #16 /* r3 = 6789 */
1583 orr r1, r1, r3, lsl #16 /* r1 = 5432 */
1584 mov r3, r3, lsr #16 /* r3 = ..76 */
1585 orr r3, r3, r2, lsl #16 /* r3 = 9876 */
1586 mov r2, r2, lsr #16 /* r2 = ..BA */
1590 strh r2, [r0, #0x0a]
1595 * 1001: dst is 16-bit aligned, src is 8-bit aligned (byte 1)
1597 ldr r2, [r1, #-1] /* BE:r2 = x012 LE:r2 = 210x */
1598 ldr r3, [r1, #0x03] /* BE:r3 = 3456 LE:r3 = 6543 */
1599 mov ip, r2, lsr #8 /* BE:ip = .x01 LE:ip = .210 */
1601 ldr ip, [r1, #0x07] /* BE:ip = 789A LE:ip = A987 */
1602 ldrb r1, [r1, #0x0b] /* r1 = ...B */
1604 mov r2, r2, lsl #24 /* r2 = 2... */
1605 orr r2, r2, r3, lsr #8 /* r2 = 2345 */
1606 mov r3, r3, lsl #24 /* r3 = 6... */
1607 orr r3, r3, ip, lsr #8 /* r3 = 6789 */
1608 orr r1, r1, ip, lsl #8 /* r1 = 89AB */
1610 mov r2, r2, lsr #24 /* r2 = ...2 */
1611 orr r2, r2, r3, lsl #8 /* r2 = 5432 */
1612 mov r3, r3, lsr #24 /* r3 = ...6 */
1613 orr r3, r3, ip, lsl #8 /* r3 = 9876 */
1614 mov r1, r1, lsl #8 /* r1 = ..B. */
1615 orr r1, r1, ip, lsr #24 /* r1 = ..BA */
1619 strh r1, [r0, #0x0a]
1624 * 1010: dst is 16-bit aligned, src is 16-bit aligned
1629 ldrh r1, [r1, #0x0a]
1633 strh r1, [r0, #0x0a]
1638 * 1011: dst is 16-bit aligned, src is 8-bit aligned (byte 3)
1640 ldr r2, [r1, #0x09] /* BE:r2 = 9ABx LE:r2 = xBA9 */
1641 ldr r3, [r1, #0x05] /* BE:r3 = 5678 LE:r3 = 8765 */
1642 mov ip, r2, lsr #8 /* BE:ip = .9AB LE:ip = .xBA */
1643 strh ip, [r0, #0x0a]
1644 ldr ip, [r1, #0x01] /* BE:ip = 1234 LE:ip = 4321 */
1645 ldrb r1, [r1] /* r1 = ...0 */
1647 mov r2, r2, lsr #24 /* r2 = ...9 */
1648 orr r2, r2, r3, lsl #8 /* r2 = 6789 */
1649 mov r3, r3, lsr #24 /* r3 = ...5 */
1650 orr r3, r3, ip, lsl #8 /* r3 = 2345 */
1651 mov r1, r1, lsl #8 /* r1 = ..0. */
1652 orr r1, r1, ip, lsr #24 /* r1 = ..01 */
1654 mov r2, r2, lsl #24 /* r2 = 9... */
1655 orr r2, r2, r3, lsr #8 /* r2 = 9876 */
1656 mov r3, r3, lsl #24 /* r3 = 5... */
1657 orr r3, r3, ip, lsr #8 /* r3 = 5432 */
1658 orr r1, r1, ip, lsl #8 /* r1 = 3210 */
1667 * 1100: dst is 8-bit aligned (byte 3), src is 32-bit aligned
1669 ldr r2, [r1] /* BE:r2 = 0123 LE:r2 = 3210 */
1670 ldr ip, [r1, #0x04] /* BE:ip = 4567 LE:ip = 7654 */
1671 ldr r1, [r1, #0x08] /* BE:r1 = 89AB LE:r1 = BA98 */
1673 mov r3, r2, lsr #24 /* r3 = ...0 */
1675 mov r2, r2, lsl #8 /* r2 = 123. */
1676 orr r2, r2, ip, lsr #24 /* r2 = 1234 */
1678 mov r2, ip, lsl #8 /* r2 = 567. */
1679 orr r2, r2, r1, lsr #24 /* r2 = 5678 */
1681 mov r2, r1, lsr #8 /* r2 = ..9A */
1682 strh r2, [r0, #0x09]
1683 strb r1, [r0, #0x0b]
1686 mov r3, r2, lsr #8 /* r3 = .321 */
1687 orr r3, r3, ip, lsl #24 /* r3 = 4321 */
1689 mov r3, ip, lsr #8 /* r3 = .765 */
1690 orr r3, r3, r1, lsl #24 /* r3 = 8765 */
1692 mov r1, r1, lsr #8 /* r1 = .BA9 */
1693 strh r1, [r0, #0x09]
1694 mov r1, r1, lsr #16 /* r1 = ...B */
1695 strb r1, [r0, #0x0b]
1701 * 1101: dst is 8-bit aligned (byte 3), src is 8-bit aligned (byte 1)
1703 ldrb r2, [r1, #0x0b] /* r2 = ...B */
1704 ldr r3, [r1, #0x07] /* BE:r3 = 789A LE:r3 = A987 */
1705 ldr ip, [r1, #0x03] /* BE:ip = 3456 LE:ip = 6543 */
1706 ldr r1, [r1, #-1] /* BE:r1 = x012 LE:r1 = 210x */
1707 strb r2, [r0, #0x0b]
1709 strh r3, [r0, #0x09]
1710 mov r3, r3, lsr #16 /* r3 = ..78 */
1711 orr r3, r3, ip, lsl #16 /* r3 = 5678 */
1712 mov ip, ip, lsr #16 /* ip = ..34 */
1713 orr ip, ip, r1, lsl #16 /* ip = 1234 */
1714 mov r1, r1, lsr #16 /* r1 = ..x0 */
1716 mov r2, r3, lsr #16 /* r2 = ..A9 */
1717 strh r2, [r0, #0x09]
1718 mov r3, r3, lsl #16 /* r3 = 87.. */
1719 orr r3, r3, ip, lsr #16 /* r3 = 8765 */
1720 mov ip, ip, lsl #16 /* ip = 43.. */
1721 orr ip, ip, r1, lsr #16 /* ip = 4321 */
1722 mov r1, r1, lsr #8 /* r1 = .210 */
1731 * 1110: dst is 8-bit aligned (byte 3), src is 16-bit aligned
1734 ldrh r2, [r1, #0x0a] /* r2 = ..AB */
1735 ldr ip, [r1, #0x06] /* ip = 6789 */
1736 ldr r3, [r1, #0x02] /* r3 = 2345 */
1737 ldrh r1, [r1] /* r1 = ..01 */
1738 strb r2, [r0, #0x0b]
1739 mov r2, r2, lsr #8 /* r2 = ...A */
1740 orr r2, r2, ip, lsl #8 /* r2 = 789A */
1741 mov ip, ip, lsr #8 /* ip = .678 */
1742 orr ip, ip, r3, lsl #24 /* ip = 5678 */
1743 mov r3, r3, lsr #8 /* r3 = .234 */
1744 orr r3, r3, r1, lsl #24 /* r3 = 1234 */
1745 mov r1, r1, lsr #8 /* r1 = ...0 */
1749 strh r2, [r0, #0x09]
1751 ldrh r2, [r1] /* r2 = ..10 */
1752 ldr r3, [r1, #0x02] /* r3 = 5432 */
1753 ldr ip, [r1, #0x06] /* ip = 9876 */
1754 ldrh r1, [r1, #0x0a] /* r1 = ..BA */
1756 mov r2, r2, lsr #8 /* r2 = ...1 */
1757 orr r2, r2, r3, lsl #8 /* r2 = 4321 */
1758 mov r3, r3, lsr #24 /* r3 = ...5 */
1759 orr r3, r3, ip, lsl #8 /* r3 = 8765 */
1760 mov ip, ip, lsr #24 /* ip = ...9 */
1761 orr ip, ip, r1, lsl #8 /* ip = .BA9 */
1762 mov r1, r1, lsr #8 /* r1 = ...B */
1765 strh ip, [r0, #0x09]
1766 strb r1, [r0, #0x0b]
1772 * 1111: dst is 8-bit aligned (byte 3), src is 8-bit aligned (byte 3)
1778 ldrh r2, [r1, #0x09]
1779 ldrb r1, [r1, #0x0b]
1782 strh r2, [r0, #0x09]
1783 strb r1, [r0, #0x0b]
1785 #endif /* !_STANDALONE */
1788 .section .note.GNU-stack,"",%progbits