/* Copyright (c) 2012, Linaro Limited

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
       * Redistributions of source code must retain the above copyright
         notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above copyright
         notice, this list of conditions and the following disclaimer in the
         documentation and/or other materials provided with the distribution.
       * Neither the name of the Linaro nor the
         names of its contributors may be used to endorse or promote products
         derived from this software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
#include <machine/asm.h>

	mov	dst, dstin	/* Preserve return value. */
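	/* Broadcast the set byte across all 64 bits of A_l:
	 * 0xab -> 0xabab -> 0xabababab -> 0xabababababababab. */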
	orr	A_lw, A_lw, A_lw, lsl #8
	orr	A_lw, A_lw, A_lw, lsl #16
	orr	A_l, A_l, A_l, lsl #32
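	/* tmp1 = count & 0x30: how many 16-byte chunks (0, 16, 32 or 48
	 * bytes) of the up-to-63-byte tail remain; Z is set when none do. */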
	ands	tmp1, count, #0x30
	stp	A_l, A_l, [dst, #-48]
	stp	A_l, A_l, [dst, #-32]
	stp	A_l, A_l, [dst, #-16]
	stp	A_l, A_l, [dst, #-16]	/* Repeat some/all of last store. */
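	/* dst has been advanced to the true end, so this store covers the
	 * final 1-15 bytes; overlapping the previous store with the same
	 * value is harmless. */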
	/* Set up to 15 bytes. Does not assume earlier memory
	 * being set. */
	/* Critical loop. Start at a new cache line boundary. Assuming
	 * 64 bytes per line, this ensures the entire loop is in one line. */

	/* Bring DST to 128-bit (16-byte) alignment. We know that there's
	 * more than that to set, so we simply store 16 bytes and advance by
	 * the amount required to reach alignment. */
	sub	count, count, tmp2

	/* There may be less than 63 bytes to go now. */

	sub	dst, dst, #16	/* Pre-bias. */
	sub	count, count, #64
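	/* Each iteration below stores 64 bytes; the write-back on the final
	 * stp advances dst, so the stores land at dst+16 .. dst+64 relative
	 * to the pre-biased pointer. */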
	stp	A_l, A_l, [dst, #16]
	stp	A_l, A_l, [dst, #32]
	stp	A_l, A_l, [dst, #48]
	stp	A_l, A_l, [dst, #64]!
	subs	count, count, #64
	/* For zeroing memory, check to see if we can use the ZVA feature to
	 * zero entire 'cache' lines. */
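	/* dc zva zeroes one whole block (typically 64 bytes) per
	 * instruction; the block size is reported by DCZID_EL0. */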
	b.le	.Ltail_maybe_tiny

	sub	count, count, tmp2
	/* For zeroing small amounts of memory, it's not worth setting up
	 * the line-clear code. */
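	/* adrp forms the 4KiB-page address of dczva_line_size; the :lo12:
	 * add supplies the low 12 bits of the symbol's address. */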
	adrp	tmp2, dczva_line_size
	add	tmp2, tmp2, :lo12:dczva_line_size

	cbz	zva_len, .Lnot_short

	/* Compute how far we need to go to become suitably aligned. We're
	 * already at quad-word alignment. */

	b.lt	.Lnot_short	/* Not enough to reach alignment. */
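	/* zva_len is a power of two, so zva_len - 1 is a mask of the
	 * in-block offset bits. */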
	sub	zva_bits_x, zva_len_x, #1

	ands	tmp2, tmp2, zva_bits_x
	b.eq	1f	/* Already aligned. */

	/* Not aligned, check that there's enough to copy after alignment. */
	sub	tmp1, count, tmp2

	ccmp	tmp1, zva_len_x, #8, ge	/* NZCV=0b1000 */
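	/* ccmp: if ge held, compare tmp1 with zva_len_x; otherwise set
	 * NZCV to 0b1000 (N=1) so the following less-than branch is taken. */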
	/* We know that there's at least 64 bytes to zero and that it's safe
	 * to overrun by 64 bytes. */

	stp	A_l, A_l, [dst, #16]
	stp	A_l, A_l, [dst, #32]
	stp	A_l, A_l, [dst, #48]

	/* We've overrun a bit, so adjust dst downwards. */

	sub	count, count, zva_len_x
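	/* Pre-bias count by one block so the subs/b.ge loop below runs
	 * once for each full ZVA block. */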
	add	dst, dst, zva_len_x
	subs	count, count, zva_len_x
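	/* Masking the biased count with zva_bits_x recovers
	 * count mod zva_len: the bytes still left for the tail code. */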
	ands	count, count, zva_bits_x
	b.ne	.Ltail_maybe_long