2 * Derived from crc32c.c version 1.1 by Mark Adler.
4 * Copyright (C) 2013 Mark Adler
6 * This software is provided 'as-is', without any express or implied warranty.
7 * In no event will the author be held liable for any damages arising from the
8 * use of this software.
10 * Permission is granted to anyone to use this software for any purpose,
11 * including commercial applications, and to alter it and redistribute it
12 * freely, subject to the following restrictions:
14 * 1. The origin of this software must not be misrepresented; you must not
15 * claim that you wrote the original software. If you use this software
16 * in a product, an acknowledgment in the product documentation would be
17 * appreciated but is not required.
18 * 2. Altered source versions must be plainly marked as such, and must not be
19 * misrepresented as being the original software.
20 * 3. This notice may not be removed or altered from any source distribution.
23 * madler@alumni.caltech.edu
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
30 * This file is compiled in userspace in order to run ATF unit tests.
32 #ifdef USERSPACE_TESTING
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
/*
 * Fold one byte y into the running CRC-32C state x using the SSE4.2
 * crc32b instruction.  Returns the updated state.
 */
static __inline uint32_t
_mm_crc32_u8(uint32_t x, uint8_t y)
{
	/*
	 * clang (at least 3.9.[0-1]) pessimizes "rm" (y) and "m" (y)
	 * significantly and "r" (y) a lot by copying y to a different
	 * local variable (on the stack or in a register), so only use
	 * the latter.  This costs a register and an instruction but
	 * not a uop.
	 */
	__asm("crc32b %1,%0" : "+r" (x) : "r" (y));
	return (x);
}
/*
 * Fold the 8 bytes of y into the running CRC-32C state x using the
 * SSE4.2 crc32q instruction (amd64 only).  Only the low 32 bits of
 * the state are significant; the wide type matches the instruction's
 * 64-bit register operands.
 */
static __inline uint64_t
_mm_crc32_u64(uint64_t x, uint64_t y)
{
	__asm("crc32q %1,%0" : "+r" (x) : "r" (y));
	return (x);
}
/*
 * Fold the 4 bytes of y into the running CRC-32C state x using the
 * SSE4.2 crc32l instruction.  Returns the updated state.
 */
static __inline uint32_t
_mm_crc32_u32(uint32_t x, uint32_t y)
{
	__asm("crc32l %1,%0" : "+r" (x) : "r" (y));
	return (x);
}
/* CRC-32C (iSCSI) polynomial in reversed bit order. */
#define POLY	0x82f63b78

/*
 * Block sizes for three-way parallel crc computation.  LONG and SHORT must
 * both be powers of two.
 */
#define LONG	128
#define SHORT	64

/*
 * Tables for updating a crc for LONG, 2 * LONG, SHORT and 2 * SHORT bytes
 * of value 0 later in the input stream, in the same way that the hardware
 * would, but in software without calculating intermediate steps.
 */
static uint32_t crc32c_long[4][256];
static uint32_t crc32c_2long[4][256];
static uint32_t crc32c_short[4][256];
static uint32_t crc32c_2short[4][256];
/*
 * Multiply a matrix times a vector over the Galois field of two elements,
 * GF(2).  Each element is a bit in an unsigned integer.  mat must have at
 * least as many entries as the power of two for most significant one bit in
 * vec.
 */
static inline uint32_t
gf2_matrix_times(uint32_t *mat, uint32_t vec)
{
	uint32_t sum;

	/* Accumulate (XOR) the matrix rows selected by the set bits of vec. */
	sum = 0;
	while (vec) {
		if (vec & 1)
			sum ^= *mat;
		vec >>= 1;
		mat++;
	}
	return (sum);
}
/*
 * Multiply a matrix by itself over GF(2).  Both mat and square must have 32
 * rows.
 */
static inline void
gf2_matrix_square(uint32_t *square, uint32_t *mat)
{
	int n;

	/* Row n of the square is mat applied to row n of mat. */
	for (n = 0; n < 32; n++)
		square[n] = gf2_matrix_times(mat, mat[n]);
}
126 * Construct an operator to apply len zeros to a crc. len must be a power of
127 * two. If len is not a power of two, then the result is the same as for the
128 * largest power of two less than len. The result for len == 0 is the same as
129 * for len == 1. A version of this routine could be easily written for any
130 * len, but that is not needed for this application.
133 crc32c_zeros_op(uint32_t *even, size_t len)
135 uint32_t odd[32]; /* odd-power-of-two zeros operator */
139 /* put operator for one zero bit in odd */
140 odd[0] = POLY; /* CRC-32C polynomial */
142 for (n = 1; n < 32; n++) {
147 /* put operator for two zero bits in even */
148 gf2_matrix_square(even, odd);
150 /* put operator for four zero bits in odd */
151 gf2_matrix_square(odd, even);
154 * first square will put the operator for one zero byte (eight zero
155 * bits), in even -- next square puts operator for two zero bytes in
156 * odd, and so on, until len has been rotated down to zero
159 gf2_matrix_square(even, odd);
163 gf2_matrix_square(odd, even);
167 /* answer ended up in odd -- copy to even */
168 for (n = 0; n < 32; n++)
/*
 * Take a length and build four lookup tables for applying the zeros operator
 * for that length, byte-by-byte on the operand.
 */
static void
crc32c_zeros(uint32_t zeros[][256], size_t len)
{
	uint32_t op[32];	/* zeros operator for this length */
	uint32_t n;

	crc32c_zeros_op(op, len);
	/* One table per byte position of the crc being shifted. */
	for (n = 0; n < 256; n++) {
		zeros[0][n] = gf2_matrix_times(op, n);
		zeros[1][n] = gf2_matrix_times(op, n << 8);
		zeros[2][n] = gf2_matrix_times(op, n << 16);
		zeros[3][n] = gf2_matrix_times(op, n << 24);
	}
}
/* Apply the zeros operator table to crc. */
static inline uint32_t
crc32c_shift(uint32_t zeros[][256], uint32_t crc)
{

	/* Look up each byte of the crc and combine the partial results. */
	return (zeros[0][crc & 0xff] ^ zeros[1][(crc >> 8) & 0xff] ^
	    zeros[2][(crc >> 16) & 0xff] ^ zeros[3][crc >> 24]);
}
200 /* Initialize tables for shifting crcs. */
202 #ifdef USERSPACE_TESTING
203 __attribute__((__constructor__))
207 crc32c_zeros(crc32c_long, LONG);
208 crc32c_zeros(crc32c_2long, 2 * LONG);
209 crc32c_zeros(crc32c_short, SHORT);
210 crc32c_zeros(crc32c_2short, 2 * SHORT);
213 SYSINIT(crc32c_sse42, SI_SUB_LOCK, SI_ORDER_ANY, crc32c_init_hw, NULL);
216 /* Compute CRC-32C using the Intel hardware instruction. */
217 #ifdef USERSPACE_TESTING
218 uint32_t sse42_crc32c(uint32_t, const unsigned char *, unsigned);
221 sse42_crc32c(uint32_t crc, const unsigned char *buf, unsigned len)
224 const size_t align = 8;
226 const size_t align = 4;
228 const unsigned char *next, *end;
230 uint64_t crc0, crc1, crc2;
232 uint32_t crc0, crc1, crc2;
238 /* Compute the crc to bring the data pointer to an aligned boundary. */
239 while (len && ((uintptr_t)next & (align - 1)) != 0) {
240 crc0 = _mm_crc32_u8(crc0, *next);
247 * Compute the crc on sets of LONG*3 bytes, executing three independent
248 * crc instructions, each on LONG bytes -- this is optimized for the
249 * Nehalem, Westmere, Sandy Bridge, and Ivy Bridge architectures, which
250 * have a throughput of one crc per cycle, but a latency of three
254 while (len >= LONG * 3) {
260 crc0 = _mm_crc32_u64(crc0, *(const uint64_t *)next);
261 crc1 = _mm_crc32_u64(crc1,
262 *(const uint64_t *)(next + LONG));
263 crc2 = _mm_crc32_u64(crc2,
264 *(const uint64_t *)(next + (LONG * 2)));
266 crc0 = _mm_crc32_u32(crc0, *(const uint32_t *)next);
267 crc1 = _mm_crc32_u32(crc1,
268 *(const uint32_t *)(next + LONG));
269 crc2 = _mm_crc32_u32(crc2,
270 *(const uint32_t *)(next + (LONG * 2)));
273 } while (next < end);
275 * Update the crc. Try to do it in parallel with the inner
276 * loop. 'crc' is used to accumulate crc0 and crc1
277 * produced by the inner loop so that the next iteration
278 * of the loop doesn't depend on anything except crc2.
280 * The full expression for the update is:
281 * crc = S*S*S*crc + S*S*crc0 + S*crc1
282 * where the terms are polynomials modulo the CRC polynomial.
283 * We regroup this subtly as:
284 * crc = S*S * (S*crc + crc0) + S*crc1.
285 * This has an extra dependency which reduces possible
286 * parallelism for the expression, but it turns out to be
287 * best to intentionally delay evaluation of this expression
288 * so that it competes less with the inner loop.
290 * We also intentionally reduce parallelism by feedng back
291 * crc2 to the inner loop as crc0 instead of accumulating
292 * it in crc. This synchronizes the loop with crc update.
293 * CPU and/or compiler schedulers produced bad order without
296 * Shifts take about 12 cycles each, so 3 here with 2
297 * parallelizable take about 24 cycles and the crc update
298 * takes slightly longer. 8 dependent crc32 instructions
299 * can run in 24 cycles, so the 3-way blocking is worse
300 * than useless for sizes less than 8 * <word size> = 64
301 * on amd64. In practice, SHORT = 32 confirms these
302 * timing calculations by giving a small improvement
303 * starting at size 96. Then the inner loop takes about
304 * 12 cycles and the crc update about 24, but these are
305 * partly in parallel so the total time is less than the
306 * 36 cycles that 12 dependent crc32 instructions would
309 * To have a chance of completely hiding the overhead for
310 * the crc update, the inner loop must take considerably
311 * longer than 24 cycles. LONG = 64 makes the inner loop
312 * take about 24 cycles, so is not quite large enough.
313 * LONG = 128 works OK. Unhideable overheads are about
314 * 12 cycles per inner loop. All assuming timing like
317 crc = crc32c_shift(crc32c_long, crc) ^ crc0;
318 crc1 = crc32c_shift(crc32c_long, crc1);
319 crc = crc32c_shift(crc32c_2long, crc) ^ crc1;
325 #endif /* LONG > SHORT */
328 * Do the same thing, but now on SHORT*3 blocks for the remaining data
329 * less than a LONG*3 block
332 while (len >= SHORT * 3) {
338 crc0 = _mm_crc32_u64(crc0, *(const uint64_t *)next);
339 crc1 = _mm_crc32_u64(crc1,
340 *(const uint64_t *)(next + SHORT));
341 crc2 = _mm_crc32_u64(crc2,
342 *(const uint64_t *)(next + (SHORT * 2)));
344 crc0 = _mm_crc32_u32(crc0, *(const uint32_t *)next);
345 crc1 = _mm_crc32_u32(crc1,
346 *(const uint32_t *)(next + SHORT));
347 crc2 = _mm_crc32_u32(crc2,
348 *(const uint32_t *)(next + (SHORT * 2)));
351 } while (next < end);
352 crc = crc32c_shift(crc32c_short, crc) ^ crc0;
353 crc1 = crc32c_shift(crc32c_short, crc1);
354 crc = crc32c_shift(crc32c_2short, crc) ^ crc1;
361 /* Compute the crc on the remaining bytes at native word size. */
362 end = next + (len - (len & (align - 1)));
365 crc0 = _mm_crc32_u64(crc0, *(const uint64_t *)next);
367 crc0 = _mm_crc32_u32(crc0, *(const uint32_t *)next);
373 /* Compute the crc for any trailing bytes. */
375 crc0 = _mm_crc32_u8(crc0, *next);
380 return ((uint32_t)crc0);