2 * LZ4 - Fast LZ compression algorithm
4 * Copyright (C) 2011-2013, Yann Collet.
5 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following disclaimer
15 * in the documentation and/or other materials provided with the
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * You can contact the author at :
31 * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
32 * - LZ4 source repository : http://code.google.com/p/lz4/
35 * Copyright (c) 2016 by Delphix. All rights reserved.
/* Kernel context: supplies kmem_cache_*, ASSERT, BE_* byte-order helpers. */
38 #include <sys/zfs_context.h>
/* Forward declarations for the static LZ4 engine entry points below. */
40 static int real_LZ4_compress(const char *source, char *dest, int isize,
42 static int LZ4_compressBound(int isize);
43 static int LZ4_uncompress_unknownOutputSize(const char *source, char *dest,
44 int isize, int maxOutputSize);
45 static int LZ4_compressCtx(void *ctx, const char *source, char *dest,
46 int isize, int osize);
47 static int LZ4_compress64kCtx(void *ctx, const char *source, char *dest,
48 int isize, int osize);
/*
 * Slab cache of struct refTables (the hash/lookup table used during
 * compression), so each compression call avoids a large heap allocation.
 */
50 static kmem_cache_t *lz4_ctx_cache;
/*
 * ZFS-facing compression entry point.  Compresses s_len bytes from
 * s_start into d_start (capacity d_len).  The first 4 bytes of the
 * output are a big-endian header recording the compressed payload
 * size; the LZ4 stream follows.  Returns header size + payload size.
 * NOTE(review): this listing is elided -- the local declarations and
 * the error path taken when the compressor returns zero are not
 * visible here; 'n' appears unused by LZ4 (ZFS level slot) -- confirm.
 */
54 lz4_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
/* Destination must at least hold the 4-byte size header. */
59 	ASSERT(d_len >= sizeof (bufsiz));
/* Compress past the header, reserving sizeof (bufsiz) bytes for it. */
61 	bufsiz = real_LZ4_compress(s_start, &dest[sizeof (bufsiz)], s_len,
62 	    d_len - sizeof (bufsiz));
64 	/* Signal an error if the compression routine returned zero. */
69 	 * Encode the compressed buffer size at the start. We'll need this in
70 	 * decompression to counter the effects of padding which might be
71 	 * added to the compressed buffer and which, if unhandled, would
72 	 * confuse the hell out of our decompression function.
74 	*(uint32_t *)dest = BE_32(bufsiz);
76 	return (bufsiz + sizeof (bufsiz));
/*
 * ZFS-facing decompression entry point.  Reads the 4-byte big-endian
 * payload size written by lz4_compress(), validates it against s_len,
 * then decodes the payload into d_start (capacity d_len).
 * Returns 0 on success, non-zero on failure.
 */
81 lz4_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
83 	const char *src = s_start;
/* Compressed payload size, stored big-endian by lz4_compress(). */
84 	uint32_t bufsiz = BE_IN32(src);
86 	/* invalid compressed buffer size encoded at start */
/*
 * NOTE(review): bufsiz is uint32_t; the sum promotes to size_t, so on
 * LP64 this cannot wrap.  Confirm promotion behavior on ILP32 targets.
 */
87 	if (bufsiz + sizeof (bufsiz) > s_len)
91 	* Returns 0 on success (decompression function returned non-negative)
92 	* and non-zero on failure (decompression function returned negative).
/* Decode starting just past the 4-byte header. */
94 	return (LZ4_uncompress_unknownOutputSize(&src[sizeof (bufsiz)],
95 	    d_start, bufsiz, d_len) < 0);
99 * LZ4 API Description:
102 * real_LZ4_compress() :
103 * isize : is the input size. Max supported value is ~1.9GB
104 * return : the number of bytes written in buffer dest
105 * or 0 if the compression fails (if LZ4_COMPRESSMIN is set).
106 * note : destination buffer must be already allocated.
107 * destination buffer must be sized to handle worst cases
108 * situations (input data not compressible) worst case size
109 * evaluation is provided by function LZ4_compressBound().
113 * LZ4_compressBound() :
114 * Provides the maximum size that LZ4 may output in a "worst case"
115 * scenario (input data not compressible) primarily useful for memory
116 * allocation of output buffer.
118 * isize : is the input size. Max supported value is ~1.9GB
119 * return : maximum output size in a "worst case" scenario
120 * note : this function is limited by "int" range (2^31-1)
122 * LZ4_uncompress_unknownOutputSize() :
123 * isize : is the input size, therefore the compressed size
124 * maxOutputSize : is the size of the destination buffer (which must be
126 * return : the number of bytes decoded in the destination buffer
127 * (necessarily <= maxOutputSize). If the source stream is
128 * malformed, the function will stop decoding and return a
129 * negative result, indicating the byte position of the faulty
130 * instruction. This function never writes beyond dest +
131 * maxOutputSize, and is therefore protected against malicious
133 * note : Destination buffer must be already allocated.
135 * LZ4_compressCtx() :
136 * This function explicitly handles the CTX memory structure.
138 * ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
139 * by the caller (either on the stack or using kmem_zalloc). Passing NULL
142 * LZ4_compress64kCtx() :
143 * Same as LZ4_compressCtx(), but specific to small inputs (<64KB).
144 * isize *Must* be <64KB, otherwise the output will be corrupted.
146 * ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
147 * by the caller (either on the stack or using kmem_zalloc). Passing NULL
156 * COMPRESSIONLEVEL: Increasing this value improves compression ratio
157 * Lowering this value reduces memory usage. Reduced memory usage
158 * typically improves speed, due to cache effect (ex: L1 32KB for Intel,
159 * L1 64KB for AMD). Memory usage formula : N->2^(N+2) Bytes
160 * (examples : 12 -> 16KB ; 17 -> 512KB)
/* Feeds HASH_LOG below: 12 -> 4096-entry hash table. */
162 #define COMPRESSIONLEVEL 12
165 * NOTCOMPRESSIBLE_CONFIRMATION: Decreasing this value will make the
166 * algorithm skip faster data segments considered "incompressible".
167 * This may decrease compression ratio dramatically, but will be
168 * faster on incompressible data. Increasing this value will make
169 * the algorithm search more before declaring a segment "incompressible".
170 * This could improve compression a bit, but will be slower on
171 * incompressible data. The default value (6) is recommended.
/* Feeds SKIPSTRENGTH: controls match-search acceleration. */
173 #define NOTCOMPRESSIBLE_CONFIRMATION 6
176 * BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE: This will provide a boost to
177 * performance for big endian cpu, but the resulting compressed stream
178 * will be incompatible with little-endian CPU. You can set this option
179 * to 1 in situations where data will stay within closed environment.
180 * This option is useless on Little_Endian CPU (such as x86).
/* Left disabled: ZFS streams must be portable across endianness. */
182 /* #define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1 */
185 * CPU Feature Detection
188 /* 32 or 64 bits ? */
/* Selects LZ4_ARCH64 (8-byte copy steps) on all common 64-bit targets. */
189 #if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || \
190 defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || \
191 defined(__LP64__) || defined(_LP64))
198 * Limits the amount of stack space that the algorithm may consume to hold
199 * the compression lookup table. The value `9' here means we'll never use
200 * more than 2k of stack (see above for a description of COMPRESSIONLEVEL).
201 * If more memory is needed, it is allocated from the heap.
203 /* FreeBSD: Use heap for all platforms for now */
207 * Little Endian or Big Endian?
208 * Note: overwrite the below #define if you know your architecture endianness.
210 #if BYTE_ORDER == BIG_ENDIAN
211 #define LZ4_BIG_ENDIAN 1
214 * Little Endian assumed. PDP Endian and other very rare endian format
220 * Unaligned memory access is automatically enabled for "common" CPU,
221 * such as x86. For other CPUs, the compiler will be more cautious, and
222 * insert extra code to ensure aligned access is respected. If you know
223 * your target CPU supports unaligned memory access, you may want to
224 * force this option manually to improve performance
226 #if defined(__ARM_FEATURE_UNALIGNED)
227 #define LZ4_FORCE_UNALIGNED_ACCESS 1
231 * FreeBSD: can't use GCC's __builtin_ctz when using sparc64 because
232 * gcc currently relies on libcompiler_rt.
234 * TODO: revisit this when situation changes.
/* Force the De Bruijn software bit-count fallback on sparc64. */
236 #if defined(__sparc64__)
237 #define LZ4_FORCE_SW_BITCOUNT
243 #if __STDC_VERSION__ >= 199901L /* C99 */
244 /* "restrict" is a known keyword */
246 /* Disable restrict */
/* Portable 16-bit byte swap used by the endian read/write macros. */
250 #define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | \
251 (((x) & 0xffu) << 8)))
/* Branch-prediction hints; guarded in case the kernel defines them. */
253 #define expect(expr, value) (__builtin_expect((expr), (value)))
258 #if defined(unlikely)
263 #define likely(expr) expect((expr) != 0, 1)
267 #define unlikely(expr) expect((expr) != 0, 0)
/*
 * Packed wrapper structs (U16_S/U32_S/U64_S) let the A16/A32/A64
 * macros perform unaligned loads/stores portably; when the CPU is
 * known to tolerate unaligned access the pack pragma is skipped.
 */
277 #ifndef LZ4_FORCE_UNALIGNED_ACCESS
281 typedef struct _U16_S {
284 typedef struct _U32_S {
287 typedef struct _U64_S {
291 #ifndef LZ4_FORCE_UNALIGNED_ACCESS
295 #define A64(x) (((U64_S *)(x))->v)
296 #define A32(x) (((U32_S *)(x))->v)
297 #define A16(x) (((U16_S *)(x))->v)
/* Hash-table geometry is derived from the tuning constant above. */
304 #define HASH_LOG COMPRESSIONLEVEL
305 #define HASHTABLESIZE (1 << HASH_LOG)
306 #define HASH_MASK (HASHTABLESIZE - 1)
/* Clamp so the skip heuristic never shifts by less than 2. */
308 #define SKIPSTRENGTH (NOTCOMPRESSIBLE_CONFIRMATION > 2 ? \
309 NOTCOMPRESSIBLE_CONFIRMATION : 2)
312 * Defines if memory is allocated into the stack (local variable),
313 * or into the heap (kmem_alloc()).
315 #define HEAPMODE (HASH_LOG > STACKLIMIT)
/* Format constants: last 5 bytes of a block are always literals. */
317 #define LASTLITERALS 5
318 #define MFLIMIT (COPYLENGTH + MINMATCH)
319 #define MINLENGTH (MFLIMIT + 1)
/* Maximum back-reference offset encodable in a 16-bit field. */
322 #define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
/* Token layout: high RUN_BITS = literal length, low ML_BITS = match length. */
325 #define ML_MASK ((1U<<ML_BITS)-1)
326 #define RUN_BITS (8-ML_BITS)
327 #define RUN_MASK ((1U<<RUN_BITS)-1)
331 * Architecture-specific macros
/* 64-bit arch: copy 8 bytes per step; hash table stores 32-bit offsets. */
337 #define LZ4_COPYSTEP(s, d) A64(d) = A64(s); d += 8; s += 8;
338 #define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d)
339 #define LZ4_SECURECOPY(s, d, e) if (d < e) LZ4_WILDCOPY(s, d, e)
341 #define INITBASE(base) const BYTE* const base = ip
342 #else /* !LZ4_ARCH64 */
/* 32-bit arch: two 4-byte steps per packet; table stores raw pointers. */
346 #define LZ4_COPYSTEP(s, d) A32(d) = A32(s); d += 4; s += 4;
347 #define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d); LZ4_COPYSTEP(s, d);
348 #define LZ4_SECURECOPY LZ4_WILDCOPY
349 #define HTYPE const BYTE *
350 #define INITBASE(base) const int base = 0
351 #endif /* !LZ4_ARCH64 */
/* Match offsets are stored little-endian in the stream; swap on BE hosts. */
353 #if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
354 #define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
355 { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
356 #define LZ4_WRITE_LITTLEENDIAN_16(p, i) \
357 { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p += 2; }
359 #define LZ4_READ_LITTLEENDIAN_16(d, s, p) { d = (s) - A16(p); }
360 #define LZ4_WRITE_LITTLEENDIAN_16(p, v) { A16(p) = v; p += 2; }
364 /* Local structures */
366 HTYPE hashTable[HASHTABLESIZE];
/* Fibonacci-hash of the first 4 bytes at p into a HASH_LOG-bit index. */
371 #define LZ4_HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH * 8) - \
373 #define LZ4_HASH_VALUE(p) LZ4_HASH_FUNCTION(A32(p))
/* WILDCOPY may overshoot e; callers guarantee slack past the target. */
374 #define LZ4_WILDCOPY(s, d, e) do { LZ4_COPYPACKET(s, d) } while (d < e);
375 #define LZ4_BLINDCOPY(s, d, l) { BYTE* e = (d) + l; LZ4_WILDCOPY(s, d, e); \
379 /* Private functions */
/*
 * Given val = XOR of two 8-byte words, return how many bytes the two
 * words have in common (0..7): leading-zero bytes on big-endian hosts,
 * trailing-zero bytes on little-endian.  Uses compiler bit-scan
 * builtins when available, else a De Bruijn multiply-and-lookup.
 * NOTE(review): listing elided; val == 0 handling is not visible here.
 */
383 LZ4_NbCommonBytes(register U64 val)
385 #if defined(LZ4_BIG_ENDIAN)
386 #if !defined(LZ4_FORCE_SW_BITCOUNT)
/* BE: common prefix = leading zero bits / 8. */
387 	return (__builtin_clzll(val) >> 3);
406 #if !defined(LZ4_FORCE_SW_BITCOUNT)
/* LE: common prefix = trailing zero bits / 8. */
407 	return (__builtin_ctzll(val) >> 3);
/* Software fallback: isolate lowest set bit, De Bruijn table lookup. */
409 	static const int DeBruijnBytePos[64] =
410 	{ 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5,
411 	3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5,
412 	5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4,
413 	4, 5, 7, 2, 6, 5, 7, 6, 7, 7
415 	return DeBruijnBytePos[((U64) ((val & -val) * 0x0218A392CDABBD3F)) >>
/*
 * 32-bit counterpart of LZ4_NbCommonBytes: number of common bytes
 * (0..3) between two 4-byte words whose XOR is val.
 */
424 LZ4_NbCommonBytes(register U32 val)
426 #if defined(LZ4_BIG_ENDIAN)
427 #if !defined(LZ4_FORCE_SW_BITCOUNT)
/* BE: leading zero bits / 8. */
428 	return (__builtin_clz(val) >> 3);
442 #if !defined(LZ4_FORCE_SW_BITCOUNT)
/* LE: trailing zero bits / 8. */
443 	return (__builtin_ctz(val) >> 3);
/* Software fallback: De Bruijn multiply on the isolated lowest set bit. */
445 	static const int DeBruijnBytePos[32] = {
446 	0, 0, 3, 0, 3, 1, 3, 0,
447 	3, 2, 2, 1, 3, 2, 0, 1,
448 	3, 3, 1, 2, 2, 2, 2, 0,
449 	3, 1, 2, 0, 1, 0, 1, 1
451 	return DeBruijnBytePos[((U32) ((val & -(S32) val) * 0x077CB531U)) >>
459 /* Public functions */
/*
 * Worst-case (incompressible input) output size for an isize-byte
 * input: the input itself, one extra byte per 255 bytes of literal
 * run-length continuation, plus fixed token/terminator slack.
 */
462 LZ4_compressBound(int isize)
464 	return (isize + (isize / 255) + 16);
467 /* Compression functions */
/*
 * Core LZ4 compressor for inputs of any size (up to ~1.9 GB).
 * Greedy hash-chain match finder: hash 4-byte sequences into
 * HashTable, emit (literal-run, 16-bit offset, match-length) tokens.
 * ctx, if non-NULL, supplies the hash table (illumos change: caller
 * allocates); osize bounds the output, return is bytes written or 0
 * on overflow.  NOTE(review): listing elided -- failure returns,
 * several braces and declarations are not visible here.
 */
471 LZ4_compressCtx(void *ctx, const char *source, char *dest, int isize,
/* HEAPMODE: hash table lives in caller-provided ctx ... */
475 	struct refTables *srt = (struct refTables *)ctx;
476 	HTYPE *HashTable = (HTYPE *) (srt->hashTable);
/* ... otherwise on the stack. */
478 	HTYPE HashTable[HASHTABLESIZE] = { 0 };
481 	const BYTE *ip = (BYTE *) source;
483 	const BYTE *anchor = ip;
484 	const BYTE *const iend = ip + isize;
485 	const BYTE *const oend = (BYTE *) dest + osize;
/* No matches may start within MFLIMIT bytes of the input end. */
486 	const BYTE *const mflimit = iend - MFLIMIT;
487 #define matchlimit (iend - LASTLITERALS)
489 	BYTE *op = (BYTE *) dest;
492 	const int skipStrength = SKIPSTRENGTH;
/* Inputs shorter than MINLENGTH are emitted as pure literals. */
497 	if (isize < MINLENGTH)
501 	HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
503 	forwardH = LZ4_HASH_VALUE(ip);
/* --- Match search: accelerating skip over incompressible data. --- */
507 		int findMatchAttempts = (1U << skipStrength) + 3;
508 		const BYTE *forwardIp = ip;
/* Step grows as attempts accumulate -> fast skip of random data. */
515 			int step = findMatchAttempts++ >> skipStrength;
517 			forwardIp = ip + step;
519 			if unlikely(forwardIp > mflimit) {
523 			forwardH = LZ4_HASH_VALUE(forwardIp);
524 			ref = base + HashTable[h];
525 			HashTable[h] = ip - base;
/* Loop until candidate is in range and its 4 bytes really match. */
527 		} while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));
/* Extend the match backwards over any equal preceding bytes. */
530 		while ((ip > anchor) && (ref > (BYTE *) source) &&
531 		    unlikely(ip[-1] == ref[-1])) {
536 		/* Encode Literal length */
537 		length = ip - anchor;
540 		/* Check output limit */
541 		if unlikely(op + length + (2 + 1 + LASTLITERALS) +
542 		    (length >> 8) > oend)
/* Literal length >= 15 spills into 255-valued continuation bytes. */
545 		if (length >= (int)RUN_MASK) {
546 			*token = (RUN_MASK << ML_BITS);
547 			len = length - RUN_MASK;
548 			for (; len > 254; len -= 255)
552 			*token = (length << ML_BITS);
/* Copy the literals (may overshoot; slack guaranteed above). */
555 		LZ4_BLINDCOPY(anchor, op, length);
/* Emit the 16-bit little-endian match offset. */
559 		LZ4_WRITE_LITTLEENDIAN_16(op, ip - ref);
563 		ref += MINMATCH;	/* MinMatch verified */
/* Extend match forward a word at a time; XOR finds first difference. */
565 		while likely(ip < matchlimit - (STEPSIZE - 1)) {
566 			UARCH diff = AARCH(ref) ^ AARCH(ip);
572 			ip += LZ4_NbCommonBytes(diff);
/* Tail comparison: 4, 2, then 1 byte. */
576 		if ((ip < (matchlimit - 3)) && (A32(ref) == A32(ip))) {
581 		if ((ip < (matchlimit - 1)) && (A16(ref) == A16(ip))) {
585 		if ((ip < matchlimit) && (*ref == *ip))
589 		/* Encode MatchLength */
591 		/* Check output limit */
592 		if unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend)
/* Match length >= 15 spills into continuation bytes (255 pairs). */
594 		if (len >= (int)ML_MASK) {
597 			for (; len > 509; len -= 510) {
609 		/* Test end of chunk */
/* Refresh the table with the position just behind the current one. */
615 		HashTable[LZ4_HASH_VALUE(ip - 2)] = ip - 2 - base;
617 		/* Test next position */
618 		ref = base + HashTable[LZ4_HASH_VALUE(ip)];
619 		HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
/* Immediate re-match: skip the search loop when it hits. */
620 		if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) {
625 		/* Prepare next loop */
627 		forwardH = LZ4_HASH_VALUE(ip);
631 	/* Encode Last Literals */
633 		int lastRun = iend - anchor;
/* Final output-bound check covers the run header and its continuations. */
634 		if (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) >
637 		if (lastRun >= (int)RUN_MASK) {
638 			*op++ = (RUN_MASK << ML_BITS);
640 			for (; lastRun > 254; lastRun -= 255) {
643 			*op++ = (BYTE)lastRun;
645 			*op++ = (lastRun << ML_BITS);
646 		(void) memcpy(op, anchor, iend - anchor);
/* Compressed size = distance written into dest. */
651 	return (int)(((char *)op) - dest);
656 /* Note : this function is valid only if isize < LZ4_64KLIMIT */
657 #define LZ4_64KLIMIT ((1 << 16) + (MFLIMIT - 1))
/* Below 64 KB, offsets fit in U16, so a doubled-size U16 table is used. */
658 #define HASHLOG64K (HASH_LOG + 1)
659 #define HASH64KTABLESIZE (1U << HASHLOG64K)
660 #define LZ4_HASH64K_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8) - \
662 #define LZ4_HASH64K_VALUE(p) LZ4_HASH64K_FUNCTION(A32(p))
/*
 * Small-input (< LZ4_64KLIMIT) specialization of LZ4_compressCtx.
 * Identical algorithm, but every input offset fits in 16 bits, so the
 * hash table stores U16 offsets and the MAX_DISTANCE check is
 * unnecessary.  isize *must* be < LZ4_64KLIMIT or output is corrupt.
 * NOTE(review): listing elided -- failure returns and several braces
 * are not visible here.
 */
666 LZ4_compress64kCtx(void *ctx, const char *source, char *dest, int isize,
/* HEAPMODE: table from caller-provided ctx ... */
670 	struct refTables *srt = (struct refTables *)ctx;
671 	U16 *HashTable = (U16 *) (srt->hashTable);
/* ... otherwise on the stack (U16 offsets, double entry count). */
673 	U16 HashTable[HASH64KTABLESIZE] = { 0 };
676 	const BYTE *ip = (BYTE *) source;
677 	const BYTE *anchor = ip;
678 	const BYTE *const base = ip;
679 	const BYTE *const iend = ip + isize;
680 	const BYTE *const oend = (BYTE *) dest + osize;
681 	const BYTE *const mflimit = iend - MFLIMIT;
682 #define matchlimit (iend - LASTLITERALS)
684 	BYTE *op = (BYTE *) dest;
687 	const int skipStrength = SKIPSTRENGTH;
/* Inputs shorter than MINLENGTH are emitted as pure literals. */
691 	if (isize < MINLENGTH)
696 	forwardH = LZ4_HASH64K_VALUE(ip);
/* --- Match search with accelerating skip (see LZ4_compressCtx). --- */
700 		int findMatchAttempts = (1U << skipStrength) + 3;
701 		const BYTE *forwardIp = ip;
708 			int step = findMatchAttempts++ >> skipStrength;
710 			forwardIp = ip + step;
712 			if (forwardIp > mflimit) {
716 			forwardH = LZ4_HASH64K_VALUE(forwardIp);
717 			ref = base + HashTable[h];
718 			HashTable[h] = ip - base;
/* No distance check needed: any 16-bit offset is in range. */
720 		} while (A32(ref) != A32(ip));
/* Extend the match backwards. */
723 		while ((ip > anchor) && (ref > (BYTE *) source) &&
724 		    (ip[-1] == ref[-1])) {
729 		/* Encode Literal length */
730 		length = ip - anchor;
733 		/* Check output limit */
734 		if unlikely(op + length + (2 + 1 + LASTLITERALS) +
735 		    (length >> 8) > oend)
/* Literal length >= 15 spills into 255-valued continuation bytes. */
738 		if (length >= (int)RUN_MASK) {
739 			*token = (RUN_MASK << ML_BITS);
740 			len = length - RUN_MASK;
741 			for (; len > 254; len -= 255)
745 			*token = (length << ML_BITS);
748 		LZ4_BLINDCOPY(anchor, op, length);
/* Emit the 16-bit little-endian match offset. */
752 		LZ4_WRITE_LITTLEENDIAN_16(op, ip - ref);
756 		ref += MINMATCH;	/* MinMatch verified */
/* Extend match forward a word at a time. */
758 		while (ip < matchlimit - (STEPSIZE - 1)) {
759 			UARCH diff = AARCH(ref) ^ AARCH(ip);
765 			ip += LZ4_NbCommonBytes(diff);
/* Tail comparison: 4, 2, then 1 byte. */
769 		if ((ip < (matchlimit - 3)) && (A32(ref) == A32(ip))) {
774 		if ((ip < (matchlimit - 1)) && (A16(ref) == A16(ip))) {
778 		if ((ip < matchlimit) && (*ref == *ip))
782 		/* Encode MatchLength */
784 		/* Check output limit */
785 		if unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend)
787 		if (len >= (int)ML_MASK) {
790 			for (; len > 509; len -= 510) {
802 		/* Test end of chunk */
808 		HashTable[LZ4_HASH64K_VALUE(ip - 2)] = ip - 2 - base;
810 		/* Test next position */
811 		ref = base + HashTable[LZ4_HASH64K_VALUE(ip)];
812 		HashTable[LZ4_HASH64K_VALUE(ip)] = ip - base;
/* Immediate re-match check (distance test unnecessary here too). */
813 		if (A32(ref) == A32(ip)) {
818 		/* Prepare next loop */
820 		forwardH = LZ4_HASH64K_VALUE(ip);
824 	/* Encode Last Literals */
826 		int lastRun = iend - anchor;
827 		if (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) >
830 		if (lastRun >= (int)RUN_MASK) {
831 			*op++ = (RUN_MASK << ML_BITS);
833 			for (; lastRun > 254; lastRun -= 255)
835 			*op++ = (BYTE)lastRun;
837 			*op++ = (lastRun << ML_BITS);
838 		(void) memcpy(op, anchor, iend - anchor);
/* Compressed size = distance written into dest. */
843 	return (int)(((char *)op) - dest);
/*
 * Dispatcher: allocate a scratch context from the slab cache, zero it,
 * then route to the 64K-specialized or general compressor based on
 * input size.  If the cache allocation fails (KM_NOSLEEP), returns in
 * a way that lets zio_compress_data fall back to no compression.
 * NOTE(review): listing elided -- the NULL-ctx check between lines 850
 * and 860, and the early return path, are not visible here.
 */
847 real_LZ4_compress(const char *source, char *dest, int isize, int osize)
/* KM_NOSLEEP: must not block in the ZIO pipeline. */
850 	void *ctx = kmem_cache_alloc(lz4_ctx_cache, KM_NOSLEEP);
854 	* out of kernel memory, gently fall through - this will disable
855 	* compression in zio_compress_data
/* Slab memory is recycled, so the hash table must be re-zeroed. */
860 	bzero(ctx, sizeof(struct refTables));
861 	if (isize < LZ4_64KLIMIT)
862 		result = LZ4_compress64kCtx(ctx, source, dest, isize, osize);
864 		result = LZ4_compressCtx(ctx, source, dest, isize, osize);
866 	kmem_cache_free(lz4_ctx_cache, ctx);
/* Non-HEAPMODE path: compressors use stack tables, ctx == NULL. */
869 	if (isize < (int)LZ4_64KLIMIT)
870 		return (LZ4_compress64kCtx(NULL, source, dest, isize, osize));
871 	return (LZ4_compressCtx(NULL, source, dest, isize, osize));
875 /* Decompression functions */
878 * Note: The decoding function LZ4_uncompress_unknownOutputSize() is safe
879 * against "buffer overflow" attack type. They will never write nor
880 * read outside of the provided output buffers.
881 * LZ4_uncompress_unknownOutputSize() also insures that it will never
882 * read outside of the input buffer. A corrupted input will produce
883 * an error result, a negative int, indicating the position of the
884 * error within input stream.
/*
 * Safe LZ4 decoder: decodes isize compressed bytes from source into
 * dest, never writing past dest + maxOutputSize nor reading past
 * source + isize.  Returns the number of bytes decoded, or a negative
 * value (the negated offset of the faulty input byte) on malformed
 * input.  NOTE(review): listing elided -- the main loop header, token
 * fetch, and several braces/labels are not visible here.
 */
888 LZ4_uncompress_unknownOutputSize(const char *source, char *dest, int isize,
891 	/* Local Variables */
892 	const BYTE *restrict ip = (const BYTE *) source;
893 	const BYTE *const iend = ip + isize;
896 	BYTE *op = (BYTE *) dest;
897 	BYTE *const oend = op + maxOutputSize;
/* Correction tables for overlapping matches with offset < STEPSIZE. */
900 	size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
902 	size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
/* Literal run: length 15 in the token means continuation bytes follow. */
912 		if ((length = (token >> ML_BITS)) == RUN_MASK) {
914 			while ((ip < iend) && (s == 255)) {
921 		/* CORNER-CASE: cpy might overflow. */
923 			goto _output_error;	/* cpy was overflowed, bail! */
/* Near end of either buffer: fall back to exact bounded copy. */
924 		if ((cpy > oend - COPYLENGTH) ||
925 		    (ip + length > iend - COPYLENGTH)) {
927 			/* Error: writes beyond output buffer */
929 			if (ip + length != iend)
931 				* Error: LZ4 format requires to consume all
932 				* input at this stage
935 			(void) memcpy(op, ip, length);
937 			/* Necessarily EOF, due to parsing restrictions */
/* Fast path: wild copy with guaranteed slack. */
940 		LZ4_WILDCOPY(ip, op, cpy);
/* Read the 16-bit match offset; ref = match source in output. */
945 		LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
/* Reject offsets pointing before the output buffer. */
947 		if (ref < (BYTE * const) dest)
949 			* Error: offset creates reference outside of
954 		/* get matchlength */
955 		if ((length = (token & ML_MASK)) == ML_MASK) {
964 		/* copy repeated sequence */
/* Overlap (offset < STEPSIZE): byte-wise start plus table correction. */
965 		if unlikely(op - ref < STEPSIZE) {
967 			size_t dec64 = dec64table[op-ref];
977 			ref -= dec32table[op-ref];
982 			LZ4_COPYSTEP(ref, op);
984 		cpy = op + length - (STEPSIZE - 4);
/* Match tail near output end: bounded copy with EOF sanity check. */
985 		if (cpy > oend - COPYLENGTH) {
988 			* Error: request to write outside of
992 			LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
998 			* Check EOF (should never happen, since
999 			* last 5 bytes are supposed to be literals)
1004 		LZ4_SECURECOPY(ref, op, cpy);
1005 		op = cpy;	/* correction */
1008 	/* end of decoding */
/* Success: decoded byte count. */
1009 	return (int)(((char *)op) - dest);
1011 	/* write overflow error detected */
/* Failure: negated input offset of the faulty instruction. */
1013 	return (int)(-(((char *)ip) - source));
1021 lz4_ctx_cache = kmem_cache_create("lz4_ctx", sizeof(struct refTables),
1022 0, NULL, NULL, NULL, NULL, NULL, 0);
1031 kmem_cache_destroy(lz4_ctx_cache);