2 * LZ4 - Fast LZ compression algorithm
4 * Copyright (C) 2011-2013, Yann Collet.
5 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following disclaimer
15 * in the documentation and/or other materials provided with the
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * You can contact the author at :
31 * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
32 * - LZ4 source repository : http://code.google.com/p/lz4/
35 #include <sys/zfs_context.h>
/*
 * Forward declarations for the internal LZ4 entry points below.
 * NOTE(review): this chunk appears to have lines elided -- the
 * real_LZ4_compress() prototype is missing its trailing parameter
 * line; confirm against the complete file.
 */
37 static int real_LZ4_compress(const char *source, char *dest, int isize,
39 static int LZ4_compressBound(int isize);
40 static int LZ4_uncompress_unknownOutputSize(const char *source, char *dest,
41 int isize, int maxOutputSize);
42 static int LZ4_compressCtx(void *ctx, const char *source, char *dest,
43 int isize, int osize);
44 static int LZ4_compress64kCtx(void *ctx, const char *source, char *dest,
45 int isize, int osize);
/* kmem cache holding one struct refTables per compression context */
47 static kmem_cache_t *lz4_ctx_cache;
/*
 * ZFS-facing compression entry point.  Compresses s_len bytes from s_start
 * into d_start, reserving the first sizeof (uint32_t) bytes of the output
 * for the big-endian compressed size (consumed by lz4_decompress()).
 * NOTE(review): local declarations (bufsiz, dest) and the zero-result check
 * appear elided from this chunk -- confirm against the complete file.
 */
51 lz4_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
56 ASSERT(d_len >= sizeof (bufsiz));
58 bufsiz = real_LZ4_compress(s_start, &dest[sizeof (bufsiz)], s_len,
59 d_len - sizeof (bufsiz));
61 /* Signal an error if the compression routine returned zero. */
66 * Encode the compressed buffer size at the start. We'll need this in
67 * decompression to counter the effects of padding which might be
68 * added to the compressed buffer and which, if unhandled, would
69 * confuse the hell out of our decompression function.
/* stored big-endian so the on-disk format is endian-independent */
71 *(uint32_t *)dest = BE_32(bufsiz);
73 return (bufsiz + sizeof (bufsiz));
/*
 * ZFS-facing decompression entry point.  Reads the big-endian compressed
 * size prefix written by lz4_compress() and hands the remaining bytes to
 * the bounds-checked decoder.
 */
78 lz4_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
80 const char *src = s_start;
81 uint32_t bufsiz = BE_IN32(src);
83 /* invalid compressed buffer size encoded at start */
/* guards against a corrupt prefix claiming more input than we were given */
84 if (bufsiz + sizeof (bufsiz) > s_len)
88 * Returns 0 on success (decompression function returned non-negative)
89 * and non-zero on failure (decompression function returned negative).
91 return (LZ4_uncompress_unknownOutputSize(&src[sizeof (bufsiz)],
92 d_start, bufsiz, d_len) < 0);
96 * LZ4 API Description:
99 * real_LZ4_compress() :
100 * isize : is the input size. Max supported value is ~1.9GB
101 * return : the number of bytes written in buffer dest
102 * or 0 if the compression fails (if LZ4_COMPRESSMIN is set).
103 * note : destination buffer must be already allocated.
104 * destination buffer must be sized to handle worst cases
105 * situations (input data not compressible) worst case size
106 * evaluation is provided by function LZ4_compressBound().
110 * LZ4_compressBound() :
111 * Provides the maximum size that LZ4 may output in a "worst case"
112 * scenario (input data not compressible) primarily useful for memory
113 * allocation of output buffer.
115 * isize : is the input size. Max supported value is ~1.9GB
116 * return : maximum output size in a "worst case" scenario
117 * note : this function is limited by "int" range (2^31-1)
119 * LZ4_uncompress_unknownOutputSize() :
120 * isize : is the input size, therefore the compressed size
121 * maxOutputSize : is the size of the destination buffer (which must be
123 * return : the number of bytes decoded in the destination buffer
124 * (necessarily <= maxOutputSize). If the source stream is
125 * malformed, the function will stop decoding and return a
126 * negative result, indicating the byte position of the faulty
127 * instruction. This function never writes beyond dest +
128 * maxOutputSize, and is therefore protected against malicious
130 * note : Destination buffer must be already allocated.
132 * LZ4_compressCtx() :
133 * This function explicitly handles the CTX memory structure.
135 * ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
136 * by the caller (either on the stack or using kmem_zalloc). Passing NULL
139 * LZ4_compress64kCtx() :
140 * Same as LZ4_compressCtx(), but specific to small inputs (<64KB).
141 * isize *Must* be <64KB, otherwise the output will be corrupted.
143 * ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
144 * by the caller (either on the stack or using kmem_zalloc). Passing NULL
153 * COMPRESSIONLEVEL: Increasing this value improves compression ratio
154 * Lowering this value reduces memory usage. Reduced memory usage
155 * typically improves speed, due to cache effect (ex: L1 32KB for Intel,
156 * L1 64KB for AMD). Memory usage formula : N->2^(N+2) Bytes
157 * (examples : 12 -> 16KB ; 17 -> 512KB)
159 #define COMPRESSIONLEVEL 12
162 * NOTCOMPRESSIBLE_CONFIRMATION: Decreasing this value will make the
163 * algorithm skip faster data segments considered "incompressible".
164 * This may decrease compression ratio dramatically, but will be
165 * faster on incompressible data. Increasing this value will make
166 * the algorithm search more before declaring a segment "incompressible".
167 * This could improve compression a bit, but will be slower on
168 * incompressible data. The default value (6) is recommended.
170 #define NOTCOMPRESSIBLE_CONFIRMATION 6
173 * BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE: This will provide a boost to
174 * performance for big endian cpu, but the resulting compressed stream
175 * will be incompatible with little-endian CPU. You can set this option
176 * to 1 in situations where data will stay within closed environment.
177 * This option is useless on Little_Endian CPU (such as x86).
179 /* #define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1 */
182 * CPU Feature Detection
185 /* 32 or 64 bits ? */
/* NOTE(review): the #define LZ4_ARCH64 1 / #else lines appear elided here */
186 #if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || \
187 defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || \
188 defined(__LP64__) || defined(_LP64))
195 * Limits the amount of stack space that the algorithm may consume to hold
196 * the compression lookup table. The value `9' here means we'll never use
197 * more than 2k of stack (see above for a description of COMPRESSIONLEVEL).
198 * If more memory is needed, it is allocated from the heap.
/* NOTE(review): the STACKLIMIT #define itself appears elided here */
200 /* FreeBSD: Use heap for all platforms for now */
204 * Little Endian or Big Endian?
205 * Note: overwrite the below #define if you know your architecture endianness.
207 #if BYTE_ORDER == BIG_ENDIAN
208 #define LZ4_BIG_ENDIAN 1
211 * Little Endian assumed. PDP Endian and other very rare endian format
217 * Unaligned memory access is automatically enabled for "common" CPU,
218 * such as x86. For others CPU, the compiler will be more cautious, and
219 * insert extra code to ensure aligned access is respected. If you know
220 * your target CPU supports unaligned memory access, you may want to
221 * force this option manually to improve performance
223 #if defined(__ARM_FEATURE_UNALIGNED)
224 #define LZ4_FORCE_UNALIGNED_ACCESS 1
228 * FreeBSD: can't use GCC's __builtin_ctz when using sparc64 because
229 * gcc currently relies on libcompiler_rt.
231 * TODO: revisit this when situation changes.
/* Forces the De Bruijn software bit-count fallback in LZ4_NbCommonBytes() */
233 #if defined(__sparc64__)
234 #define LZ4_FORCE_SW_BITCOUNT
240 #if __STDC_VERSION__ >= 199901L /* C99 */
241 /* "restrict" is a known keyword */
243 /* Disable restrict */
/* Byte-swaps a 16-bit value; used for little-endian offsets on BE hosts */
247 #define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | \
248 (((x) & 0xffu) << 8)))
250 #define expect(expr, value) (__builtin_expect((expr), (value)))
255 #if defined(unlikely)
259 #define likely(expr) expect((expr) != 0, 1)
260 #define unlikely(expr) expect((expr) != 0, 0)
269 #ifndef LZ4_FORCE_UNALIGNED_ACCESS
/*
 * Wrapper structs for (possibly packed) unaligned loads/stores.
 * NOTE(review): the struct members and closing braces appear elided
 * from this chunk -- confirm against the complete file.
 */
273 typedef struct _U16_S {
276 typedef struct _U32_S {
279 typedef struct _U64_S {
283 #ifndef LZ4_FORCE_UNALIGNED_ACCESS
/* Unaligned access macros: read/write 64/32/16 bits at arbitrary addresses */
287 #define A64(x) (((U64_S *)(x))->v)
288 #define A32(x) (((U32_S *)(x))->v)
289 #define A16(x) (((U16_S *)(x))->v)
296 #define HASH_LOG COMPRESSIONLEVEL
297 #define HASHTABLESIZE (1 << HASH_LOG)
298 #define HASH_MASK (HASHTABLESIZE - 1)
/* Clamped to >= 2 so the skip heuristic never degenerates */
300 #define SKIPSTRENGTH (NOTCOMPRESSIBLE_CONFIRMATION > 2 ? \
301 NOTCOMPRESSIBLE_CONFIRMATION : 2)
304 * Defines if memory is allocated into the stack (local variable),
305 * or into the heap (kmem_alloc()).
307 #define HEAPMODE (HASH_LOG > STACKLIMIT)
/* Format constants: last 5 bytes of input are always emitted as literals */
309 #define LASTLITERALS 5
310 #define MFLIMIT (COPYLENGTH + MINMATCH)
311 #define MINLENGTH (MFLIMIT + 1)
/* NOTE(review): MAXD_LOG / ML_BITS #defines appear elided from this chunk */
314 #define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
317 #define ML_MASK ((1U<<ML_BITS)-1)
318 #define RUN_BITS (8-ML_BITS)
319 #define RUN_MASK ((1U<<RUN_BITS)-1)
323 * Architecture-specific macros
/* 64-bit branch: copy 8 bytes per step; NOTE(review): the guarding
 * #if LZ4_ARCH64 line and HTYPE definition appear elided here */
329 #define LZ4_COPYSTEP(s, d) A64(d) = A64(s); d += 8; s += 8;
330 #define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d)
331 #define LZ4_SECURECOPY(s, d, e) if (d < e) LZ4_WILDCOPY(s, d, e)
333 #define INITBASE(base) const BYTE* const base = ip
334 #else /* !LZ4_ARCH64 */
/* 32-bit branch: two 4-byte steps emulate one 8-byte packet copy */
338 #define LZ4_COPYSTEP(s, d) A32(d) = A32(s); d += 4; s += 4;
339 #define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d); LZ4_COPYSTEP(s, d);
340 #define LZ4_SECURECOPY LZ4_WILDCOPY
341 #define HTYPE const BYTE *
342 #define INITBASE(base) const int base = 0
343 #endif /* !LZ4_ARCH64 */
/* Match offsets are stored little-endian on disk regardless of host order */
345 #if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
346 #define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
347 { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
348 #define LZ4_WRITE_LITTLEENDIAN_16(p, i) \
349 { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p += 2; }
351 #define LZ4_READ_LITTLEENDIAN_16(d, s, p) { d = (s) - A16(p); }
352 #define LZ4_WRITE_LITTLEENDIAN_16(p, v) { A16(p) = v; p += 2; }
356 /* Local structures */
/* NOTE(review): the enclosing struct refTables declaration appears elided */
358 HTYPE hashTable[HASHTABLESIZE];
/* Fibonacci-hash of 4 input bytes down to HASH_LOG bits */
363 #define LZ4_HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH * 8) - \
365 #define LZ4_HASH_VALUE(p) LZ4_HASH_FUNCTION(A32(p))
/* WILDCOPY may overrun dest up to 7 bytes past e -- callers reserve slack */
366 #define LZ4_WILDCOPY(s, d, e) do { LZ4_COPYPACKET(s, d) } while (d < e);
367 #define LZ4_BLINDCOPY(s, d, l) { BYTE* e = (d) + l; LZ4_WILDCOPY(s, d, e); \
371 /* Private functions */
/*
 * 64-bit variant: given the XOR of two 8-byte words, returns how many
 * leading (big-endian) or trailing (little-endian) bytes are equal.
 * Uses compiler bit-count builtins when available, otherwise a
 * De Bruijn multiply-and-lookup fallback (LZ4_FORCE_SW_BITCOUNT).
 */
375 LZ4_NbCommonBytes(register U64 val)
377 #if defined(LZ4_BIG_ENDIAN)
378 #if !defined(LZ4_FORCE_SW_BITCOUNT)
/* count-leading-zeros >> 3 converts bit count to byte count */
379 return (__builtin_clzll(val) >> 3);
398 #if !defined(LZ4_FORCE_SW_BITCOUNT)
399 return (__builtin_ctzll(val) >> 3);
401 static const int DeBruijnBytePos[64] =
402 { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5,
403 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5,
404 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4,
405 4, 5, 7, 2, 6, 5, 7, 6, 7, 7
/* isolate lowest set bit (val & -val), then De Bruijn perfect hash */
407 return DeBruijnBytePos[((U64) ((val & -val) * 0x0218A392CDABBD3F)) >>
/*
 * 32-bit variant of LZ4_NbCommonBytes(): byte count of the common prefix
 * encoded in the XOR of two 4-byte words.  Same builtin/De Bruijn split
 * as the 64-bit version above.
 */
416 LZ4_NbCommonBytes(register U32 val)
418 #if defined(LZ4_BIG_ENDIAN)
419 #if !defined(LZ4_FORCE_SW_BITCOUNT)
420 return (__builtin_clz(val) >> 3);
434 #if !defined(LZ4_FORCE_SW_BITCOUNT)
435 return (__builtin_ctz(val) >> 3);
437 static const int DeBruijnBytePos[32] = {
438 0, 0, 3, 0, 3, 1, 3, 0,
439 3, 2, 2, 1, 3, 2, 0, 1,
440 3, 3, 1, 2, 2, 2, 2, 0,
441 3, 1, 2, 0, 1, 0, 1, 1
/* (S32) cast avoids UB from negating an unsigned-as-signed minimum */
443 return DeBruijnBytePos[((U32) ((val & -(S32) val) * 0x077CB531U)) >>
451 /* Public functions */
/*
 * Worst-case compressed size for an isize-byte input: one extra byte per
 * 255 literals (run-length continuation bytes) plus fixed overhead.
 */
454 LZ4_compressBound(int isize)
456 return (isize + (isize / 255) + 16);
459 /* Compression functions */
/*
 * General-purpose compressor (inputs of any supported size).  Writes at
 * most osize bytes to dest and returns the compressed length, or 0 if the
 * output limit would be exceeded.  ctx supplies the hash table when
 * HEAPMODE is set; otherwise a stack table is used.
 * NOTE(review): many lines (declarations, #if HEAPMODE guards, loop
 * braces, goto labels) appear elided from this chunk -- the comments
 * below describe the surviving lines only.
 */
463 LZ4_compressCtx(void *ctx, const char *source, char *dest, int isize,
467 struct refTables *srt = (struct refTables *)ctx;
468 HTYPE *HashTable = (HTYPE *) (srt->hashTable);
/* stack-table alternative when HEAPMODE is off */
470 HTYPE HashTable[HASHTABLESIZE] = { 0 };
473 const BYTE *ip = (BYTE *) source;
475 const BYTE *anchor = ip;
476 const BYTE *const iend = ip + isize;
477 const BYTE *const oend = (BYTE *) dest + osize;
/* no match may start within MFLIMIT bytes of the end of input */
478 const BYTE *const mflimit = iend - MFLIMIT;
479 #define matchlimit (iend - LASTLITERALS)
481 BYTE *op = (BYTE *) dest;
484 const int skipStrength = SKIPSTRENGTH;
/* inputs shorter than MINLENGTH are emitted as pure literals */
489 if (isize < MINLENGTH)
493 HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
495 forwardH = LZ4_HASH_VALUE(ip);
/* --- match search: probe the hash table, skipping faster the longer
 * we go without finding a match (incompressible-data heuristic) --- */
499 int findMatchAttempts = (1U << skipStrength) + 3;
500 const BYTE *forwardIp = ip;
507 int step = findMatchAttempts++ >> skipStrength;
509 forwardIp = ip + step;
511 if unlikely(forwardIp > mflimit) {
515 forwardH = LZ4_HASH_VALUE(forwardIp);
516 ref = base + HashTable[h];
517 HashTable[h] = ip - base;
/* accept only candidates within MAX_DISTANCE whose 4 bytes match */
519 } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));
/* extend the match backwards over equal preceding bytes */
522 while ((ip > anchor) && (ref > (BYTE *) source) &&
523 unlikely(ip[-1] == ref[-1])) {
528 /* Encode Literal length */
529 length = ip - anchor;
532 /* Check output limit */
533 if unlikely(op + length + (2 + 1 + LASTLITERALS) +
534 (length >> 8) > oend)
/* lengths >= RUN_MASK spill into 255-valued continuation bytes */
537 if (length >= (int)RUN_MASK) {
538 *token = (RUN_MASK << ML_BITS);
539 len = length - RUN_MASK;
540 for (; len > 254; len -= 255)
544 *token = (length << ML_BITS);
547 LZ4_BLINDCOPY(anchor, op, length);
/* emit the 16-bit little-endian match offset */
551 LZ4_WRITE_LITTLEENDIAN_16(op, ip - ref);
555 ref += MINMATCH; /* MinMatch verified */
/* extend the match forward a word at a time via XOR + NbCommonBytes */
557 while likely(ip < matchlimit - (STEPSIZE - 1)) {
558 UARCH diff = AARCH(ref) ^ AARCH(ip);
564 ip += LZ4_NbCommonBytes(diff);
568 if ((ip < (matchlimit - 3)) && (A32(ref) == A32(ip))) {
573 if ((ip < (matchlimit - 1)) && (A16(ref) == A16(ip))) {
577 if ((ip < matchlimit) && (*ref == *ip))
581 /* Encode MatchLength */
583 /* Check output limit */
584 if unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend)
586 if (len >= (int)ML_MASK) {
589 for (; len > 509; len -= 510) {
601 /* Test end of chunk */
/* refresh the table entry for position ip-2 to improve future matches */
607 HashTable[LZ4_HASH_VALUE(ip - 2)] = ip - 2 - base;
609 /* Test next position */
610 ref = base + HashTable[LZ4_HASH_VALUE(ip)];
611 HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
612 if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) {
617 /* Prepare next loop */
619 forwardH = LZ4_HASH_VALUE(ip);
623 /* Encode Last Literals */
625 int lastRun = iend - anchor;
/* final output-space check covers literals + token + continuation bytes */
626 if (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) >
629 if (lastRun >= (int)RUN_MASK) {
630 *op++ = (RUN_MASK << ML_BITS);
632 for (; lastRun > 254; lastRun -= 255) {
635 *op++ = (BYTE)lastRun;
637 *op++ = (lastRun << ML_BITS);
638 (void) memcpy(op, anchor, iend - anchor);
643 return (int)(((char *)op) - dest);
648 /* Note : this function is valid only if isize < LZ4_64KLIMIT */
649 #define LZ4_64KLIMIT ((1 << 16) + (MFLIMIT - 1))
/* One extra hash bit: inputs < 64KB store 16-bit offsets, so the table
 * can be twice as large in the same memory footprint */
650 #define HASHLOG64K (HASH_LOG + 1)
651 #define HASH64KTABLESIZE (1U << HASHLOG64K)
652 #define LZ4_HASH64K_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8) - \
654 #define LZ4_HASH64K_VALUE(p) LZ4_HASH64K_FUNCTION(A32(p))
/*
 * Small-input compressor, valid only for isize < LZ4_64KLIMIT (~64KB).
 * Identical structure to LZ4_compressCtx() but stores 16-bit table
 * offsets (U16), which lets it drop the MAX_DISTANCE window checks --
 * every position in a <64KB input is necessarily within range.
 * NOTE(review): as above, many lines appear elided from this chunk.
 */
658 LZ4_compress64kCtx(void *ctx, const char *source, char *dest, int isize,
662 struct refTables *srt = (struct refTables *)ctx;
663 U16 *HashTable = (U16 *) (srt->hashTable);
665 U16 HashTable[HASH64KTABLESIZE] = { 0 };
668 const BYTE *ip = (BYTE *) source;
669 const BYTE *anchor = ip;
670 const BYTE *const base = ip;
671 const BYTE *const iend = ip + isize;
672 const BYTE *const oend = (BYTE *) dest + osize;
673 const BYTE *const mflimit = iend - MFLIMIT;
674 #define matchlimit (iend - LASTLITERALS)
676 BYTE *op = (BYTE *) dest;
679 const int skipStrength = SKIPSTRENGTH;
683 if (isize < MINLENGTH)
688 forwardH = LZ4_HASH64K_VALUE(ip);
/* match search with incompressible-skip heuristic (see LZ4_compressCtx) */
692 int findMatchAttempts = (1U << skipStrength) + 3;
693 const BYTE *forwardIp = ip;
700 int step = findMatchAttempts++ >> skipStrength;
702 forwardIp = ip + step;
704 if (forwardIp > mflimit) {
708 forwardH = LZ4_HASH64K_VALUE(forwardIp);
709 ref = base + HashTable[h];
710 HashTable[h] = ip - base;
/* no distance check needed: 16-bit offsets always fit the window */
712 } while (A32(ref) != A32(ip));
715 while ((ip > anchor) && (ref > (BYTE *) source) &&
716 (ip[-1] == ref[-1])) {
721 /* Encode Literal length */
722 length = ip - anchor;
725 /* Check output limit */
726 if unlikely(op + length + (2 + 1 + LASTLITERALS) +
727 (length >> 8) > oend)
730 if (length >= (int)RUN_MASK) {
731 *token = (RUN_MASK << ML_BITS);
732 len = length - RUN_MASK;
733 for (; len > 254; len -= 255)
737 *token = (length << ML_BITS);
740 LZ4_BLINDCOPY(anchor, op, length);
744 LZ4_WRITE_LITTLEENDIAN_16(op, ip - ref);
748 ref += MINMATCH; /* MinMatch verified */
750 while (ip < matchlimit - (STEPSIZE - 1)) {
751 UARCH diff = AARCH(ref) ^ AARCH(ip);
757 ip += LZ4_NbCommonBytes(diff);
761 if ((ip < (matchlimit - 3)) && (A32(ref) == A32(ip))) {
766 if ((ip < (matchlimit - 1)) && (A16(ref) == A16(ip))) {
770 if ((ip < matchlimit) && (*ref == *ip))
774 /* Encode MatchLength */
776 /* Check output limit */
777 if unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend)
779 if (len >= (int)ML_MASK) {
782 for (; len > 509; len -= 510) {
794 /* Test end of chunk */
800 HashTable[LZ4_HASH64K_VALUE(ip - 2)] = ip - 2 - base;
802 /* Test next position */
803 ref = base + HashTable[LZ4_HASH64K_VALUE(ip)];
804 HashTable[LZ4_HASH64K_VALUE(ip)] = ip - base;
805 if (A32(ref) == A32(ip)) {
810 /* Prepare next loop */
812 forwardH = LZ4_HASH64K_VALUE(ip);
816 /* Encode Last Literals */
818 int lastRun = iend - anchor;
819 if (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) >
822 if (lastRun >= (int)RUN_MASK) {
823 *op++ = (RUN_MASK << ML_BITS);
825 for (; lastRun > 254; lastRun -= 255)
827 *op++ = (BYTE)lastRun;
829 *op++ = (lastRun << ML_BITS);
830 (void) memcpy(op, anchor, iend - anchor);
835 return (int)(((char *)op) - dest);
/*
 * Dispatcher: allocates a context from lz4_ctx_cache and routes to the
 * 64KB-specialized or general compressor based on input size.
 * NOTE(review): the two trailing calls with a NULL ctx look like the
 * alternate !HEAPMODE branch with its #if/#else lines elided -- confirm
 * against the complete file.
 */
839 real_LZ4_compress(const char *source, char *dest, int isize, int osize)
842 void *ctx = kmem_cache_alloc(lz4_ctx_cache, KM_NOSLEEP);
846 * out of kernel memory, gently fall through - this will disable
847 * compression in zio_compress_data
/* cache does not zero on alloc, so clear the hash table explicitly */
852 bzero(ctx, sizeof(struct refTables));
853 if (isize < LZ4_64KLIMIT)
854 result = LZ4_compress64kCtx(ctx, source, dest, isize, osize);
856 result = LZ4_compressCtx(ctx, source, dest, isize, osize);
858 kmem_cache_free(lz4_ctx_cache, ctx);
861 if (isize < (int)LZ4_64KLIMIT)
862 return (LZ4_compress64kCtx(NULL, source, dest, isize, osize));
863 return (LZ4_compressCtx(NULL, source, dest, isize, osize));
867 /* Decompression functions */
870 * Note: The decoding function LZ4_uncompress_unknownOutputSize() is safe
871 * against "buffer overflow" attack type. They will never write nor
872 * read outside of the provided output buffers.
873 * LZ4_uncompress_unknownOutputSize() also ensures that it will never
874 * read outside of the input buffer. A corrupted input will produce
875 * an error result, a negative int, indicating the position of the
876 * error within input stream.
/*
 * Bounds-checked decoder: decodes isize compressed bytes into dest,
 * never writing past dest + maxOutputSize nor reading past source +
 * isize.  Returns the number of bytes decoded, or a negative value
 * (the offset of the faulty input) on malformed data.
 * NOTE(review): the main decode loop's braces, token read, and several
 * error branches appear elided from this chunk.
 */
880 LZ4_uncompress_unknownOutputSize(const char *source, char *dest, int isize,
883 /* Local Variables */
884 const BYTE *restrict ip = (const BYTE *) source;
885 const BYTE *const iend = ip + isize;
888 BYTE *op = (BYTE *) dest;
889 BYTE *const oend = op + maxOutputSize;
/* correction tables for overlapping copies when match offset < STEPSIZE */
892 size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
894 size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
/* --- decode literal length: RUN_MASK means continuation bytes follow --- */
904 if ((length = (token >> ML_BITS)) == RUN_MASK) {
906 while ((ip < iend) && (s == 255)) {
913 /* CORNER-CASE: cpy might overflow. */
915 goto _output_error; /* cpy was overflowed, bail! */
/* literals that reach the buffer tails take the slow, exact path */
916 if ((cpy > oend - COPYLENGTH) ||
917 (ip + length > iend - COPYLENGTH)) {
919 /* Error: writes beyond output buffer */
921 if (ip + length != iend)
923 * Error: LZ4 format requires to consume all
924 * input at this stage
927 (void) memcpy(op, ip, length);
929 /* Necessarily EOF, due to parsing restrictions */
/* fast path: wild copy is safe thanks to the COPYLENGTH margin above */
932 LZ4_WILDCOPY(ip, op, cpy);
/* --- decode match: 16-bit little-endian offset, then match length --- */
937 LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
939 if (ref < (BYTE * const) dest)
941 * Error: offset creates reference outside of
946 /* get matchlength */
947 if ((length = (token & ML_MASK)) == ML_MASK) {
956 /* copy repeated sequence */
/* offsets < STEPSIZE overlap the output; use the dec tables to widen */
957 if unlikely(op - ref < STEPSIZE) {
959 size_t dec64 = dec64table[op-ref];
969 ref -= dec32table[op-ref];
974 LZ4_COPYSTEP(ref, op);
976 cpy = op + length - (STEPSIZE - 4);
977 if (cpy > oend - COPYLENGTH) {
980 * Error: request to write outside of
984 LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
990 * Check EOF (should never happen, since
991 * last 5 bytes are supposed to be literals)
996 LZ4_SECURECOPY(ref, op, cpy);
997 op = cpy; /* correction */
1000 /* end of decoding */
1001 return (int)(((char *)op) - dest);
1003 /* write overflow error detected */
/* negative return encodes the input position of the fault */
1005 return (int)(-(((char *)ip) - source));
/*
 * Module setup/teardown fragments: create and destroy the refTables
 * kmem cache used by real_LZ4_compress().  NOTE(review): the enclosing
 * lz4_init()/lz4_fini() function definitions appear elided here.
 */
1013 lz4_ctx_cache = kmem_cache_create("lz4_ctx", sizeof(struct refTables),
1014 0, NULL, NULL, NULL, NULL, NULL, 0);
1023 kmem_cache_destroy(lz4_ctx_cache);