2 * LZ4 - Fast LZ compression algorithm
4 * Copyright (C) 2011-2013, Yann Collet.
5 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following disclaimer
15 * in the documentation and/or other materials provided with the
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * You can contact the author at :
31 * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
32 * - LZ4 source repository : http://code.google.com/p/lz4/
35 #include <sys/zfs_context.h>
/*
 * Forward declarations of the internal LZ4 primitives defined below.
 * real_LZ4_compress() dispatches to the 64k-optimized or generic
 * compressor; the *Ctx variants take a caller-allocated hash-table
 * context (see the API description further down in this file).
 * NOTE(review): continuation lines of some prototypes (e.g. the trailing
 * "int osize);" of real_LZ4_compress) appear elided from this copy —
 * verify against the complete upstream lz4.c.
 */
37 static int real_LZ4_compress(const char *source, char *dest, int isize,
39 static int LZ4_compressBound(int isize);
40 static int LZ4_uncompress_unknownOutputSize(const char *source, char *dest,
41 int isize, int maxOutputSize);
42 static int LZ4_compressCtx(void *ctx, const char *source, char *dest,
43 int isize, int osize);
44 static int LZ4_compress64kCtx(void *ctx, const char *source, char *dest,
45 int isize, int osize);
49 lz4_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
54 ASSERT(d_len >= sizeof (bufsiz));
56 bufsiz = real_LZ4_compress(s_start, &dest[sizeof (bufsiz)], s_len,
57 d_len - sizeof (bufsiz));
59 /* Signal an error if the compression routine returned zero. */
64 * Encode the compresed buffer size at the start. We'll need this in
65 * decompression to counter the effects of padding which might be
66 * added to the compressed buffer and which, if unhandled, would
67 * confuse the hell out of our decompression function.
69 *(uint32_t *)dest = BE_32(bufsiz);
71 return (bufsiz + sizeof (bufsiz));
76 lz4_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
78 const char *src = s_start;
79 uint32_t bufsiz = BE_IN32(src);
81 /* invalid compressed buffer size encoded at start */
82 if (bufsiz + sizeof (bufsiz) > s_len)
86 * Returns 0 on success (decompression function returned non-negative)
87 * and non-zero on failure (decompression function returned negative.
89 return (LZ4_uncompress_unknownOutputSize(&src[sizeof (bufsiz)],
90 d_start, bufsiz, d_len) < 0);
94 * LZ4 API Description:
97 * real_LZ4_compress() :
98 * isize : is the input size. Max supported value is ~1.9GB
99 * return : the number of bytes written in buffer dest
100 * or 0 if the compression fails (if LZ4_COMPRESSMIN is set).
101 * note : destination buffer must be already allocated.
102 * destination buffer must be sized to handle worst cases
103 * situations (input data not compressible) worst case size
104 * evaluation is provided by function LZ4_compressBound().
108 * LZ4_compressBound() :
109 * Provides the maximum size that LZ4 may output in a "worst case"
110 * scenario (input data not compressible) primarily useful for memory
111 * allocation of output buffer.
113 * isize : is the input size. Max supported value is ~1.9GB
114 * return : maximum output size in a "worst case" scenario
115 * note : this function is limited by "int" range (2^31-1)
117 * LZ4_uncompress_unknownOutputSize() :
118 * isize : is the input size, therefore the compressed size
119 * maxOutputSize : is the size of the destination buffer (which must be
121 * return : the number of bytes decoded in the destination buffer
122 * (necessarily <= maxOutputSize). If the source stream is
123 * malformed, the function will stop decoding and return a
124 * negative result, indicating the byte position of the faulty
125 * instruction. This function never writes beyond dest +
126 * maxOutputSize, and is therefore protected against malicious
128 * note : Destination buffer must be already allocated.
130 * LZ4_compressCtx() :
131 * This function explicitly handles the CTX memory structure.
133 * ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
134 * by the caller (either on the stack or using kmem_zalloc). Passing NULL
137 * LZ4_compress64kCtx() :
138 * Same as LZ4_compressCtx(), but specific to small inputs (<64KB).
139 * isize *Must* be <64KB, otherwise the output will be corrupted.
141 * ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
142 * by the caller (either on the stack or using kmem_zalloc). Passing NULL
/* Compression tunables — upstream default values kept unchanged. */
151 * COMPRESSIONLEVEL: Increasing this value improves compression ratio
152 * Lowering this value reduces memory usage. Reduced memory usage
153 * typically improves speed, due to cache effect (ex: L1 32KB for Intel,
154 * L1 64KB for AMD). Memory usage formula : N->2^(N+2) Bytes
155 * (examples : 12 -> 16KB ; 17 -> 512KB)
157 #define COMPRESSIONLEVEL 12
160 * NOTCOMPRESSIBLE_CONFIRMATION: Decreasing this value will make the
161 * algorithm skip faster data segments considered "incompressible".
162 * This may decrease compression ratio dramatically, but will be
163 * faster on incompressible data. Increasing this value will make
164 * the algorithm search more before declaring a segment "incompressible".
165 * This could improve compression a bit, but will be slower on
166 * incompressible data. The default value (6) is recommended.
168 #define NOTCOMPRESSIBLE_CONFIRMATION 6
171 * BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE: This will provide a boost to
172 * performance for big endian cpu, but the resulting compressed stream
173 * will be incompatible with little-endian CPU. You can set this option
174 * to 1 in situations where data will stay within closed environment.
175 * This option is useless on Little_Endian CPU (such as x86).
177 /* #define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1 */
/*
 * NOTE(review): many #else/#endif counterparts of the conditionals in this
 * region are missing from this copy of the file, and struct bodies are
 * truncated.  The surviving lines are kept verbatim; do not edit this
 * region without consulting the complete upstream lz4.c.
 */
180 * CPU Feature Detection
183 /* 32 or 64 bits ? */
184 #if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || \
185 defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || \
186 defined(__LP64__) || defined(_LP64))
189 * Illumos: On amd64 we have 20k of stack and 24k on sun4u and sun4v, so we
190 * can spend 16k on the algorithm
192 /* FreeBSD: Use heap for all platforms for now */
197 * Illumos: On i386 we only have 12k of stack, so in order to maintain the
198 * same COMPRESSIONLEVEL we have to use heap allocation. Performance will
199 * suck, but alas, it's ZFS on 32-bit we're talking about, so...
205 * Little Endian or Big Endian?
206 * Note: overwrite the below #define if you know your architecture endianess.
208 #if BYTE_ORDER == BIG_ENDIAN
209 #define LZ4_BIG_ENDIAN 1
212 * Little Endian assumed. PDP Endian and other very rare endian format
218 * Unaligned memory access is automatically enabled for "common" CPU,
219 * such as x86. For others CPU, the compiler will be more cautious, and
220 * insert extra code to ensure aligned access is respected. If you know
221 * your target CPU supports unaligned memory access, you may want to
222 * force this option manually to improve performance
224 #if defined(__ARM_FEATURE_UNALIGNED)
225 #define LZ4_FORCE_UNALIGNED_ACCESS 1
229 * FreeBSD: can't use GCC's __builtin_ctz when using sparc64 because
230 * gcc currently rely on libcompiler_rt.
232 * TODO: revisit this when situation changes.
234 #if defined(__sparc64__)
235 #define LZ4_FORCE_SW_BITCOUNT
241 #if __STDC_VERSION__ >= 199901L /* C99 */
242 /* "restrict" is a known keyword */
244 /* Disable restrict */
248 #define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | \
249 (((x) & 0xffu) << 8)))
251 #define expect(expr, value) (__builtin_expect((expr), (value)))
256 #if defined(unlikely)
260 #define likely(expr) expect((expr) != 0, 1)
261 #define unlikely(expr) expect((expr) != 0, 0)
/* Packed single-member structs: typed unaligned loads/stores (A16/A32/A64). */
270 #ifndef LZ4_FORCE_UNALIGNED_ACCESS
274 typedef struct _U16_S {
277 typedef struct _U32_S {
280 typedef struct _U64_S {
284 #ifndef LZ4_FORCE_UNALIGNED_ACCESS
288 #define A64(x) (((U64_S *)(x))->v)
289 #define A32(x) (((U32_S *)(x))->v)
290 #define A16(x) (((U16_S *)(x))->v)
/* Hash table sizing derives from the COMPRESSIONLEVEL tunable above. */
297 #define HASH_LOG COMPRESSIONLEVEL
298 #define HASHTABLESIZE (1 << HASH_LOG)
299 #define HASH_MASK (HASHTABLESIZE - 1)
301 #define SKIPSTRENGTH (NOTCOMPRESSIBLE_CONFIRMATION > 2 ? \
302 NOTCOMPRESSIBLE_CONFIRMATION : 2)
305 * Defines if memory is allocated into the stack (local variable),
306 * or into the heap (kmem_alloc()).
308 #define HEAPMODE (HASH_LOG > STACKLIMIT)
310 #define LASTLITERALS 5
311 #define MFLIMIT (COPYLENGTH + MINMATCH)
312 #define MINLENGTH (MFLIMIT + 1)
315 #define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
318 #define ML_MASK ((1U<<ML_BITS)-1)
319 #define RUN_BITS (8-ML_BITS)
320 #define RUN_MASK ((1U<<RUN_BITS)-1)
/*
 * NOTE(review): the 64-bit (#if LZ4_ARCH64) halves of several of these
 * definitions and parts of struct refTables are elided from this copy;
 * the visible lines are kept verbatim.
 */
324 * Architecture-specific macros
330 #define LZ4_COPYSTEP(s, d) A64(d) = A64(s); d += 8; s += 8;
331 #define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d)
332 #define LZ4_SECURECOPY(s, d, e) if (d < e) LZ4_WILDCOPY(s, d, e)
334 #define INITBASE(base) const BYTE* const base = ip
335 #else /* !LZ4_ARCH64 */
339 #define LZ4_COPYSTEP(s, d) A32(d) = A32(s); d += 4; s += 4;
340 #define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d); LZ4_COPYSTEP(s, d);
341 #define LZ4_SECURECOPY LZ4_WILDCOPY
342 #define HTYPE const BYTE *
343 #define INITBASE(base) const int base = 0
344 #endif /* !LZ4_ARCH64 */
/* Match offsets are stored little-endian on disk; swap on BE hosts. */
346 #if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
347 #define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
348 { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
349 #define LZ4_WRITE_LITTLEENDIAN_16(p, i) \
350 { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p += 2; }
352 #define LZ4_READ_LITTLEENDIAN_16(d, s, p) { d = (s) - A16(p); }
353 #define LZ4_WRITE_LITTLEENDIAN_16(p, v) { A16(p) = v; p += 2; }
357 /* Local structures */
359 HTYPE hashTable[HASHTABLESIZE];
/* Fibonacci-style multiplicative hash over 4 input bytes. */
364 #define LZ4_HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH * 8) - \
366 #define LZ4_HASH_VALUE(p) LZ4_HASH_FUNCTION(A32(p))
367 #define LZ4_WILDCOPY(s, d, e) do { LZ4_COPYPACKET(s, d) } while (d < e);
368 #define LZ4_BLINDCOPY(s, d, l) { BYTE* e = (d) + l; LZ4_WILDCOPY(s, d, e); \
372 /* Private functions */
/*
 * Return how many leading (big-endian) or trailing (little-endian) bytes
 * of a 64-bit word are zero; the compressors feed it the XOR of two words
 * to count matching bytes.  Uses __builtin_clzll/ctzll, with a De Bruijn
 * multiply-and-lookup fallback when LZ4_FORCE_SW_BITCOUNT is set.
 * NOTE(review): the #else/#endif structure of this function is partially
 * elided in this copy — consult the upstream lz4.c before editing.
 */
376 LZ4_NbCommonBytes(register U64 val)
378 #if defined(LZ4_BIG_ENDIAN)
379 #if !defined(LZ4_FORCE_SW_BITCOUNT)
380 return (__builtin_clzll(val) >> 3);
399 #if !defined(LZ4_FORCE_SW_BITCOUNT)
400 return (__builtin_ctzll(val) >> 3);
402 static const int DeBruijnBytePos[64] =
403 { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5,
404 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5,
405 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4,
406 4, 5, 7, 2, 6, 5, 7, 6, 7, 7
408 return DeBruijnBytePos[((U64) ((val & -val) * 0x0218A392CDABBD3F)) >>
/*
 * 32-bit variant of LZ4_NbCommonBytes() (compiled on !LZ4_ARCH64 builds):
 * byte count of leading/trailing zero bytes via __builtin_clz/ctz or a
 * 32-entry De Bruijn lookup fallback.
 * NOTE(review): #else/#endif lines are partially elided in this copy.
 */
417 LZ4_NbCommonBytes(register U32 val)
419 #if defined(LZ4_BIG_ENDIAN)
420 #if !defined(LZ4_FORCE_SW_BITCOUNT)
421 return (__builtin_clz(val) >> 3);
435 #if !defined(LZ4_FORCE_SW_BITCOUNT)
436 return (__builtin_ctz(val) >> 3);
438 static const int DeBruijnBytePos[32] = {
439 0, 0, 3, 0, 3, 1, 3, 0,
440 3, 2, 2, 1, 3, 2, 0, 1,
441 3, 3, 1, 2, 2, 2, 2, 0,
442 3, 1, 2, 0, 1, 0, 1, 1
444 return DeBruijnBytePos[((U32) ((val & -(S32) val) * 0x077CB531U)) >>
452 /* Public functions */
/*
 * Worst-case compressed size for an isize-byte input: the input itself,
 * plus one extra length byte per 255 input bytes, plus a small constant
 * for token/terminator overhead.  Limited to "int" range.
 *
 * NOTE(review): the "static int" line and braces were elided in this copy;
 * restored here.
 */
static int
LZ4_compressBound(int isize)
{
	return (isize + (isize / 255) + 16);
}
460 /* Compression functions */
/*
 * Core LZ4 block compressor for arbitrary input sizes.  When ctx is a
 * caller-allocated struct refTables it is used as the hash table;
 * otherwise the table is a stack local (see the HEAPMODE discussion
 * above).  Emits token / literal-run / little-endian offset / match-length
 * sequences into dest and returns the number of bytes written.
 * NOTE(review): this copy of the function is missing many lines (braces,
 * goto labels such as the next-match/last-literals targets, and #if
 * HEAPMODE markers).  The surviving lines are kept byte-identical; do not
 * modify the logic without the complete upstream lz4.c at hand.
 */
464 LZ4_compressCtx(void *ctx, const char *source, char *dest, int isize,
468 struct refTables *srt = (struct refTables *)ctx;
469 HTYPE *HashTable = (HTYPE *) (srt->hashTable);
471 HTYPE HashTable[HASHTABLESIZE] = { 0 };
474 const BYTE *ip = (BYTE *) source;
476 const BYTE *anchor = ip;
477 const BYTE *const iend = ip + isize;
478 const BYTE *const oend = (BYTE *) dest + osize;
479 const BYTE *const mflimit = iend - MFLIMIT;
480 #define matchlimit (iend - LASTLITERALS)
482 BYTE *op = (BYTE *) dest;
485 const int skipStrength = SKIPSTRENGTH;
/* Inputs shorter than MINLENGTH cannot contain a match. */
490 if (isize < MINLENGTH)
494 HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
496 forwardH = LZ4_HASH_VALUE(ip);
/* Match search: hash probes with an adaptive forward skip. */
500 int findMatchAttempts = (1U << skipStrength) + 3;
501 const BYTE *forwardIp = ip;
508 int step = findMatchAttempts++ >> skipStrength;
510 forwardIp = ip + step;
512 if unlikely(forwardIp > mflimit) {
516 forwardH = LZ4_HASH_VALUE(forwardIp);
517 ref = base + HashTable[h];
518 HashTable[h] = ip - base;
520 } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));
/* Extend the match backwards over identical preceding bytes. */
523 while ((ip > anchor) && (ref > (BYTE *) source) &&
524 unlikely(ip[-1] == ref[-1])) {
529 /* Encode Literal length */
530 length = ip - anchor;
533 /* Check output limit */
534 if unlikely(op + length + (2 + 1 + LASTLITERALS) +
535 (length >> 8) > oend)
538 if (length >= (int)RUN_MASK) {
539 *token = (RUN_MASK << ML_BITS);
540 len = length - RUN_MASK;
541 for (; len > 254; len -= 255)
545 *token = (length << ML_BITS);
548 LZ4_BLINDCOPY(anchor, op, length);
/* Emit the 16-bit little-endian match offset. */
552 LZ4_WRITE_LITTLEENDIAN_16(op, ip - ref);
556 ref += MINMATCH; /* MinMatch verified */
/* Extend the match forward a word at a time via LZ4_NbCommonBytes. */
558 while likely(ip < matchlimit - (STEPSIZE - 1)) {
559 UARCH diff = AARCH(ref) ^ AARCH(ip);
565 ip += LZ4_NbCommonBytes(diff);
569 if ((ip < (matchlimit - 3)) && (A32(ref) == A32(ip))) {
574 if ((ip < (matchlimit - 1)) && (A16(ref) == A16(ip))) {
578 if ((ip < matchlimit) && (*ref == *ip))
582 /* Encode MatchLength */
584 /* Check output limit */
585 if unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend)
587 if (len >= (int)ML_MASK) {
590 for (; len > 509; len -= 510) {
602 /* Test end of chunk */
608 HashTable[LZ4_HASH_VALUE(ip - 2)] = ip - 2 - base;
610 /* Test next position */
611 ref = base + HashTable[LZ4_HASH_VALUE(ip)];
612 HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
613 if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) {
618 /* Prepare next loop */
620 forwardH = LZ4_HASH_VALUE(ip);
624 /* Encode Last Literals */
626 int lastRun = iend - anchor;
627 if (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) >
630 if (lastRun >= (int)RUN_MASK) {
631 *op++ = (RUN_MASK << ML_BITS);
633 for (; lastRun > 254; lastRun -= 255) {
636 *op++ = (BYTE)lastRun;
638 *op++ = (lastRun << ML_BITS);
639 (void) memcpy(op, anchor, iend - anchor);
/* Number of bytes written to dest. */
644 return (int)(((char *)op) - dest);
/*
 * Constants for the small-input variant below: inputs under LZ4_64KLIMIT
 * can index the hash table with 16-bit offsets, allowing a denser (and
 * one-bit-larger, HASHLOG64K) table.
 */
649 /* Note : this function is valid only if isize < LZ4_64KLIMIT */
650 #define LZ4_64KLIMIT ((1 << 16) + (MFLIMIT - 1))
651 #define HASHLOG64K (HASH_LOG + 1)
652 #define HASH64KTABLESIZE (1U << HASHLOG64K)
653 #define LZ4_HASH64K_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8) - \
655 #define LZ4_HASH64K_VALUE(p) LZ4_HASH64K_FUNCTION(A32(p))
/*
 * Small-input (< LZ4_64KLIMIT) variant of LZ4_compressCtx().  Identical
 * structure, but the hash table stores 16-bit offsets from base, so no
 * MAX_DISTANCE window checks are needed in the probe loop (compare with
 * the generic version above).
 * NOTE(review): as with LZ4_compressCtx(), many lines (braces, goto
 * labels, #if HEAPMODE markers) are elided from this copy; the surviving
 * lines are kept byte-identical.
 */
659 LZ4_compress64kCtx(void *ctx, const char *source, char *dest, int isize,
663 struct refTables *srt = (struct refTables *)ctx;
664 U16 *HashTable = (U16 *) (srt->hashTable);
666 U16 HashTable[HASH64KTABLESIZE] = { 0 };
669 const BYTE *ip = (BYTE *) source;
670 const BYTE *anchor = ip;
671 const BYTE *const base = ip;
672 const BYTE *const iend = ip + isize;
673 const BYTE *const oend = (BYTE *) dest + osize;
674 const BYTE *const mflimit = iend - MFLIMIT;
675 #define matchlimit (iend - LASTLITERALS)
677 BYTE *op = (BYTE *) dest;
680 const int skipStrength = SKIPSTRENGTH;
/* Inputs shorter than MINLENGTH cannot contain a match. */
684 if (isize < MINLENGTH)
689 forwardH = LZ4_HASH64K_VALUE(ip);
/* Match search: hash probes with an adaptive forward skip. */
693 int findMatchAttempts = (1U << skipStrength) + 3;
694 const BYTE *forwardIp = ip;
701 int step = findMatchAttempts++ >> skipStrength;
703 forwardIp = ip + step;
705 if (forwardIp > mflimit) {
709 forwardH = LZ4_HASH64K_VALUE(forwardIp);
710 ref = base + HashTable[h];
711 HashTable[h] = ip - base;
713 } while (A32(ref) != A32(ip));
/* Extend the match backwards over identical preceding bytes. */
716 while ((ip > anchor) && (ref > (BYTE *) source) &&
717 (ip[-1] == ref[-1])) {
722 /* Encode Literal length */
723 length = ip - anchor;
726 /* Check output limit */
727 if unlikely(op + length + (2 + 1 + LASTLITERALS) +
728 (length >> 8) > oend)
731 if (length >= (int)RUN_MASK) {
732 *token = (RUN_MASK << ML_BITS);
733 len = length - RUN_MASK;
734 for (; len > 254; len -= 255)
738 *token = (length << ML_BITS);
741 LZ4_BLINDCOPY(anchor, op, length);
/* Emit the 16-bit little-endian match offset. */
745 LZ4_WRITE_LITTLEENDIAN_16(op, ip - ref);
749 ref += MINMATCH; /* MinMatch verified */
/* Extend the match forward a word at a time via LZ4_NbCommonBytes. */
751 while (ip < matchlimit - (STEPSIZE - 1)) {
752 UARCH diff = AARCH(ref) ^ AARCH(ip);
758 ip += LZ4_NbCommonBytes(diff);
762 if ((ip < (matchlimit - 3)) && (A32(ref) == A32(ip))) {
767 if ((ip < (matchlimit - 1)) && (A16(ref) == A16(ip))) {
771 if ((ip < matchlimit) && (*ref == *ip))
775 /* Encode MatchLength */
777 /* Check output limit */
778 if unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend)
780 if (len >= (int)ML_MASK) {
783 for (; len > 509; len -= 510) {
795 /* Test end of chunk */
801 HashTable[LZ4_HASH64K_VALUE(ip - 2)] = ip - 2 - base;
803 /* Test next position */
804 ref = base + HashTable[LZ4_HASH64K_VALUE(ip)];
805 HashTable[LZ4_HASH64K_VALUE(ip)] = ip - base;
806 if (A32(ref) == A32(ip)) {
811 /* Prepare next loop */
813 forwardH = LZ4_HASH64K_VALUE(ip);
817 /* Encode Last Literals */
819 int lastRun = iend - anchor;
820 if (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) >
823 if (lastRun >= (int)RUN_MASK) {
824 *op++ = (RUN_MASK << ML_BITS);
826 for (; lastRun > 254; lastRun -= 255)
828 *op++ = (BYTE)lastRun;
830 *op++ = (lastRun << ML_BITS);
831 (void) memcpy(op, anchor, iend - anchor);
/* Number of bytes written to dest. */
836 return (int)(((char *)op) - dest);
840 real_LZ4_compress(const char *source, char *dest, int isize, int osize)
843 void *ctx = kmem_zalloc(sizeof (struct refTables), KM_NOSLEEP);
847 * out of kernel memory, gently fall through - this will disable
848 * compression in zio_compress_data
853 if (isize < LZ4_64KLIMIT)
854 result = LZ4_compress64kCtx(ctx, source, dest, isize, osize);
856 result = LZ4_compressCtx(ctx, source, dest, isize, osize);
858 kmem_free(ctx, sizeof (struct refTables));
861 if (isize < (int)LZ4_64KLIMIT)
862 return (LZ4_compress64kCtx(NULL, source, dest, isize, osize));
863 return (LZ4_compressCtx(NULL, source, dest, isize, osize));
867 /* Decompression functions */
870 * Note: The decoding function LZ4_uncompress_unknownOutputSize() is safe
871 * against "buffer overflow" attack type. It will never write nor
872 * read outside of the provided output buffers.
873 * LZ4_uncompress_unknownOutputSize() also ensures that it will never
874 * read outside of the input buffer. A corrupted input will produce
875 * an error result, a negative int, indicating the position of the
876 * error within the input stream.
/*
 * Safe LZ4 block decoder: decodes isize compressed bytes from source into
 * dest, never writing past dest + maxOutputSize and never reading past
 * source + isize.  Returns the number of bytes decoded, or a negative
 * value (the negated input offset) on malformed input.
 * NOTE(review): this copy is missing the main decode loop's braces,
 * several error "goto" lines and token/length reads; the surviving lines
 * are kept byte-identical — consult the upstream lz4.c before editing
 * this safety-critical function.
 */
880 LZ4_uncompress_unknownOutputSize(const char *source, char *dest, int isize,
883 /* Local Variables */
884 const BYTE *restrict ip = (const BYTE *) source;
885 const BYTE *const iend = ip + isize;
888 BYTE *op = (BYTE *) dest;
889 BYTE *const oend = op + maxOutputSize;
/* Correction tables for overlapping (distance < STEPSIZE) match copies. */
892 size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
894 size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
/* Literal-run length: RUN_MASK in the token means "read more bytes". */
904 if ((length = (token >> ML_BITS)) == RUN_MASK) {
906 while ((ip < iend) && (s == 255)) {
913 if ((cpy > oend - COPYLENGTH) ||
914 (ip + length > iend - COPYLENGTH)) {
916 /* Error: writes beyond output buffer */
918 if (ip + length != iend)
920 * Error: LZ4 format requires to consume all
921 * input at this stage
924 (void) memcpy(op, ip, length);
926 /* Necessarily EOF, due to parsing restrictions */
929 LZ4_WILDCOPY(ip, op, cpy);
/* Read the little-endian match offset and validate the back-reference. */
934 LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
936 if (ref < (BYTE * const) dest)
938 * Error: offset creates reference outside of
943 /* get matchlength */
944 if ((length = (token & ML_MASK)) == ML_MASK) {
953 /* copy repeated sequence */
954 if unlikely(op - ref < STEPSIZE) {
956 size_t dec64 = dec64table[op-ref];
966 ref -= dec32table[op-ref];
971 LZ4_COPYSTEP(ref, op);
973 cpy = op + length - (STEPSIZE - 4);
974 if (cpy > oend - COPYLENGTH) {
977 * Error: request to write outside of
981 LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
987 * Check EOF (should never happen, since
988 * last 5 bytes are supposed to be literals)
993 LZ4_SECURECOPY(ref, op, cpy);
994 op = cpy; /* correction */
997 /* end of decoding */
998 return (int)(((char *)op) - dest);
1000 /* write overflow error detected */
1002 return (int)(-(((char *)ip) - source));