2 * LZ4 - Fast LZ compression algorithm
4 * Copyright (C) 2011-2013, Yann Collet.
5 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following disclaimer
15 * in the documentation and/or other materials provided with the
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * You can contact the author at :
31 * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
32 * - LZ4 source repository : http://code.google.com/p/lz4/
35 #include <sys/zfs_context.h>
37 static int real_LZ4_compress(const char *source, char *dest, int isize,
39 static int LZ4_uncompress_unknownOutputSize(const char *source, char *dest,
40 int isize, int maxOutputSize);
41 static int LZ4_compressCtx(void *ctx, const char *source, char *dest,
42 int isize, int osize);
43 static int LZ4_compress64kCtx(void *ctx, const char *source, char *dest,
44 int isize, int osize);
46 static kmem_cache_t *lz4_cache;
/*
 * lz4_compress(): ZFS-facing compression entry point.
 * Compresses s_len bytes at s_start into d_start, reserving the first
 * sizeof (uint32_t) bytes of the destination for a big-endian copy of
 * the compressed payload size (stored via BE_32 below) so the
 * decompressor can ignore any padding appended to the buffer.
 * Returns payload size + header size on success.
 * NOTE(review): this excerpt is missing several original lines (the
 * declarations of bufsiz/dest and the zero-return error branch), so
 * the code below is left byte-identical; only comments are changed.
 */
50 lz4_compress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
55 ASSERT(d_len >= sizeof (bufsiz));
57 bufsiz = real_LZ4_compress(s_start, &dest[sizeof (bufsiz)], s_len,
58 d_len - sizeof (bufsiz));
60 /* Signal an error if the compression routine returned zero. */
65 * Encode the compressed buffer size at the start. We'll need this in
66 * decompression to counter the effects of padding which might be
67 * added to the compressed buffer and which, if unhandled, would
68 * confuse the hell out of our decompression function.
70 *(uint32_t *)dest = BE_32(bufsiz);
72 return (bufsiz + sizeof (bufsiz));
/*
 * lz4_decompress(): ZFS-facing decompression entry point.
 * Reads the big-endian 4-byte compressed-size header written by
 * lz4_compress(), validates it against s_len (rejecting a size that
 * would read past the source buffer), then decompresses the payload
 * into d_start.  Returns 0 on success, non-zero on failure.
 * NOTE(review): the error-return statement for the size check is
 * missing from this excerpt; code left byte-identical.
 */
77 lz4_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
79 const char *src = s_start;
80 uint32_t bufsiz = BE_IN32(src);
82 /* invalid compressed buffer size encoded at start */
83 if (bufsiz + sizeof (bufsiz) > s_len)
87 * Returns 0 on success (decompression function returned non-negative)
88 * and non-zero on failure (decompression function returned negative).
90 return (LZ4_uncompress_unknownOutputSize(&src[sizeof (bufsiz)],
91 d_start, bufsiz, d_len) < 0);
95 * LZ4 API Description:
98 * real_LZ4_compress() :
99 * isize : is the input size. Max supported value is ~1.9GB
100 * return : the number of bytes written in buffer dest
101 * or 0 if the compression fails (if LZ4_COMPRESSMIN is set).
102 * note : destination buffer must be already allocated.
103 * destination buffer must be sized to handle worst cases
104 * situations (input data not compressible) worst case size
105 * evaluation is provided by function LZ4_compressBound().
107 * real_LZ4_uncompress() :
108 * osize : is the output size, therefore the original size
109 * return : the number of bytes read in the source buffer.
110 * If the source stream is malformed, the function will stop
111 * decoding and return a negative result, indicating the byte
112 * position of the faulty instruction. This function never
113 * writes beyond dest + osize, and is therefore protected
114 * against malicious data packets.
115 * note : destination buffer must be already allocated
119 * LZ4_compressBound() :
120 * Provides the maximum size that LZ4 may output in a "worst case"
121 * scenario (input data not compressible) primarily useful for memory
122 * allocation of output buffer.
124 * isize : is the input size. Max supported value is ~1.9GB
125 * return : maximum output size in a "worst case" scenario
126 * note : this function is limited by "int" range (2^31-1)
128 * LZ4_uncompress_unknownOutputSize() :
129 * isize : is the input size, therefore the compressed size
130 * maxOutputSize : is the size of the destination buffer (which must be
132 * return : the number of bytes decoded in the destination buffer
133 * (necessarily <= maxOutputSize). If the source stream is
134 * malformed, the function will stop decoding and return a
135 * negative result, indicating the byte position of the faulty
136 * instruction. This function never writes beyond dest +
137 * maxOutputSize, and is therefore protected against malicious
139 * note : Destination buffer must be already allocated.
140 * This version is slightly slower than real_LZ4_uncompress()
142 * LZ4_compressCtx() :
143 * This function explicitly handles the CTX memory structure.
145 * ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
146 * by the caller (either on the stack or using kmem_cache_alloc). Passing NULL
149 * LZ4_compress64kCtx() :
150 * Same as LZ4_compressCtx(), but specific to small inputs (<64KB).
151 * isize *Must* be <64KB, otherwise the output will be corrupted.
153 * ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
154 * by the caller (either on the stack or using kmem_cache_alloc). Passing NULL
163 * COMPRESSIONLEVEL: Increasing this value improves compression ratio
164 * Lowering this value reduces memory usage. Reduced memory usage
165 * typically improves speed, due to cache effect (ex: L1 32KB for Intel,
166 * L1 64KB for AMD). Memory usage formula : N->2^(N+2) Bytes
167 * (examples : 12 -> 16KB ; 17 -> 512KB)
169 #define COMPRESSIONLEVEL 12
172 * NOTCOMPRESSIBLE_CONFIRMATION: Decreasing this value will make the
173 * algorithm skip faster data segments considered "incompressible".
174 * This may decrease compression ratio dramatically, but will be
175 * faster on incompressible data. Increasing this value will make
176 * the algorithm search more before declaring a segment "incompressible".
177 * This could improve compression a bit, but will be slower on
178 * incompressible data. The default value (6) is recommended.
180 #define NOTCOMPRESSIBLE_CONFIRMATION 6
183 * BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE: This will provide a boost to
184 * performance for big endian cpu, but the resulting compressed stream
185 * will be incompatible with little-endian CPU. You can set this option
186 * to 1 in situations where data will stay within closed environment.
187 * This option is useless on Little_Endian CPU (such as x86).
189 /* #define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1 */
192 * CPU Feature Detection
195 /* 32 or 64 bits ? */
196 #if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || \
197 defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || \
198 defined(__LP64__) || defined(_LP64))
205 * Little Endian or Big Endian?
206 * Note: overwrite the below #define if you know your architecture endianness.
208 #if (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || \
209 defined(_BIG_ENDIAN) || defined(_ARCH_PPC) || defined(__PPC__) || \
210 defined(__PPC) || defined(PPC) || defined(__powerpc__) || \
211 defined(__powerpc) || defined(powerpc) || \
212 ((defined(__BYTE_ORDER__)&&(__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))))
213 #define LZ4_BIG_ENDIAN 1
216 * Little Endian assumed. PDP Endian and other very rare endian format
222 * Unaligned memory access is automatically enabled for "common" CPU,
223 * such as x86. For others CPU, the compiler will be more cautious, and
224 * insert extra code to ensure aligned access is respected. If you know
225 * your target CPU supports unaligned memory access, you may want to
226 * force this option manually to improve performance
228 #if defined(__ARM_FEATURE_UNALIGNED)
229 #define LZ4_FORCE_UNALIGNED_ACCESS 1
233 * Illumos : we can't use GCC's __builtin_ctz family of builtins in the
235 * Linux : we can use GCC's __builtin_ctz family of builtins in the
238 #undef LZ4_FORCE_SW_BITCOUNT
243 /* Disable restrict */
246 #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
248 #if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
249 #define expect(expr, value) (__builtin_expect((expr), (value)))
251 #define expect(expr, value) (expr)
255 #define likely(expr) expect((expr) != 0, 1)
259 #define unlikely(expr) expect((expr) != 0, 0)
262 #define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | \
263 (((x) & 0xffu) << 8)))
272 #ifndef LZ4_FORCE_UNALIGNED_ACCESS
276 typedef struct _U16_S {
279 typedef struct _U32_S {
282 typedef struct _U64_S {
286 #ifndef LZ4_FORCE_UNALIGNED_ACCESS
290 #define A64(x) (((U64_S *)(x))->v)
291 #define A32(x) (((U32_S *)(x))->v)
292 #define A16(x) (((U16_S *)(x))->v)
299 #define HASH_LOG COMPRESSIONLEVEL
300 #define HASHTABLESIZE (1 << HASH_LOG)
301 #define HASH_MASK (HASHTABLESIZE - 1)
303 #define SKIPSTRENGTH (NOTCOMPRESSIBLE_CONFIRMATION > 2 ? \
304 NOTCOMPRESSIBLE_CONFIRMATION : 2)
307 #define LASTLITERALS 5
308 #define MFLIMIT (COPYLENGTH + MINMATCH)
309 #define MINLENGTH (MFLIMIT + 1)
312 #define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
315 #define ML_MASK ((1U<<ML_BITS)-1)
316 #define RUN_BITS (8-ML_BITS)
317 #define RUN_MASK ((1U<<RUN_BITS)-1)
321 * Architecture-specific macros
327 #define LZ4_COPYSTEP(s, d) A64(d) = A64(s); d += 8; s += 8;
328 #define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d)
329 #define LZ4_SECURECOPY(s, d, e) if (d < e) LZ4_WILDCOPY(s, d, e)
331 #define INITBASE(base) const BYTE* const base = ip
332 #else /* !LZ4_ARCH64 */
336 #define LZ4_COPYSTEP(s, d) A32(d) = A32(s); d += 4; s += 4;
337 #define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d); LZ4_COPYSTEP(s, d);
338 #define LZ4_SECURECOPY LZ4_WILDCOPY
339 #define HTYPE const BYTE *
340 #define INITBASE(base) const int base = 0
341 #endif /* !LZ4_ARCH64 */
343 #if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
344 #define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
345 { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
346 #define LZ4_WRITE_LITTLEENDIAN_16(p, i) \
347 { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p += 2; }
349 #define LZ4_READ_LITTLEENDIAN_16(d, s, p) { d = (s) - A16(p); }
350 #define LZ4_WRITE_LITTLEENDIAN_16(p, v) { A16(p) = v; p += 2; }
354 /* Local structures */
356 HTYPE hashTable[HASHTABLESIZE];
361 #define LZ4_HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH * 8) - \
363 #define LZ4_HASH_VALUE(p) LZ4_HASH_FUNCTION(A32(p))
364 #define LZ4_WILDCOPY(s, d, e) do { LZ4_COPYPACKET(s, d) } while (d < e);
365 #define LZ4_BLINDCOPY(s, d, l) { BYTE* e = (d) + l; LZ4_WILDCOPY(s, d, e); \
369 /* Private functions */
/*
 * LZ4_NbCommonBytes (64-bit variant): given the non-zero XOR of two
 * 8-byte words, return how many bytes the two words have in common —
 * leading bytes on big-endian (count-leading-zeros), trailing bytes on
 * little-endian (count-trailing-zeros).  Uses the GCC builtins when
 * available (GCC >= 3.4 and !LZ4_FORCE_SW_BITCOUNT), otherwise a
 * De Bruijn multiply-and-lookup fallback.
 * NOTE(review): the #else/#endif lines and part of the big-endian
 * software fallback are missing from this excerpt.
 */
373 LZ4_NbCommonBytes(register U64 val)
375 #if defined(LZ4_BIG_ENDIAN)
376 #if defined(__GNUC__) && (GCC_VERSION >= 304) && \
377 !defined(LZ4_FORCE_SW_BITCOUNT)
378 return (__builtin_clzll(val) >> 3);
397 #if defined(__GNUC__) && (GCC_VERSION >= 304) && \
398 !defined(LZ4_FORCE_SW_BITCOUNT)
399 return (__builtin_ctzll(val) >> 3);
/* Software fallback: isolate lowest set bit, De Bruijn hash to a byte index. */
401 static const int DeBruijnBytePos[64] =
402 { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5,
403 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5,
404 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4,
405 4, 5, 7, 2, 6, 5, 7, 6, 7, 7
407 return DeBruijnBytePos[((U64) ((val & -val) * 0x0218A392CDABBD3F)) >>
/*
 * LZ4_NbCommonBytes (32-bit variant): same contract as the 64-bit
 * version but over 4-byte words — returns the number of common bytes
 * implied by the XOR difference `val`, via __builtin_clz/__builtin_ctz
 * when available, else a 32-entry De Bruijn lookup.
 * NOTE(review): #else/#endif lines are missing from this excerpt.
 */
416 LZ4_NbCommonBytes(register U32 val)
418 #if defined(LZ4_BIG_ENDIAN)
419 #if defined(__GNUC__) && (GCC_VERSION >= 304) && \
420 !defined(LZ4_FORCE_SW_BITCOUNT)
421 return (__builtin_clz(val) >> 3);
435 #if defined(__GNUC__) && (GCC_VERSION >= 304) && \
436 !defined(LZ4_FORCE_SW_BITCOUNT)
437 return (__builtin_ctz(val) >> 3);
/* Software fallback: De Bruijn multiply-and-lookup on the lowest set bit. */
439 static const int DeBruijnBytePos[32] = {
440 0, 0, 3, 0, 3, 1, 3, 0,
441 3, 2, 2, 1, 3, 2, 0, 1,
442 3, 3, 1, 2, 2, 2, 2, 0,
443 3, 1, 2, 0, 1, 0, 1, 1
445 return DeBruijnBytePos[((U32) ((val & -(S32) val) * 0x077CB531U)) >>
453 /* Compression functions */
/*
 * LZ4_compressCtx: core greedy LZ4 compressor for inputs of any size
 * (up to the int limits documented above).  ctx is a caller-allocated
 * struct refTables whose hashTable maps 4-byte prefixes to previous
 * positions.  Emits LZ4 sequences — token byte, literal run, 2-byte
 * little-endian offset, match length — into dest, checking every write
 * against oend.  Returns the number of bytes written to dest.
 * NOTE(review): many original lines are elided from this excerpt
 * (braces, error returns, token bookkeeping); code is left
 * byte-identical and only comments are added.
 */
457 LZ4_compressCtx(void *ctx, const char *source, char *dest, int isize,
460 struct refTables *srt = (struct refTables *)ctx;
461 HTYPE *HashTable = (HTYPE *) (srt->hashTable);
463 const BYTE *ip = (BYTE *) source;
465 const BYTE *anchor = ip;
466 const BYTE *const iend = ip + isize;
467 const BYTE *const oend = (BYTE *) dest + osize;
468 const BYTE *const mflimit = iend - MFLIMIT;
469 #define matchlimit (iend - LASTLITERALS)
471 BYTE *op = (BYTE *) dest;
474 const int skipStrength = SKIPSTRENGTH;
/* Inputs shorter than MINLENGTH cannot contain a match; emit literals only. */
479 if (isize < MINLENGTH)
483 HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
485 forwardH = LZ4_HASH_VALUE(ip);
/*
 * Main loop: probe the hash table with an accelerating step
 * (findMatchAttempts >> skipStrength) so incompressible data is
 * skipped quickly.
 */
489 int findMatchAttempts = (1U << skipStrength) + 3;
490 const BYTE *forwardIp = ip;
497 int step = findMatchAttempts++ >> skipStrength;
499 forwardIp = ip + step;
501 if (unlikely(forwardIp > mflimit)) {
505 forwardH = LZ4_HASH_VALUE(forwardIp);
506 ref = base + HashTable[h];
507 HashTable[h] = ip - base;
/* Accept the candidate only if within MAX_DISTANCE and 4 bytes match. */
509 } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));
/* Catch up: extend the match backwards over equal preceding bytes. */
512 while ((ip > anchor) && (ref > (BYTE *) source) &&
513 unlikely(ip[-1] == ref[-1])) {
518 /* Encode Literal length */
519 length = ip - anchor;
522 /* Check output limit */
523 if (unlikely(op + length + (2 + 1 + LASTLITERALS) +
524 (length >> 8) > oend))
527 if (length >= (int)RUN_MASK) {
528 *token = (RUN_MASK << ML_BITS);
529 len = length - RUN_MASK;
/* Lengths >= RUN_MASK continue in 255-valued extension bytes. */
530 for (; len > 254; len -= 255)
534 *token = (length << ML_BITS);
537 LZ4_BLINDCOPY(anchor, op, length);
/* Emit the 2-byte little-endian match offset. */
541 LZ4_WRITE_LITTLEENDIAN_16(op, ip - ref);
545 ref += MINMATCH; /* MinMatch verified */
/* Extend the match forward a word at a time using LZ4_NbCommonBytes. */
547 while (likely(ip < matchlimit - (STEPSIZE - 1))) {
548 UARCH diff = AARCH(ref) ^ AARCH(ip);
554 ip += LZ4_NbCommonBytes(diff);
558 if ((ip < (matchlimit - 3)) && (A32(ref) == A32(ip))) {
563 if ((ip < (matchlimit - 1)) && (A16(ref) == A16(ip))) {
567 if ((ip < matchlimit) && (*ref == *ip))
571 /* Encode MatchLength */
573 /* Check output limit */
574 if (unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend))
576 if (len >= (int)ML_MASK) {
579 for (; len > 509; len -= 510) {
591 /* Test end of chunk */
/* Refresh the table entry for position ip-2 to improve the next search. */
597 HashTable[LZ4_HASH_VALUE(ip - 2)] = ip - 2 - base;
599 /* Test next position */
600 ref = base + HashTable[LZ4_HASH_VALUE(ip)];
601 HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
602 if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) {
607 /* Prepare next loop */
609 forwardH = LZ4_HASH_VALUE(ip);
613 /* Encode Last Literals */
615 int lastRun = iend - anchor;
/* Bound check includes worst-case run-length extension bytes. */
616 if (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) >
619 if (lastRun >= (int)RUN_MASK) {
620 *op++ = (RUN_MASK << ML_BITS);
622 for (; lastRun > 254; lastRun -= 255) {
625 *op++ = (BYTE)lastRun;
627 *op++ = (lastRun << ML_BITS);
628 (void) memcpy(op, anchor, iend - anchor);
633 return (int)(((char *)op) - dest);
638 /* Note : this function is valid only if isize < LZ4_64KLIMIT */
639 #define LZ4_64KLIMIT ((1 << 16) + (MFLIMIT - 1))
640 #define HASHLOG64K (HASH_LOG + 1)
641 #define HASH64KTABLESIZE (1U << HASHLOG64K)
642 #define LZ4_HASH64K_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8) - \
644 #define LZ4_HASH64K_VALUE(p) LZ4_HASH64K_FUNCTION(A32(p))
/*
 * LZ4_compress64kCtx: variant of LZ4_compressCtx specialized for
 * inputs smaller than LZ4_64KLIMIT.  Because every offset fits in 16
 * bits, the hash table stores U16 deltas from `base` (twice as many
 * entries via HASHLOG64K) and the MAX_DISTANCE check is unnecessary.
 * isize *must* be < LZ4_64KLIMIT or output is corrupted (see header
 * comment above).  Returns the number of bytes written to dest.
 * NOTE(review): many original lines are elided from this excerpt;
 * code is left byte-identical and only comments are added.
 */
648 LZ4_compress64kCtx(void *ctx, const char *source, char *dest, int isize,
651 struct refTables *srt = (struct refTables *)ctx;
652 U16 *HashTable = (U16 *) (srt->hashTable);
654 const BYTE *ip = (BYTE *) source;
655 const BYTE *anchor = ip;
656 const BYTE *const base = ip;
657 const BYTE *const iend = ip + isize;
658 const BYTE *const oend = (BYTE *) dest + osize;
659 const BYTE *const mflimit = iend - MFLIMIT;
660 #define matchlimit (iend - LASTLITERALS)
662 BYTE *op = (BYTE *) dest;
665 const int skipStrength = SKIPSTRENGTH;
/* Inputs shorter than MINLENGTH cannot contain a match; literals only. */
669 if (isize < MINLENGTH)
674 forwardH = LZ4_HASH64K_VALUE(ip);
/* Main loop: accelerating probe, as in LZ4_compressCtx. */
678 int findMatchAttempts = (1U << skipStrength) + 3;
679 const BYTE *forwardIp = ip;
686 int step = findMatchAttempts++ >> skipStrength;
688 forwardIp = ip + step;
690 if (forwardIp > mflimit) {
694 forwardH = LZ4_HASH64K_VALUE(forwardIp);
695 ref = base + HashTable[h];
696 HashTable[h] = ip - base;
/* No distance check needed: 16-bit deltas are always within range. */
698 } while (A32(ref) != A32(ip));
/* Catch up: extend the match backwards over equal preceding bytes. */
701 while ((ip > anchor) && (ref > (BYTE *) source) &&
702 (ip[-1] == ref[-1])) {
707 /* Encode Literal length */
708 length = ip - anchor;
711 /* Check output limit */
712 if (unlikely(op + length + (2 + 1 + LASTLITERALS) +
713 (length >> 8) > oend))
716 if (length >= (int)RUN_MASK) {
717 *token = (RUN_MASK << ML_BITS);
718 len = length - RUN_MASK;
719 for (; len > 254; len -= 255)
723 *token = (length << ML_BITS);
726 LZ4_BLINDCOPY(anchor, op, length);
/* Emit the 2-byte little-endian match offset. */
730 LZ4_WRITE_LITTLEENDIAN_16(op, ip - ref);
734 ref += MINMATCH; /* MinMatch verified */
/* Extend the match forward a word at a time. */
736 while (ip < matchlimit - (STEPSIZE - 1)) {
737 UARCH diff = AARCH(ref) ^ AARCH(ip);
743 ip += LZ4_NbCommonBytes(diff);
747 if ((ip < (matchlimit - 3)) && (A32(ref) == A32(ip))) {
752 if ((ip < (matchlimit - 1)) && (A16(ref) == A16(ip))) {
756 if ((ip < matchlimit) && (*ref == *ip))
760 /* Encode MatchLength */
762 /* Check output limit */
763 if (unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend))
765 if (len >= (int)ML_MASK) {
768 for (; len > 509; len -= 510) {
780 /* Test end of chunk */
786 HashTable[LZ4_HASH64K_VALUE(ip - 2)] = ip - 2 - base;
788 /* Test next position */
789 ref = base + HashTable[LZ4_HASH64K_VALUE(ip)];
790 HashTable[LZ4_HASH64K_VALUE(ip)] = ip - base;
791 if (A32(ref) == A32(ip)) {
796 /* Prepare next loop */
798 forwardH = LZ4_HASH64K_VALUE(ip);
802 /* Encode Last Literals */
804 int lastRun = iend - anchor;
805 if (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) >
808 if (lastRun >= (int)RUN_MASK) {
809 *op++ = (RUN_MASK << ML_BITS);
811 for (; lastRun > 254; lastRun -= 255)
813 *op++ = (BYTE)lastRun;
815 *op++ = (lastRun << ML_BITS);
816 (void) memcpy(op, anchor, iend - anchor);
821 return (int)(((char *)op) - dest);
/*
 * real_LZ4_compress: allocate a struct refTables scratch context from
 * lz4_cache, zero it, and dispatch to the 64KB-specialized compressor
 * when isize < LZ4_64KLIMIT, else the general one.  The context is
 * freed before returning the compressor's result.  Per the comment
 * fragment below, an allocation failure falls through and disables
 * compression in zio_compress_data rather than panicking.
 * NOTE(review): the NULL-check branch after kmem_cache_alloc and the
 * final return are missing from this excerpt; code left byte-identical.
 */
825 real_LZ4_compress(const char *source, char *dest, int isize, int osize)
830 ASSERT(lz4_cache != NULL);
831 ctx = kmem_cache_alloc(lz4_cache, KM_PUSHPAGE);
834 * out of kernel memory, gently fall through - this will disable
835 * compression in zio_compress_data
840 memset(ctx, 0, sizeof (struct refTables));
842 if (isize < LZ4_64KLIMIT)
843 result = LZ4_compress64kCtx(ctx, source, dest, isize, osize);
845 result = LZ4_compressCtx(ctx, source, dest, isize, osize);
847 kmem_cache_free(lz4_cache, ctx);
851 /* Decompression functions */
854 * Note: The decoding functions real_LZ4_uncompress() and
855 * LZ4_uncompress_unknownOutputSize() are safe against "buffer overflow"
856 * attack type. They will never write nor read outside of the provided
857 * output buffers. LZ4_uncompress_unknownOutputSize() also insures that
858 * it will never read outside of the input buffer. A corrupted input
859 * will produce an error result, a negative int, indicating the position
860 * of the error within input stream.
/*
 * LZ4_uncompress_unknownOutputSize: safe decoder.  Walks the token
 * stream in [source, source+isize), copying literal runs and back-
 * referenced matches into dest, never reading past iend nor writing
 * past oend (see the per-step bound checks below).  Returns the number
 * of decoded bytes on success, or a negative value encoding the input
 * position of the fault on malformed data.
 * NOTE(review): numerous original lines (braces, goto error labels,
 * extension-byte loops) are elided from this excerpt; code is left
 * byte-identical and only comments are added.
 */
864 LZ4_uncompress_unknownOutputSize(const char *source, char *dest, int isize,
867 /* Local Variables */
868 const BYTE *restrict ip = (const BYTE *) source;
869 const BYTE *const iend = ip + isize;
872 BYTE *op = (BYTE *) dest;
873 BYTE *const oend = op + maxOutputSize;
/* Correction tables for overlapping matches with offset < STEPSIZE. */
876 size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
878 size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
/* Literal length: RUN_MASK in the token means 255-valued extension bytes follow. */
888 if ((length = (token >> ML_BITS)) == RUN_MASK) {
890 while ((ip < iend) && (s == 255)) {
/* Near the end of either buffer, fall back to a bounds-checked memcpy. */
897 if ((cpy > oend - COPYLENGTH) ||
898 (ip + length > iend - COPYLENGTH)) {
900 /* Error: writes beyond output buffer */
902 if (ip + length != iend)
904 * Error: LZ4 format requires to consume all
905 * input at this stage
908 (void) memcpy(op, ip, length);
910 /* Necessarily EOF, due to parsing restrictions */
913 LZ4_WILDCOPY(ip, op, cpy);
/* Read the 2-byte little-endian match offset into ref. */
918 LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
920 if (ref < (BYTE * const) dest)
922 * Error: offset creates reference outside of
927 /* get matchlength */
928 if ((length = (token & ML_MASK)) == ML_MASK) {
937 /* copy repeated sequence */
938 if (unlikely(op - ref < STEPSIZE)) {
/* Offset smaller than copy step: adjust ref with the dec tables. */
940 size_t dec64 = dec64table[op-ref];
950 ref -= dec32table[op-ref];
955 LZ4_COPYSTEP(ref, op);
957 cpy = op + length - (STEPSIZE - 4);
958 if (cpy > oend - COPYLENGTH) {
961 * Error: request to write outside of
965 LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
971 * Check EOF (should never happen, since
972 * last 5 bytes are supposed to be literals)
977 LZ4_SECURECOPY(ref, op, cpy);
978 op = cpy; /* correction */
981 /* end of decoding */
982 return (int)(((char *)op) - dest);
984 /* write overflow error detected */
986 return (int)(-(((char *)ip) - source));
992 lz4_cache = kmem_cache_create("lz4_cache",
993 sizeof (struct refTables), 0, NULL, NULL, NULL, NULL, NULL, 0);
1000 kmem_cache_destroy(lz4_cache);