/*
 * LZ4 - Fast LZ compression algorithm
 * Copyright (C) 2011-2013, Yann Collet.
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following disclaimer
 *       in the documentation and/or other materials provided with the
 *       distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at :
 * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
 * - LZ4 source repository : http://code.google.com/p/lz4/
 */

/*
 * N.B. - This file seems to be based on LZ4 r85, dated Dec 10, 2012
 */
#include <sys/zfs_context.h>
#include <sys/zio_compress.h>

static int real_LZ4_compress(const char *source, char *dest, int isize,
    int osize);
static int LZ4_compressCtx(void *ctx, const char *source, char *dest,
    int isize, int osize);
static int LZ4_compress64kCtx(void *ctx, const char *source, char *dest,
    int isize, int osize);

int LZ4_uncompress_unknownOutputSize(const char *source, char *dest,
    int isize, int maxOutputSize);

static void *lz4_alloc(int flags);
static void lz4_free(void *ctx);
lz4_compress_zfs(void *s_start, void *d_start, size_t s_len,
	ASSERT(d_len >= sizeof (bufsiz));

	bufsiz = real_LZ4_compress(s_start, &dest[sizeof (bufsiz)], s_len,
	    d_len - sizeof (bufsiz));

	/* Signal an error if the compression routine returned zero. */

	/*
	 * The exact compressed size is needed by the decompression routine,
	 * so it is stored at the start of the buffer. Note that this may be
	 * less than the compressed block size, which is rounded up to a
	 * multiple of 1<<ashift.
	 */
	*(uint32_t *)dest = BE_32(bufsiz);

	return (bufsiz + sizeof (bufsiz));
lz4_decompress_zfs(void *s_start, void *d_start, size_t s_len,
	const char *src = s_start;
	uint32_t bufsiz = BE_IN32(src);

	/* invalid compressed buffer size encoded at start */
	if (bufsiz + sizeof (bufsiz) > s_len)

	/*
	 * Returns 0 on success (decompression function returned non-negative)
	 * and non-zero on failure (decompression function returned negative).
	 */
	return (LZ4_uncompress_unknownOutputSize(&src[sizeof (bufsiz)],
	    d_start, bufsiz, d_len) < 0);
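/*
 * Illustrative usage sketch (added; not part of the original file). Only the
 * 4-byte big-endian size prefix and the two entry points above are taken
 * from this file; the buffer names, sizes, and the trailing "level" argument
 * (unused by this implementation) are assumptions made for the example.
 *
 *	char cbuf[BLOCK_SIZE];		// holds header + compressed stream
 *	char obuf[BLOCK_SIZE];		// decompression target
 *
 *	size_t clen = lz4_compress_zfs(src, cbuf, BLOCK_SIZE, sizeof (cbuf), 0);
 *	// on success clen is the compressed payload size plus the 4-byte
 *	// header written by lz4_compress_zfs(); feed it straight back in:
 *	int err = lz4_decompress_zfs(cbuf, obuf, clen, BLOCK_SIZE, 0);
 *	// err == 0 on success, non-zero if the stream was malformed
 */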
/*
 * LZ4 API Description:
 *
 * real_LZ4_compress() :
 *	isize  : is the input size. Max supported value is ~1.9GB
 *	return : the number of bytes written in buffer dest
 *	    or 0 if the compression fails (if LZ4_COMPRESSMIN is set).
 *	note : destination buffer must be already allocated.
 *	    The destination buffer must be sized to handle worst-case
 *	    situations (input data not compressible); worst-case size
 *	    evaluation is provided by function LZ4_compressBound().
 *
 * real_LZ4_uncompress() :
 *	osize  : is the output size, therefore the original size
 *	return : the number of bytes read in the source buffer.
 *	    If the source stream is malformed, the function will stop
 *	    decoding and return a negative result, indicating the byte
 *	    position of the faulty instruction. This function never
 *	    writes beyond dest + osize, and is therefore protected
 *	    against malicious data packets.
 *	note : destination buffer must be already allocated
 *	note : real_LZ4_uncompress() is not used in ZFS so its code
 *	    is not present here.
 *
 * LZ4_compressBound() :
 *	Provides the maximum size that LZ4 may output in a "worst case"
 *	scenario (input data not compressible); primarily useful for memory
 *	allocation of the output buffer.
 *
 *	isize  : is the input size. Max supported value is ~1.9GB
 *	return : maximum output size in a "worst case" scenario
 *	note : this function is limited by "int" range (2^31-1)
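 *
 *	(The bound itself is not spelled out in this excerpt. For reference,
 *	the canonical LZ4 sources compute it as roughly one extra byte per
 *	255 input bytes plus a small constant; treat the expression below as
 *	an illustrative sketch, not a definition carried by this file:
 *
 *	#define LZ4_COMPRESSBOUND(isize)  ((isize) + ((isize) / 255) + 16)
 *	)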
 *
 * LZ4_uncompress_unknownOutputSize() :
 *	isize  : is the input size, therefore the compressed size
 *	maxOutputSize : is the size of the destination buffer (which must be
 *	    already allocated)
 *	return : the number of bytes decoded in the destination buffer
 *	    (necessarily <= maxOutputSize). If the source stream is
 *	    malformed, the function will stop decoding and return a
 *	    negative result, indicating the byte position of the faulty
 *	    instruction. This function never writes beyond dest +
 *	    maxOutputSize, and is therefore protected against malicious
 *	    data packets.
 *	note : Destination buffer must be already allocated.
 *	    This version is slightly slower than real_LZ4_uncompress()
 *
 * LZ4_compressCtx() :
 *	This function explicitly handles the CTX memory structure.
 *
 *	ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
 *	by the caller (either on the stack or using kmem_cache_alloc). Passing
 *	NULL isn't valid.
 *
 * LZ4_compress64kCtx() :
 *	Same as LZ4_compressCtx(), but specific to small inputs (<64KB).
 *	isize *Must* be <64KB, otherwise the output will be corrupted.
 *
 *	ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
 *	by the caller (either on the stack or using kmem_cache_alloc). Passing
 *	NULL isn't valid.
 */
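/*
 * Illustrative sketch of the "caller allocates the CTX" convention described
 * above (added for clarity; not code from the original file). The scratch
 * object is simply the hash-table container defined further below:
 *
 *	struct refTables tables;
 *
 *	memset(&tables, 0, sizeof (tables));
 *	int clen = LZ4_compressCtx(&tables, src, dst, isize, osize);
 *	// a zero return signals failure (output did not fit in osize bytes),
 *	// per the API notes above
 */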
/*
 * COMPRESSIONLEVEL: Increasing this value improves compression ratio.
 * Lowering this value reduces memory usage. Reduced memory usage
 * typically improves speed, due to cache effects (ex: L1 32KB for Intel,
 * L1 64KB for AMD). Memory usage formula : N->2^(N+2) Bytes
 * (examples : 12 -> 16KB ; 17 -> 512KB)
 */
#define COMPRESSIONLEVEL 12
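/*
 * Worked example of the memory-usage formula above (added for clarity, not
 * part of the original comment): the hash table declared further below holds
 * (1 << COMPRESSIONLEVEL) entries of (typically) 4 bytes each, so
 * N = 12  ->  2^(12+2) bytes = 4096 entries * 4 bytes = 16 KB, and
 * N = 17  ->  2^(17+2) bytes = 131072 entries * 4 bytes = 512 KB.
 */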
/*
 * NOTCOMPRESSIBLE_CONFIRMATION: Decreasing this value makes the
 * algorithm skip over data segments it considers "incompressible" more
 * quickly. This may decrease compression ratio dramatically, but will be
 * faster on incompressible data. Increasing this value makes the
 * algorithm search longer before declaring a segment "incompressible".
 * This could improve compression a bit, but will be slower on
 * incompressible data. The default value (6) is recommended.
 */
#define NOTCOMPRESSIBLE_CONFIRMATION 6
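/*
 * How the default of 6 plays out in the match-search loops below (worked
 * example added for clarity): SKIPSTRENGTH becomes 6, findMatchAttempts
 * starts at (1 << 6) + 3 = 67, and each probe uses
 * step = findMatchAttempts++ >> 6. The step therefore stays at 1 for the
 * first ~64 failed probes, then grows to 2, 3, ... with every further ~64
 * failures, so runs of incompressible data are skipped progressively faster
 * instead of being scanned byte by byte.
 */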
/*
 * BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE: This will provide a boost to
 * performance for big-endian CPUs, but the resulting compressed stream
 * will be incompatible with little-endian CPUs. You can set this option
 * to 1 in situations where data will stay within a closed environment.
 * This option is useless on little-endian CPUs (such as x86).
 */
/* #define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1 */
/*
 * CPU Feature Detection
 */

/* 32 or 64 bits ? */

/*
 * Little Endian or Big Endian?
 * Note: overwrite the below #define if you know your architecture endianness.
 */
#if defined(_ZFS_BIG_ENDIAN)
#define LZ4_BIG_ENDIAN 1
#else
/*
 * Little Endian assumed. PDP Endian and other very rare endian formats
 * are unsupported.
 */
#undef LZ4_BIG_ENDIAN
#endif
/*
 * Unaligned memory access is automatically enabled for "common" CPUs,
 * such as x86. For other CPUs, the compiler will be more cautious, and
 * insert extra code to ensure aligned access is respected. If you know
 * your target CPU supports unaligned memory access, you may want to
 * force this option manually to improve performance.
 */
#if defined(__ARM_FEATURE_UNALIGNED)
#define LZ4_FORCE_UNALIGNED_ACCESS 1
#endif
/*
 * Illumos : we can't use GCC's __builtin_ctz family of builtins in the
 * kernel
 * Linux : we can use GCC's __builtin_ctz family of builtins in the
 * kernel
 */
#undef LZ4_FORCE_SW_BITCOUNT
#define LZ4_FORCE_SW_BITCOUNT
/* Disable restrict */

/*
 * Linux : GCC_VERSION is defined as of 3.9-rc1, so undefine it.
 * torvalds/linux@3f3f8d2f48acfd8ed3b8e6b7377935da57b27b16
 */
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
#define expect(expr, value)	(__builtin_expect((expr), (value)))
#else
#define expect(expr, value)	(expr)
#endif

#define likely(expr)	expect((expr) != 0, 1)
#define unlikely(expr)	expect((expr) != 0, 0)

#define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | \
	(((x) & 0xffu) << 8)))
#ifndef LZ4_FORCE_UNALIGNED_ACCESS

typedef struct _U16_S {
	U16 v;
} U16_S;
typedef struct _U32_S {
	U32 v;
} U32_S;
typedef struct _U64_S {
	U64 v;
} U64_S;

#ifndef LZ4_FORCE_UNALIGNED_ACCESS

#define A64(x) (((U64_S *)(x))->v)
#define A32(x) (((U32_S *)(x))->v)
#define A16(x) (((U16_S *)(x))->v)
#define HASH_LOG COMPRESSIONLEVEL
#define HASHTABLESIZE (1 << HASH_LOG)
#define HASH_MASK (HASHTABLESIZE - 1)

#define SKIPSTRENGTH (NOTCOMPRESSIBLE_CONFIRMATION > 2 ? \
	NOTCOMPRESSIBLE_CONFIRMATION : 2)

#define LASTLITERALS 5
#define MFLIMIT (COPYLENGTH + MINMATCH)
#define MINLENGTH (MFLIMIT + 1)

#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)

#define ML_MASK ((1U<<ML_BITS)-1)
#define RUN_BITS (8-ML_BITS)
#define RUN_MASK ((1U<<RUN_BITS)-1)
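/*
 * Added note on the sequence layout the masks above describe (for clarity;
 * the ML_BITS/MINMATCH/COPYLENGTH definitions themselves are not shown in
 * this excerpt): every LZ4 sequence starts with a one-byte token whose high
 * RUN_BITS encode the literal-run length and whose low ML_BITS encode the
 * match length. A field equal to its mask (RUN_MASK or ML_MASK) means the
 * length continues in following bytes, 255 at a time, exactly as the
 * "for (; len > 254; len -= 255)" loops in the encoders below emit it.
 */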
/* Architecture-specific macros */
#define LZ4_COPYSTEP(s, d)	A64(d) = A64(s); d += 8; s += 8;
#define LZ4_COPYPACKET(s, d)	LZ4_COPYSTEP(s, d)
#define LZ4_SECURECOPY(s, d, e)	if (d < e) LZ4_WILDCOPY(s, d, e)
#define INITBASE(base)		const BYTE* const base = ip
#else /* !LZ4_ARCH64 */
#define LZ4_COPYSTEP(s, d)	A32(d) = A32(s); d += 4; s += 4;
#define LZ4_COPYPACKET(s, d)	LZ4_COPYSTEP(s, d); LZ4_COPYSTEP(s, d);
#define LZ4_SECURECOPY		LZ4_WILDCOPY
#define HTYPE			const BYTE *
#define INITBASE(base)		const int base = 0
#endif /* !LZ4_ARCH64 */
#if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
	{ U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
#define LZ4_WRITE_LITTLEENDIAN_16(p, i) \
	{ U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p += 2; }
#else
#define LZ4_READ_LITTLEENDIAN_16(d, s, p) { d = (s) - A16(p); }
#define LZ4_WRITE_LITTLEENDIAN_16(p, v) { A16(p) = v; p += 2; }
#endif
/* Local structures */
struct refTables {
	HTYPE hashTable[HASHTABLESIZE];
};

#define LZ4_HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH * 8) - \
	HASH_LOG))
#define LZ4_HASH_VALUE(p) LZ4_HASH_FUNCTION(A32(p))
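/*
 * Added note: 2654435761 is the 32-bit "golden ratio" multiplier
 * (approximately 2^32 / phi) used for Knuth-style multiplicative hashing;
 * the shift by (MINMATCH * 8 - HASH_LOG) keeps only the top HASH_LOG bits
 * of the 32-bit product, so four input bytes map to one of HASHTABLESIZE
 * buckets with a reasonably uniform spread.
 */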
#define LZ4_WILDCOPY(s, d, e) do { LZ4_COPYPACKET(s, d) } while (d < e);
#define LZ4_BLINDCOPY(s, d, l) { BYTE* e = (d) + l; LZ4_WILDCOPY(s, d, e); \
	d = e; }

/* Private functions */
LZ4_NbCommonBytes(register U64 val)
#if defined(LZ4_BIG_ENDIAN)
#if ((defined(__GNUC__) && (GCC_VERSION >= 304)) || defined(__clang__)) && \
	!defined(LZ4_FORCE_SW_BITCOUNT)
	return (__builtin_clzll(val) >> 3);
#if ((defined(__GNUC__) && (GCC_VERSION >= 304)) || defined(__clang__)) && \
	!defined(LZ4_FORCE_SW_BITCOUNT)
	return (__builtin_ctzll(val) >> 3);
	static const int DeBruijnBytePos[64] =
	    { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5,
	    3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5,
	    5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4,
	    4, 5, 7, 2, 6, 5, 7, 6, 7, 7
	};
	return DeBruijnBytePos[((U64) ((val & -val) * 0x0218A392CDABBD3F)) >>
	    58];
LZ4_NbCommonBytes(register U32 val)
#if defined(LZ4_BIG_ENDIAN)
#if ((defined(__GNUC__) && (GCC_VERSION >= 304)) || defined(__clang__)) && \
	!defined(LZ4_FORCE_SW_BITCOUNT)
	return (__builtin_clz(val) >> 3);
#if defined(__GNUC__) && (GCC_VERSION >= 304) && \
	!defined(LZ4_FORCE_SW_BITCOUNT)
	return (__builtin_ctz(val) >> 3);
	static const int DeBruijnBytePos[32] = {
		0, 0, 3, 0, 3, 1, 3, 0,
		3, 2, 2, 1, 3, 2, 0, 1,
		3, 3, 1, 2, 2, 2, 2, 0,
		3, 1, 2, 0, 1, 0, 1, 1
	};
	return DeBruijnBytePos[((U32) ((val & -(S32) val) * 0x077CB531U)) >>
	    27];
/* Compression functions */
LZ4_compressCtx(void *ctx, const char *source, char *dest, int isize,
	struct refTables *srt = (struct refTables *)ctx;
	HTYPE *HashTable = (HTYPE *) (srt->hashTable);

	const BYTE *ip = (BYTE *) source;
	const BYTE *anchor = ip;
	const BYTE *const iend = ip + isize;
	const BYTE *const oend = (BYTE *) dest + osize;
	const BYTE *const mflimit = iend - MFLIMIT;
#define	matchlimit (iend - LASTLITERALS)

	BYTE *op = (BYTE *) dest;

	const int skipStrength = SKIPSTRENGTH;

	if (isize < MINLENGTH)

	HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
	forwardH = LZ4_HASH_VALUE(ip);

		int findMatchAttempts = (1U << skipStrength) + 3;
		const BYTE *forwardIp = ip;

			int step = findMatchAttempts++ >> skipStrength;
			forwardIp = ip + step;

			if (unlikely(forwardIp > mflimit)) {

			forwardH = LZ4_HASH_VALUE(forwardIp);
			ref = base + HashTable[h];
			HashTable[h] = ip - base;

		} while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));

		while ((ip > anchor) && (ref > (BYTE *) source) &&
		    unlikely(ip[-1] == ref[-1])) {
		/* Encode Literal length */
		length = ip - anchor;

		/* Check output limit */
		if (unlikely(op + length + (2 + 1 + LASTLITERALS) +
		    (length >> 8) > oend))

		if (length >= (int)RUN_MASK) {
			*token = (RUN_MASK << ML_BITS);
			len = length - RUN_MASK;
			for (; len > 254; len -= 255)
			*token = (length << ML_BITS);

		LZ4_BLINDCOPY(anchor, op, length);

		LZ4_WRITE_LITTLEENDIAN_16(op, ip - ref);

		ref += MINMATCH;	/* MinMatch verified */

		while (likely(ip < matchlimit - (STEPSIZE - 1))) {
			UARCH diff = AARCH(ref) ^ AARCH(ip);

			ip += LZ4_NbCommonBytes(diff);

		if ((ip < (matchlimit - 3)) && (A32(ref) == A32(ip))) {

		if ((ip < (matchlimit - 1)) && (A16(ref) == A16(ip))) {

		if ((ip < matchlimit) && (*ref == *ip))

		/* Encode MatchLength */

		/* Check output limit */
		if (unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend))

		if (len >= (int)ML_MASK) {
			for (; len > 509; len -= 510) {

		/* Test end of chunk */

		HashTable[LZ4_HASH_VALUE(ip - 2)] = ip - 2 - base;

		/* Test next position */
		ref = base + HashTable[LZ4_HASH_VALUE(ip)];
		HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
		if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) {

		/* Prepare next loop */
		forwardH = LZ4_HASH_VALUE(ip);

	/* Encode Last Literals */
		int lastRun = iend - anchor;
		if (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) >

		if (lastRun >= (int)RUN_MASK) {
			*op++ = (RUN_MASK << ML_BITS);

			for (; lastRun > 254; lastRun -= 255) {

			*op++ = (BYTE)lastRun;

			*op++ = (lastRun << ML_BITS);
		(void) memcpy(op, anchor, iend - anchor);

	return (int)(((char *)op) - dest);
/* Note : this function is valid only if isize < LZ4_64KLIMIT */
#define LZ4_64KLIMIT ((1 << 16) + (MFLIMIT - 1))
#define HASHLOG64K (HASH_LOG + 1)
#define HASH64KTABLESIZE (1U << HASHLOG64K)
#define LZ4_HASH64K_FUNCTION(i)	(((i) * 2654435761U) >> ((MINMATCH*8) - \
	HASHLOG64K))
#define LZ4_HASH64K_VALUE(p)	LZ4_HASH64K_FUNCTION(A32(p))
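/*
 * Added note: the 64K variant below can use a U16 hash table (see the cast
 * in LZ4_compress64kCtx) because every position fits in 16 bits when the
 * input is smaller than 64KB; offsets are stored relative to "base", the
 * start of the input. The smaller entries are also why the table can afford
 * one extra bit of hash (HASHLOG64K = HASH_LOG + 1) in the same memory
 * footprint.
 */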
LZ4_compress64kCtx(void *ctx, const char *source, char *dest, int isize,
	struct refTables *srt = (struct refTables *)ctx;
	U16 *HashTable = (U16 *) (srt->hashTable);

	const BYTE *ip = (BYTE *) source;
	const BYTE *anchor = ip;
	const BYTE *const base = ip;
	const BYTE *const iend = ip + isize;
	const BYTE *const oend = (BYTE *) dest + osize;
	const BYTE *const mflimit = iend - MFLIMIT;
#define	matchlimit (iend - LASTLITERALS)

	BYTE *op = (BYTE *) dest;

	const int skipStrength = SKIPSTRENGTH;

	if (isize < MINLENGTH)

	forwardH = LZ4_HASH64K_VALUE(ip);

		int findMatchAttempts = (1U << skipStrength) + 3;
		const BYTE *forwardIp = ip;

			int step = findMatchAttempts++ >> skipStrength;
			forwardIp = ip + step;

			if (forwardIp > mflimit) {

			forwardH = LZ4_HASH64K_VALUE(forwardIp);
			ref = base + HashTable[h];
			HashTable[h] = ip - base;

		} while (A32(ref) != A32(ip));

		while ((ip > anchor) && (ref > (BYTE *) source) &&
		    (ip[-1] == ref[-1])) {

		/* Encode Literal length */
		length = ip - anchor;

		/* Check output limit */
		if (unlikely(op + length + (2 + 1 + LASTLITERALS) +
		    (length >> 8) > oend))

		if (length >= (int)RUN_MASK) {
			*token = (RUN_MASK << ML_BITS);
			len = length - RUN_MASK;
			for (; len > 254; len -= 255)
			*token = (length << ML_BITS);

		LZ4_BLINDCOPY(anchor, op, length);

		LZ4_WRITE_LITTLEENDIAN_16(op, ip - ref);
		ref += MINMATCH;	/* MinMatch verified */

		while (ip < matchlimit - (STEPSIZE - 1)) {
			UARCH diff = AARCH(ref) ^ AARCH(ip);

			ip += LZ4_NbCommonBytes(diff);

		if ((ip < (matchlimit - 3)) && (A32(ref) == A32(ip))) {

		if ((ip < (matchlimit - 1)) && (A16(ref) == A16(ip))) {

		if ((ip < matchlimit) && (*ref == *ip))

		/* Encode MatchLength */

		/* Check output limit */
		if (unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend))

		if (len >= (int)ML_MASK) {
			for (; len > 509; len -= 510) {

		/* Test end of chunk */

		HashTable[LZ4_HASH64K_VALUE(ip - 2)] = ip - 2 - base;

		/* Test next position */
		ref = base + HashTable[LZ4_HASH64K_VALUE(ip)];
		HashTable[LZ4_HASH64K_VALUE(ip)] = ip - base;
		if (A32(ref) == A32(ip)) {

		/* Prepare next loop */
		forwardH = LZ4_HASH64K_VALUE(ip);

	/* Encode Last Literals */
		int lastRun = iend - anchor;
		if (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) >

		if (lastRun >= (int)RUN_MASK) {
			*op++ = (RUN_MASK << ML_BITS);

			for (; lastRun > 254; lastRun -= 255)
			*op++ = (BYTE)lastRun;

			*op++ = (lastRun << ML_BITS);
		(void) memcpy(op, anchor, iend - anchor);

	return (int)(((char *)op) - dest);
real_LZ4_compress(const char *source, char *dest, int isize, int osize)
	ctx = lz4_alloc(KM_SLEEP);

	/*
	 * out of kernel memory, gently fall through - this will disable
	 * compression in zio_compress_data
	 */

	memset(ctx, 0, sizeof (struct refTables));

	if (isize < LZ4_64KLIMIT)
		result = LZ4_compress64kCtx(ctx, source, dest, isize, osize);
	else
		result = LZ4_compressCtx(ctx, source, dest, isize, osize);
/*
 * FreeBSD has 4, 8 and 16 KB malloc zones which can be used here.
 * Should struct refTables get resized this may need to be revisited, hence
 * compile-time asserts.
 */
_Static_assert(sizeof (struct refTables) <= 16384,
	"refTables too big for malloc");
_Static_assert((sizeof (struct refTables) % 4096) == 0,
	"refTables not a multiple of page size");
#define ZFS_LZ4_USE_CACHE

#ifdef ZFS_LZ4_USE_CACHE
static kmem_cache_t *lz4_cache;

#ifdef ZFS_LZ4_USE_CACHE
	lz4_cache = kmem_cache_create("lz4_cache",
	    sizeof (struct refTables), 0, NULL, NULL, NULL, NULL, NULL, 0);

	kmem_cache_destroy(lz4_cache);

	ASSERT(lz4_cache != NULL);
	return (kmem_cache_alloc(lz4_cache, flags));

	kmem_cache_free(lz4_cache, ctx);

	return (kmem_alloc(sizeof (struct refTables), flags));

	kmem_free(ctx, sizeof (struct refTables));