/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 */
12 #include "zstd_fast.h" /* ZSTD_fillHashTable() */
13 #include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */
15 #define LDM_BUCKET_SIZE_LOG 3
16 #define LDM_MIN_MATCH_LENGTH 64
17 #define LDM_HASH_RLOG 7
18 #define LDM_HASH_CHAR_OFFSET 10
20 size_t ZSTD_ldm_initializeParameters(ldmParams_t* params, U32 enableLdm)
22 ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
23 params->enableLdm = enableLdm>0;
25 params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
26 params->minMatchLength = LDM_MIN_MATCH_LENGTH;
27 params->hashEveryLog = ZSTD_LDM_HASHEVERYLOG_NOTSET;
31 void ZSTD_ldm_adjustParameters(ldmParams_t* params, U32 windowLog)
33 if (params->hashLog == 0) {
34 params->hashLog = MAX(ZSTD_HASHLOG_MIN, windowLog - LDM_HASH_RLOG);
35 assert(params->hashLog <= ZSTD_HASHLOG_MAX);
37 if (params->hashEveryLog == ZSTD_LDM_HASHEVERYLOG_NOTSET) {
38 params->hashEveryLog =
39 windowLog < params->hashLog ? 0 : windowLog - params->hashLog;
41 params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
44 size_t ZSTD_ldm_getTableSize(U32 hashLog, U32 bucketSizeLog) {
45 size_t const ldmHSize = ((size_t)1) << hashLog;
46 size_t const ldmBucketSizeLog = MIN(bucketSizeLog, hashLog);
47 size_t const ldmBucketSize =
48 ((size_t)1) << (hashLog - ldmBucketSizeLog);
49 return ldmBucketSize + (ldmHSize * (sizeof(ldmEntry_t)));
52 /** ZSTD_ldm_getSmallHash() :
53 * numBits should be <= 32
54 * If numBits==0, returns 0.
55 * @return : the most significant numBits of value. */
56 static U32 ZSTD_ldm_getSmallHash(U64 value, U32 numBits)
58 assert(numBits <= 32);
59 return numBits == 0 ? 0 : (U32)(value >> (64 - numBits));
62 /** ZSTD_ldm_getChecksum() :
63 * numBitsToDiscard should be <= 32
64 * @return : the next most significant 32 bits after numBitsToDiscard */
65 static U32 ZSTD_ldm_getChecksum(U64 hash, U32 numBitsToDiscard)
67 assert(numBitsToDiscard <= 32);
68 return (hash >> (64 - 32 - numBitsToDiscard)) & 0xFFFFFFFF;
71 /** ZSTD_ldm_getTag() ;
72 * Given the hash, returns the most significant numTagBits bits
73 * after (32 + hbits) bits.
75 * If there are not enough bits remaining, return the last
77 static U32 ZSTD_ldm_getTag(U64 hash, U32 hbits, U32 numTagBits)
79 assert(numTagBits < 32 && hbits <= 32);
80 if (32 - hbits < numTagBits) {
81 return hash & (((U32)1 << numTagBits) - 1);
83 return (hash >> (32 - hbits - numTagBits)) & (((U32)1 << numTagBits) - 1);
87 /** ZSTD_ldm_getBucket() :
88 * Returns a pointer to the start of the bucket associated with hash. */
89 static ldmEntry_t* ZSTD_ldm_getBucket(
90 ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
92 return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
95 /** ZSTD_ldm_insertEntry() :
96 * Insert the entry with corresponding hash into the hash table */
97 static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
98 size_t const hash, const ldmEntry_t entry,
99 ldmParams_t const ldmParams)
101 BYTE* const bucketOffsets = ldmState->bucketOffsets;
102 *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + bucketOffsets[hash]) = entry;
103 bucketOffsets[hash]++;
104 bucketOffsets[hash] &= ((U32)1 << ldmParams.bucketSizeLog) - 1;
107 /** ZSTD_ldm_makeEntryAndInsertByTag() :
109 * Gets the small hash, checksum, and tag from the rollingHash.
111 * If the tag matches (1 << ldmParams.hashEveryLog)-1, then
112 * creates an ldmEntry from the offset, and inserts it into the hash table.
114 * hBits is the length of the small hash, which is the most significant hBits
115 * of rollingHash. The checksum is the next 32 most significant bits, followed
116 * by ldmParams.hashEveryLog bits that make up the tag. */
117 static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState,
118 U64 const rollingHash,
121 ldmParams_t const ldmParams)
123 U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashEveryLog);
124 U32 const tagMask = ((U32)1 << ldmParams.hashEveryLog) - 1;
125 if (tag == tagMask) {
126 U32 const hash = ZSTD_ldm_getSmallHash(rollingHash, hBits);
127 U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
129 entry.offset = offset;
130 entry.checksum = checksum;
131 ZSTD_ldm_insertEntry(ldmState, hash, entry, ldmParams);
135 /** ZSTD_ldm_getRollingHash() :
136 * Get a 64-bit hash using the first len bytes from buf.
138 * Giving bytes s = s_1, s_2, ... s_k, the hash is defined to be
139 * H(s) = s_1*(a^(k-1)) + s_2*(a^(k-2)) + ... + s_k*(a^0)
141 * where the constant a is defined to be prime8bytes.
143 * The implementation adds an offset to each byte, so
144 * H(s) = (s_1 + HASH_CHAR_OFFSET)*(a^(k-1)) + ... */
145 static U64 ZSTD_ldm_getRollingHash(const BYTE* buf, U32 len)
149 for (i = 0; i < len; i++) {
151 ret += buf[i] + LDM_HASH_CHAR_OFFSET;
156 /** ZSTD_ldm_ipow() :
157 * Return base^exp. */
158 static U64 ZSTD_ldm_ipow(U64 base, U64 exp)
162 if (exp & 1) { ret *= base; }
169 U64 ZSTD_ldm_getHashPower(U32 minMatchLength) {
170 assert(minMatchLength >= ZSTD_LDM_MINMATCH_MIN);
171 return ZSTD_ldm_ipow(prime8bytes, minMatchLength - 1);
174 /** ZSTD_ldm_updateHash() :
175 * Updates hash by removing toRemove and adding toAdd. */
176 static U64 ZSTD_ldm_updateHash(U64 hash, BYTE toRemove, BYTE toAdd, U64 hashPower)
178 hash -= ((toRemove + LDM_HASH_CHAR_OFFSET) * hashPower);
180 hash += toAdd + LDM_HASH_CHAR_OFFSET;
184 /** ZSTD_ldm_countBackwardsMatch() :
185 * Returns the number of bytes that match backwards before pIn and pMatch.
187 * We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
188 static size_t ZSTD_ldm_countBackwardsMatch(
189 const BYTE* pIn, const BYTE* pAnchor,
190 const BYTE* pMatch, const BYTE* pBase)
192 size_t matchLength = 0;
193 while (pIn > pAnchor && pMatch > pBase && pIn[-1] == pMatch[-1]) {
201 /** ZSTD_ldm_fillFastTables() :
203 * Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
204 * This is similar to ZSTD_loadDictionaryContent.
206 * The tables for the other strategies are filled within their
207 * block compressors. */
208 static size_t ZSTD_ldm_fillFastTables(ZSTD_CCtx* zc, const void* end)
210 const BYTE* const iend = (const BYTE*)end;
211 const U32 mls = zc->appliedParams.cParams.searchLength;
213 switch(zc->appliedParams.cParams.strategy)
216 ZSTD_fillHashTable(zc, iend, mls);
217 zc->nextToUpdate = (U32)(iend - zc->base);
221 ZSTD_fillDoubleHashTable(zc, iend, mls);
222 zc->nextToUpdate = (U32)(iend - zc->base);
233 assert(0); /* not possible : not a valid strategy id */
239 /** ZSTD_ldm_fillLdmHashTable() :
241 * Fills hashTable from (lastHashed + 1) to iend (non-inclusive).
242 * lastHash is the rolling hash that corresponds to lastHashed.
244 * Returns the rolling hash corresponding to position iend-1. */
245 static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state,
246 U64 lastHash, const BYTE* lastHashed,
247 const BYTE* iend, const BYTE* base,
248 U32 hBits, ldmParams_t const ldmParams)
250 U64 rollingHash = lastHash;
251 const BYTE* cur = lastHashed + 1;
254 rollingHash = ZSTD_ldm_updateHash(rollingHash, cur[-1],
255 cur[ldmParams.minMatchLength-1],
257 ZSTD_ldm_makeEntryAndInsertByTag(state,
259 (U32)(cur - base), ldmParams);
266 /** ZSTD_ldm_limitTableUpdate() :
268 * Sets cctx->nextToUpdate to a position corresponding closer to anchor
270 * (after a long match, only update tables a limited amount). */
271 static void ZSTD_ldm_limitTableUpdate(ZSTD_CCtx* cctx, const BYTE* anchor)
273 U32 const current = (U32)(anchor - cctx->base);
274 if (current > cctx->nextToUpdate + 1024) {
276 current - MIN(512, current - cctx->nextToUpdate - 1024);
280 typedef size_t (*ZSTD_blockCompressor) (ZSTD_CCtx* ctx, const void* src, size_t srcSize);
281 /* defined in zstd_compress.c */
282 ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict);
/** ZSTD_compressBlock_ldm_generic() :
 *  Long-distance-matching block compressor for the single-segment
 *  (non-extDict) case.  Slides a rolling hash over the input, looks up
 *  candidate long matches in the LDM hash table, emits each accepted match
 *  as a sequence, and hands the literal gaps to the regular block
 *  compressor selected for the current strategy.
 *  NOTE(review): this chunk appears to have lost lines in extraction
 *  (missing braces, `break`/`continue` paths, and declarations such as
 *  rollingHash / mLength / cur) — the comments below describe only the
 *  visible logic; reconcile against the canonical sources before editing. */
284 FORCE_INLINE_TEMPLATE
285 size_t ZSTD_compressBlock_ldm_generic(ZSTD_CCtx* cctx,
286 const void* src, size_t srcSize)
/* --- locals : LDM state, derived hash-layout constants, segment bounds --- */
288 ldmState_t* const ldmState = &(cctx->ldmState);
289 const ldmParams_t ldmParams = cctx->appliedParams.ldmParams;
290 const U64 hashPower = ldmState->hashPower;
291 const U32 hBits = ldmParams.hashLog - ldmParams.bucketSizeLog;
292 const U32 ldmBucketSize = ((U32)1 << ldmParams.bucketSizeLog);
293 const U32 ldmTagMask = ((U32)1 << ldmParams.hashEveryLog) - 1;
294 seqStore_t* const seqStorePtr = &(cctx->seqStore);
295 const BYTE* const base = cctx->base;
296 const BYTE* const istart = (const BYTE*)src;
297 const BYTE* ip = istart;
298 const BYTE* anchor = istart;
299 const U32 lowestIndex = cctx->dictLimit;
300 const BYTE* const lowest = base + lowestIndex;
301 const BYTE* const iend = istart + srcSize;
302 const BYTE* const ilimit = iend - MAX(ldmParams.minMatchLength, HASH_READ_SIZE);
/* inner compressor used for the literal stretches between long matches */
304 const ZSTD_blockCompressor blockCompressor =
305 ZSTD_selectBlockCompressor(cctx->appliedParams.cParams.strategy, 0);
306 U32* const repToConfirm = seqStorePtr->repToConfirm;
307 U32 savedRep[ZSTD_REP_NUM];
309 const BYTE* lastHashed = NULL;
310 size_t i, lastLiterals;
312 /* Save seqStorePtr->rep and copy repToConfirm */
313 for (i = 0; i < ZSTD_REP_NUM; i++)
314 savedRep[i] = repToConfirm[i] = seqStorePtr->rep[i];
316 /* Main Search Loop */
317 while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
319 U32 const current = (U32)(ip - base);
320 size_t forwardMatchLength = 0, backwardMatchLength = 0;
321 ldmEntry_t* bestEntry = NULL;
/* advance the rolling hash incrementally when possible, otherwise
 * recompute it from scratch at ip (first iteration / after a jump) */
323 rollingHash = ZSTD_ldm_updateHash(rollingHash, lastHashed[0],
324 lastHashed[ldmParams.minMatchLength],
327 rollingHash = ZSTD_ldm_getRollingHash(ip, ldmParams.minMatchLength);
331 /* Do not insert and do not look for a match */
332 if (ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashEveryLog) !=
338 /* Get the best entry and compute the match lengths */
340 ldmEntry_t* const bucket =
341 ZSTD_ldm_getBucket(ldmState,
342 ZSTD_ldm_getSmallHash(rollingHash, hBits),
345 size_t bestMatchLength = 0;
346 U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
/* scan every slot of the bucket, keeping the longest total match */
348 for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) {
349 const BYTE* const pMatch = cur->offset + base;
350 size_t curForwardMatchLength, curBackwardMatchLength,
/* reject checksum mismatches and out-of-window offsets */
352 if (cur->checksum != checksum || cur->offset <= lowestIndex) {
356 curForwardMatchLength = ZSTD_count(ip, pMatch, iend);
357 if (curForwardMatchLength < ldmParams.minMatchLength) {
/* extend the match backwards into already-emitted literals */
360 curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch(
361 ip, anchor, pMatch, lowest);
362 curTotalMatchLength = curForwardMatchLength +
363 curBackwardMatchLength;
365 if (curTotalMatchLength > bestMatchLength) {
366 bestMatchLength = curTotalMatchLength;
367 forwardMatchLength = curForwardMatchLength;
368 backwardMatchLength = curBackwardMatchLength;
374 /* No match found -- continue searching */
375 if (bestEntry == NULL) {
376 ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash,
/* a match was accepted : include the backward extension */
384 mLength = forwardMatchLength + backwardMatchLength;
385 ip -= backwardMatchLength;
387 /* Call the block compressor on the remaining literals */
389 U32 const matchIndex = bestEntry->offset;
390 const BYTE* const match = base + matchIndex - backwardMatchLength;
391 U32 const offset = (U32)(ip - match);
393 /* Overwrite rep codes */
394 for (i = 0; i < ZSTD_REP_NUM; i++)
395 seqStorePtr->rep[i] = repToConfirm[i];
397 /* Fill tables for block compressor */
398 ZSTD_ldm_limitTableUpdate(cctx, anchor);
399 ZSTD_ldm_fillFastTables(cctx, anchor);
401 /* Call block compressor and get remaining literals */
402 lastLiterals = blockCompressor(cctx, anchor, ip - anchor);
403 cctx->nextToUpdate = (U32)(ip - base);
405 /* Update repToConfirm with the new offset */
406 for (i = ZSTD_REP_NUM - 1; i > 0; i--)
407 repToConfirm[i] = repToConfirm[i-1];
408 repToConfirm[0] = offset;
410 /* Store the sequence with the leftover literals */
411 ZSTD_storeSeq(seqStorePtr, lastLiterals, ip - lastLiterals,
412 offset + ZSTD_REP_MOVE, mLength - MINMATCH);
415 /* Insert the current entry into the hash table */
416 ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,
417 (U32)(lastHashed - base),
420 assert(ip + backwardMatchLength == lastHashed);
422 /* Fill the hash table from lastHashed+1 to ip+mLength*/
423 /* Heuristic: don't need to fill the entire table at end of block */
424 if (ip + mLength < ilimit) {
425 rollingHash = ZSTD_ldm_fillLdmHashTable(
426 ldmState, rollingHash, lastHashed,
427 ip + mLength, base, hBits, ldmParams);
428 lastHashed = ip + mLength - 1;
432 /* Check immediate repcode */
433 while ( (ip < ilimit)
434 && ( (repToConfirm[1] > 0) && (repToConfirm[1] <= (U32)(ip-lowest))
435 && (MEM_read32(ip) == MEM_read32(ip - repToConfirm[1])) )) {
437 size_t const rLength = ZSTD_count(ip+4, ip+4-repToConfirm[1],
439 /* Swap repToConfirm[1] <=> repToConfirm[0] */
441 U32 const tmpOff = repToConfirm[1];
442 repToConfirm[1] = repToConfirm[0];
443 repToConfirm[0] = tmpOff;
/* repcode sequence : 0 literals, offset code 0 */
446 ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH);
448 /* Fill the hash table from lastHashed+1 to ip+rLength*/
449 if (ip + rLength < ilimit) {
450 rollingHash = ZSTD_ldm_fillLdmHashTable(
451 ldmState, rollingHash, lastHashed,
452 ip + rLength, base, hBits, ldmParams);
453 lastHashed = ip + rLength - 1;
/* --- epilogue : flush confirmed rep codes and compress the tail --- */
461 for (i = 0; i < ZSTD_REP_NUM; i++)
462 seqStorePtr->rep[i] = repToConfirm[i];
464 ZSTD_ldm_limitTableUpdate(cctx, anchor);
465 ZSTD_ldm_fillFastTables(cctx, anchor);
467 lastLiterals = blockCompressor(cctx, anchor, iend - anchor);
468 cctx->nextToUpdate = (U32)(iend - base);
470 /* Restore seqStorePtr->rep */
471 for (i = 0; i < ZSTD_REP_NUM; i++)
472 seqStorePtr->rep[i] = savedRep[i];
474 /* Return the last literals size */
478 size_t ZSTD_compressBlock_ldm(ZSTD_CCtx* ctx,
479 const void* src, size_t srcSize)
481 return ZSTD_compressBlock_ldm_generic(ctx, src, srcSize);
/** ZSTD_compressBlock_ldm_extDict_generic() :
 *  Long-distance-matching block compressor for the extDict case : match
 *  candidates may live either in the current prefix (base) or in the
 *  external-dictionary segment (dictBase), so every candidate selects its
 *  base pointer, end pointer and low bound per-entry before comparing.
 *  NOTE(review): this chunk appears to have lost lines in extraction
 *  (missing braces, `break`/`continue` paths, and declarations such as
 *  rollingHash / mLength / cur) — the comments below describe only the
 *  visible logic; reconcile against the canonical sources before editing. */
484 static size_t ZSTD_compressBlock_ldm_extDict_generic(
486 const void* src, size_t srcSize)
/* --- locals : LDM state, two-segment window bounds, derived constants --- */
488 ldmState_t* const ldmState = &(ctx->ldmState);
489 const ldmParams_t ldmParams = ctx->appliedParams.ldmParams;
490 const U64 hashPower = ldmState->hashPower;
491 const U32 hBits = ldmParams.hashLog - ldmParams.bucketSizeLog;
492 const U32 ldmBucketSize = ((U32)1 << ldmParams.bucketSizeLog);
493 const U32 ldmTagMask = ((U32)1 << ldmParams.hashEveryLog) - 1;
494 seqStore_t* const seqStorePtr = &(ctx->seqStore);
495 const BYTE* const base = ctx->base;
496 const BYTE* const dictBase = ctx->dictBase;
497 const BYTE* const istart = (const BYTE*)src;
498 const BYTE* ip = istart;
499 const BYTE* anchor = istart;
500 const U32 lowestIndex = ctx->lowLimit;
501 const BYTE* const dictStart = dictBase + lowestIndex;
502 const U32 dictLimit = ctx->dictLimit;
503 const BYTE* const lowPrefixPtr = base + dictLimit;
504 const BYTE* const dictEnd = dictBase + dictLimit;
505 const BYTE* const iend = istart + srcSize;
506 const BYTE* const ilimit = iend - MAX(ldmParams.minMatchLength, HASH_READ_SIZE);
/* inner compressor (extDict variant selected via the `1` flag) */
508 const ZSTD_blockCompressor blockCompressor =
509 ZSTD_selectBlockCompressor(ctx->appliedParams.cParams.strategy, 1);
510 U32* const repToConfirm = seqStorePtr->repToConfirm;
511 U32 savedRep[ZSTD_REP_NUM];
513 const BYTE* lastHashed = NULL;
514 size_t i, lastLiterals;
516 /* Save seqStorePtr->rep and copy repToConfirm */
517 for (i = 0; i < ZSTD_REP_NUM; i++) {
518 savedRep[i] = repToConfirm[i] = seqStorePtr->rep[i];
/* --- main search loop --- */
522 while (ip < ilimit) { /* < instead of <=, because (ip+1) */
524 const U32 current = (U32)(ip-base);
525 size_t forwardMatchLength = 0, backwardMatchLength = 0;
526 ldmEntry_t* bestEntry = NULL;
/* advance the rolling hash incrementally when possible, otherwise
 * recompute it from scratch at ip (first iteration / after a jump) */
528 rollingHash = ZSTD_ldm_updateHash(rollingHash, lastHashed[0],
529 lastHashed[ldmParams.minMatchLength],
532 rollingHash = ZSTD_ldm_getRollingHash(ip, ldmParams.minMatchLength);
536 if (ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashEveryLog) !=
538 /* Don't insert and don't look for a match */
543 /* Get the best entry and compute the match lengths */
545 ldmEntry_t* const bucket =
546 ZSTD_ldm_getBucket(ldmState,
547 ZSTD_ldm_getSmallHash(rollingHash, hBits),
550 size_t bestMatchLength = 0;
551 U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
/* scan the bucket; each candidate picks its segment (dict vs prefix) */
553 for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) {
554 const BYTE* const curMatchBase =
555 cur->offset < dictLimit ? dictBase : base;
556 const BYTE* const pMatch = curMatchBase + cur->offset;
557 const BYTE* const matchEnd =
558 cur->offset < dictLimit ? dictEnd : iend;
559 const BYTE* const lowMatchPtr =
560 cur->offset < dictLimit ? dictStart : lowPrefixPtr;
561 size_t curForwardMatchLength, curBackwardMatchLength,
/* reject checksum mismatches and out-of-window offsets */
564 if (cur->checksum != checksum || cur->offset <= lowestIndex) {
/* 2-segment count handles a match that crosses from dict into prefix */
568 curForwardMatchLength = ZSTD_count_2segments(
570 matchEnd, lowPrefixPtr);
571 if (curForwardMatchLength < ldmParams.minMatchLength) {
574 curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch(
575 ip, anchor, pMatch, lowMatchPtr);
576 curTotalMatchLength = curForwardMatchLength +
577 curBackwardMatchLength;
579 if (curTotalMatchLength > bestMatchLength) {
580 bestMatchLength = curTotalMatchLength;
581 forwardMatchLength = curForwardMatchLength;
582 backwardMatchLength = curBackwardMatchLength;
588 /* No match found -- continue searching */
589 if (bestEntry == NULL) {
590 ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,
591 (U32)(lastHashed - base),
/* a match was accepted : include the backward extension */
598 mLength = forwardMatchLength + backwardMatchLength;
599 ip -= backwardMatchLength;
601 /* Call the block compressor on the remaining literals */
603 /* ip = current - backwardMatchLength
604 * The match is at (bestEntry->offset - backwardMatchLength) */
605 U32 const matchIndex = bestEntry->offset;
606 U32 const offset = current - matchIndex;
608 /* Overwrite rep codes */
609 for (i = 0; i < ZSTD_REP_NUM; i++)
610 seqStorePtr->rep[i] = repToConfirm[i];
612 /* Fill the hash table for the block compressor */
613 ZSTD_ldm_limitTableUpdate(ctx, anchor);
614 ZSTD_ldm_fillFastTables(ctx, anchor);
616 /* Call block compressor and get remaining literals */
617 lastLiterals = blockCompressor(ctx, anchor, ip - anchor);
618 ctx->nextToUpdate = (U32)(ip - base);
620 /* Update repToConfirm with the new offset */
621 for (i = ZSTD_REP_NUM - 1; i > 0; i--)
622 repToConfirm[i] = repToConfirm[i-1];
623 repToConfirm[0] = offset;
625 /* Store the sequence with the leftover literals */
626 ZSTD_storeSeq(seqStorePtr, lastLiterals, ip - lastLiterals,
627 offset + ZSTD_REP_MOVE, mLength - MINMATCH);
630 /* Insert the current entry into the hash table */
631 ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits,
632 (U32)(lastHashed - base),
635 /* Fill the hash table from lastHashed+1 to ip+mLength */
636 assert(ip + backwardMatchLength == lastHashed);
637 if (ip + mLength < ilimit) {
638 rollingHash = ZSTD_ldm_fillLdmHashTable(
639 ldmState, rollingHash, lastHashed,
640 ip + mLength, base, hBits,
642 lastHashed = ip + mLength - 1;
647 /* check immediate repcode */
648 while (ip < ilimit) {
649 U32 const current2 = (U32)(ip-base);
650 U32 const repIndex2 = current2 - repToConfirm[1];
651 const BYTE* repMatch2 = repIndex2 < dictLimit ?
652 dictBase + repIndex2 : base + repIndex2;
653 if ( (((U32)((dictLimit-1) - repIndex2) >= 3) &
654 (repIndex2 > lowestIndex)) /* intentional overflow */
655 && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
656 const BYTE* const repEnd2 = repIndex2 < dictLimit ?
658 size_t const repLength2 =
659 ZSTD_count_2segments(ip+4, repMatch2+4, iend,
660 repEnd2, lowPrefixPtr) + 4;
/* swap repToConfirm[1] <=> repToConfirm[0] */
662 U32 tmpOffset = repToConfirm[1];
663 repToConfirm[1] = repToConfirm[0];
664 repToConfirm[0] = tmpOffset;
/* repcode sequence : 0 literals, offset code 0 */
666 ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);
668 /* Fill the hash table from lastHashed+1 to ip+repLength2*/
669 if (ip + repLength2 < ilimit) {
670 rollingHash = ZSTD_ldm_fillLdmHashTable(
671 ldmState, rollingHash, lastHashed,
672 ip + repLength2, base, hBits,
674 lastHashed = ip + repLength2 - 1;
/* --- epilogue : flush confirmed rep codes and compress the tail --- */
685 for (i = 0; i < ZSTD_REP_NUM; i++)
686 seqStorePtr->rep[i] = repToConfirm[i];
688 ZSTD_ldm_limitTableUpdate(ctx, anchor);
689 ZSTD_ldm_fillFastTables(ctx, anchor);
691 /* Call the block compressor one last time on the last literals */
692 lastLiterals = blockCompressor(ctx, anchor, iend - anchor);
693 ctx->nextToUpdate = (U32)(iend - base);
695 /* Restore seqStorePtr->rep */
696 for (i = 0; i < ZSTD_REP_NUM; i++)
697 seqStorePtr->rep[i] = savedRep[i];
699 /* Return the last literals size */
703 size_t ZSTD_compressBlock_ldm_extDict(ZSTD_CCtx* ctx,
704 const void* src, size_t srcSize)
706 return ZSTD_compressBlock_ldm_extDict_generic(ctx, src, srcSize);