/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"
#include "zstd_lazy.h"


/*-*************************************
*  Binary Tree search
***************************************/
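
/* Note on the DUBT ("dynamically updated binary tree") structure used below :
 * ms->chainTable is re-purposed as a binary tree, with 2 entries per position
 * (bt[2n] = smaller child, bt[2n+1] = larger child, indexed by position & btMask).
 * New positions are appended unsorted : they are chained through bt[2n] and
 * flagged with ZSTD_DUBT_UNSORTED_MARK in bt[2n+1], then only sorted into the
 * tree lazily, at search time (see ZSTD_insertDUBT1() and ZSTD_DUBT_findBestMatch()). */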

static void
ZSTD_updateDUBT(ZSTD_matchState_t* ms,
                const BYTE* ip, const BYTE* iend,
                U32 mls)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32  const hashLog = cParams->hashLog;

    U32* const bt = ms->chainTable;
    U32  const btLog  = cParams->chainLog - 1;
    U32  const btMask = (1 << btLog) - 1;

    const BYTE* const base = ms->window.base;
    U32 const target = (U32)(ip - base);
    U32 idx = ms->nextToUpdate;

    DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)",
                idx, target, ms->window.dictLimit);
    assert(ip + 8 <= iend);   /* condition for ZSTD_hashPtr */
    (void)iend;

    assert(idx >= ms->window.dictLimit);   /* condition for valid base+idx */
    for ( ; idx < target ; idx++) {
        size_t const h  = ZSTD_hashPtr(base + idx, hashLog, mls);   /* assumption : ip + 8 <= iend */
        U32    const matchIndex = hashTable[h];

        U32*   const nextCandidatePtr = bt + 2*(idx&btMask);
        U32*   const sortMarkPtr      = nextCandidatePtr + 1;

        DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx);
        hashTable[h] = idx;               /* Update Hash Table */
        *nextCandidatePtr = matchIndex;   /* update BT like a chain */
        *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK;
    }
    ms->nextToUpdate = target;
}


/** ZSTD_insertDUBT1() :
 *  sort one already inserted but unsorted position
 *  assumption : current >= btlow == (current - btmask)
 *  doesn't fail */
static void
ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
                 U32 current, const BYTE* inputEnd,
                 U32 nbCompares, U32 btLow, const ZSTD_dictMode_e dictMode)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const bt = ms->chainTable;
    U32  const btLog  = cParams->chainLog - 1;
    U32  const btMask = (1 << btLog) - 1;
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const ip = (current>=dictLimit) ? base + current : dictBase + current;
    const BYTE* const iend = (current>=dictLimit) ? inputEnd : dictBase + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* match;
    U32* smallerPtr = bt + 2*(current&btMask);
    U32* largerPtr  = smallerPtr + 1;
    U32 matchIndex = *smallerPtr;   /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains the previous unsorted candidate */
    U32 dummy32;   /* to be nullified at the end */
    U32 const windowLow = ms->window.lowLimit;

    DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
                current, dictLimit, windowLow);
    assert(current >= btLow);
    assert(ip < iend);   /* condition for ZSTD_count */

    while (nbCompares-- && (matchIndex > windowLow)) {
        U32* const nextPtr = bt + 2*(matchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        assert(matchIndex < current);

        if ( (dictMode != ZSTD_extDict)
          || (matchIndex+matchLength >= dictLimit)  /* both in current segment*/
          || (current < dictLimit) /* both in extDict */) {
            const BYTE* const mBase = ( (dictMode != ZSTD_extDict)
                                     || (matchIndex+matchLength >= dictLimit)) ?
                                        base : dictBase;
            assert( (matchIndex+matchLength >= dictLimit)   /* might be wrong if extDict is incorrectly set to 0 */
                 || (current < dictLimit) );
            match = mBase + matchIndex;
            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
        } else {
            match = dictBase + matchIndex;
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
            if (matchIndex+matchLength >= dictLimit)
                match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
        }

        DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
                    current, matchIndex, (U32)matchLength);

        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
            break;   /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
        }

        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
            /* match is smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u",
                        matchIndex, btLow, nextPtr[1]);
            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
        } else {
            /* match is larger than current */
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u",
                        matchIndex, btLow, nextPtr[0]);
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
    }   }

    *smallerPtr = *largerPtr = 0;
}
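
/* Note : ZSTD_DUBT_findBetterDictMatch() extends a search into the binary tree
 * of an attached dictMatchState. The dictionary's tree is traversed read-only
 * (never modified), and candidate indices are translated into the current
 * window's index space through dictIndexDelta before being compared against
 * the current best match. */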

static size_t
ZSTD_DUBT_findBetterDictMatch (
        ZSTD_matchState_t* ms,
        const BYTE* const ip, const BYTE* const iend,
        size_t* offsetPtr,
        size_t bestLength,
        U32 nbCompares,
        U32 const mls,
        const ZSTD_dictMode_e dictMode)
{
    const ZSTD_matchState_t * const dms = ms->dictMatchState;
    const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;
    const U32 * const dictHashTable = dms->hashTable;
    U32         const hashLog = dmsCParams->hashLog;
    size_t      const h  = ZSTD_hashPtr(ip, hashLog, mls);
    U32               dictMatchIndex = dictHashTable[h];

    const BYTE* const base = ms->window.base;
    const BYTE* const prefixStart = base + ms->window.dictLimit;
    U32         const current = (U32)(ip-base);
    const BYTE* const dictBase = dms->window.base;
    const BYTE* const dictEnd = dms->window.nextSrc;
    U32         const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base);
    U32         const dictLowLimit = dms->window.lowLimit;
    U32         const dictIndexDelta = ms->window.lowLimit - dictHighLimit;

    U32*        const dictBt = dms->chainTable;
    U32         const btLog  = dmsCParams->chainLog - 1;
    U32         const btMask = (1 << btLog) - 1;
    U32         const btLow = (btMask >= dictHighLimit - dictLowLimit) ? dictLowLimit : dictHighLimit - btMask;

    size_t commonLengthSmaller=0, commonLengthLarger=0;

    (void)dictMode;
    assert(dictMode == ZSTD_dictMatchState);

    while (nbCompares-- && (dictMatchIndex > dictLowLimit)) {
        U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        const BYTE* match = dictBase + dictMatchIndex;
        matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
        if (dictMatchIndex+matchLength >= dictHighLimit)
            match = base + dictMatchIndex + dictIndexDelta;   /* to prepare for next usage of match[matchLength] */

        if (matchLength > bestLength) {
            U32 matchIndex = dictMatchIndex + dictIndexDelta;
            if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
                DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
                    current, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + current - matchIndex, dictMatchIndex, matchIndex);
                bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
            }
            if (ip+matchLength == iend) {   /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
                break;   /* drop, to guarantee consistency (miss a little bit of compression) */
            }
        }

        if (match[matchLength] < ip[matchLength]) {
            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
            commonLengthSmaller = matchLength;   /* all smaller will now have at least this guaranteed common length */
            dictMatchIndex = nextPtr[1];         /* new matchIndex larger than previous (closer to current) */
        } else {
            /* match is larger than current */
            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
            commonLengthLarger = matchLength;
            dictMatchIndex = nextPtr[0];
        }
    }

    if (bestLength >= MINMATCH) {
        U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
        DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
                    current, (U32)bestLength, (U32)*offsetPtr, mIndex);
    }
    return bestLength;
}
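
/* ZSTD_DUBT_findBestMatch() proceeds in three phases :
 * 1) walk the unsorted head of the candidate chain, reversing it into
 *    previousCandidate so positions can be revisited in insertion order ;
 * 2) batch-sort those stacked candidates into the tree via ZSTD_insertDUBT1() ;
 * 3) descend the (now sorted) binary tree to find the longest match,
 *    optionally extending the search into an attached dictionary. */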

static size_t
ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
                        const BYTE* const ip, const BYTE* const iend,
                        size_t* offsetPtr,
                        U32 const mls,
                        const ZSTD_dictMode_e dictMode)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32*   const hashTable = ms->hashTable;
    U32    const hashLog = cParams->hashLog;
    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
    U32          matchIndex  = hashTable[h];

    const BYTE* const base = ms->window.base;
    U32    const current = (U32)(ip-base);
    U32    const windowLow = ms->window.lowLimit;

    U32*   const bt = ms->chainTable;
    U32    const btLog  = cParams->chainLog - 1;
    U32    const btMask = (1 << btLog) - 1;
    U32    const btLow = (btMask >= current) ? 0 : current - btMask;
    U32    const unsortLimit = MAX(btLow, windowLow);

    U32*         nextCandidate = bt + 2*(matchIndex&btMask);
    U32*         unsortedMark = bt + 2*(matchIndex&btMask) + 1;
    U32          nbCompares = 1U << cParams->searchLog;
    U32          nbCandidates = nbCompares;
    U32          previousCandidate = 0;

    DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", current);
    assert(ip <= iend-8);   /* required for h calculation */

    /* reach end of unsorted candidates list */
    while ( (matchIndex > unsortLimit)
         && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)
         && (nbCandidates > 1) ) {
        DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
                    matchIndex);
        *unsortedMark = previousCandidate;   /* the unsortedMark becomes a reversed chain, to move up back to original position */
        previousCandidate = matchIndex;
        matchIndex = *nextCandidate;
        nextCandidate = bt + 2*(matchIndex&btMask);
        unsortedMark = bt + 2*(matchIndex&btMask) + 1;
        nbCandidates --;
    }

    if ( (matchIndex > unsortLimit)
      && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
        DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
                    matchIndex);
        *nextCandidate = *unsortedMark = 0;   /* nullify next candidate if it's still unsorted (note : simplification, detrimental to compression ratio, beneficial for speed) */
    }

    /* batch sort stacked candidates */
    matchIndex = previousCandidate;
    while (matchIndex) {  /* will end on matchIndex == 0 */
        U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
        U32 const nextCandidateIdx = *nextCandidateIdxPtr;
        ZSTD_insertDUBT1(ms, matchIndex, iend,
                         nbCandidates, unsortLimit, dictMode);
        matchIndex = nextCandidateIdx;
        nbCandidates++;
    }

    /* find longest match */
    {   size_t commonLengthSmaller = 0, commonLengthLarger = 0;
        const BYTE* const dictBase = ms->window.dictBase;
        const U32 dictLimit = ms->window.dictLimit;
        const BYTE* const dictEnd = dictBase + dictLimit;
        const BYTE* const prefixStart = base + dictLimit;
        U32* smallerPtr = bt + 2*(current&btMask);
        U32* largerPtr  = bt + 2*(current&btMask) + 1;
        U32 matchEndIdx = current + 8 + 1;
        U32 dummy32;   /* to be nullified at the end */
        size_t bestLength = 0;

        matchIndex  = hashTable[h];
        hashTable[h] = current;   /* Update Hash Table */

        while (nbCompares-- && (matchIndex > windowLow)) {
            U32* const nextPtr = bt + 2*(matchIndex & btMask);
            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
            const BYTE* match;

            if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) {
                match = base + matchIndex;
                matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
            } else {
                match = dictBase + matchIndex;
                matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
                if (matchIndex+matchLength >= dictLimit)
                    match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
            }

            if (matchLength > bestLength) {
                if (matchLength > matchEndIdx - matchIndex)
                    matchEndIdx = matchIndex + (U32)matchLength;
                if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
                    bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
                if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
                    if (dictMode == ZSTD_dictMatchState) {
                        nbCompares = 0; /* in addition to avoiding checking any
                                         * further in this loop, make sure we
                                         * skip checking in the dictionary. */
                    }
                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
                }
            }

            if (match[matchLength] < ip[matchLength]) {
                /* match is smaller than current */
                *smallerPtr = matchIndex;             /* update smaller idx */
                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
                if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
                smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
                matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
            } else {
                /* match is larger than current */
                *largerPtr = matchIndex;
                commonLengthLarger = matchLength;
                if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
                largerPtr = nextPtr;
                matchIndex = nextPtr[0];
        }   }

        *smallerPtr = *largerPtr = 0;

        if (dictMode == ZSTD_dictMatchState && nbCompares) {
            bestLength = ZSTD_DUBT_findBetterDictMatch(
                    ms, ip, iend,
                    offsetPtr, bestLength, nbCompares,
                    mls, dictMode);
        }

        assert(matchEndIdx > current+8); /* ensure nextToUpdate is increased */
        ms->nextToUpdate = matchEndIdx - 8;   /* skip repetitive patterns */
        if (bestLength >= MINMATCH) {
            U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
            DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
                        current, (U32)bestLength, (U32)*offsetPtr, mIndex);
        }
        return bestLength;
    }
}


/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
FORCE_INLINE_TEMPLATE size_t
ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
                const BYTE* const ip, const BYTE* const iLimit,
                      size_t* offsetPtr,
                const U32 mls /* template */,
                const ZSTD_dictMode_e dictMode)
{
    DEBUGLOG(7, "ZSTD_BtFindBestMatch");
    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
    ZSTD_updateDUBT(ms, ip, iLimit, mls);
    return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
}

static size_t
ZSTD_BtFindBestMatch_selectMLS (  ZSTD_matchState_t* ms,
                            const BYTE* ip, const BYTE* const iLimit,
                                  size_t* offsetPtr)
{
    switch(ms->cParams.searchLength)
    {
    default : /* includes case 3 */
    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
    case 7 :
    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
    }
}

static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS (
                        ZSTD_matchState_t* ms,
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* offsetPtr)
{
    switch(ms->cParams.searchLength)
    {
    default : /* includes case 3 */
    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
    case 7 :
    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
    }
}

static size_t ZSTD_BtFindBestMatch_extDict_selectMLS (
                        ZSTD_matchState_t* ms,
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* offsetPtr)
{
    switch(ms->cParams.searchLength)
    {
    default : /* includes case 3 */
    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
    case 7 :
    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
    }
}
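
/* Note : the *_selectMLS() wrappers above (and their Hc counterparts below)
 * emulate C++ templates : FORCE_INLINE_TEMPLATE inlines the generic search into
 * each `case`, so `mls` and `dictMode` become compile-time constants and every
 * branch on them is resolved at compilation, at the cost of some code size. */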


/* *********************************
*  Hash Chain
***********************************/
#define NEXT_IN_CHAIN(d, mask)   chainTable[(d) & mask]
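
/* Hash chain layout : hashTable[h] holds the most recent position whose first
 * mls bytes hash to h ; chainTable[pos & chainMask] links each inserted position
 * to the previous position with the same hash. A search therefore walks
 * candidates from most recent to oldest, until the index falls below the
 * window limit or the chain capacity is exhausted. */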

/* Update chains up to ip (excluded)
   Assumption : always within prefix (i.e. not within extDict) */
static U32 ZSTD_insertAndFindFirstIndex_internal(
                        ZSTD_matchState_t* ms,
                        const ZSTD_compressionParameters* const cParams,
                        const BYTE* ip, U32 const mls)
{
    U32* const hashTable  = ms->hashTable;
    const U32 hashLog = cParams->hashLog;
    U32* const chainTable = ms->chainTable;
    const U32 chainMask = (1 << cParams->chainLog) - 1;
    const BYTE* const base = ms->window.base;
    const U32 target = (U32)(ip - base);
    U32 idx = ms->nextToUpdate;

    while(idx < target) { /* catch up */
        size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
        NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
        hashTable[h] = idx;
        idx++;
    }

    ms->nextToUpdate = target;
    return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
}

U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.searchLength);
}


/* inlining is important to hardwire a hot branch (template emulation) */
FORCE_INLINE_TEMPLATE
size_t ZSTD_HcFindBestMatch_generic (
                        ZSTD_matchState_t* ms,
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 mls, const ZSTD_dictMode_e dictMode)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const chainTable = ms->chainTable;
    const U32 chainSize = (1 << cParams->chainLog);
    const U32 chainMask = chainSize-1;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const U32 lowLimit = ms->window.lowLimit;
    const U32 current = (U32)(ip-base);
    const U32 minChain = current > chainSize ? current - chainSize : 0;
    U32 nbAttempts = 1U << cParams->searchLog;
    size_t ml=4-1;

    /* HC4 match finder */
    U32 matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);

    for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {
        size_t currentMl=0;
        if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
            const BYTE* const match = base + matchIndex;
            if (match[ml] == ip[ml])   /* potentially better */
                currentMl = ZSTD_count(ip, match, iLimit);
        } else {
            const BYTE* const match = dictBase + matchIndex;
            assert(match+4 <= dictEnd);
            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
        }

        /* save best solution */
        if (currentMl > ml) {
            ml = currentMl;
            *offsetPtr = current - matchIndex + ZSTD_REP_MOVE;
            if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
        }

        if (matchIndex <= minChain) break;
        matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
    }

    if (dictMode == ZSTD_dictMatchState) {
        const ZSTD_matchState_t* const dms = ms->dictMatchState;
        const U32* const dmsChainTable = dms->chainTable;
        const U32 dmsChainSize         = (1 << dms->cParams.chainLog);
        const U32 dmsChainMask         = dmsChainSize - 1;
        const U32 dmsLowestIndex       = dms->window.dictLimit;
        const BYTE* const dmsBase      = dms->window.base;
        const BYTE* const dmsEnd       = dms->window.nextSrc;
        const U32 dmsSize              = (U32)(dmsEnd - dmsBase);
        const U32 dmsIndexDelta        = dictLimit - dmsSize;
        const U32 dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0;

        matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)];

        for ( ; (matchIndex>dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {
            size_t currentMl=0;
            const BYTE* const match = dmsBase + matchIndex;
            assert(match+4 <= dmsEnd);
            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;

            /* save best solution */
            if (currentMl > ml) {
                ml = currentMl;
                *offsetPtr = current - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE;
                if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
            }

            if (matchIndex <= dmsMinChain) break;
            matchIndex = dmsChainTable[matchIndex & dmsChainMask];
        }
    }

    return ml;
}
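
/* Note : nbAttempts is shared between the prefix search and the dictMatchState
 * search above : attempts spent in the current window reduce the budget left
 * for the dictionary, keeping total search effort bounded by 1<<searchLog. */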

FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
                        ZSTD_matchState_t* ms,
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* offsetPtr)
{
    switch(ms->cParams.searchLength)
    {
    default : /* includes case 3 */
    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
    case 7 :
    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
    }
}

static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS (
                        ZSTD_matchState_t* ms,
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* offsetPtr)
{
    switch(ms->cParams.searchLength)
    {
    default : /* includes case 3 */
    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
    case 7 :
    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
    }
}

FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
                        ZSTD_matchState_t* ms,
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* offsetPtr)
{
    switch(ms->cParams.searchLength)
    {
    default : /* includes case 3 */
    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
    case 7 :
    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
    }
}


/* *******************************
*  Common parser - lazy strategy
*********************************/
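
/* The lazy parser below is shared by greedy, lazy, lazy2 and btlazy2 :
 * `searchMethod` selects the match finder (0 = hash chain, 1 = binary tree),
 * and `depth` is the number of positions the parser is willing to delay a
 * match while looking for a better one (0 = greedy, 1 = lazy, 2 = lazy2).
 * A later match is preferred only when its estimated gain, roughly
 * 4*matchLength - log2(offset), beats the current one by a small margin ;
 * repcodes (offset == 0 here) are favored, being cheapest to encode. */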

FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_lazy_generic(
                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
                        U32 rep[ZSTD_REP_NUM],
                        const void* src, size_t srcSize,
                        const U32 searchMethod, const U32 depth,
                        ZSTD_dictMode_e const dictMode)
{
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    const BYTE* const base = ms->window.base;
    const U32 prefixLowestIndex = ms->window.dictLimit;
    const BYTE* const prefixLowest = base + prefixLowestIndex;

    typedef size_t (*searchMax_f)(
                        ZSTD_matchState_t* ms,
                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
    searchMax_f const searchMax = dictMode == ZSTD_dictMatchState ?
        (searchMethod ? ZSTD_BtFindBestMatch_dictMatchState_selectMLS : ZSTD_HcFindBestMatch_dictMatchState_selectMLS) :
        (searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS);
    U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;

    const ZSTD_matchState_t* const dms = ms->dictMatchState;
    const U32 dictLowestIndex      = dictMode == ZSTD_dictMatchState ?
                                     dms->window.dictLimit : 0;
    const BYTE* const dictBase     = dictMode == ZSTD_dictMatchState ?
                                     dms->window.base : NULL;
    const BYTE* const dictLowest   = dictMode == ZSTD_dictMatchState ?
                                     dictBase + dictLowestIndex : NULL;
    const BYTE* const dictEnd      = dictMode == ZSTD_dictMatchState ?
                                     dms->window.nextSrc : NULL;
    const U32 dictIndexDelta       = dictMode == ZSTD_dictMatchState ?
                                     prefixLowestIndex - (U32)(dictEnd - dictBase) :
                                     0;
    const U32 dictAndPrefixLength = (U32)(ip - prefixLowest + dictEnd - dictLowest);

    /* init */
    ip += (dictAndPrefixLength == 0);
    ms->nextToUpdate3 = ms->nextToUpdate;
    if (dictMode == ZSTD_noDict) {
        U32 const maxRep = (U32)(ip - prefixLowest);
        if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
        if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
    }
    if (dictMode == ZSTD_dictMatchState) {
        /* dictMatchState repCode checks don't currently handle repCode == 0
         * disabling. */
        assert(offset_1 <= dictAndPrefixLength);
        assert(offset_2 <= dictAndPrefixLength);
    }

    /* Match Loop */
    while (ip < ilimit) {
        size_t matchLength=0;
        size_t offset=0;
        const BYTE* start=ip+1;

        /* check repCode */
        if (dictMode == ZSTD_dictMatchState) {
            const U32 repIndex = (U32)(ip - base) + 1 - offset_1;
            const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
                                && repIndex < prefixLowestIndex) ?
                                   dictBase + (repIndex - dictIndexDelta) :
                                   base + repIndex;
            if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
                && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
                const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
                if (depth==0) goto _storeSequence;
            }
        }
        if ( dictMode == ZSTD_noDict
          && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
            matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
            if (depth==0) goto _storeSequence;
        }

        /* first search (depth 0) */
        {   size_t offsetFound = 999999999;
            size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
            if (ml2 > matchLength)
                matchLength = ml2, start = ip, offset=offsetFound;
        }

        if (matchLength < 4) {
            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
            continue;
        }

        /* let's try to find a better solution */
        if (depth>=1)
        while (ip<ilimit) {
            ip ++;
            if ( (dictMode == ZSTD_noDict)
              && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
                size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
                int const gain2 = (int)(mlRep * 3);
                int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
                if ((mlRep >= 4) && (gain2 > gain1))
                    matchLength = mlRep, offset = 0, start = ip;
            }
            if (dictMode == ZSTD_dictMatchState) {
                const U32 repIndex = (U32)(ip - base) - offset_1;
                const BYTE* repMatch = repIndex < prefixLowestIndex ?
                               dictBase + (repIndex - dictIndexDelta) :
                               base + repIndex;
                if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
                    && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
                    const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
                    size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
                    int const gain2 = (int)(mlRep * 3);
                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
                    if ((mlRep >= 4) && (gain2 > gain1))
                        matchLength = mlRep, offset = 0, start = ip;
                }
            }
            {   size_t offset2=999999999;
                size_t const ml2 = searchMax(ms, ip, iend, &offset2);
                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
                if ((ml2 >= 4) && (gain2 > gain1)) {
                    matchLength = ml2, offset = offset2, start = ip;
                    continue;   /* search a better one */
            }   }

            /* let's find an even better one */
            if ((depth==2) && (ip<ilimit)) {
                ip ++;
                if ( (dictMode == ZSTD_noDict)
                  && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
                    size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
                    int const gain2 = (int)(mlRep * 4);
                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
                    if ((mlRep >= 4) && (gain2 > gain1))
                        matchLength = mlRep, offset = 0, start = ip;
                }
                if (dictMode == ZSTD_dictMatchState) {
                    const U32 repIndex = (U32)(ip - base) - offset_1;
                    const BYTE* repMatch = repIndex < prefixLowestIndex ?
                                   dictBase + (repIndex - dictIndexDelta) :
                                   base + repIndex;
                    if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
                        && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
                        const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
                        size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
                        int const gain2 = (int)(mlRep * 4);
                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
                        if ((mlRep >= 4) && (gain2 > gain1))
                            matchLength = mlRep, offset = 0, start = ip;
                    }
                }
                {   size_t offset2=999999999;
                    size_t const ml2 = searchMax(ms, ip, iend, &offset2);
                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
                    if ((ml2 >= 4) && (gain2 > gain1)) {
                        matchLength = ml2, offset = offset2, start = ip;
                        continue;
            }   }   }
            break;  /* nothing found : store previous solution */
        }

        /* NOTE:
         * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
         * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
         * overflows the pointer, which is undefined behavior.
         */
        /* catch up */
        if (offset) {
            if (dictMode == ZSTD_noDict) {
                while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > prefixLowest))
                     && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) )  /* only search for offset within prefix */
                    { start--; matchLength++; }
            }
            if (dictMode == ZSTD_dictMatchState) {
                U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
                const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
                const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
                while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
            }
            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
        }
        /* store sequence */
_storeSequence:
        {   size_t const litLength = start - anchor;
            ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
            anchor = ip = start + matchLength;
        }

        /* check immediate repcode */
        if (dictMode == ZSTD_dictMatchState) {
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex = current2 - offset_2;
                const BYTE* repMatch = dictMode == ZSTD_dictMatchState
                    && repIndex < prefixLowestIndex ?
                        dictBase - dictIndexDelta + repIndex :
                        base + repIndex;
                if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */)
                   && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
                    matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
                    offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
                    ip += matchLength;
                    anchor = ip;
                    continue;
                }
                break;
            }
        }
        if (dictMode == ZSTD_noDict) {
            while ( ((ip <= ilimit) & (offset_2>0))
                 && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
                /* store sequence */
                matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap repcodes */
                ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
                ip += matchLength;
                anchor = ip;
                continue;   /* faster when present ... (?) */
    }   }   }

    /* Save reps for next block */
    rep[0] = offset_1 ? offset_1 : savedOffset;
    rep[1] = offset_2 ? offset_2 : savedOffset;

    /* Return the last literals size */
    return iend - anchor;
}


size_t ZSTD_compressBlock_btlazy2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_noDict);
}

size_t ZSTD_compressBlock_lazy2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_noDict);
}

size_t ZSTD_compressBlock_lazy(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_noDict);
}

size_t ZSTD_compressBlock_greedy(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_noDict);
}

size_t ZSTD_compressBlock_btlazy2_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_dictMatchState);
}

size_t ZSTD_compressBlock_lazy2_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_dictMatchState);
}

size_t ZSTD_compressBlock_lazy_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_dictMatchState);
}

size_t ZSTD_compressBlock_greedy_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_dictMatchState);
}
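
/* Illustration (hypothetical caller, not part of this file) : each wrapper above
 * is a drop-in ZSTD_blockCompressor, invoked on one block at a time, e.g. :
 *
 *     size_t const lastLLSize = ZSTD_compressBlock_lazy2(ms, seqStore, rep, src, srcSize);
 *
 * Sequences are accumulated into seqStore ; the return value is the size of the
 * trailing literals segment, which the caller emits after the last sequence. */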


FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_lazy_extDict_generic(
                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
                        U32 rep[ZSTD_REP_NUM],
                        const void* src, size_t srcSize,
                        const U32 searchMethod, const U32 depth)
{
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    const BYTE* const base = ms->window.base;
    const U32 dictLimit = ms->window.dictLimit;
    const U32 lowestIndex = ms->window.lowLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const dictEnd  = dictBase + dictLimit;
    const BYTE* const dictStart  = dictBase + lowestIndex;

    typedef size_t (*searchMax_f)(
                        ZSTD_matchState_t* ms,
                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
    searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;

    U32 offset_1 = rep[0], offset_2 = rep[1];

    /* init */
    ms->nextToUpdate3 = ms->nextToUpdate;
    ip += (ip == prefixStart);

    /* Match Loop */
    while (ip < ilimit) {
        size_t matchLength=0;
        size_t offset=0;
        const BYTE* start=ip+1;
        U32 current = (U32)(ip-base);

        /* check repCode */
        {   const U32 repIndex = (U32)(current+1 - offset_1);
            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
            const BYTE* const repMatch = repBase + repIndex;
            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))   /* intentional overflow */
            if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
                /* repcode detected we should take it */
                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                if (depth==0) goto _storeSequence;
        }   }

        /* first search (depth 0) */
        {   size_t offsetFound = 999999999;
            size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
            if (ml2 > matchLength)
                matchLength = ml2, start = ip, offset=offsetFound;
        }

        if (matchLength < 4) {
            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
            continue;
        }

        /* let's try to find a better solution */
        if (depth>=1)
        while (ip<ilimit) {
            ip ++;
            current++;
            /* check repCode */
            if (offset) {
                const U32 repIndex = (U32)(current - offset_1);
                const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
                const BYTE* const repMatch = repBase + repIndex;
                if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
                if (MEM_read32(ip) == MEM_read32(repMatch)) {
                    /* repcode detected */
                    const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                    size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                    int const gain2 = (int)(repLength * 3);
                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
                    if ((repLength >= 4) && (gain2 > gain1))
                        matchLength = repLength, offset = 0, start = ip;
            }   }

            /* search match, depth 1 */
            {   size_t offset2=999999999;
                size_t const ml2 = searchMax(ms, ip, iend, &offset2);
                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
                if ((ml2 >= 4) && (gain2 > gain1)) {
                    matchLength = ml2, offset = offset2, start = ip;
                    continue;   /* search a better one */
            }   }

            /* let's find an even better one */
            if ((depth==2) && (ip<ilimit)) {
                ip ++;
                current++;
                /* check repCode */
                if (offset) {
                    const U32 repIndex = (U32)(current - offset_1);
                    const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
                    const BYTE* const repMatch = repBase + repIndex;
                    if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
                    if (MEM_read32(ip) == MEM_read32(repMatch)) {
                        /* repcode detected */
                        const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                        size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                        int const gain2 = (int)(repLength * 4);
                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
                        if ((repLength >= 4) && (gain2 > gain1))
                            matchLength = repLength, offset = 0, start = ip;
                }   }

                /* search match, depth 2 */
                {   size_t offset2=999999999;
                    size_t const ml2 = searchMax(ms, ip, iend, &offset2);
                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
                    if ((ml2 >= 4) && (gain2 > gain1)) {
                        matchLength = ml2, offset = offset2, start = ip;
                        continue;
            }   }   }
            break;  /* nothing found : store previous solution */
        }

        /* catch up */
        if (offset) {
            U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
            const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
            const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
            while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
        }

        /* store sequence */
_storeSequence:
        {   size_t const litLength = start - anchor;
            ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
            anchor = ip = start + matchLength;
        }

        /* check immediate repcode */
        while (ip <= ilimit) {
            const U32 repIndex = (U32)((ip-base) - offset_2);
            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
            const BYTE* const repMatch = repBase + repIndex;
            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))  /* intentional overflow */
            if (MEM_read32(ip) == MEM_read32(repMatch)) {
                /* repcode detected we should take it */
                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset history */
                ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
                ip += matchLength;
                anchor = ip;
                continue;   /* faster when present ... (?) */
            }
            break;
    }   }

    /* Save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return iend - anchor;
}


size_t ZSTD_compressBlock_greedy_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 0);
}

size_t ZSTD_compressBlock_lazy_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 1);
}

size_t ZSTD_compressBlock_lazy2_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 2);
}

size_t ZSTD_compressBlock_btlazy2_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 1, 2);
}