/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"
#include "zstd_lazy.h"

/*-*************************************
*  Binary Tree search
***************************************/
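
/* note : about the "DUBT" :
 * each position owns a pair of adjacent U32 within ms->chainTable.
 * New candidates are appended in O(1), chained like a hash chain,
 * and tagged with ZSTD_DUBT_UNSORTED_MARK ;
 * they are only sorted into an actual binary tree lazily,
 * when a later search reaches them
 * (see ZSTD_updateDUBT() and ZSTD_DUBT_findBestMatch() below). */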

static void
ZSTD_updateDUBT(ZSTD_matchState_t* ms,
                const BYTE* ip, const BYTE* iend,
                U32 mls)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32  const hashLog = cParams->hashLog;

    U32* const bt = ms->chainTable;
    U32  const btLog  = cParams->chainLog - 1;
    U32  const btMask = (1 << btLog) - 1;

    const BYTE* const base = ms->window.base;
    U32 const target = (U32)(ip - base);
    U32 idx = ms->nextToUpdate;

    DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)",
                idx, target, ms->window.dictLimit);
    assert(ip + 8 <= iend);   /* condition for ZSTD_hashPtr */
    (void)iend;

    assert(idx >= ms->window.dictLimit);   /* condition for valid base+idx */
    for ( ; idx < target ; idx++) {
        size_t const h  = ZSTD_hashPtr(base + idx, hashLog, mls);   /* assumption : ip + 8 <= iend */
        U32    const matchIndex = hashTable[h];

        U32*   const nextCandidatePtr = bt + 2*(idx&btMask);
        U32*   const sortMarkPtr  = nextCandidatePtr + 1;

        DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx);
        hashTable[h] = idx;   /* Update Hash Table */
        *nextCandidatePtr = matchIndex;   /* update BT like a chain */
        *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK;
    }
    ms->nextToUpdate = target;
}

/** ZSTD_insertDUBT1() :
 *  sort one already inserted but unsorted position
 *  assumption : current >= btlow == (current - btmask)
 *  doesn't fail */
static void
ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
                 U32 current, const BYTE* inputEnd,
                 U32 nbCompares, U32 btLow,
                 const ZSTD_dictMode_e dictMode)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const bt = ms->chainTable;
    U32  const btLog  = cParams->chainLog - 1;
    U32  const btMask = (1 << btLog) - 1;
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const ip = (current>=dictLimit) ? base + current : dictBase + current;
    const BYTE* const iend = (current>=dictLimit) ? inputEnd : dictBase + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* match;
    U32* smallerPtr = bt + 2*(current&btMask);
    U32* largerPtr  = smallerPtr + 1;
    U32 matchIndex = *smallerPtr;   /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */
    U32 dummy32;   /* to be nullified at the end */
    U32 const windowLow = ms->window.lowLimit;

    DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
                current, dictLimit, windowLow);
    assert(current >= btLow);
    assert(ip < iend);   /* condition for ZSTD_count */

    while (nbCompares-- && (matchIndex > windowLow)) {
        U32* const nextPtr = bt + 2*(matchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        assert(matchIndex < current);
        /* note : all candidates are now supposed sorted,
         * but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK
         * when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */

        if ( (dictMode != ZSTD_extDict)
          || (matchIndex+matchLength >= dictLimit)  /* both in current segment*/
          || (current < dictLimit) /* both in extDict */) {
            const BYTE* const mBase = ( (dictMode != ZSTD_extDict)
                                     || (matchIndex+matchLength >= dictLimit)) ?
                                        base : dictBase;
            assert( (matchIndex+matchLength >= dictLimit)   /* might be wrong if extDict is incorrectly set to 0 */
                 || (current < dictLimit) );
            match = mBase + matchIndex;
            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
        } else {
            match = dictBase + matchIndex;
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
            if (matchIndex+matchLength >= dictLimit)
                match = base + matchIndex;   /* preparation for next read of match[matchLength] */
        }

        DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
                    current, matchIndex, (U32)matchLength);

        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
            break;   /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
        }

        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
            /* match is smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u",
                        matchIndex, btLow, nextPtr[1]);
            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
        } else {
            /* match is larger than current */
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u",
                        matchIndex, btLow, nextPtr[0]);
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
    }   }

    *smallerPtr = *largerPtr = 0;
}
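
/* note : once a position is sorted, its pair of U32 acts as child links :
 * bt[2*(idx&btMask)] heads its "smaller" subtree,
 * bt[2*(idx&btMask)+1] (the former sort-mark slot) its "larger" one. */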

static size_t
ZSTD_DUBT_findBetterDictMatch (
        ZSTD_matchState_t* ms,
        const BYTE* const ip, const BYTE* const iend,
        size_t* offsetPtr,
        size_t bestLength,
        U32 nbCompares,
        U32 const mls,
        const ZSTD_dictMode_e dictMode)
{
    const ZSTD_matchState_t * const dms = ms->dictMatchState;
    const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;
    const U32 * const dictHashTable = dms->hashTable;
    U32         const hashLog = dmsCParams->hashLog;
    size_t      const h  = ZSTD_hashPtr(ip, hashLog, mls);
    U32               dictMatchIndex = dictHashTable[h];

    const BYTE* const base = ms->window.base;
    const BYTE* const prefixStart = base + ms->window.dictLimit;
    U32         const current = (U32)(ip-base);
    const BYTE* const dictBase = dms->window.base;
    const BYTE* const dictEnd = dms->window.nextSrc;
    U32         const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base);
    U32         const dictLowLimit = dms->window.lowLimit;
    U32         const dictIndexDelta = ms->window.lowLimit - dictHighLimit;

    U32*        const dictBt = dms->chainTable;
    U32         const btLog  = dmsCParams->chainLog - 1;
    U32         const btMask = (1 << btLog) - 1;
    U32         const btLow = (btMask >= dictHighLimit - dictLowLimit) ? dictLowLimit : dictHighLimit - btMask;

    size_t commonLengthSmaller=0, commonLengthLarger=0;

    (void)dictMode;
    assert(dictMode == ZSTD_dictMatchState);

    while (nbCompares-- && (dictMatchIndex > dictLowLimit)) {
        U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        const BYTE* match = dictBase + dictMatchIndex;
        matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
        if (dictMatchIndex+matchLength >= dictHighLimit)
            match = base + dictMatchIndex + dictIndexDelta;   /* to prepare for next usage of match[matchLength] */

        if (matchLength > bestLength) {
            U32 matchIndex = dictMatchIndex + dictIndexDelta;
            if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
                DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
                    current, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + current - matchIndex, dictMatchIndex, matchIndex);
                bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
            }
            if (ip+matchLength == iend) {   /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
                break;   /* drop, to guarantee consistency (miss a little bit of compression) */
            }
        }

        if (match[matchLength] < ip[matchLength]) {
            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            dictMatchIndex = nextPtr[1];          /* new matchIndex larger than previous (closer to current) */
        } else {
            /* match is larger than current */
            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
            commonLengthLarger = matchLength;
            dictMatchIndex = nextPtr[0];
        }
    }

    if (bestLength >= MINMATCH) {
        U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
        DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
                    current, (U32)bestLength, (U32)*offsetPtr, mIndex);
    }
    return bestLength;
}

static size_t
ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
                        const BYTE* const ip, const BYTE* const iend,
                        size_t* offsetPtr,
                        U32 const mls,
                        const ZSTD_dictMode_e dictMode)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32*   const hashTable = ms->hashTable;
    U32    const hashLog = cParams->hashLog;
    size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
    U32          matchIndex  = hashTable[h];

    const BYTE* const base = ms->window.base;
    U32    const current = (U32)(ip-base);
    U32    const windowLow = ms->window.lowLimit;

    U32*   const bt = ms->chainTable;
    U32    const btLog  = cParams->chainLog - 1;
    U32    const btMask = (1 << btLog) - 1;
    U32    const btLow = (btMask >= current) ? 0 : current - btMask;
    U32    const unsortLimit = MAX(btLow, windowLow);

    U32*         nextCandidate = bt + 2*(matchIndex&btMask);
    U32*         unsortedMark = bt + 2*(matchIndex&btMask) + 1;
    U32          nbCompares = 1U << cParams->searchLog;
    U32          nbCandidates = nbCompares;
    U32          previousCandidate = 0;

    DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", current);
    assert(ip <= iend-8);   /* required for h calculation */

    /* reach end of unsorted candidates list */
    while ( (matchIndex > unsortLimit)
         && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)
         && (nbCandidates > 1) ) {
        DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
                    matchIndex);
        *unsortedMark = previousCandidate;   /* the unsortedMark becomes a reversed chain, to move up back to original position */
        previousCandidate = matchIndex;
        matchIndex = *nextCandidate;
        nextCandidate = bt + 2*(matchIndex&btMask);
        unsortedMark = bt + 2*(matchIndex&btMask) + 1;
        nbCandidates --;
    }

    /* nullify last candidate if it's still unsorted
     * simplification, detrimental to compression ratio, beneficial for speed */
    if ( (matchIndex > unsortLimit)
      && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
        DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
                    matchIndex);
        *nextCandidate = *unsortedMark = 0;
    }

    /* batch sort stacked candidates */
    matchIndex = previousCandidate;
    while (matchIndex) {   /* will end on matchIndex == 0 */
        U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
        U32 const nextCandidateIdx = *nextCandidateIdxPtr;
        ZSTD_insertDUBT1(ms, matchIndex, iend,
                         nbCandidates, unsortLimit, dictMode);
        matchIndex = nextCandidateIdx;
        nbCandidates++;
    }

    /* find longest match */
    {   size_t commonLengthSmaller = 0, commonLengthLarger = 0;
        const BYTE* const dictBase = ms->window.dictBase;
        const U32 dictLimit = ms->window.dictLimit;
        const BYTE* const dictEnd = dictBase + dictLimit;
        const BYTE* const prefixStart = base + dictLimit;
        U32* smallerPtr = bt + 2*(current&btMask);
        U32* largerPtr  = bt + 2*(current&btMask) + 1;
        U32 matchEndIdx = current + 8 + 1;
        U32 dummy32;   /* to be nullified at the end */
        size_t bestLength = 0;

        matchIndex  = hashTable[h];
        hashTable[h] = current;   /* Update Hash Table */

        while (nbCompares-- && (matchIndex > windowLow)) {
            U32* const nextPtr = bt + 2*(matchIndex & btMask);
            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
            const BYTE* match;

            if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) {
                match = base + matchIndex;
                matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
            } else {
                match = dictBase + matchIndex;
                matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
                if (matchIndex+matchLength >= dictLimit)
                    match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
            }

            if (matchLength > bestLength) {
                if (matchLength > matchEndIdx - matchIndex)
                    matchEndIdx = matchIndex + (U32)matchLength;
                if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
                    bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
                if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
                    if (dictMode == ZSTD_dictMatchState) {
                        nbCompares = 0; /* in addition to avoiding checking any
                                         * further in this loop, make sure we
                                         * skip checking in the dictionary. */
                    }
                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
                }
            }

            if (match[matchLength] < ip[matchLength]) {
                /* match is smaller than current */
                *smallerPtr = matchIndex;             /* update smaller idx */
                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
                if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
                smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
                matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
            } else {
                /* match is larger than current */
                *largerPtr = matchIndex;
                commonLengthLarger = matchLength;
                if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
                largerPtr = nextPtr;
                matchIndex = nextPtr[0];
        }   }

        *smallerPtr = *largerPtr = 0;

        if (dictMode == ZSTD_dictMatchState && nbCompares) {
            bestLength = ZSTD_DUBT_findBetterDictMatch(
                    ms, ip, iend,
                    offsetPtr, bestLength, nbCompares,
                    mls, dictMode);
        }

        assert(matchEndIdx > current+8);   /* ensure nextToUpdate is increased */
        ms->nextToUpdate = matchEndIdx - 8;   /* skip repetitive patterns */
        if (bestLength >= MINMATCH) {
            U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
            DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
                        current, (U32)bestLength, (U32)*offsetPtr, mIndex);
        }
        return bestLength;
    }
}

/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
FORCE_INLINE_TEMPLATE size_t
ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
                const BYTE* const ip, const BYTE* const iLimit,
                      size_t* offsetPtr,
                const U32 mls /* template */,
                const ZSTD_dictMode_e dictMode)
{
    DEBUGLOG(7, "ZSTD_BtFindBestMatch");
    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
    ZSTD_updateDUBT(ms, ip, iLimit, mls);
    return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
}

static size_t
ZSTD_BtFindBestMatch_selectMLS (  ZSTD_matchState_t* ms,
                            const BYTE* ip, const BYTE* const iLimit,
                                  size_t* offsetPtr)
{
    switch(ms->cParams.minMatch)
    {
    default : /* includes case 3 */
    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
    case 7 :
    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
    }
}

static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS (
                        ZSTD_matchState_t* ms,
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* offsetPtr)
{
    switch(ms->cParams.minMatch)
    {
    default : /* includes case 3 */
    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
    case 7 :
    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
    }
}

static size_t ZSTD_BtFindBestMatch_extDict_selectMLS (
                        ZSTD_matchState_t* ms,
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* offsetPtr)
{
    switch(ms->cParams.minMatch)
    {
    default : /* includes case 3 */
    case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
    case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
    case 7 :
    case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
    }
}

/* *********************************
*  Hash Chain
***********************************/
#define NEXT_IN_CHAIN(d, mask)   chainTable[(d) & (mask)]
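
/* note : chainTable is used as a circular buffer :
 * NEXT_IN_CHAIN(idx, mask) stores, for position idx,
 * the previous position whose content shares the same hash,
 * forming a single-linked chain of candidates from most to least recent. */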

/* Update chains up to ip (excluded)
   Assumption : always within prefix (i.e. not within extDict) */
static U32 ZSTD_insertAndFindFirstIndex_internal(
                        ZSTD_matchState_t* ms,
                        const ZSTD_compressionParameters* const cParams,
                        const BYTE* ip, U32 const mls)
{
    U32* const hashTable  = ms->hashTable;
    const U32 hashLog = cParams->hashLog;
    U32* const chainTable = ms->chainTable;
    const U32 chainMask = (1 << cParams->chainLog) - 1;
    const BYTE* const base = ms->window.base;
    const U32 target = (U32)(ip - base);
    U32 idx = ms->nextToUpdate;

    while(idx < target) { /* catch up */
        size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
        NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
        hashTable[h] = idx;
        idx++;
    }

    ms->nextToUpdate = target;
    return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
}

U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);
}

/* inlining is important to hardwire a hot branch (template emulation) */
FORCE_INLINE_TEMPLATE
size_t ZSTD_HcFindBestMatch_generic (
                        ZSTD_matchState_t* ms,
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 mls, const ZSTD_dictMode_e dictMode)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const chainTable = ms->chainTable;
    const U32 chainSize = (1 << cParams->chainLog);
    const U32 chainMask = chainSize-1;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const U32 lowLimit = ms->window.lowLimit;
    const U32 current = (U32)(ip-base);
    const U32 minChain = current > chainSize ? current - chainSize : 0;
    U32 nbAttempts = 1U << cParams->searchLog;
    size_t ml=4-1;

    /* HC4 match finder */
    U32 matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);

    for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {
        size_t currentMl=0;
        if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
            const BYTE* const match = base + matchIndex;
            assert(matchIndex >= dictLimit);   /* ensures this is true if dictMode != ZSTD_extDict */
            if (match[ml] == ip[ml])   /* potentially better */
                currentMl = ZSTD_count(ip, match, iLimit);
        } else {
            const BYTE* const match = dictBase + matchIndex;
            assert(match+4 <= dictEnd);
            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
        }

        /* save best solution */
        if (currentMl > ml) {
            ml = currentMl;
            *offsetPtr = current - matchIndex + ZSTD_REP_MOVE;
            if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
        }

        if (matchIndex <= minChain) break;
        matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
    }

    if (dictMode == ZSTD_dictMatchState) {
        const ZSTD_matchState_t* const dms = ms->dictMatchState;
        const U32* const dmsChainTable = dms->chainTable;
        const U32 dmsChainSize  = (1 << dms->cParams.chainLog);
        const U32 dmsChainMask  = dmsChainSize - 1;
        const U32 dmsLowestIndex = dms->window.dictLimit;
        const BYTE* const dmsBase = dms->window.base;
        const BYTE* const dmsEnd  = dms->window.nextSrc;
        const U32 dmsSize = (U32)(dmsEnd - dmsBase);
        const U32 dmsIndexDelta = dictLimit - dmsSize;
        const U32 dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0;

        matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)];

        for ( ; (matchIndex>dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {
            size_t currentMl=0;
            const BYTE* const match = dmsBase + matchIndex;
            assert(match+4 <= dmsEnd);
            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;

            /* save best solution */
            if (currentMl > ml) {
                ml = currentMl;
                *offsetPtr = current - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE;
                if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
            }

            if (matchIndex <= dmsMinChain) break;
            matchIndex = dmsChainTable[matchIndex & dmsChainMask];
        }
    }

    return ml;
}

FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
                        ZSTD_matchState_t* ms,
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* offsetPtr)
{
    switch(ms->cParams.minMatch)
    {
    default : /* includes case 3 */
    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
    case 7 :
    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
    }
}

static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS (
                        ZSTD_matchState_t* ms,
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* offsetPtr)
{
    switch(ms->cParams.minMatch)
    {
    default : /* includes case 3 */
    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
    case 7 :
    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
    }
}

FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
                        ZSTD_matchState_t* ms,
                        const BYTE* ip, const BYTE* const iLimit,
                        size_t* offsetPtr)
{
    switch(ms->cParams.minMatch)
    {
    default : /* includes case 3 */
    case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
    case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
    case 7 :
    case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
    }
}

/* *******************************
*  Common parser - lazy strategy
*********************************/
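
/* note on gain comparisons below :
 * candidate matches are compared on a rough "bits saved" scale :
 * match length is weighted (x3 or x4 depending on depth)
 * and ZSTD_highbit32(offset+1) approximates the cost of encoding the offset,
 * so a slightly shorter match at a much smaller offset can still win ;
 * repcode candidates pay no offset cost. */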

FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_lazy_generic(
                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
                        U32 rep[ZSTD_REP_NUM],
                        const void* src, size_t srcSize,
                        const U32 searchMethod, const U32 depth,
                        ZSTD_dictMode_e const dictMode)
{
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    const BYTE* const base = ms->window.base;
    const U32 prefixLowestIndex = ms->window.dictLimit;
    const BYTE* const prefixLowest = base + prefixLowestIndex;

    typedef size_t (*searchMax_f)(
                        ZSTD_matchState_t* ms,
                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
    searchMax_f const searchMax = dictMode == ZSTD_dictMatchState ?
        (searchMethod ? ZSTD_BtFindBestMatch_dictMatchState_selectMLS : ZSTD_HcFindBestMatch_dictMatchState_selectMLS) :
        (searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS);
    U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;

    const ZSTD_matchState_t* const dms = ms->dictMatchState;
    const U32 dictLowestIndex      = dictMode == ZSTD_dictMatchState ?
                                     dms->window.dictLimit : 0;
    const BYTE* const dictBase     = dictMode == ZSTD_dictMatchState ?
                                     dms->window.base : NULL;
    const BYTE* const dictLowest   = dictMode == ZSTD_dictMatchState ?
                                     dictBase + dictLowestIndex : NULL;
    const BYTE* const dictEnd      = dictMode == ZSTD_dictMatchState ?
                                     dms->window.nextSrc : NULL;
    const U32 dictIndexDelta       = dictMode == ZSTD_dictMatchState ?
                                     prefixLowestIndex - (U32)(dictEnd - dictBase) :
                                     0;
    const U32 dictAndPrefixLength = (U32)(ip - prefixLowest + dictEnd - dictLowest);

    /* init */
    ip += (dictAndPrefixLength == 0);
    ms->nextToUpdate3 = ms->nextToUpdate;
    if (dictMode == ZSTD_noDict) {
        U32 const maxRep = (U32)(ip - prefixLowest);
        if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
        if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
    }
    if (dictMode == ZSTD_dictMatchState) {
        /* dictMatchState repCode checks don't currently handle repCode == 0
         * disabling. */
        assert(offset_1 <= dictAndPrefixLength);
        assert(offset_2 <= dictAndPrefixLength);
    }

    /* Match Loop */
    while (ip < ilimit) {
        size_t matchLength=0;
        size_t offset=0;
        const BYTE* start=ip+1;

        /* check repCode */
        if (dictMode == ZSTD_dictMatchState) {
            const U32 repIndex = (U32)(ip - base) + 1 - offset_1;
            const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
                                && repIndex < prefixLowestIndex) ?
                                   dictBase + (repIndex - dictIndexDelta) :
                                   ip - offset_1;
            if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
                && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
                const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
                if (depth==0) goto _storeSequence;
            }
        }
        if ( dictMode == ZSTD_noDict
          && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
            matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
            if (depth==0) goto _storeSequence;
        }

        /* first search (depth 0) */
        {   size_t offsetFound = 999999999;
            size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
            if (ml2 > matchLength)
                matchLength = ml2, start = ip, offset=offsetFound;
        }

        if (matchLength < 4) {
            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
            continue;
        }

        /* let's try to find a better solution */
        if (depth>=1)
        while (ip<ilimit) {
            ip ++;
            if ( (dictMode == ZSTD_noDict)
              && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
                size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
                int const gain2 = (int)(mlRep * 3);
                int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
                if ((mlRep >= 4) && (gain2 > gain1))
                    matchLength = mlRep, offset = 0, start = ip;
            }
            if (dictMode == ZSTD_dictMatchState) {
                const U32 repIndex = (U32)(ip - base) - offset_1;
                const BYTE* repMatch = repIndex < prefixLowestIndex ?
                               dictBase + (repIndex - dictIndexDelta) :
                               ip - offset_1;
                if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
                    && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
                    const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
                    size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
                    int const gain2 = (int)(mlRep * 3);
                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
                    if ((mlRep >= 4) && (gain2 > gain1))
                        matchLength = mlRep, offset = 0, start = ip;
                }
            }
            {   size_t offset2=999999999;
                size_t const ml2 = searchMax(ms, ip, iend, &offset2);
                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
                if ((ml2 >= 4) && (gain2 > gain1)) {
                    matchLength = ml2, offset = offset2, start = ip;
                    continue;   /* search a better one */
            }   }

            /* let's find an even better one */
            if ((depth==2) && (ip<ilimit)) {
                ip ++;
                if ( (dictMode == ZSTD_noDict)
                  && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
                    size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
                    int const gain2 = (int)(mlRep * 4);
                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
                    if ((mlRep >= 4) && (gain2 > gain1))
                        matchLength = mlRep, offset = 0, start = ip;
                }
                if (dictMode == ZSTD_dictMatchState) {
                    const U32 repIndex = (U32)(ip - base) - offset_1;
                    const BYTE* repMatch = repIndex < prefixLowestIndex ?
                                   dictBase + (repIndex - dictIndexDelta) :
                                   ip - offset_1;
                    if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
                        && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
                        const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
                        size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
                        int const gain2 = (int)(mlRep * 4);
                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
                        if ((mlRep >= 4) && (gain2 > gain1))
                            matchLength = mlRep, offset = 0, start = ip;
                    }
                }
                {   size_t offset2=999999999;
                    size_t const ml2 = searchMax(ms, ip, iend, &offset2);
                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
                    if ((ml2 >= 4) && (gain2 > gain1)) {
                        matchLength = ml2, offset = offset2, start = ip;
                        continue;
            }   }   }
            break;  /* nothing found : store previous solution */
        }

        /* NOTE:
         * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
         * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
         * overflows the pointer, which is undefined behavior.
         */
        /* catch up */
        if (offset) {
            if (dictMode == ZSTD_noDict) {
                while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > prefixLowest))
                     && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) )  /* only search for offset within prefix */
                    { start--; matchLength++; }
            }
            if (dictMode == ZSTD_dictMatchState) {
                U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
                const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
                const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
                while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
            }
            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
        }

        /* store sequence */
_storeSequence:
        {   size_t const litLength = start - anchor;
            ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
            anchor = ip = start + matchLength;
        }

        /* check immediate repcode */
        if (dictMode == ZSTD_dictMatchState) {
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex = current2 - offset_2;
                const BYTE* repMatch = dictMode == ZSTD_dictMatchState
                    && repIndex < prefixLowestIndex ?
                        dictBase - dictIndexDelta + repIndex :
                        base + repIndex;
                if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */)
                   && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
                    matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
                    offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
                    ip += matchLength;
                    anchor = ip;
                    continue;
                }
                break;
            }
        }

        if (dictMode == ZSTD_noDict) {
            while ( ((ip <= ilimit) & (offset_2>0))
                 && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
                /* store sequence */
                matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap repcodes */
                ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
                ip += matchLength;
                anchor = ip;
                continue;   /* faster when present ... (?) */
    }   }   }

    /* Save reps for next block */
    rep[0] = offset_1 ? offset_1 : savedOffset;
    rep[1] = offset_2 ? offset_2 : savedOffset;

    /* Return the last literals size */
    return iend - anchor;
}
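
/* note : each wrapper below passes compile-time constants for
 * (searchMethod, depth, dictMode) ; combined with FORCE_INLINE_TEMPLATE,
 * this specializes ZSTD_compressBlock_lazy_generic() per entry point,
 * folding away the constant branches. */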

size_t ZSTD_compressBlock_btlazy2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_noDict);
}

size_t ZSTD_compressBlock_lazy2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_noDict);
}

size_t ZSTD_compressBlock_lazy(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_noDict);
}

size_t ZSTD_compressBlock_greedy(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_noDict);
}

size_t ZSTD_compressBlock_btlazy2_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 1, 2, ZSTD_dictMatchState);
}

size_t ZSTD_compressBlock_lazy2_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 2, ZSTD_dictMatchState);
}

size_t ZSTD_compressBlock_lazy_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 1, ZSTD_dictMatchState);
}

size_t ZSTD_compressBlock_greedy_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, 0, 0, ZSTD_dictMatchState);
}
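
/* note : in the extDict variants below, match candidates may sit in the
 * "external dictionary" segment [dictStart, dictEnd), addressed through
 * dictBase ; reads which may cross the segment boundary go through
 * ZSTD_count_2segments() instead of ZSTD_count(). */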

FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_lazy_extDict_generic(
                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
                        U32 rep[ZSTD_REP_NUM],
                        const void* src, size_t srcSize,
                        const U32 searchMethod, const U32 depth)
{
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    const BYTE* const base = ms->window.base;
    const U32 dictLimit = ms->window.dictLimit;
    const U32 lowestIndex = ms->window.lowLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const dictEnd  = dictBase + dictLimit;
    const BYTE* const dictStart  = dictBase + lowestIndex;

    typedef size_t (*searchMax_f)(
                        ZSTD_matchState_t* ms,
                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
    searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;

    U32 offset_1 = rep[0], offset_2 = rep[1];

    /* init */
    ms->nextToUpdate3 = ms->nextToUpdate;
    ip += (ip == prefixStart);

    /* Match Loop */
    while (ip < ilimit) {
        size_t matchLength=0;
        size_t offset=0;
        const BYTE* start=ip+1;
        U32 current = (U32)(ip-base);

        /* check repCode */
        {   const U32 repIndex = (U32)(current+1 - offset_1);
            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
            const BYTE* const repMatch = repBase + repIndex;
            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))   /* intentional overflow */
            if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
                /* repcode detected we should take it */
                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                if (depth==0) goto _storeSequence;
        }   }

        /* first search (depth 0) */
        {   size_t offsetFound = 999999999;
            size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
            if (ml2 > matchLength)
                matchLength = ml2, start = ip, offset=offsetFound;
        }

        if (matchLength < 4) {
            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
            continue;
        }

        /* let's try to find a better solution */
        if (depth>=1)
        while (ip<ilimit) {
            ip ++;
            current++;
            /* check repCode */
            if (offset) {
                const U32 repIndex = (U32)(current - offset_1);
                const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
                const BYTE* const repMatch = repBase + repIndex;
                if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))   /* intentional overflow */
                if (MEM_read32(ip) == MEM_read32(repMatch)) {
                    /* repcode detected */
                    const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                    size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                    int const gain2 = (int)(repLength * 3);
                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
                    if ((repLength >= 4) && (gain2 > gain1))
                        matchLength = repLength, offset = 0, start = ip;
            }   }

            /* search match, depth 1 */
            {   size_t offset2=999999999;
                size_t const ml2 = searchMax(ms, ip, iend, &offset2);
                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
                if ((ml2 >= 4) && (gain2 > gain1)) {
                    matchLength = ml2, offset = offset2, start = ip;
                    continue;   /* search a better one */
            }   }

            /* let's find an even better one */
            if ((depth==2) && (ip<ilimit)) {
                ip ++;
                current++;
                /* check repCode */
                if (offset) {
                    const U32 repIndex = (U32)(current - offset_1);
                    const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
                    const BYTE* const repMatch = repBase + repIndex;
                    if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))   /* intentional overflow */
                    if (MEM_read32(ip) == MEM_read32(repMatch)) {
                        /* repcode detected */
                        const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                        size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                        int const gain2 = (int)(repLength * 4);
                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
                        if ((repLength >= 4) && (gain2 > gain1))
                            matchLength = repLength, offset = 0, start = ip;
                }   }

                /* search match, depth 2 */
                {   size_t offset2=999999999;
                    size_t const ml2 = searchMax(ms, ip, iend, &offset2);
                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
                    if ((ml2 >= 4) && (gain2 > gain1)) {
                        matchLength = ml2, offset = offset2, start = ip;
                        continue;
            }   }   }
            break;  /* nothing found : store previous solution */
        }

        /* catch up */
        if (offset) {
            U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
            const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
            const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
            while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }   /* catch up */
            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
        }

        /* store sequence */
_storeSequence:
        {   size_t const litLength = start - anchor;
            ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
            anchor = ip = start + matchLength;
        }

        /* check immediate repcode */
        while (ip <= ilimit) {
            const U32 repIndex = (U32)((ip-base) - offset_2);
            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
            const BYTE* const repMatch = repBase + repIndex;
            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex))   /* intentional overflow */
            if (MEM_read32(ip) == MEM_read32(repMatch)) {
                /* repcode detected we should take it */
                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset history */
                ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
                ip += matchLength;
                anchor = ip;
                continue;   /* faster when present ... (?) */
            }
            break;
    }   }

    /* Save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return iend - anchor;
}

size_t ZSTD_compressBlock_greedy_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 0);
}

size_t ZSTD_compressBlock_lazy_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 1);
}

size_t ZSTD_compressBlock_lazy2_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 0, 2);
}

size_t ZSTD_compressBlock_btlazy2_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, 1, 2);
}