/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */


/* ======   Compiler specifics   ====== */
#if defined(_MSC_VER)
#  pragma warning(disable : 4204)   /* disable: C4204: non-constant aggregate initializer */
#endif


/* ======   Constants   ====== */
#define ZSTDMT_OVERLAPLOG_DEFAULT 0


/* ======   Dependencies   ====== */
#include <string.h>      /* memcpy, memset */
#include <limits.h>      /* INT_MAX, UINT_MAX */
#include "pool.h"        /* threadpool */
#include "threading.h"   /* mutex */
#include "zstd_compress_internal.h"  /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
#include "zstd_ldm.h"
#include "zstdmt_compress.h"

/* Guards code to support resizing the SeqPool.
 * We will want to resize the SeqPool to save memory in the future.
 * Until then, comment the code out since it is unused.
 */
#define ZSTD_RESIZE_SEQPOOL 0

/* ======   Debug   ====== */
#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=2) \
    && !defined(_MSC_VER) \
    && !defined(__MINGW32__)

#  include <stdio.h>
#  include <unistd.h>
#  include <sys/times.h>

#  define DEBUG_PRINTHEX(l,p,n) {            \
    unsigned debug_u;                        \
    for (debug_u=0; debug_u<(n); debug_u++)  \
        RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \
    RAWLOG(l, " \n");                        \
}

static unsigned long long GetCurrentClockTimeMicroseconds(void)
{
   static clock_t _ticksPerSecond = 0;
   if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK);

   {   struct tms junk; clock_t newTicks = (clock_t) times(&junk);
       return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond);
}  }

#define MUTEX_WAIT_TIME_DLEVEL 6
#define ZSTD_PTHREAD_MUTEX_LOCK(mutex) {          \
    if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) {   \
        unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \
        ZSTD_pthread_mutex_lock(mutex);           \
        {   unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \
            unsigned long long const elapsedTime = (afterTime-beforeTime); \
            if (elapsedTime > 1000) {  /* or whatever threshold you like; I'm using 1 millisecond here */ \
                DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \
                    elapsedTime, #mutex);         \
        }   }                                     \
    } else {                                      \
        ZSTD_pthread_mutex_lock(mutex);           \
    }                                             \
}

#else

#  define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m)
#  define DEBUG_PRINTHEX(l,p,n) {}

#endif


/* =====   Buffer Pool   ===== */
/* a single Buffer Pool can be invoked from multiple threads in parallel */
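/* The buffer pool is a LIFO stack of recycled buffers guarded by poolMutex :
 * ZSTDMT_getBuffer() pops a buffer when one is available, ZSTDMT_releaseBuffer()
 * pushes it back. It is sized for 2*nbWorkers + 3 buffers, roughly one input
 * and one output buffer per worker, plus a few spares for buffers in flight. */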

typedef struct buffer_s {
    void* start;
    size_t capacity;
} buffer_t;

static const buffer_t g_nullBuffer = { NULL, 0 };

typedef struct ZSTDMT_bufferPool_s {
    ZSTD_pthread_mutex_t poolMutex;
    size_t bufferSize;
    unsigned totalBuffers;
    unsigned nbBuffers;
    ZSTD_customMem cMem;
    buffer_t bTable[1];   /* variable size */
} ZSTDMT_bufferPool;

static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbWorkers, ZSTD_customMem cMem)
{
    unsigned const maxNbBuffers = 2*nbWorkers + 3;
    ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_calloc(
        sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem);
    if (bufPool==NULL) return NULL;
    if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) {
        ZSTD_free(bufPool, cMem);
        return NULL;
    }
    bufPool->bufferSize = 64 KB;
    bufPool->totalBuffers = maxNbBuffers;
    bufPool->nbBuffers = 0;
    bufPool->cMem = cMem;
    return bufPool;
}

static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
{
    unsigned u;
    DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool);
    if (!bufPool) return;   /* compatibility with free on NULL */
    for (u=0; u<bufPool->totalBuffers; u++) {
        DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->bTable[u].start);
        ZSTD_free(bufPool->bTable[u].start, bufPool->cMem);
    }
    ZSTD_pthread_mutex_destroy(&bufPool->poolMutex);
    ZSTD_free(bufPool, bufPool->cMem);
}

/* only works at initialization, not during compression */
static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool)
{
    size_t const poolSize = sizeof(*bufPool)
                          + (bufPool->totalBuffers - 1) * sizeof(buffer_t);
    unsigned u;
    size_t totalBufferSize = 0;
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
    for (u=0; u<bufPool->totalBuffers; u++)
        totalBufferSize += bufPool->bTable[u].capacity;
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);

    return poolSize + totalBufferSize;
}

/* ZSTDMT_setBufferSize() :
 * all future buffers provided by this buffer pool will have _at least_ this size
 * note : it's better for all buffers to have same size,
 * as they become freely interchangeable, reducing malloc/free usages and memory fragmentation */
static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const bSize)
{
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
    DEBUGLOG(4, "ZSTDMT_setBufferSize: bSize = %u", (U32)bSize);
    bufPool->bufferSize = bSize;
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
}


static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, U32 nbWorkers)
{
    unsigned const maxNbBuffers = 2*nbWorkers + 3;
    if (srcBufPool==NULL) return NULL;
    if (srcBufPool->totalBuffers >= maxNbBuffers) /* good enough */
        return srcBufPool;
    /* need a larger buffer pool */
    {   ZSTD_customMem const cMem = srcBufPool->cMem;
        size_t const bSize = srcBufPool->bufferSize;   /* forward parameters */
        ZSTDMT_bufferPool* newBufPool;
        ZSTDMT_freeBufferPool(srcBufPool);
        newBufPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
        if (newBufPool==NULL) return newBufPool;
        ZSTDMT_setBufferSize(newBufPool, bSize);
        return newBufPool;
    }
}

/** ZSTDMT_getBuffer() :
 *  assumption : bufPool must be valid
 * @return : a buffer, with start pointer and size
 *  note: allocation may fail, in this case, start==NULL and size==0 */
static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
{
    size_t const bSize = bufPool->bufferSize;
    DEBUGLOG(5, "ZSTDMT_getBuffer: bSize = %u", (U32)bufPool->bufferSize);
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
    if (bufPool->nbBuffers) {   /* try to use an existing buffer */
        buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)];
        size_t const availBufferSize = buf.capacity;
        bufPool->bTable[bufPool->nbBuffers] = g_nullBuffer;
        if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) {
            /* large enough, but not too much */
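            /* reuse condition : the recycled buffer must hold bSize, yet be no
             * more than 8x larger (availBufferSize>>3 <= bSize), so an oversized
             * buffer is not kept captive by a small request */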
            DEBUGLOG(5, "ZSTDMT_getBuffer: provide buffer %u of size %u",
                        bufPool->nbBuffers, (U32)buf.capacity);
            ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
            return buf;
        }
        /* size conditions not respected : scratch this buffer, create new one */
        DEBUGLOG(5, "ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing");
        ZSTD_free(buf.start, bufPool->cMem);
    }
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
    /* create new buffer */
    DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer");
    {   buffer_t buffer;
        void* const start = ZSTD_malloc(bSize, bufPool->cMem);
        buffer.start = start;   /* note : start can be NULL if malloc fails ! */
        buffer.capacity = (start==NULL) ? 0 : bSize;
        if (start==NULL) {
            DEBUGLOG(5, "ZSTDMT_getBuffer: buffer allocation failure !!");
        } else {
            DEBUGLOG(5, "ZSTDMT_getBuffer: created buffer of size %u", (U32)bSize);
        }
        return buffer;
    }
}

#if ZSTD_RESIZE_SEQPOOL
/** ZSTDMT_resizeBuffer() :
 *  assumption : bufPool must be valid
 * @return : a buffer that is at least the buffer pool buffer size.
 *  If a reallocation happens, the data in the input buffer is copied.
 */
static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer)
{
    size_t const bSize = bufPool->bufferSize;
    if (buffer.capacity < bSize) {
        void* const start = ZSTD_malloc(bSize, bufPool->cMem);
        buffer_t newBuffer;
        newBuffer.start = start;
        newBuffer.capacity = start == NULL ? 0 : bSize;
        if (start != NULL) {
            assert(newBuffer.capacity >= buffer.capacity);
            memcpy(newBuffer.start, buffer.start, buffer.capacity);
            DEBUGLOG(5, "ZSTDMT_resizeBuffer: created buffer of size %u", (U32)bSize);
            return newBuffer;
        }
        DEBUGLOG(5, "ZSTDMT_resizeBuffer: buffer allocation failure !!");
    }
    return buffer;
}
#endif

/* store buffer for later re-use, up to pool capacity */
static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
{
    DEBUGLOG(5, "ZSTDMT_releaseBuffer");
    if (buf.start == NULL) return;   /* compatible with release on NULL */
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
    if (bufPool->nbBuffers < bufPool->totalBuffers) {
        bufPool->bTable[bufPool->nbBuffers++] = buf;   /* stored for later use */
        DEBUGLOG(5, "ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u",
                    (U32)buf.capacity, (U32)(bufPool->nbBuffers-1));
        ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
        return;
    }
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
    /* Reached bufferPool capacity (should not happen) */
    DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing ");
    ZSTD_free(buf.start, bufPool->cMem);
}


/* =====   Seq Pool Wrapper   ====== */
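/* The seq pool reuses the buffer pool machinery : a rawSeqStore_t is stored as
 * a raw buffer of capacity nbSeq * sizeof(rawSeq), and converted back and forth
 * with bufferToSeq() / seqToBuffer() below. */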

static rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0};

typedef ZSTDMT_bufferPool ZSTDMT_seqPool;

static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool)
{
    return ZSTDMT_sizeof_bufferPool(seqPool);
}

static rawSeqStore_t bufferToSeq(buffer_t buffer)
{
    rawSeqStore_t seq = {NULL, 0, 0, 0};
    seq.seq = (rawSeq*)buffer.start;
    seq.capacity = buffer.capacity / sizeof(rawSeq);
    return seq;
}

static buffer_t seqToBuffer(rawSeqStore_t seq)
{
    buffer_t buffer;
    buffer.start = seq.seq;
    buffer.capacity = seq.capacity * sizeof(rawSeq);
    return buffer;
}

static rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool)
{
    if (seqPool->bufferSize == 0) {
        return kNullRawSeqStore;
    }
    return bufferToSeq(ZSTDMT_getBuffer(seqPool));
}

#if ZSTD_RESIZE_SEQPOOL
static rawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
{
    return bufferToSeq(ZSTDMT_resizeBuffer(seqPool, seqToBuffer(seq)));
}
#endif

static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
{
    ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq));
}

static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq)
{
    ZSTDMT_setBufferSize(seqPool, nbSeq * sizeof(rawSeq));
}

static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem)
{
    ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
    if (seqPool == NULL) return NULL;
    ZSTDMT_setNbSeq(seqPool, 0);
    return seqPool;
}

static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool)
{
    ZSTDMT_freeBufferPool(seqPool);
}

static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers)
{
    return ZSTDMT_expandBufferPool(pool, nbWorkers);
}


/* =====   CCtx Pool   ===== */
/* a single CCtx Pool can be invoked from multiple threads in parallel */
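/* note : the pool starts with a single allocated CCtx (availCCtx==1) ;
 * additional CCtx are created on demand by ZSTDMT_getCCtx(),
 * and recycled by ZSTDMT_releaseCCtx(), up to totalCCtx==nbWorkers */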

typedef struct {
    ZSTD_pthread_mutex_t poolMutex;
    int totalCCtx;
    int availCCtx;
    ZSTD_customMem cMem;
    ZSTD_CCtx* cctx[1];   /* variable size */
} ZSTDMT_CCtxPool;

/* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */
static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
{
    int cid;
    for (cid=0; cid<pool->totalCCtx; cid++)
        ZSTD_freeCCtx(pool->cctx[cid]);  /* note : compatible with free on NULL */
    ZSTD_pthread_mutex_destroy(&pool->poolMutex);
    ZSTD_free(pool, pool->cMem);
}

/* ZSTDMT_createCCtxPool() :
 * implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() */
static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers,
                                              ZSTD_customMem cMem)
{
    ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_calloc(
        sizeof(ZSTDMT_CCtxPool) + (nbWorkers-1)*sizeof(ZSTD_CCtx*), cMem);
    assert(nbWorkers > 0);
    if (!cctxPool) return NULL;
    if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) {
        ZSTD_free(cctxPool, cMem);
        return NULL;
    }
    cctxPool->cMem = cMem;
    cctxPool->totalCCtx = nbWorkers;
    cctxPool->availCCtx = 1;   /* at least one cctx for single-thread mode */
    cctxPool->cctx[0] = ZSTD_createCCtx_advanced(cMem);
    if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; }
    DEBUGLOG(3, "cctxPool created, with %u workers", nbWorkers);
    return cctxPool;
}

static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool,
                                              int nbWorkers)
{
    if (srcPool==NULL) return NULL;
    if (nbWorkers <= srcPool->totalCCtx) return srcPool;   /* good enough */
    /* need a larger cctx pool */
    {   ZSTD_customMem const cMem = srcPool->cMem;
        ZSTDMT_freeCCtxPool(srcPool);
        return ZSTDMT_createCCtxPool(nbWorkers, cMem);
    }
}

/* only works during initialization phase, not during compression */
static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
{
    ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
    {   unsigned const nbWorkers = cctxPool->totalCCtx;
        size_t const poolSize = sizeof(*cctxPool)
                              + (nbWorkers-1) * sizeof(ZSTD_CCtx*);
        unsigned u;
        size_t totalCCtxSize = 0;
        for (u=0; u<nbWorkers; u++) {
            totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]);
        }
        ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
        assert(nbWorkers > 0);
        return poolSize + totalCCtxSize;
    }
}

static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool)
{
    DEBUGLOG(5, "ZSTDMT_getCCtx");
    ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
    if (cctxPool->availCCtx) {
        cctxPool->availCCtx--;
        {   ZSTD_CCtx* const cctx = cctxPool->cctx[cctxPool->availCCtx];
            ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
            return cctx;
    }   }
    ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
    DEBUGLOG(5, "create one more CCtx");
    return ZSTD_createCCtx_advanced(cctxPool->cMem);   /* note : can be NULL, when creation fails ! */
}

static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)
{
    if (cctx==NULL) return;   /* compatibility with release on NULL */
    ZSTD_pthread_mutex_lock(&pool->poolMutex);
    if (pool->availCCtx < pool->totalCCtx)
        pool->cctx[pool->availCCtx++] = cctx;
    else {
        /* pool overflow : should not happen, since totalCCtx==nbWorkers */
        DEBUGLOG(4, "CCtx pool overflow : free cctx");
        ZSTD_freeCCtx(cctx);
    }
    ZSTD_pthread_mutex_unlock(&pool->poolMutex);
}


/* ====   Serial State   ==== */
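/* The serial state enforces in-order execution of the serial portion of each job :
 * the XXH64 checksum and LDM sequence generation must both observe the input in
 * frame order. A worker blocks on `cond` until nextJobID reaches its own jobID,
 * performs the serial step, then increments nextJobID and broadcasts. */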

typedef struct {
    void const* start;
    size_t size;
} range_t;

typedef struct {
    /* All variables in the struct are protected by mutex. */
    ZSTD_pthread_mutex_t mutex;
    ZSTD_pthread_cond_t cond;
    ZSTD_CCtx_params params;
    ldmState_t ldmState;
    XXH64_state_t xxhState;
    unsigned nextJobID;
    /* Protects ldmWindow.
     * Must be acquired after the main mutex when acquiring both.
     */
    ZSTD_pthread_mutex_t ldmWindowMutex;
    ZSTD_pthread_cond_t ldmWindowCond;  /* Signaled when ldmWindow is updated */
    ZSTD_window_t ldmWindow;  /* A thread-safe copy of ldmState.window */
} serialState_t;

static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool* seqPool, ZSTD_CCtx_params params, size_t jobSize)
{
    /* Adjust parameters */
    if (params.ldmParams.enableLdm) {
        DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10);
        ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
        assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
        assert(params.ldmParams.hashRateLog < 32);
        serialState->ldmState.hashPower =
                ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength);
    } else {
        memset(&params.ldmParams, 0, sizeof(params.ldmParams));
    }
    serialState->nextJobID = 0;
    if (params.fParams.checksumFlag)
        XXH64_reset(&serialState->xxhState, 0);
    if (params.ldmParams.enableLdm) {
        ZSTD_customMem cMem = params.customMem;
        unsigned const hashLog = params.ldmParams.hashLog;
        size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t);
        unsigned const bucketLog =
            params.ldmParams.hashLog - params.ldmParams.bucketSizeLog;
        size_t const bucketSize = (size_t)1 << bucketLog;
        unsigned const prevBucketLog =
            serialState->params.ldmParams.hashLog -
            serialState->params.ldmParams.bucketSizeLog;
        /* Size the seq pool tables */
        ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize));
        /* Reset the window */
        ZSTD_window_clear(&serialState->ldmState.window);
        serialState->ldmWindow = serialState->ldmState.window;
        /* Resize tables and output space if necessary. */
        if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) {
            ZSTD_free(serialState->ldmState.hashTable, cMem);
            serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_malloc(hashSize, cMem);
        }
        if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) {
            ZSTD_free(serialState->ldmState.bucketOffsets, cMem);
            serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_malloc(bucketSize, cMem);
        }
        if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets)
            return 1;
        /* Zero the tables */
        memset(serialState->ldmState.hashTable, 0, hashSize);
        memset(serialState->ldmState.bucketOffsets, 0, bucketSize);
    }
    serialState->params = params;
    serialState->params.jobSize = (U32)jobSize;
    return 0;
}

static int ZSTDMT_serialState_init(serialState_t* serialState)
{
    int initError = 0;
    memset(serialState, 0, sizeof(*serialState));
    initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL);
    initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL);
    initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL);
    initError |= ZSTD_pthread_cond_init(&serialState->ldmWindowCond, NULL);
    return initError;
}

static void ZSTDMT_serialState_free(serialState_t* serialState)
{
    ZSTD_customMem cMem = serialState->params.customMem;
    ZSTD_pthread_mutex_destroy(&serialState->mutex);
    ZSTD_pthread_cond_destroy(&serialState->cond);
    ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex);
    ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond);
    ZSTD_free(serialState->ldmState.hashTable, cMem);
    ZSTD_free(serialState->ldmState.bucketOffsets, cMem);
}

static void ZSTDMT_serialState_update(serialState_t* serialState,
                                      ZSTD_CCtx* jobCCtx, rawSeqStore_t seqStore,
                                      range_t src, unsigned jobID)
{
    /* Wait for our turn */
    ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
    while (serialState->nextJobID < jobID) {
        DEBUGLOG(5, "wait for serialState->cond");
        ZSTD_pthread_cond_wait(&serialState->cond, &serialState->mutex);
    }
    /* A future job may error and skip our job */
    if (serialState->nextJobID == jobID) {
        /* It is now our turn, do any processing necessary */
        if (serialState->params.ldmParams.enableLdm) {
            size_t error;
            assert(seqStore.seq != NULL && seqStore.pos == 0 &&
                   seqStore.size == 0 && seqStore.capacity > 0);
            assert(src.size <= serialState->params.jobSize);
            ZSTD_window_update(&serialState->ldmState.window, src.start, src.size);
            error = ZSTD_ldm_generateSequences(
                &serialState->ldmState, &seqStore,
                &serialState->params.ldmParams, src.start, src.size);
            /* We provide a large enough buffer to never fail. */
            assert(!ZSTD_isError(error)); (void)error;
            /* Update ldmWindow to match the ldmState.window and signal the main
             * thread if it is waiting for a buffer.
             */
            ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
            serialState->ldmWindow = serialState->ldmState.window;
            ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
            ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
        }
        if (serialState->params.fParams.checksumFlag && src.size > 0)
            XXH64_update(&serialState->xxhState, src.start, src.size);
    }
    /* Now it is the next job's turn */
    serialState->nextJobID++;
    ZSTD_pthread_cond_broadcast(&serialState->cond);
    ZSTD_pthread_mutex_unlock(&serialState->mutex);

    if (seqStore.size > 0) {
        size_t const err = ZSTD_referenceExternalSequences(
            jobCCtx, seqStore.seq, seqStore.size);
        assert(serialState->params.ldmParams.enableLdm);
        assert(!ZSTD_isError(err));
        (void)err;
    }
}

static void ZSTDMT_serialState_ensureFinished(serialState_t* serialState,
                                              unsigned jobID, size_t cSize)
{
    ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
    if (serialState->nextJobID <= jobID) {
        assert(ZSTD_isError(cSize)); (void)cSize;
        DEBUGLOG(5, "Skipping past job %u because of error", jobID);
        serialState->nextJobID = jobID + 1;
        ZSTD_pthread_cond_broadcast(&serialState->cond);

        ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
        ZSTD_window_clear(&serialState->ldmWindow);
        ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
        ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
    }
    ZSTD_pthread_mutex_unlock(&serialState->mutex);
}


/* ------------------------------------------ */
/* =====          Worker thread          ===== */
/* ------------------------------------------ */

static const range_t kNullRange = { NULL, 0 };

typedef struct {
    size_t   consumed;                   /* SHARED - set to 0 by mtctx, then modified by worker AND read by mtctx */
    size_t   cSize;                      /* SHARED - set to 0 by mtctx, then modified by worker AND read by mtctx, then set to 0 by mtctx */
    ZSTD_pthread_mutex_t job_mutex;      /* Thread-safe - used by mtctx and worker */
    ZSTD_pthread_cond_t job_cond;        /* Thread-safe - used by mtctx and worker */
    ZSTDMT_CCtxPool* cctxPool;           /* Thread-safe - used by mtctx and (all) workers */
    ZSTDMT_bufferPool* bufPool;          /* Thread-safe - used by mtctx and (all) workers */
    ZSTDMT_seqPool* seqPool;             /* Thread-safe - used by mtctx and (all) workers */
    serialState_t* serial;               /* Thread-safe - used by mtctx and (all) workers */
    buffer_t dstBuff;                    /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */
    range_t prefix;                      /* set by mtctx, then read by worker & mtctx => no barrier */
    range_t src;                         /* set by mtctx, then read by worker & mtctx => no barrier */
    unsigned jobID;                      /* set by mtctx, then read by worker => no barrier */
    unsigned firstJob;                   /* set by mtctx, then read by worker => no barrier */
    unsigned lastJob;                    /* set by mtctx, then read by worker => no barrier */
    ZSTD_CCtx_params params;             /* set by mtctx, then read by worker => no barrier */
    const ZSTD_CDict* cdict;             /* set by mtctx, then read by worker => no barrier */
    unsigned long long fullFrameSize;    /* set by mtctx, then read by worker => no barrier */
    size_t   dstFlushed;                 /* used only by mtctx */
    unsigned frameChecksumNeeded;        /* used only by mtctx */
} ZSTDMT_jobDescription;

#define JOB_ERROR(e) {                          \
    ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);   \
    job->cSize = e;                             \
    ZSTD_pthread_mutex_unlock(&job->job_mutex); \
    goto _endJob;                               \
}
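
/* note : errors are propagated by storing the error code into job->cSize
 * (error codes and sizes share the same space, see ZSTD_isError()),
 * then jumping to _endJob, which releases resources and signals completion */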

/* ZSTDMT_compressionJob() is a POOL_function type */
static void ZSTDMT_compressionJob(void* jobDescription)
{
    ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
    ZSTD_CCtx_params jobParams = job->params;   /* do not modify job->params ! copy it, modify the copy */
    ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool);
    rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool);
    buffer_t dstBuff = job->dstBuff;
    size_t lastCBlockSize = 0;

    /* resources */
    if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation));
    if (dstBuff.start == NULL) {   /* streaming job : doesn't provide a dstBuffer */
        dstBuff = ZSTDMT_getBuffer(job->bufPool);
        if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation));
        job->dstBuff = dstBuff;   /* this value can be read in ZSTDMT_flush, when it copies the whole job */
    }
    if (jobParams.ldmParams.enableLdm && rawSeqStore.seq == NULL)
        JOB_ERROR(ERROR(memory_allocation));

    /* Don't compute the checksum for chunks, since we compute it externally,
     * but write it in the header.
     */
    if (job->jobID != 0) jobParams.fParams.checksumFlag = 0;
    /* Don't run LDM for the chunks, since we handle it externally */
    jobParams.ldmParams.enableLdm = 0;

    /* init */
    if (job->cdict) {
        size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, jobParams, job->fullFrameSize);
        assert(job->firstJob);   /* only allowed for first job */
        if (ZSTD_isError(initError)) JOB_ERROR(initError);
    } else {   /* srcStart points at reloaded section */
        U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size;
        {   size_t const forceWindowError = ZSTD_CCtxParam_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);
            if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError);
        }
        {   size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
                        job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */
                        ZSTD_dtlm_fast,
                        NULL, /*cdict*/
                        jobParams, pledgedSrcSize);
            if (ZSTD_isError(initError)) JOB_ERROR(initError);
    }   }

    /* Perform serial step as early as possible, but after CCtx initialization */
    ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID);

    if (!job->firstJob) {   /* flush and overwrite frame header when it's not first job */
        size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0);
        if (ZSTD_isError(hSize)) JOB_ERROR(hSize);
        DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize);
        ZSTD_invalidateRepCodes(cctx);
    }

    /* compress */
    {   size_t const chunkSize = 4*ZSTD_BLOCKSIZE_MAX;
        int const nbChunks = (int)((job->src.size + (chunkSize-1)) / chunkSize);
        const BYTE* ip = (const BYTE*) job->src.start;
        BYTE* const ostart = (BYTE*)dstBuff.start;
        BYTE* op = ostart;
        BYTE* oend = op + dstBuff.capacity;
        int chunkNb;
        if (sizeof(size_t) > sizeof(int)) assert(job->src.size < ((size_t)INT_MAX) * chunkSize);   /* check overflow */
        DEBUGLOG(5, "ZSTDMT_compressionJob: compress %u bytes in %i blocks", (U32)job->src.size, nbChunks);
        assert(job->cSize == 0);
        for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) {
            size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, chunkSize);
            if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
            ip += chunkSize;
            op += cSize; assert(op < oend);
            /* stats */
            ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
            job->cSize += cSize;
            job->consumed = chunkSize * chunkNb;
            DEBUGLOG(5, "ZSTDMT_compressionJob: compress new block : cSize==%u bytes (total: %u)",
                        (U32)cSize, (U32)job->cSize);
            ZSTD_pthread_cond_signal(&job->job_cond);   /* warns some more data is ready to be flushed */
            ZSTD_pthread_mutex_unlock(&job->job_mutex);
        }
        /* last block */
        assert(chunkSize > 0);
        assert((chunkSize & (chunkSize - 1)) == 0);  /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */
        if ((nbChunks > 0) | job->lastJob /*must output a "last block" flag*/ ) {
            size_t const lastBlockSize1 = job->src.size & (chunkSize-1);
            size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1;
            size_t const cSize = (job->lastJob) ?
                 ZSTD_compressEnd     (cctx, op, oend-op, ip, lastBlockSize) :
                 ZSTD_compressContinue(cctx, op, oend-op, ip, lastBlockSize);
            if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
            lastCBlockSize = cSize;
    }   }

_endJob:
    ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize);
    if (job->prefix.size > 0)
        DEBUGLOG(5, "Finished with prefix: %zx", (size_t)job->prefix.start);
    DEBUGLOG(5, "Finished with source: %zx", (size_t)job->src.start);
    /* release resources */
    ZSTDMT_releaseSeq(job->seqPool, rawSeqStore);
    ZSTDMT_releaseCCtx(job->cctxPool, cctx);
    /* report */
    ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
    if (ZSTD_isError(job->cSize)) assert(lastCBlockSize == 0);
    job->cSize += lastCBlockSize;
    job->consumed = job->src.size;   /* when job->consumed == job->src.size , compression job is presumed completed */
    ZSTD_pthread_cond_signal(&job->job_cond);
    ZSTD_pthread_mutex_unlock(&job->job_mutex);
}
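
/* note : a job compresses its input in chunks of 4*ZSTD_BLOCKSIZE_MAX (512 KB),
 * publishing job->cSize and job->consumed under job_mutex after each chunk,
 * so compressed data can be flushed while the job is still running */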


/* ------------------------------------------ */
/* =====   Multi-threaded compression   ===== */
/* ------------------------------------------ */

typedef struct {
    range_t prefix;         /* read-only non-owned prefix buffer */
    buffer_t buffer;
    size_t filled;
} inBuff_t;

typedef struct {
  BYTE* buffer;     /* The round input buffer. All jobs get references
                     * to pieces of the buffer. ZSTDMT_tryGetInputRange()
                     * handles handing out job input buffers, and makes
                     * sure it doesn't overlap with any pieces still in use.
                     */
  size_t capacity;  /* The capacity of buffer. */
  size_t pos;       /* The position of the current inBuff in the round
                     * buffer. Updated past the end of the inBuff once
                     * the inBuff is sent to the worker thread.
                     * pos <= capacity.
                     */
} roundBuff_t;

static const roundBuff_t kNullRoundBuff = {NULL, 0, 0};

#define RSYNC_LENGTH 32

typedef struct {
  U64 hash;
  U64 hitMask;
  U64 primePower;
} rsyncState_t;
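
/* In rsyncable mode, a rolling hash over the last RSYNC_LENGTH bytes decides
 * where jobs are cut : a job ends when the hash matches hitMask, which happens
 * on average once every (hitMask+1) bytes, i.e. about once per targetSectionSize.
 * This keeps job boundaries content-defined, so local edits only change
 * nearby compressed blocks (rsync-friendly output). */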

struct ZSTDMT_CCtx_s {
    POOL_ctx* factory;
    ZSTDMT_jobDescription* jobs;
    ZSTDMT_bufferPool* bufPool;
    ZSTDMT_CCtxPool* cctxPool;
    ZSTDMT_seqPool* seqPool;
    ZSTD_CCtx_params params;
    size_t targetSectionSize;
    size_t targetPrefixSize;
    int jobReady;        /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. */
    inBuff_t inBuff;
    roundBuff_t roundBuff;
    serialState_t serial;
    rsyncState_t rsync;
    unsigned singleBlockingThread;
    unsigned jobIDMask;
    unsigned doneJobID;
    unsigned nextJobID;
    unsigned frameEnded;
    unsigned allJobsCompleted;
    unsigned long long frameContentSize;
    unsigned long long consumed;
    unsigned long long produced;
    ZSTD_customMem cMem;
    ZSTD_CDict* cdictLocal;
    const ZSTD_CDict* cdict;
};

static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem)
{
    U32 jobNb;
    if (jobTable == NULL) return;
    for (jobNb=0; jobNb<nbJobs; jobNb++) {
        ZSTD_pthread_mutex_destroy(&jobTable[jobNb].job_mutex);
        ZSTD_pthread_cond_destroy(&jobTable[jobNb].job_cond);
    }
    ZSTD_free(jobTable, cMem);
}

/* ZSTDMT_allocJobsTable()
 * allocate and init a job table.
 * update *nbJobsPtr to next power of 2 value, as size of table */
static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_customMem cMem)
{
    U32 const nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1;
    U32 const nbJobs = 1 << nbJobsLog2;
    U32 jobNb;
    ZSTDMT_jobDescription* const jobTable = (ZSTDMT_jobDescription*)
                ZSTD_calloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem);
    int initError = 0;
    if (jobTable==NULL) return NULL;
    *nbJobsPtr = nbJobs;
    for (jobNb=0; jobNb<nbJobs; jobNb++) {
        initError |= ZSTD_pthread_mutex_init(&jobTable[jobNb].job_mutex, NULL);
        initError |= ZSTD_pthread_cond_init(&jobTable[jobNb].job_cond, NULL);
    }
    if (initError != 0) {
        ZSTDMT_freeJobsTable(jobTable, nbJobs, cMem);
        return NULL;
    }
    return jobTable;
}
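
/* note : the job table size is a power of 2, so that a job is located
 * with a simple mask : mtctx->jobs[jobID & mtctx->jobIDMask] */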

static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) {
    U32 nbJobs = nbWorkers + 2;
    if (nbJobs > mtctx->jobIDMask+1) {  /* need more job capacity */
        ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
        mtctx->jobIDMask = 0;
        mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem);
        if (mtctx->jobs==NULL) return ERROR(memory_allocation);
        assert((nbJobs != 0) && ((nbJobs & (nbJobs - 1)) == 0));  /* ensure nbJobs is a power of 2 */
        mtctx->jobIDMask = nbJobs - 1;
    }
    return 0;
}


/* ZSTDMT_CCtxParam_setNbWorkers():
 * Internal use only */
size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
{
    if (nbWorkers > ZSTDMT_NBWORKERS_MAX) nbWorkers = ZSTDMT_NBWORKERS_MAX;
    params->nbWorkers = nbWorkers;
    params->overlapLog = ZSTDMT_OVERLAPLOG_DEFAULT;
    params->jobSize = 0;
    return nbWorkers;
}

ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem)
{
    ZSTDMT_CCtx* mtctx;
    U32 nbJobs = nbWorkers + 2;
    int initError;
    DEBUGLOG(3, "ZSTDMT_createCCtx_advanced (nbWorkers = %u)", nbWorkers);

    if (nbWorkers < 1) return NULL;
    nbWorkers = MIN(nbWorkers , ZSTDMT_NBWORKERS_MAX);
    if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL))
        /* invalid custom allocator */
        return NULL;

    mtctx = (ZSTDMT_CCtx*) ZSTD_calloc(sizeof(ZSTDMT_CCtx), cMem);
    if (!mtctx) return NULL;
    ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
    mtctx->cMem = cMem;
    mtctx->allJobsCompleted = 1;
    mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem);
    mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem);
    assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0);  /* ensure nbJobs is a power of 2 */
    mtctx->jobIDMask = nbJobs - 1;
    mtctx->bufPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
    mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem);
    mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem);
    initError = ZSTDMT_serialState_init(&mtctx->serial);
    mtctx->roundBuff = kNullRoundBuff;
    if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool | !mtctx->seqPool | initError) {
        ZSTDMT_freeCCtx(mtctx);
        return NULL;
    }
    DEBUGLOG(3, "mt_cctx created, for %u threads", nbWorkers);
    return mtctx;
}

ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers)
{
    return ZSTDMT_createCCtx_advanced(nbWorkers, ZSTD_defaultCMem);
}
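
/* Usage sketch (illustrative only, error handling mostly elided) :
 *
 *     ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(4);    // 4 worker threads
 *     size_t const cSize = ZSTDMT_compressCCtx(mtctx,
 *                              dst, dstCapacity, src, srcSize, 3);   // level 3
 *     if (ZSTD_isError(cSize)) { ... }                    // handle error
 *     ZSTDMT_freeCCtx(mtctx);
 */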


/* ZSTDMT_releaseAllJobResources() :
 * note : ensure all workers are killed first ! */
static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
{
    unsigned jobID;
    DEBUGLOG(3, "ZSTDMT_releaseAllJobResources");
    for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {
        DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start);
        ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
        mtctx->jobs[jobID].dstBuff = g_nullBuffer;
        mtctx->jobs[jobID].cSize = 0;
    }
    memset(mtctx->jobs, 0, (mtctx->jobIDMask+1)*sizeof(ZSTDMT_jobDescription));
    mtctx->inBuff.buffer = g_nullBuffer;
    mtctx->inBuff.filled = 0;
    mtctx->allJobsCompleted = 1;
}

static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx)
{
    DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted");
    while (mtctx->doneJobID < mtctx->nextJobID) {
        unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask;
        ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
        while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
            DEBUGLOG(4, "waiting for jobCompleted signal from job %u", mtctx->doneJobID);   /* we want to block when waiting for data to flush */
            ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
        }
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
        mtctx->doneJobID++;
    }
}

size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
{
    if (mtctx==NULL) return 0;   /* compatible with free on NULL */
    POOL_free(mtctx->factory);   /* stop and free worker threads */
    ZSTDMT_releaseAllJobResources(mtctx);  /* release job resources into pools first */
    ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
    ZSTDMT_freeBufferPool(mtctx->bufPool);
    ZSTDMT_freeCCtxPool(mtctx->cctxPool);
    ZSTDMT_freeSeqPool(mtctx->seqPool);
    ZSTDMT_serialState_free(&mtctx->serial);
    ZSTD_freeCDict(mtctx->cdictLocal);
    if (mtctx->roundBuff.buffer)
        ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem);
    ZSTD_free(mtctx, mtctx->cMem);
    return 0;
}

size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx)
{
    if (mtctx == NULL) return 0;   /* supports sizeof NULL */
    return sizeof(*mtctx)
            + POOL_sizeof(mtctx->factory)
            + ZSTDMT_sizeof_bufferPool(mtctx->bufPool)
            + (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription)
            + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool)
            + ZSTDMT_sizeof_seqPool(mtctx->seqPool)
            + ZSTD_sizeof_CDict(mtctx->cdictLocal)
            + mtctx->roundBuff.capacity;
}

size_t
ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params,
                                   ZSTDMT_parameter parameter,
                                   int value)
{
    DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter");
    switch(parameter)
    {
    case ZSTDMT_p_jobSize :
        DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter : set jobSize to %i", value);
        if ( value != 0  /* default */
          && value < ZSTDMT_JOBSIZE_MIN)
            value = ZSTDMT_JOBSIZE_MIN;
        if (value > ZSTDMT_JOBSIZE_MAX) value = ZSTDMT_JOBSIZE_MAX;
        params->jobSize = value;
        return value;

    case ZSTDMT_p_overlapLog :
        DEBUGLOG(4, "ZSTDMT_p_overlapLog : %i", value);
        if (value < ZSTD_OVERLAPLOG_MIN) value = ZSTD_OVERLAPLOG_MIN;
        if (value > ZSTD_OVERLAPLOG_MAX) value = ZSTD_OVERLAPLOG_MAX;
        params->overlapLog = value;
        return value;

    case ZSTDMT_p_rsyncable :
        value = (value != 0);
        params->rsyncable = value;
        return value;

    default :
        return ERROR(parameter_unsupported);
    }
}

size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value)
{
    DEBUGLOG(4, "ZSTDMT_setMTCtxParameter");
    return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value);
}

size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value)
{
    switch (parameter) {
    case ZSTDMT_p_jobSize:
        assert(mtctx->params.jobSize <= INT_MAX);
        *value = (int)(mtctx->params.jobSize);
        break;
    case ZSTDMT_p_overlapLog:
        *value = mtctx->params.overlapLog;
        break;
    case ZSTDMT_p_rsyncable:
        *value = mtctx->params.rsyncable;
        break;
    default:
        return ERROR(parameter_unsupported);
    }
    return 0;
}

/* Sets parameters relevant to the compression job,
 * initializing others to default values. */
static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
{
    ZSTD_CCtx_params jobParams;
    memset(&jobParams, 0, sizeof(jobParams));

    jobParams.cParams = params.cParams;
    jobParams.fParams = params.fParams;
    jobParams.compressionLevel = params.compressionLevel;

    return jobParams;
}


/* ZSTDMT_resize() :
 * @return : error code if fails, 0 on success */
static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
{
    if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation);
    CHECK_F( ZSTDMT_expandJobsTable(mtctx, nbWorkers) );
    mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers);
    if (mtctx->bufPool == NULL) return ERROR(memory_allocation);
    mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers);
    if (mtctx->cctxPool == NULL) return ERROR(memory_allocation);
    mtctx->seqPool = ZSTDMT_expandSeqPool(mtctx->seqPool, nbWorkers);
    if (mtctx->seqPool == NULL) return ERROR(memory_allocation);
    ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
    return 0;
}


/*! ZSTDMT_updateCParams_whileCompressing() :
 *  Updates a selected set of compression parameters, remaining compatible with currently active frame.
 *  New parameters will be applied to next compression job. */
void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams)
{
    U32 const saved_wlog = mtctx->params.cParams.windowLog;   /* Do not modify windowLog while compressing */
    int const compressionLevel = cctxParams->compressionLevel;
    DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)",
                compressionLevel);
    mtctx->params.compressionLevel = compressionLevel;
    {   ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, 0, 0);
        cParams.windowLog = saved_wlog;
        mtctx->params.cParams = cParams;
    }
}

/* ZSTDMT_getFrameProgression():
 * tells how much data has been consumed (input) and produced (output) for current frame.
 * able to count progression inside worker threads.
 * Note : mutex will be acquired during statistics collection inside workers. */
ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx)
{
    ZSTD_frameProgression fps;
    DEBUGLOG(5, "ZSTDMT_getFrameProgression");
    fps.ingested = mtctx->consumed + mtctx->inBuff.filled;
    fps.consumed = mtctx->consumed;
    fps.produced = fps.flushed = mtctx->produced;
    fps.currentJobID = mtctx->nextJobID;
    fps.nbActiveWorkers = 0;
    {   unsigned jobNb;
        unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1);
        DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)",
                    mtctx->doneJobID, lastJobNb, mtctx->jobReady);
        for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) {
            unsigned const wJobID = jobNb & mtctx->jobIDMask;
            ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID];
            ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
            {   size_t const cResult = jobPtr->cSize;
                size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
                size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
                assert(flushed <= produced);
                fps.ingested += jobPtr->src.size;
                fps.consumed += jobPtr->consumed;
                fps.produced += produced;
                fps.flushed  += flushed;
                fps.nbActiveWorkers += (jobPtr->consumed < jobPtr->src.size);
            }
            ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
        }
    }
    return fps;
}


size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx)
{
    size_t toFlush;
    unsigned const jobID = mtctx->doneJobID;
    assert(jobID <= mtctx->nextJobID);
    if (jobID == mtctx->nextJobID) return 0;   /* no active job => nothing to flush */

    /* look into oldest non-fully-flushed job */
    {   unsigned const wJobID = jobID & mtctx->jobIDMask;
        ZSTDMT_jobDescription* const jobPtr = &mtctx->jobs[wJobID];
        ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
        {   size_t const cResult = jobPtr->cSize;
            size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
            size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
            assert(flushed <= produced);
            toFlush = produced - flushed;
            if (toFlush==0 && (jobPtr->consumed >= jobPtr->src.size)) {
                /* doneJobID is not-fully-flushed, but toFlush==0 : doneJobID should be compressing some more data */
                assert(jobPtr->consumed < jobPtr->src.size);
        }   }
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
    }

    return toFlush;
}


/* ------------------------------------------ */
/* =====   Multi-threaded compression   ===== */
/* ------------------------------------------ */

static unsigned ZSTDMT_computeTargetJobLog(ZSTD_CCtx_params const params)
{
    if (params.ldmParams.enableLdm)
        /* In Long Range Mode, the windowLog is typically oversized.
         * In which case, it's preferable to determine the jobSize
         * based on chainLog instead. */
        return MAX(21, params.cParams.chainLog + 4);
    return MAX(20, params.cParams.windowLog + 2);
}
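
/* example : with windowLog==23 (8 MB window) and LDM disabled,
 * the target job log is MAX(20, 23+2) == 25, i.e. 32 MB per job */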

static int ZSTDMT_overlapLog_default(ZSTD_strategy strat)
{
    switch(strat)
    {
        case ZSTD_btultra2:
            return 9;
        case ZSTD_btultra:
        case ZSTD_btopt:
            return 8;
        case ZSTD_btlazy2:
        case ZSTD_lazy2:
            return 7;
        case ZSTD_lazy:
        case ZSTD_greedy:
        case ZSTD_dfast:
        case ZSTD_fast:
        default:;
    }
    return 6;
}

static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat)
{
    assert(0 <= ovlog && ovlog <= 9);
    if (ovlog == 0) return ZSTDMT_overlapLog_default(strat);
    return ovlog;
}

static size_t ZSTDMT_computeOverlapSize(ZSTD_CCtx_params const params)
{
    int const overlapRLog = 9 - ZSTDMT_overlapLog(params.overlapLog, params.cParams.strategy);
    int ovLog = (overlapRLog >= 8) ? 0 : (params.cParams.windowLog - overlapRLog);
    assert(0 <= overlapRLog && overlapRLog <= 8);
    if (params.ldmParams.enableLdm) {
        /* In Long Range Mode, the windowLog is typically oversized.
         * In which case, it's preferable to determine the jobSize
         * based on chainLog instead.
         * Then, ovLog becomes a fraction of the jobSize, rather than windowSize */
        ovLog = MIN(params.cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)
                - overlapRLog;
    }
    assert(0 <= ovLog && ovLog <= 30);
    DEBUGLOG(4, "overlapLog : %i", params.overlapLog);
    DEBUGLOG(4, "overlap size : %i", 1 << ovLog);
    return (ovLog==0) ? 0 : (size_t)1 << ovLog;
}
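
/* example : overlapLog==6 gives overlapRLog==3, so each job overlaps the
 * previous one by windowSize/8 ; overlapLog==9 reloads a full window */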

static unsigned
ZSTDMT_computeNbJobs(ZSTD_CCtx_params params, size_t srcSize, unsigned nbWorkers)
{
    assert(nbWorkers>0);
    {   size_t const jobSizeTarget = (size_t)1 << ZSTDMT_computeTargetJobLog(params);
        size_t const jobMaxSize = jobSizeTarget << 2;
        size_t const passSizeMax = jobMaxSize * nbWorkers;
        unsigned const multiplier = (unsigned)(srcSize / passSizeMax) + 1;
        unsigned const nbJobsLarge = multiplier * nbWorkers;
        unsigned const nbJobsMax = (unsigned)(srcSize / jobSizeTarget) + 1;
        unsigned const nbJobsSmall = MIN(nbJobsMax, nbWorkers);
        return (multiplier>1) ? nbJobsLarge : nbJobsSmall;
}   }
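
/* example : srcSize==100 MB, jobSizeTarget==32 MB, nbWorkers==4 :
 * passSizeMax==512 MB > srcSize, so multiplier==1 and the result is
 * nbJobsSmall == MIN(100/32 + 1, 4) == 4 jobs */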

/* ZSTDMT_compress_advanced_internal() :
 * This is a blocking function : it will only give back control to caller after finishing its compression job.
 */
static size_t ZSTDMT_compress_advanced_internal(
                ZSTDMT_CCtx* mtctx,
                void* dst, size_t dstCapacity,
          const void* src, size_t srcSize,
          const ZSTD_CDict* cdict,
                ZSTD_CCtx_params params)
{
    ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(params);
    size_t const overlapSize = ZSTDMT_computeOverlapSize(params);
    unsigned const nbJobs = ZSTDMT_computeNbJobs(params, srcSize, params.nbWorkers);
    size_t const proposedJobSize = (srcSize + (nbJobs-1)) / nbJobs;
    size_t const avgJobSize = (((proposedJobSize-1) & 0x1FFFF) < 0x7FFF) ? proposedJobSize + 0xFFFF : proposedJobSize;   /* avoid too small last block */
    const char* const srcStart = (const char*)src;
    size_t remainingSrcSize = srcSize;
    unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ? nbJobs : (unsigned)(dstCapacity / ZSTD_compressBound(avgJobSize));  /* presumes avgJobSize >= 256 KB, which should be the case */
    size_t frameStartPos = 0, dstBufferPos = 0;
    assert(jobParams.nbWorkers == 0);
    assert(mtctx->cctxPool->totalCCtx == params.nbWorkers);

    params.jobSize = (U32)avgJobSize;
    DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: nbJobs=%2u (rawSize=%u bytes; fixedSize=%u) ",
                nbJobs, (U32)proposedJobSize, (U32)avgJobSize);

    if ((nbJobs==1) | (params.nbWorkers<=1)) {   /* fallback to single-thread mode : this is a blocking invocation anyway */
        ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0];
        DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: fallback to single-thread mode");
        if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, jobParams.fParams);
        return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, jobParams);
    }

    assert(avgJobSize >= 256 KB);  /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), required to compress directly into Dst (no additional buffer) */
    ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgJobSize) );
    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, avgJobSize))
        return ERROR(memory_allocation);

    CHECK_F( ZSTDMT_expandJobsTable(mtctx, nbJobs) );  /* only expands if necessary */

    {   unsigned u;
        for (u=0; u<nbJobs; u++) {
            size_t const jobSize = MIN(remainingSrcSize, avgJobSize);
            size_t const dstBufferCapacity = ZSTD_compressBound(jobSize);
            buffer_t const dstAsBuffer = { (char*)dst + dstBufferPos, dstBufferCapacity };
            buffer_t const dstBuffer = u < compressWithinDst ? dstAsBuffer : g_nullBuffer;
            size_t dictSize = u ? overlapSize : 0;

            mtctx->jobs[u].prefix.start = srcStart + frameStartPos - dictSize;
            mtctx->jobs[u].prefix.size = dictSize;
            mtctx->jobs[u].src.start = srcStart + frameStartPos;
            mtctx->jobs[u].src.size = jobSize; assert(jobSize > 0);  /* avoid job.src.size == 0 */
            mtctx->jobs[u].consumed = 0;
            mtctx->jobs[u].cSize = 0;
            mtctx->jobs[u].cdict = (u==0) ? cdict : NULL;
            mtctx->jobs[u].fullFrameSize = srcSize;
            mtctx->jobs[u].params = jobParams;
            /* do not calculate checksum within sections, but write it in header for first section */
            mtctx->jobs[u].dstBuff = dstBuffer;
            mtctx->jobs[u].cctxPool = mtctx->cctxPool;
            mtctx->jobs[u].bufPool = mtctx->bufPool;
            mtctx->jobs[u].seqPool = mtctx->seqPool;
            mtctx->jobs[u].serial = &mtctx->serial;
            mtctx->jobs[u].jobID = u;
            mtctx->jobs[u].firstJob = (u==0);
            mtctx->jobs[u].lastJob = (u==nbJobs-1);

            DEBUGLOG(5, "ZSTDMT_compress_advanced_internal: posting job %u  (%u bytes)", u, (U32)jobSize);
            DEBUG_PRINTHEX(6, mtctx->jobs[u].prefix.start, 12);
            POOL_add(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[u]);

            frameStartPos += jobSize;
            dstBufferPos += dstBufferCapacity;
            remainingSrcSize -= jobSize;
    }   }

    /* collect result */
    {   size_t error = 0, dstPos = 0;
        unsigned jobID;
        for (jobID=0; jobID<nbJobs; jobID++) {
            DEBUGLOG(5, "waiting for job %u ", jobID);
            ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
            while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
                DEBUGLOG(5, "waiting for jobCompleted signal from job %u", jobID);
                ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
            }
            ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
            DEBUGLOG(5, "ready to write job %u ", jobID);

            {   size_t const cSize = mtctx->jobs[jobID].cSize;
                if (ZSTD_isError(cSize)) error = cSize;
                if ((!error) && (dstPos + cSize > dstCapacity)) error = ERROR(dstSize_tooSmall);
                if (jobID) {   /* note : job 0 is written directly at dst, which is correct position */
                    if (!error)
                        memmove((char*)dst + dstPos, mtctx->jobs[jobID].dstBuff.start, cSize);  /* may overlap when job compressed within dst */
                    if (jobID >= compressWithinDst) {  /* job compressed into its own buffer, which must be released */
                        DEBUGLOG(5, "releasing buffer %u>=%u", jobID, compressWithinDst);
                        ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
                }   }
                mtctx->jobs[jobID].dstBuff = g_nullBuffer;
                mtctx->jobs[jobID].cSize = 0;
                dstPos += cSize;
            }
        }  /* for (jobID=0; jobID<nbJobs; jobID++) */

        DEBUGLOG(4, "checksumFlag : %u ", params.fParams.checksumFlag);
        if (params.fParams.checksumFlag) {
            U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);
            if (dstPos + 4 > dstCapacity) {
                error = ERROR(dstSize_tooSmall);
            } else {
                DEBUGLOG(4, "writing checksum : %08X \n", checksum);
                MEM_writeLE32((char*)dst + dstPos, checksum);
                dstPos += 4;
        }   }

        if (!error) DEBUGLOG(4, "compressed size : %u  ", (U32)dstPos);
        return error ? error : dstPos;
    }
}

size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
                                void* dst, size_t dstCapacity,
                          const void* src, size_t srcSize,
                          const ZSTD_CDict* cdict,
                                ZSTD_parameters params,
                                int overlapLog)
{
    ZSTD_CCtx_params cctxParams = mtctx->params;
    cctxParams.cParams = params.cParams;
    cctxParams.fParams = params.fParams;
    assert(ZSTD_OVERLAPLOG_MIN <= overlapLog && overlapLog <= ZSTD_OVERLAPLOG_MAX);
    cctxParams.overlapLog = overlapLog;
    return ZSTDMT_compress_advanced_internal(mtctx,
                                             dst, dstCapacity,
                                             src, srcSize,
                                             cdict, cctxParams);
}


size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
                           void* dst, size_t dstCapacity,
                     const void* src, size_t srcSize,
                           int compressionLevel)
{
    ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0);
    int const overlapLog = ZSTDMT_overlapLog_default(params.cParams.strategy);
    params.fParams.contentSizeFlag = 1;
    return ZSTDMT_compress_advanced(mtctx, dst, dstCapacity, src, srcSize, NULL, params, overlapLog);
}


/* ====================================== */
/* =======      Streaming API     ======= */
/* ====================================== */

size_t ZSTDMT_initCStream_internal(
        ZSTDMT_CCtx* mtctx,
        const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
        const ZSTD_CDict* cdict, ZSTD_CCtx_params params,
        unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u)",
                (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx);

    /* params supposed partially validated at this point */
    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */

    if (params.nbWorkers != mtctx->params.nbWorkers)
        CHECK_F( ZSTDMT_resize(mtctx, params.nbWorkers) );

    if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
    if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = ZSTDMT_JOBSIZE_MAX;

    mtctx->singleBlockingThread = (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN);  /* do not trigger multi-threading when srcSize is too small */
    if (mtctx->singleBlockingThread) {
        ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(params);
        DEBUGLOG(5, "ZSTDMT_initCStream_internal: switch to single blocking thread mode");
        assert(singleThreadParams.nbWorkers == 0);
        return ZSTD_initCStream_internal(mtctx->cctxPool->cctx[0],
                                         dict, dictSize, cdict,
                                         singleThreadParams, pledgedSrcSize);
    }

    DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers);

    if (mtctx->allJobsCompleted == 0) {   /* previous compression not correctly finished */
        ZSTDMT_waitForAllJobsCompleted(mtctx);
        ZSTDMT_releaseAllJobResources(mtctx);
        mtctx->allJobsCompleted = 1;
    }

    mtctx->params = params;
    mtctx->frameContentSize = pledgedSrcSize;
    if (dict) {
        ZSTD_freeCDict(mtctx->cdictLocal);
        mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
                                                    ZSTD_dlm_byCopy, dictContentType, /* note : a loadPrefix becomes an internal CDict */
                                                    params.cParams, mtctx->cMem);
        mtctx->cdict = mtctx->cdictLocal;
        if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation);
    } else {
        ZSTD_freeCDict(mtctx->cdictLocal);
        mtctx->cdictLocal = NULL;
        mtctx->cdict = cdict;
    }

    mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(params);
    DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10));
    mtctx->targetSectionSize = params.jobSize;
    if (mtctx->targetSectionSize == 0) {
        mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(params);
    }
    if (params.rsyncable) {
        /* Aim for the targetSectionSize as the average job size. */
        U32 const jobSizeMB = (U32)(mtctx->targetSectionSize >> 20);
        U32 const rsyncBits = ZSTD_highbit32(jobSizeMB) + 20;
        assert(jobSizeMB >= 1);
        DEBUGLOG(4, "rsyncLog = %u", rsyncBits);
        mtctx->rsync.hash = 0;
        mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1;
        mtctx->rsync.primePower = ZSTD_rollingHash_primePower(RSYNC_LENGTH);
    }
    if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize;  /* job size must be >= overlap size */
    DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), (U32)params.jobSize);
    DEBUGLOG(4, "inBuff Size : %u KB", (U32)(mtctx->targetSectionSize>>10));
    ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize));
    {
        /* If ldm is enabled we need windowSize space. */
        size_t const windowSize = mtctx->params.ldmParams.enableLdm ? (1U << mtctx->params.cParams.windowLog) : 0;
        /* Two buffers of slack, plus extra space for the overlap
         * This is the minimum slack that LDM works with. One extra because
         * flush might waste up to targetSectionSize-1 bytes. Another extra
         * for the overlap (if > 0), then one to fill which doesn't overlap
         * with the LDM window.
         */
        size_t const nbSlackBuffers = 2 + (mtctx->targetPrefixSize > 0);
        size_t const slackSize = mtctx->targetSectionSize * nbSlackBuffers;
        /* Compute the total size, and always have enough slack */
        size_t const nbWorkers = MAX(mtctx->params.nbWorkers, 1);
        size_t const sectionsSize = mtctx->targetSectionSize * nbWorkers;
        size_t const capacity = MAX(windowSize, sectionsSize) + slackSize;
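        /* example : 4 workers, 32 MB sections, overlap > 0 : sectionsSize==128 MB,
         * nbSlackBuffers==3, slackSize==96 MB => capacity==224 MB (without LDM) */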
        if (mtctx->roundBuff.capacity < capacity) {
            if (mtctx->roundBuff.buffer)
                ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem);
            mtctx->roundBuff.buffer = (BYTE*)ZSTD_malloc(capacity, mtctx->cMem);
            if (mtctx->roundBuff.buffer == NULL) {
                mtctx->roundBuff.capacity = 0;
                return ERROR(memory_allocation);
            }
            mtctx->roundBuff.capacity = capacity;
        }
    }
    DEBUGLOG(4, "roundBuff capacity : %u KB", (U32)(mtctx->roundBuff.capacity>>10));
    mtctx->roundBuff.pos = 0;
    mtctx->inBuff.buffer = g_nullBuffer;
    mtctx->inBuff.filled = 0;
    mtctx->inBuff.prefix = kNullRange;
    mtctx->doneJobID = 0;
    mtctx->nextJobID = 0;
    mtctx->frameEnded = 0;
    mtctx->allJobsCompleted = 0;
    mtctx->consumed = 0;
    mtctx->produced = 0;
    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize))
        return ERROR(memory_allocation);
    return 0;
}
1499 size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
1500 const void* dict, size_t dictSize,
1501 ZSTD_parameters params,
1502 unsigned long long pledgedSrcSize)
1504 ZSTD_CCtx_params cctxParams = mtctx->params; /* retrieve sticky params */
1505 DEBUGLOG(4, "ZSTDMT_initCStream_advanced (pledgedSrcSize=%u)", (U32)pledgedSrcSize);
1506 cctxParams.cParams = params.cParams;
1507 cctxParams.fParams = params.fParams;
1508 return ZSTDMT_initCStream_internal(mtctx, dict, dictSize, ZSTD_dct_auto, NULL,
1509 cctxParams, pledgedSrcSize);
1512 size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
1513 const ZSTD_CDict* cdict,
1514 ZSTD_frameParameters fParams,
1515 unsigned long long pledgedSrcSize)
1517 ZSTD_CCtx_params cctxParams = mtctx->params;
1518 if (cdict==NULL) return ERROR(dictionary_wrong); /* method incompatible with NULL cdict */
1519 cctxParams.cParams = ZSTD_getCParamsFromCDict(cdict);
1520 cctxParams.fParams = fParams;
1521 return ZSTDMT_initCStream_internal(mtctx, NULL, 0 /*dictSize*/, ZSTD_dct_auto, cdict,
1522 cctxParams, pledgedSrcSize);
1526 /* ZSTDMT_resetCStream() :
1527 * pledgedSrcSize can be zero == unknown (for the time being)
1528 * prefer using ZSTD_CONTENTSIZE_UNKNOWN,
1529 * as `0` might mean "empty" in the future */
1530 size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize)
1532 if (!pledgedSrcSize) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
1533 return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, 0, mtctx->params,
1537 size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel) {
1538 ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0);
1539 ZSTD_CCtx_params cctxParams = mtctx->params; /* retrieve sticky params */
1540 DEBUGLOG(4, "ZSTDMT_initCStream (cLevel=%i)", compressionLevel);
1541 cctxParams.cParams = params.cParams;
1542 cctxParams.fParams = params.fParams;
1543 return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN);
/* ZSTDMT_writeLastEmptyBlock()
 * Write a single empty block with an end-of-frame to finish a frame.
 * Job must be created from streaming variant.
 * This function is always successful if expected conditions are fulfilled.
 */
static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job)
{
    assert(job->lastJob == 1);
    assert(job->src.size == 0);    /* last job is empty -> will be simplified into a last empty block */
    assert(job->firstJob == 0);    /* cannot be first job, as it also needs to create frame header */
    assert(job->dstBuff.start == NULL);   /* invoked from streaming variant only (otherwise, dstBuff might be user's output) */
    job->dstBuff = ZSTDMT_getBuffer(job->bufPool);
    if (job->dstBuff.start == NULL) {
        job->cSize = ERROR(memory_allocation);
        return;
    }
    assert(job->dstBuff.capacity >= ZSTD_blockHeaderSize);   /* no buffer should ever be that small */
    job->src = kNullRange;
    job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity);
    assert(!ZSTD_isError(job->cSize));
    assert(job->consumed == 0);
}

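/* Illustration (a sketch based on the zstd frame format, RFC 8878, not a
 * quote of ZSTD_writeLastEmptyBlock itself) : a last empty block is just a
 * 3-byte little-endian Block_Header with Last_Block=1, Block_Type=Raw(0) and
 * Block_Size=0, i.e. the value 1 :
 *
 *     U32 const cBlockHeader24 = 1 + (0 << 1) + (0 << 3);   // == 1
 *     MEM_writeLE24(dst, cBlockHeader24);                   // bytes 01 00 00
 *
 * which is why ZSTD_blockHeaderSize (3 bytes) of capacity is sufficient. */
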
static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZSTD_EndDirective endOp)
{
    unsigned const jobID = mtctx->nextJobID & mtctx->jobIDMask;
    int const endFrame = (endOp == ZSTD_e_end);

    if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) {
        DEBUGLOG(5, "ZSTDMT_createCompressionJob: will not create new job : table is full");
        assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask));
        return 0;
    }
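    /* Illustrative arithmetic (assumed example values) : with jobIDMask = 15
     * (a 16-slot job table), doneJobID = 3 and nextJobID = 19, we have
     * 19 > 3 + 15, and 19 & 15 == 3 & 15 == 3 : the slot the next job would
     * occupy still belongs to the oldest unflushed job, so no job is created
     * until that one is flushed. */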
    if (!mtctx->jobReady) {
        BYTE const* src = (BYTE const*)mtctx->inBuff.buffer.start;
        DEBUGLOG(5, "ZSTDMT_createCompressionJob: preparing job %u to compress %u bytes with %u preload ",
                    mtctx->nextJobID, (U32)srcSize, (U32)mtctx->inBuff.prefix.size);
        mtctx->jobs[jobID].src.start = src;
        mtctx->jobs[jobID].src.size = srcSize;
        assert(mtctx->inBuff.filled >= srcSize);
        mtctx->jobs[jobID].prefix = mtctx->inBuff.prefix;
        mtctx->jobs[jobID].consumed = 0;
        mtctx->jobs[jobID].cSize = 0;
        mtctx->jobs[jobID].params = mtctx->params;
        mtctx->jobs[jobID].cdict = mtctx->nextJobID==0 ? mtctx->cdict : NULL;
        mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize;
        mtctx->jobs[jobID].dstBuff = g_nullBuffer;
        mtctx->jobs[jobID].cctxPool = mtctx->cctxPool;
        mtctx->jobs[jobID].bufPool = mtctx->bufPool;
        mtctx->jobs[jobID].seqPool = mtctx->seqPool;
        mtctx->jobs[jobID].serial = &mtctx->serial;
        mtctx->jobs[jobID].jobID = mtctx->nextJobID;
        mtctx->jobs[jobID].firstJob = (mtctx->nextJobID==0);
        mtctx->jobs[jobID].lastJob = endFrame;
        mtctx->jobs[jobID].frameChecksumNeeded = mtctx->params.fParams.checksumFlag && endFrame && (mtctx->nextJobID>0);
        mtctx->jobs[jobID].dstFlushed = 0;

        /* Update the round buffer pos and clear the input buffer to be reset */
        mtctx->roundBuff.pos += srcSize;
        mtctx->inBuff.buffer = g_nullBuffer;
        mtctx->inBuff.filled = 0;
        /* Set the prefix */
        if (!endFrame) {
            size_t const newPrefixSize = MIN(srcSize, mtctx->targetPrefixSize);
            mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize;
            mtctx->inBuff.prefix.size = newPrefixSize;
        } else {   /* endFrame==1 => no need for another input buffer */
            mtctx->inBuff.prefix = kNullRange;
            mtctx->frameEnded = endFrame;
            if (mtctx->nextJobID == 0) {
                /* single job exception : checksum is already calculated directly within worker thread */
                mtctx->params.fParams.checksumFlag = 0;
        }   }

        if ( (srcSize == 0)
          && (mtctx->nextJobID>0)/*single job must also write frame header*/ ) {
            DEBUGLOG(5, "ZSTDMT_createCompressionJob: creating a last empty block to end frame");
            assert(endOp == ZSTD_e_end);   /* only possible case : need to end the frame with an empty last block */
            ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID);
            mtctx->nextJobID++;
            return 0;
        }
    }

1632 DEBUGLOG(5, "ZSTDMT_createCompressionJob: posting job %u : %u bytes (end:%u, jobNb == %u (mod:%u))",
1634 (U32)mtctx->jobs[jobID].src.size,
1635 mtctx->jobs[jobID].lastJob,
1638 if (POOL_tryAdd(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[jobID])) {
1640 mtctx->jobReady = 0;
1642 DEBUGLOG(5, "ZSTDMT_createCompressionJob: no worker available for job %u", mtctx->nextJobID);
1643 mtctx->jobReady = 1;
/*! ZSTDMT_flushProduced() :
 *  flush whatever data has been produced but not yet flushed in current job.
 *  move to next job if current one is fully flushed.
 * `output` : `pos` will be updated with amount of data flushed.
 * `blockToFlush` : if >0, the function will block and wait if there is no data available to flush.
 * @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */
static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, unsigned blockToFlush, ZSTD_EndDirective end)
{
    unsigned const wJobID = mtctx->doneJobID & mtctx->jobIDMask;
    DEBUGLOG(5, "ZSTDMT_flushProduced (blocking:%u , job %u <= %u)",
                blockToFlush, mtctx->doneJobID, mtctx->nextJobID);
    assert(output->size >= output->pos);

    ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
    if (  blockToFlush
      && (mtctx->doneJobID < mtctx->nextJobID) ) {
        assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize);
        while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) {   /* nothing to flush */
            if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].src.size) {
                DEBUGLOG(5, "job %u is completely consumed (%u == %u) => don't wait for cond, there will be none",
                            mtctx->doneJobID, (U32)mtctx->jobs[wJobID].consumed, (U32)mtctx->jobs[wJobID].src.size);
                break;
            }
            DEBUGLOG(5, "waiting for something to flush from job %u (currently flushed: %u bytes)",
                        mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
            ZSTD_pthread_cond_wait(&mtctx->jobs[wJobID].job_cond, &mtctx->jobs[wJobID].job_mutex);   /* block when nothing to flush but some to come */
    }   }

    /* try to flush something */
    {   size_t cSize = mtctx->jobs[wJobID].cSize;                  /* shared */
        size_t const srcConsumed = mtctx->jobs[wJobID].consumed;   /* shared */
        size_t const srcSize = mtctx->jobs[wJobID].src.size;       /* read-only, could be done after mutex lock, but no-declaration-after-statement */
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
        if (ZSTD_isError(cSize)) {
            DEBUGLOG(5, "ZSTDMT_flushProduced: job %u : compression error detected : %s",
                        mtctx->doneJobID, ZSTD_getErrorName(cSize));
            ZSTDMT_waitForAllJobsCompleted(mtctx);
            ZSTDMT_releaseAllJobResources(mtctx);
            return cSize;
        }
        /* add frame checksum if necessary (can only happen once) */
        assert(srcConsumed <= srcSize);
        if ( (srcConsumed == srcSize)   /* job completed -> worker no longer active */
          && mtctx->jobs[wJobID].frameChecksumNeeded ) {
            U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);
            DEBUGLOG(4, "ZSTDMT_flushProduced: writing checksum : %08X \n", checksum);
            MEM_writeLE32((char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum);
            cSize += 4;
            mtctx->jobs[wJobID].cSize += 4;   /* can write this shared value, as worker is no longer active */
            mtctx->jobs[wJobID].frameChecksumNeeded = 0;
        }
        if (cSize > 0) {   /* compression is ongoing or completed */
            size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos);
            DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)",
                        (U32)toFlush, mtctx->doneJobID, (U32)srcConsumed, (U32)srcSize, (U32)cSize);
            assert(mtctx->doneJobID < mtctx->nextJobID);
            assert(cSize >= mtctx->jobs[wJobID].dstFlushed);
            assert(mtctx->jobs[wJobID].dstBuff.start != NULL);
            memcpy((char*)output->dst + output->pos,
                   (const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed,
                   toFlush);
            output->pos += toFlush;
            mtctx->jobs[wJobID].dstFlushed += toFlush;   /* can write : this value is only used by mtctx */

            if ( (srcConsumed == srcSize)   /* job is completed */
              && (mtctx->jobs[wJobID].dstFlushed == cSize) ) {   /* output buffer fully flushed => free this job position */
                DEBUGLOG(5, "Job %u completed (%u bytes), moving to next one",
                        mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
                ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff);
                DEBUGLOG(5, "dstBuffer released");
                mtctx->jobs[wJobID].dstBuff = g_nullBuffer;
                mtctx->jobs[wJobID].cSize = 0;   /* ensure this job slot is considered "not started" in future check */
                mtctx->consumed += srcSize;
                mtctx->produced += cSize;
                mtctx->doneJobID++;
        }   }

        /* return value : how many bytes left in buffer ; fake it to 1 when unknown but >0 */
        if (cSize > mtctx->jobs[wJobID].dstFlushed) return (cSize - mtctx->jobs[wJobID].dstFlushed);
        if (srcSize > srcConsumed) return 1;   /* current job not completely compressed */
    }
    if (mtctx->doneJobID < mtctx->nextJobID) return 1;   /* some more jobs ongoing */
    if (mtctx->jobReady) return 1;   /* one job is ready to push, just not yet in the list */
    if (mtctx->inBuff.filled > 0) return 1;   /* input is not empty, and still needs to be converted into a job */
    mtctx->allJobsCompleted = mtctx->frameEnded;   /* all jobs are entirely flushed => if this one is last one, frame is completed */
    if (end == ZSTD_e_end) return !mtctx->frameEnded;   /* for ZSTD_e_end, question becomes : is frame completed ? instead of : are internal buffers fully flushed ? */
    return 0;   /* internal buffers fully flushed */
}

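/* Worked example for ZSTDMT_flushProduced()'s return value (assumed values) :
 * if the current job has produced cSize = 100 KB, of which dstFlushed = 40 KB
 * has been copied out, and the caller's output buffer is now full, the
 * function returns 60 KB : the caller should provide more output space and
 * call again. A return of 1 only means "something remains", since the final
 * size of a still-running worker's output is not yet known. */
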
/**
 * Returns the range of data used by the earliest job that is not yet complete.
 * If the data of the first job is broken up into two segments, we cover both
 * sections.
 */
static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx)
{
    unsigned const firstJobID = mtctx->doneJobID;
    unsigned const lastJobID = mtctx->nextJobID;
    unsigned jobID;

    for (jobID = firstJobID; jobID < lastJobID; ++jobID) {
        unsigned const wJobID = jobID & mtctx->jobIDMask;
        size_t consumed;

        ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
        consumed = mtctx->jobs[wJobID].consumed;
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);

        if (consumed < mtctx->jobs[wJobID].src.size) {
            range_t range = mtctx->jobs[wJobID].prefix;
            if (range.size == 0) {
                /* Empty prefix */
                range = mtctx->jobs[wJobID].src;
            }
            /* Job source in multiple segments not supported yet */
            assert(range.start <= mtctx->jobs[wJobID].src.start);
            return range;
        }
    }
    return kNullRange;
}

/**
 * Returns non-zero iff buffer and range overlap.
 */
static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range)
{
    BYTE const* const bufferStart = (BYTE const*)buffer.start;
    BYTE const* const bufferEnd = bufferStart + buffer.capacity;
    BYTE const* const rangeStart = (BYTE const*)range.start;
    BYTE const* const rangeEnd = rangeStart + range.size;

    if (rangeStart == NULL || bufferStart == NULL)
        return 0;
    /* Empty ranges cannot overlap */
    if (bufferStart == bufferEnd || rangeStart == rangeEnd)
        return 0;

    return bufferStart < rangeEnd && rangeStart < bufferEnd;
}

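/* Quick sanity example (assumed values) : the half-open intervals [0,10) and
 * [10,20) do not overlap : bufferStart < rangeEnd holds (0 < 20) but
 * rangeStart < bufferEnd fails (10 < 10), so the test correctly returns 0 ;
 * shifting the range to [9,19) makes both comparisons true. */
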
static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window)
{
    range_t extDict;
    range_t prefix;

    DEBUGLOG(5, "ZSTDMT_doesOverlapWindow");
    extDict.start = window.dictBase + window.lowLimit;
    extDict.size = window.dictLimit - window.lowLimit;

    prefix.start = window.base + window.dictLimit;
    prefix.size = window.nextSrc - (window.base + window.dictLimit);
    DEBUGLOG(5, "extDict [0x%zx, 0x%zx)",
                (size_t)extDict.start,
                (size_t)extDict.start + extDict.size);
    DEBUGLOG(5, "prefix  [0x%zx, 0x%zx)",
                (size_t)prefix.start,
                (size_t)prefix.start + prefix.size);

    return ZSTDMT_isOverlapped(buffer, extDict)
        || ZSTDMT_isOverlapped(buffer, prefix);
}

static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer)
{
    if (mtctx->params.ldmParams.enableLdm) {
        ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex;
        DEBUGLOG(5, "ZSTDMT_waitForLdmComplete");
        DEBUGLOG(5, "source  [0x%zx, 0x%zx)",
                    (size_t)buffer.start,
                    (size_t)buffer.start + buffer.capacity);
        ZSTD_PTHREAD_MUTEX_LOCK(mutex);
        while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow)) {
            DEBUGLOG(5, "Waiting for LDM to finish...");
            ZSTD_pthread_cond_wait(&mtctx->serial.ldmWindowCond, mutex);
        }
        DEBUGLOG(6, "Done waiting for LDM to finish");
        ZSTD_pthread_mutex_unlock(mutex);
    }
}

/**
 * Attempts to set the inBuff to the next section to fill.
 * If any part of the new section is still in use we give up.
 * Returns non-zero if the buffer is filled.
 */
static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)
{
    range_t const inUse = ZSTDMT_getInputDataInUse(mtctx);
    size_t const spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos;
    size_t const target = mtctx->targetSectionSize;
    buffer_t buffer;

    DEBUGLOG(5, "ZSTDMT_tryGetInputRange");
    assert(mtctx->inBuff.buffer.start == NULL);
    assert(mtctx->roundBuff.capacity >= target);

    if (spaceLeft < target) {
        /* ZSTD_invalidateRepCodes() doesn't work for extDict variants.
         * Simply copy the prefix to the beginning in that case.
         */
        BYTE* const start = (BYTE*)mtctx->roundBuff.buffer;
        size_t const prefixSize = mtctx->inBuff.prefix.size;

        buffer.start = start;
        buffer.capacity = prefixSize;
        if (ZSTDMT_isOverlapped(buffer, inUse)) {
            DEBUGLOG(5, "Waiting for buffer...");
            return 0;
        }
        ZSTDMT_waitForLdmComplete(mtctx, buffer);
        memmove(start, mtctx->inBuff.prefix.start, prefixSize);
        mtctx->inBuff.prefix.start = start;
        mtctx->roundBuff.pos = prefixSize;
    }
    buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos;
    buffer.capacity = target;

    if (ZSTDMT_isOverlapped(buffer, inUse)) {
        DEBUGLOG(5, "Waiting for buffer...");
        return 0;
    }
    assert(!ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix));

    ZSTDMT_waitForLdmComplete(mtctx, buffer);

    DEBUGLOG(5, "Using prefix range [%zx, %zx)",
                (size_t)mtctx->inBuff.prefix.start,
                (size_t)mtctx->inBuff.prefix.start + mtctx->inBuff.prefix.size);
    DEBUGLOG(5, "Using source range [%zx, %zx)",
                (size_t)buffer.start,
                (size_t)buffer.start + buffer.capacity);

    mtctx->inBuff.buffer = buffer;
    mtctx->inBuff.filled = 0;
    assert(mtctx->roundBuff.pos + buffer.capacity <= mtctx->roundBuff.capacity);
    return 1;
}

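/* Illustrative arithmetic for the wrap-around above (assumed example values) :
 * with roundBuff.capacity = 20 MB, roundBuff.pos = 19 MB and
 * targetSectionSize = 4 MB, spaceLeft is 1 MB < target, so the buffer wraps :
 * the current prefix is memmove'd to the start of the round buffer and pos
 * restarts right after it, provided no in-flight job still reads either
 * region (otherwise the function returns 0 and the caller retries later). */
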
typedef struct {
    size_t toLoad;   /* The number of bytes to load from the input. */
    int flush;       /* Boolean declaring if we must flush because we found a synchronization point. */
} syncPoint_t;

/**
 * Searches through the input for a synchronization point. If one is found, we
 * will instruct the caller to flush, and return the number of bytes to load.
 * Otherwise, we will load as many bytes as possible and instruct the caller
 * to continue as normal.
 */
static syncPoint_t
findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)
{
    BYTE const* const istart = (BYTE const*)input.src + input.pos;
    U64 const primePower = mtctx->rsync.primePower;
    U64 const hitMask = mtctx->rsync.hitMask;

    syncPoint_t syncPoint;
    U64 hash;
    BYTE const* prev;
    size_t pos;

    syncPoint.toLoad = MIN(input.size - input.pos, mtctx->targetSectionSize - mtctx->inBuff.filled);
    syncPoint.flush = 0;
    if (!mtctx->params.rsyncable)
        /* Rsync is disabled. */
        return syncPoint;
    if (mtctx->inBuff.filled + syncPoint.toLoad < RSYNC_LENGTH)
        /* Not enough to compute the hash.
         * We will miss any synchronization points in this RSYNC_LENGTH byte
         * window. However, since it depends only on the internal buffers, if
         * the state is already synchronized, we will remain synchronized.
         * Additionally, the probability that we miss a synchronization point
         * is low: RSYNC_LENGTH / targetSectionSize.
         */
        return syncPoint;
    /* Initialize the loop variables. */
    if (mtctx->inBuff.filled >= RSYNC_LENGTH) {
        /* We have enough bytes buffered to initialize the hash.
         * Start scanning at the beginning of the input.
         */
        pos = 0;
        prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;
        hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);
    } else {
        /* We don't have enough bytes buffered to initialize the hash, but
         * we know we have at least RSYNC_LENGTH bytes total.
         * Start scanning after the first RSYNC_LENGTH bytes less the bytes
         * already buffered.
         */
        pos = RSYNC_LENGTH - mtctx->inBuff.filled;
        prev = (BYTE const*)mtctx->inBuff.buffer.start - pos;
        hash = ZSTD_rollingHash_compute(mtctx->inBuff.buffer.start, mtctx->inBuff.filled);
        hash = ZSTD_rollingHash_append(hash, istart, pos);
    }
    /* Starting with the hash of the previous RSYNC_LENGTH bytes, roll
     * through the input. If we hit a synchronization point, then cut the
     * job off, and tell the compressor to flush the job. Otherwise, load
     * all the bytes and continue as normal.
     * If we go too long without a synchronization point (targetSectionSize)
     * then a block will be emitted anyway, but this is okay, since if we
     * are already synchronized we will remain synchronized.
     */
    for (; pos < syncPoint.toLoad; ++pos) {
        BYTE const toRemove = pos < RSYNC_LENGTH ? prev[pos] : istart[pos - RSYNC_LENGTH];
        /* if (pos >= RSYNC_LENGTH) assert(ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); */
        hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower);
        if ((hash & hitMask) == hitMask) {
            syncPoint.toLoad = pos + 1;
            syncPoint.flush = 1;
            break;
        }
    }
    return syncPoint;
}

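/* A minimal sketch of the rolling-hash recurrence used above (illustrative
 * only ; the real primitives live in zstd's internal headers and add a
 * per-byte offset). For a window of length L over bytes b[i..i+L), the
 * polynomial hash
 *     H(i) = sum over k in [0,L) of b[i+k] * prime^(L-1-k)   (mod 2^64)
 * advances by one byte in O(1) : remove the contribution of the byte that
 * leaves (b[i] * prime^(L-1), where prime^(L-1) is `primePower`), multiply
 * by `prime`, then add the byte that enters :
 *
 *     static U64 rollingHash_rotate_sketch(U64 hash, BYTE toRemove, BYTE toAdd,
 *                                          U64 prime, U64 primePower)
 *     {
 *         hash -= (U64)toRemove * primePower;   // drop the leaving byte
 *         hash *= prime;                        // shift the window by one
 *         hash += toAdd;                        // add the entering byte
 *         return hash;                          // arithmetic is mod 2^64
 *     }
 *
 * A position is a synchronization point when the low bits of the hash are all
 * ones ((hash & hitMask) == hitMask), which happens on average once every
 * hitMask+1 positions, independently of block boundaries. */
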
size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx)
{
    size_t hintInSize = mtctx->targetSectionSize - mtctx->inBuff.filled;
    if (hintInSize==0) hintInSize = mtctx->targetSectionSize;
    return hintInSize;
}

/** ZSTDMT_compressStream_generic() :
 *  internal use only - exposed to be invoked from zstd_compress.c
 *  assumption : output and input are valid (pos <= size)
 * @return : minimum amount of data remaining to flush, 0 if none */
size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
                                     ZSTD_outBuffer* output,
                                     ZSTD_inBuffer* input,
                                     ZSTD_EndDirective endOp)
{
    unsigned forwardInputProgress = 0;
    DEBUGLOG(5, "ZSTDMT_compressStream_generic (endOp=%u, srcSize=%u)",
                (U32)endOp, (U32)(input->size - input->pos));
    assert(output->pos <= output->size);
    assert(input->pos <= input->size);

    if (mtctx->singleBlockingThread) {   /* delegate to single-thread (synchronous) */
        return ZSTD_compressStream_generic(mtctx->cctxPool->cctx[0], output, input, endOp);
    }
    if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
        /* current frame being ended. Only flush/end are allowed */
        return ERROR(stage_wrong);
    }

    /* single-pass shortcut (note : synchronous-mode) */
    if ( (!mtctx->params.rsyncable)   /* rsyncable mode is disabled */
      && (mtctx->nextJobID == 0)      /* just started */
      && (mtctx->inBuff.filled == 0)  /* nothing buffered */
      && (!mtctx->jobReady)           /* no job already created */
      && (endOp == ZSTD_e_end)        /* end order */
      && (output->size - output->pos >= ZSTD_compressBound(input->size - input->pos)) ) {   /* enough space in dst */
        size_t const cSize = ZSTDMT_compress_advanced_internal(mtctx,
                (char*)output->dst + output->pos, output->size - output->pos,
                (const char*)input->src + input->pos, input->size - input->pos,
                mtctx->cdict, mtctx->params);
        if (ZSTD_isError(cSize)) return cSize;
        input->pos = input->size;
        output->pos += cSize;
        mtctx->allJobsCompleted = 1;
        mtctx->frameEnded = 1;
        return 0;
    }

    /* fill input buffer */
    if ( (!mtctx->jobReady)
      && (input->size > input->pos) ) {   /* support NULL input */
        if (mtctx->inBuff.buffer.start == NULL) {
            assert(mtctx->inBuff.filled == 0);   /* Can't fill an empty buffer */
            if (!ZSTDMT_tryGetInputRange(mtctx)) {
                /* It is only possible for this operation to fail if there are
                 * still compression jobs ongoing.
                 */
                DEBUGLOG(5, "ZSTDMT_tryGetInputRange failed");
                assert(mtctx->doneJobID != mtctx->nextJobID);
            } else
                DEBUGLOG(5, "ZSTDMT_tryGetInputRange completed successfully : mtctx->inBuff.buffer.start = %p", mtctx->inBuff.buffer.start);
        }
        if (mtctx->inBuff.buffer.start != NULL) {
            syncPoint_t const syncPoint = findSynchronizationPoint(mtctx, *input);
            if (syncPoint.flush && endOp == ZSTD_e_continue) {
                endOp = ZSTD_e_flush;
            }
            assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize);
            DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u",
                        (U32)syncPoint.toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize);
            memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad);
            input->pos += syncPoint.toLoad;
            mtctx->inBuff.filled += syncPoint.toLoad;
            forwardInputProgress = syncPoint.toLoad>0;
        }
        if ((input->pos < input->size) && (endOp == ZSTD_e_end))
            endOp = ZSTD_e_flush;   /* can't end now : not all input consumed */
    }

    if ( (mtctx->jobReady)
      || (mtctx->inBuff.filled >= mtctx->targetSectionSize)   /* filled enough : let's compress */
      || ((endOp != ZSTD_e_continue) && (mtctx->inBuff.filled > 0))   /* something to flush : let's go */
      || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) {   /* must finish the frame with a zero-size block */
        size_t const jobSize = mtctx->inBuff.filled;
        assert(mtctx->inBuff.filled <= mtctx->targetSectionSize);
        CHECK_F( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) );
    }

    /* check for potential compressed data ready to be flushed */
    {   size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp);   /* block if there was no forward input progress */
        if (input->pos < input->size) return MAX(remainingToFlush, 1);   /* input not consumed : do not end flush yet */
        DEBUGLOG(5, "end of ZSTDMT_compressStream_generic: remainingToFlush = %u", (U32)remainingToFlush);
        return remainingToFlush;
    }
}

size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
    CHECK_F( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) );

    /* recommended next input size : fill current input buffer */
    return mtctx->targetSectionSize - mtctx->inBuff.filled;   /* note : could be zero, when input buffer is fully filled and no more availability to create new job */
}

static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_EndDirective endFrame)
{
    size_t const srcSize = mtctx->inBuff.filled;
    DEBUGLOG(5, "ZSTDMT_flushStream_internal");

    if ( mtctx->jobReady     /* one job ready for a worker to pick up */
      || (srcSize > 0)       /* still some data within input buffer */
      || ((endFrame==ZSTD_e_end) && !mtctx->frameEnded)) {   /* need a last 0-size block to end frame */
        DEBUGLOG(5, "ZSTDMT_flushStream_internal : create a new job (%u bytes, end:%u)",
                    (U32)srcSize, (U32)endFrame);
        CHECK_F( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) );
    }

    /* check if there is any data available to flush */
    return ZSTDMT_flushProduced(mtctx, output, 1 /* blockToFlush */, endFrame);
}

size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
{
    DEBUGLOG(5, "ZSTDMT_flushStream");
    if (mtctx->singleBlockingThread)
        return ZSTD_flushStream(mtctx->cctxPool->cctx[0], output);
    return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_flush);
}

size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
{
    DEBUGLOG(4, "ZSTDMT_endStream");
    if (mtctx->singleBlockingThread)
        return ZSTD_endStream(mtctx->cctxPool->cctx[0], output);
    return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_end);
}

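/* A minimal usage sketch for the streaming entry points above (illustrative
 * only ; error handling reduced to asserts, and src/dst buffers assumed
 * allocated and large enough for the example) :
 *
 *     ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(4);        // 4 workers
 *     ZSTD_inBuffer in = { src, srcSize, 0 };
 *     ZSTD_outBuffer out = { dst, dstCapacity, 0 };
 *     size_t ret = ZSTDMT_initCStream(mtctx, 3);              // level 3
 *     assert(!ZSTD_isError(ret));
 *     while (in.pos < in.size) {
 *         ret = ZSTDMT_compressStream(mtctx, &out, &in);      // feed input
 *         assert(!ZSTD_isError(ret));
 *     }
 *     do {                                                    // finish frame
 *         ret = ZSTDMT_endStream(mtctx, &out);
 *         assert(!ZSTD_isError(ret));
 *     } while (ret != 0);                                     // 0 => fully flushed
 *     ZSTDMT_freeCCtx(mtctx);
 */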