/*
 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */
/* *****************************************************************************
 * Constructs a dictionary using a heuristic based on the following paper:
 *
 * Liao, Petri, Moffat, Wirth
 * Effective Construction of Relative Lempel-Ziv Dictionaries
 * Published in WWW 2016.
 *
 * Adapted from code originally written by @ot (Giuseppe Ottaviano).
 ******************************************************************************/
/*-*************************************
*  Dependencies
***************************************/
#include <stdio.h>  /* fprintf */
#include <stdlib.h> /* malloc, free, qsort */
#include <string.h> /* memset */
#include <time.h>   /* clock */
#include "mem.h" /* read */
#include "pool.h"
#include "threading.h"
#include "zstd_internal.h" /* includes zstd.h */
#ifndef ZDICT_STATIC_LINKING_ONLY
#define ZDICT_STATIC_LINKING_ONLY
#endif
#include "zdict.h"
/*-*************************************
*  Constants
***************************************/
#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((U32)-1) : ((U32)1 GB))
/*-*************************************
*  Console display
***************************************/
static int g_displayLevel = 2;
#define DISPLAY(...)                                                           \
  {                                                                            \
    fprintf(stderr, __VA_ARGS__);                                              \
    fflush(stderr);                                                            \
  }
#define LOCALDISPLAYLEVEL(displayLevel, l, ...)                                \
  if (displayLevel >= l) {                                                     \
    DISPLAY(__VA_ARGS__);                                                      \
  } /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */
#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)
#define LOCALDISPLAYUPDATE(displayLevel, l, ...)                               \
  if (displayLevel >= l) {                                                     \
    if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) {             \
      g_time = clock();                                                        \
      DISPLAY(__VA_ARGS__);                                                    \
    }                                                                          \
  }
#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)
static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
static clock_t g_time = 0;
/*-*************************************
* Hash table
***************************************
* A small specialized hash map for storing activeDmers.
* The map does not resize, so if it becomes full it will loop forever.
* Thus, the map must be large enough to store every value.
* The map implements linear probing and keeps its load less than 0.5.
*/
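
/* Illustrative example (hypothetical values): with sizeMask = 7, two keys
 * that both hash to slot 3 occupy slots 3 and 4; a lookup of the second key
 * probes slot 3 (wrong key) and then finds it at slot 4. Deletion back-shifts
 * displaced entries (see COVER_map_remove()) so probe chains stay unbroken.
 */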
#define MAP_EMPTY_VALUE ((U32)-1)
typedef struct COVER_map_pair_t_s {
  U32 key;
  U32 value;
} COVER_map_pair_t;

typedef struct COVER_map_s {
  COVER_map_pair_t *data;
  U32 sizeLog;
  U32 size;
  U32 sizeMask;
} COVER_map_t;
/**
 * Clear the map.
 */
static void COVER_map_clear(COVER_map_t *map) {
  memset(map->data, MAP_EMPTY_VALUE, map->size * sizeof(COVER_map_pair_t));
}
/**
 * Initializes a map of the given size.
 * Returns 1 on success and 0 on failure.
 * The map must be destroyed with COVER_map_destroy().
 * The map is only guaranteed to be large enough to hold size elements.
 */
static int COVER_map_init(COVER_map_t *map, U32 size) {
  map->sizeLog = ZSTD_highbit32(size) + 2;
  map->size = (U32)1 << map->sizeLog;
  map->sizeMask = map->size - 1;
  map->data = (COVER_map_pair_t *)malloc(map->size * sizeof(COVER_map_pair_t));
  if (!map->data) {
    map->sizeLog = 0;
    map->size = 0;
    return 0;
  }
  COVER_map_clear(map);
  return 1;
}
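
/* Sizing example (illustrative): COVER_map_init(&map, 48) computes
 * sizeLog = ZSTD_highbit32(48) + 2 = 5 + 2 = 7, so the table holds 128 slots
 * for at most 48 entries, keeping the load factor at 48/128 = 0.375 < 0.5.
 */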
/**
 * Internal hash function
 */
static const U32 prime4bytes = 2654435761U;
static U32 COVER_map_hash(COVER_map_t *map, U32 key) {
  return (key * prime4bytes) >> (32 - map->sizeLog);
}
/**
 * Helper function that returns the index that a key should be placed into.
 */
static U32 COVER_map_index(COVER_map_t *map, U32 key) {
  const U32 hash = COVER_map_hash(map, key);
  U32 i;
  for (i = hash;; i = (i + 1) & map->sizeMask) {
    COVER_map_pair_t *pos = &map->data[i];
    if (pos->value == MAP_EMPTY_VALUE) {
      return i;
    }
    if (pos->key == key) {
      return i;
    }
  }
}
/**
 * Returns the pointer to the value for key.
 * If key is not in the map, it is inserted and the value is set to 0.
 * The map must not be full.
 */
static U32 *COVER_map_at(COVER_map_t *map, U32 key) {
  COVER_map_pair_t *pos = &map->data[COVER_map_index(map, key)];
  if (pos->value == MAP_EMPTY_VALUE) {
    pos->key = key;
    pos->value = 0;
  }
  return &pos->value;
}
/**
 * Deletes key from the map if present.
 */
static void COVER_map_remove(COVER_map_t *map, U32 key) {
  U32 i = COVER_map_index(map, key);
  COVER_map_pair_t *del = &map->data[i];
  U32 shift = 1;
  if (del->value == MAP_EMPTY_VALUE) {
    return;
  }
  for (i = (i + 1) & map->sizeMask;; i = (i + 1) & map->sizeMask) {
    COVER_map_pair_t *const pos = &map->data[i];
    /* If the position is empty we are done */
    if (pos->value == MAP_EMPTY_VALUE) {
      del->value = MAP_EMPTY_VALUE;
      return;
    }
    /* If pos can be moved to del do so */
    if (((i - COVER_map_hash(map, pos->key)) & map->sizeMask) >= shift) {
      del->key = pos->key;
      del->value = pos->value;
      del = pos;
      shift = 1;
    } else {
      ++shift;
    }
  }
}
/**
 * Destroys a map that was initialized with COVER_map_init().
 */
static void COVER_map_destroy(COVER_map_t *map) {
  if (map->data) {
    free(map->data);
  }
  map->data = NULL;
  map->size = 0;
}
/*-*************************************
* Context
***************************************/

typedef struct {
  const BYTE *samples;
  size_t *offsets;
  const size_t *samplesSizes;
  size_t nbSamples;
  U32 *suffix;
  size_t suffixSize;
  U32 *freqs;
  U32 *dmerAt;
  unsigned d;
} COVER_ctx_t;

/* We need a global context for qsort... */
static COVER_ctx_t *g_ctx = NULL;
/*-*************************************
*  Helper functions
***************************************/
/**
 * Returns the sum of the sample sizes.
 */
static size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) {
  size_t sum = 0;
  unsigned i;
  for (i = 0; i < nbSamples; ++i) {
    sum += samplesSizes[i];
  }
  return sum;
}
/**
 * Returns -1 if the dmer at lp is less than the dmer at rp.
 * Returns 0 if the dmers at lp and rp are equal.
 * Returns 1 if the dmer at lp is greater than the dmer at rp.
 */
static int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) {
  U32 const lhs = *(U32 const *)lp;
  U32 const rhs = *(U32 const *)rp;
  return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d);
}
/**
 * Faster version for d <= 8.
 */
static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) {
  U64 const mask = (ctx->d == 8) ? (U64)-1 : (((U64)1 << (8 * ctx->d)) - 1);
  U64 const lhs = MEM_readLE64(ctx->samples + *(U32 const *)lp) & mask;
  U64 const rhs = MEM_readLE64(ctx->samples + *(U32 const *)rp) & mask;
  if (lhs < rhs) {
    return -1;
  }
  return (lhs > rhs);
}
/**
 * Same as COVER_cmp() except ties are broken by pointer value.
 * NOTE: g_ctx must be set to call this function. A global is required because
 * qsort doesn't take an opaque pointer.
 */
static int COVER_strict_cmp(const void *lp, const void *rp) {
  int result = COVER_cmp(g_ctx, lp, rp);
  if (result == 0) {
    result = lp < rp ? -1 : 1;
  }
  return result;
}
/**
 * Faster version for d <= 8.
 */
static int COVER_strict_cmp8(const void *lp, const void *rp) {
  int result = COVER_cmp8(g_ctx, lp, rp);
  if (result == 0) {
    result = lp < rp ? -1 : 1;
  }
  return result;
}
/**
 * Returns the first pointer in [first, last) whose element does not compare
 * less than value. If no such element exists it returns last.
 */
static const size_t *COVER_lower_bound(const size_t *first, const size_t *last,
                                       size_t value) {
  size_t count = last - first;
  while (count != 0) {
    size_t step = count / 2;
    const size_t *ptr = first;
    ptr += step;
    if (*ptr < value) {
      first = ++ptr;
      count -= step + 1;
    } else {
      count = step;
    }
  }
  return first;
}
/**
 * Generic groupBy function.
 * Groups an array sorted by cmp into groups with equivalent values.
 * Calls grp for each group.
 */
static void
COVER_groupBy(const void *data, size_t count, size_t size, COVER_ctx_t *ctx,
              int (*cmp)(COVER_ctx_t *, const void *, const void *),
              void (*grp)(COVER_ctx_t *, const void *, const void *)) {
  const BYTE *ptr = (const BYTE *)data;
  size_t num = 0;
  while (num < count) {
    const BYTE *grpEnd = ptr + size;
    ++num;
    while (num < count && cmp(ctx, ptr, grpEnd) == 0) {
      grpEnd += size;
      ++num;
    }
    grp(ctx, ptr, grpEnd);
    ptr = grpEnd;
  }
}
/*-*************************************
*  Cover functions
***************************************/
/**
 * Called on each group of positions with the same dmer.
 * Counts the frequency of each dmer and saves it in the suffix array.
 * Fills `ctx->dmerAt`.
 */
static void COVER_group(COVER_ctx_t *ctx, const void *group,
                        const void *groupEnd) {
  /* The group consists of all the positions with the same first d bytes. */
  const U32 *grpPtr = (const U32 *)group;
  const U32 *grpEnd = (const U32 *)groupEnd;
  /* The dmerId is how we will reference this dmer.
   * This allows us to map the whole dmer space to a much smaller space, the
   * size of the suffix array.
   */
  const U32 dmerId = (U32)(grpPtr - ctx->suffix);
  /* Count the number of samples this dmer shows up in */
  U32 freq = 0;
  const size_t *curOffsetPtr = ctx->offsets;
  const size_t *offsetsEnd = ctx->offsets + ctx->nbSamples;
  /* Once *grpPtr >= curSampleEnd this occurrence of the dmer is in a
   * different sample than the last.
   */
  size_t curSampleEnd = ctx->offsets[0];
  for (; grpPtr != grpEnd; ++grpPtr) {
    /* Save the dmerId for this position so we can get back to it. */
    ctx->dmerAt[*grpPtr] = dmerId;
    /* Dictionaries only help for the first reference to the dmer.
     * After that zstd can reference the match from the previous reference.
     * So only count each dmer once for each sample it is in.
     */
    if (*grpPtr < curSampleEnd) {
      continue;
    }
    freq += 1;
    /* Binary search to find the end of the sample *grpPtr is in.
     * In the common case that grpPtr + 1 == grpEnd we can skip the binary
     * search because the loop is over.
     */
    if (grpPtr + 1 != grpEnd) {
      const size_t *sampleEndPtr =
          COVER_lower_bound(curOffsetPtr, offsetsEnd, *grpPtr);
      curSampleEnd = *sampleEndPtr;
      curOffsetPtr = sampleEndPtr + 1;
    }
  }
  /* At this point we are never going to look at this segment of the suffix
   * array again. We take advantage of this fact to save memory.
   * We store the frequency of the dmer in the first position of the group,
   * which is dmerId.
   */
  ctx->suffix[dmerId] = freq;
}
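
/* Frequency example (illustrative): a dmer that appears three times in one
 * sample and once in a second sample gets freq = 2, because each sample
 * contributes at most one occurrence to the count.
 */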
/**
 * A segment is a range in the source as well as the score of the segment.
 */
typedef struct {
  U32 begin;
  U32 end;
  U32 score;
} COVER_segment_t;
/**
 * Selects the best segment in an epoch.
 * Segments are scored according to the function:
 *
 * Let F(d) be the frequency of dmer d.
 * Let S_i be the dmer at position i of segment S which has length k.
 *
 *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
 *
 * Once the dmer d is in the dictionary we set F(d) = 0.
 */
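/* Worked example (illustrative, with made-up bytes): for d = 2 and k = 4, a
 * segment whose bytes are "abab" contains the dmers "ab", "ba", "ab". The
 * sliding window below counts each distinct dmer only once while it is in
 * the window, so Score = F("ab") + F("ba"); the repeated "ab" adds nothing.
 */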
static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs,
                                           COVER_map_t *activeDmers, U32 begin,
                                           U32 end,
                                           ZDICT_cover_params_t parameters) {
  /* Constants */
  const U32 k = parameters.k;
  const U32 d = parameters.d;
  const U32 dmersInK = k - d + 1;
  /* Try each segment (activeSegment) and save the best (bestSegment) */
  COVER_segment_t bestSegment = {0, 0, 0};
  COVER_segment_t activeSegment;
  /* Reset the activeDmers in the segment */
  COVER_map_clear(activeDmers);
  /* The activeSegment starts at the beginning of the epoch. */
  activeSegment.begin = begin;
  activeSegment.end = begin;
  activeSegment.score = 0;
  /* Slide the activeSegment through the whole epoch.
   * Save the best segment in bestSegment.
   */
  while (activeSegment.end < end) {
    /* The dmerId for the dmer at the next position */
    U32 newDmer = ctx->dmerAt[activeSegment.end];
    /* The entry in activeDmers for this dmerId */
    U32 *newDmerOcc = COVER_map_at(activeDmers, newDmer);
    /* If the dmer isn't already present in the segment add its score. */
    if (*newDmerOcc == 0) {
      /* The paper suggests using the L-0.5 norm, but experiments show that it
       * doesn't help.
       */
      activeSegment.score += freqs[newDmer];
    }
    /* Add the dmer to the segment */
    activeSegment.end += 1;
    *newDmerOcc += 1;

    /* If the window is now too large, drop the first position */
    if (activeSegment.end - activeSegment.begin == dmersInK + 1) {
      U32 delDmer = ctx->dmerAt[activeSegment.begin];
      U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer);
      activeSegment.begin += 1;
      *delDmerOcc -= 1;
      /* If this is the last occurrence of the dmer, subtract its score */
      if (*delDmerOcc == 0) {
        COVER_map_remove(activeDmers, delDmer);
        activeSegment.score -= freqs[delDmer];
      }
    }

    /* If this segment is the best so far save it */
    if (activeSegment.score > bestSegment.score) {
      bestSegment = activeSegment;
    }
  }
  {
    /* Trim off the zero frequency head and tail from the segment. */
    U32 newBegin = bestSegment.end;
    U32 newEnd = bestSegment.begin;
    U32 pos;
    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
      U32 freq = freqs[ctx->dmerAt[pos]];
      if (freq != 0) {
        newBegin = MIN(newBegin, pos);
        newEnd = pos + 1;
      }
    }
    bestSegment.begin = newBegin;
    bestSegment.end = newEnd;
  }
  {
    /* Zero out the frequency of each dmer covered by the chosen segment. */
    U32 pos;
    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
      freqs[ctx->dmerAt[pos]] = 0;
    }
  }
  return bestSegment;
}
/**
 * Check the validity of the parameters.
 * Returns non-zero if the parameters are valid and 0 otherwise.
 */
static int COVER_checkParameters(ZDICT_cover_params_t parameters,
                                 size_t maxDictSize) {
  /* k and d are required parameters */
  if (parameters.d == 0 || parameters.k == 0) {
    return 0;
  }
  /* k <= maxDictSize */
  if (parameters.k > maxDictSize) {
    return 0;
  }
  /* d <= k */
  if (parameters.d > parameters.k) {
    return 0;
  }
  return 1;
}
/**
 * Clean up a context initialized with `COVER_ctx_init()`.
 */
static void COVER_ctx_destroy(COVER_ctx_t *ctx) {
  if (!ctx) {
    return;
  }
  /* free(NULL) is a no-op, so unallocated buffers are safe to pass */
  free(ctx->suffix);
  ctx->suffix = NULL;
  free(ctx->freqs);
  ctx->freqs = NULL;
  free(ctx->dmerAt);
  ctx->dmerAt = NULL;
  free(ctx->offsets);
  ctx->offsets = NULL;
}
/**
 * Prepare a context for dictionary building.
 * The context is only dependent on the parameter `d` and can be used multiple
 * times.
 * Returns 1 on success or zero on error.
 * The context must be destroyed with `COVER_ctx_destroy()`.
 */
static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
                          const size_t *samplesSizes, unsigned nbSamples,
                          unsigned d) {
  const BYTE *const samples = (const BYTE *)samplesBuffer;
  const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
  /* Checks */
  if (totalSamplesSize < MAX(d, sizeof(U64)) ||
      totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) {
    DISPLAYLEVEL(1, "Total samples size is too large, maximum size is %u MB\n",
                 (COVER_MAX_SAMPLES_SIZE >> 20));
    return 0;
  }
  /* Zero the context */
  memset(ctx, 0, sizeof(*ctx));
  DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbSamples,
               (U32)totalSamplesSize);
  ctx->samples = samples;
  ctx->samplesSizes = samplesSizes;
  ctx->nbSamples = nbSamples;
  /* Partial suffix array */
  ctx->suffixSize = totalSamplesSize - MAX(d, sizeof(U64)) + 1;
  ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
  /* Maps index to the dmerID */
  ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
  /* The offsets of each file */
  ctx->offsets = (size_t *)malloc((nbSamples + 1) * sizeof(size_t));
  if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) {
    DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n");
    COVER_ctx_destroy(ctx);
    return 0;
  }
  ctx->freqs = NULL;
  ctx->d = d;
  /* Fill offsets from the samplesSizes */
  {
    U32 i;
    ctx->offsets[0] = 0;
    for (i = 1; i <= nbSamples; ++i) {
      ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
    }
  }
  DISPLAYLEVEL(2, "Constructing partial suffix array\n");
  {
    /* suffix is a partial suffix array.
     * It only sorts suffixes by their first parameters.d bytes.
     * The sort is stable, so each dmer group is sorted by position in input.
     */
    U32 i;
    for (i = 0; i < ctx->suffixSize; ++i) {
      ctx->suffix[i] = i;
    }
    /* qsort doesn't take an opaque pointer, so pass as a global */
    g_ctx = ctx;
    qsort(ctx->suffix, ctx->suffixSize, sizeof(U32),
          (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
  }
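  /* Illustrative example (simplified; the 8-byte read padding is ignored):
   * with d = 2 and input "abab", positions {0, 1, 2} sort into the groups
   * "ab" -> {0, 2} and "ba" -> {1}. COVER_group() below then sets
   * dmerAt[0] = dmerAt[2] = 0 and dmerAt[1] = 2, the index of each group's
   * first entry in the sorted suffix array.
   */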
  DISPLAYLEVEL(2, "Computing frequencies\n");
  /* For each dmer group (group of positions with the same first d bytes):
   * 1. For each position we set dmerAt[position] = dmerID. The dmerID is
   *    (groupBeginPtr - suffix). This allows us to go from position to
   *    dmerID so we can look up values in freq.
   * 2. We calculate how many samples the dmer occurs in and save it in
   *    freqs[dmerId].
   */
  COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx,
                (ctx->d <= 8 ? &COVER_cmp8 : &COVER_cmp), &COVER_group);
  ctx->freqs = ctx->suffix;
  ctx->suffix = NULL;
  return 1;
}
/**
 * Given the prepared context build the dictionary.
 */
static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs,
                                    COVER_map_t *activeDmers, void *dictBuffer,
                                    size_t dictBufferCapacity,
                                    ZDICT_cover_params_t parameters) {
  BYTE *const dict = (BYTE *)dictBuffer;
  size_t tail = dictBufferCapacity;
  /* Divide the data up into epochs of equal size.
   * We will select at least one segment from each epoch.
   */
  const U32 epochs = (U32)(dictBufferCapacity / parameters.k);
  const U32 epochSize = (U32)(ctx->suffixSize / epochs);
  size_t epoch;
  DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", epochs,
               epochSize);
  /* Loop through the epochs until there are no more segments or the dictionary
   * is full.
   */
  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs) {
    const U32 epochBegin = (U32)(epoch * epochSize);
    const U32 epochEnd = epochBegin + epochSize;
    size_t segmentSize;
    /* Select a segment */
    COVER_segment_t segment = COVER_selectSegment(
        ctx, freqs, activeDmers, epochBegin, epochEnd, parameters);
    /* If the segment covers no dmers, then we are out of content */
    if (segment.score == 0) {
      break;
    }
    /* Trim the segment if necessary and if it is too small then we are done */
    segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
    if (segmentSize < parameters.d) {
      break;
    }
    /* We fill the dictionary from the back to allow the best segments to be
     * referenced with the smallest offsets.
     */
    tail -= segmentSize;
    memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
    DISPLAYUPDATE(
        2, "\r%u%%       ",
        (U32)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
  }
  DISPLAYLEVEL(2, "\r%79s\r", "");
  return tail;
}
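
/* Epoch arithmetic example (illustrative numbers): with
 * dictBufferCapacity = 112640 and k = 1024, epochs = 112640 / 1024 = 110;
 * if suffixSize = 10000000 dmers, each epoch spans
 * epochSize = 10000000 / 110 = 90909 dmers, and segments are selected from
 * the epochs in round-robin order until the dictionary is full.
 */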
ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
    void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer,
    const size_t *samplesSizes, unsigned nbSamples,
    ZDICT_cover_params_t parameters) {
  BYTE *const dict = (BYTE *)dictBuffer;
  COVER_ctx_t ctx;
  COVER_map_t activeDmers;
  /* Checks */
  if (!COVER_checkParameters(parameters, dictBufferCapacity)) {
    DISPLAYLEVEL(1, "Cover parameters incorrect\n");
    return ERROR(GENERIC);
  }
  if (nbSamples == 0) {
    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
    return ERROR(GENERIC);
  }
  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
                 ZDICT_DICTSIZE_MIN);
    return ERROR(dstSize_tooSmall);
  }
  /* Initialize global data */
  g_displayLevel = parameters.zParams.notificationLevel;
  /* Initialize context and activeDmers */
  if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
                      parameters.d)) {
    return ERROR(GENERIC);
  }
  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
    COVER_ctx_destroy(&ctx);
    return ERROR(GENERIC);
  }
  DISPLAYLEVEL(2, "Building dictionary\n");
  {
    const size_t tail =
        COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer,
                              dictBufferCapacity, parameters);
    const size_t dictionarySize = ZDICT_finalizeDictionary(
        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
        samplesBuffer, samplesSizes, nbSamples, parameters.zParams);
    if (!ZSTD_isError(dictionarySize)) {
      DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
                   (U32)dictionarySize);
    }
    COVER_ctx_destroy(&ctx);
    COVER_map_destroy(&activeDmers);
    return dictionarySize;
  }
}
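
/*
 * Usage sketch (illustrative only; assumes the caller has loaded `samples`,
 * `sampleSizes`, and `nbSamples`, and that dictBufferCapacity is at least
 * ZDICT_DICTSIZE_MIN):
 *
 *   ZDICT_cover_params_t params;
 *   memset(&params, 0, sizeof(params));
 *   params.k = 1024;   (segment size, k <= dictBufferCapacity)
 *   params.d = 8;      (dmer size, d <= k)
 *   dictSize = ZDICT_trainFromBuffer_cover(dictBuffer, dictBufferCapacity,
 *                                          samples, sampleSizes, nbSamples,
 *                                          params);
 *   if (ZDICT_isError(dictSize)) { handle the error }
 */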
/**
 * COVER_best_t is used for two purposes:
 * 1. Synchronizing threads.
 * 2. Saving the best parameters and dictionary.
 *
 * All of the methods except COVER_best_init() are thread safe if zstd is
 * compiled with multithreaded support.
 */
typedef struct COVER_best_s {
  ZSTD_pthread_mutex_t mutex;
  ZSTD_pthread_cond_t cond;
  size_t liveJobs;
  void *dict;
  size_t dictSize;
  ZDICT_cover_params_t parameters;
  size_t compressedSize;
} COVER_best_t;
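
/* Typical lifecycle (sketch, mirroring how the optimizer below drives it):
 *
 *   COVER_best_init(&best);
 *   COVER_best_start(&best);         (before launching each job)
 *   COVER_best_finish(&best, ...);   (from each job, on success or error)
 *   COVER_best_wait(&best);          (block until all jobs are done)
 *   COVER_best_destroy(&best);       (waits, then frees best.dict)
 */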
/**
 * Initialize the `COVER_best_t`.
 */
static void COVER_best_init(COVER_best_t *best) {
  if (best==NULL) return; /* compatible with init on NULL */
  (void)ZSTD_pthread_mutex_init(&best->mutex, NULL);
  (void)ZSTD_pthread_cond_init(&best->cond, NULL);
  best->liveJobs = 0;
  best->dict = NULL;
  best->dictSize = 0;
  best->compressedSize = (size_t)-1;
  memset(&best->parameters, 0, sizeof(best->parameters));
}
/**
 * Wait until liveJobs == 0.
 */
static void COVER_best_wait(COVER_best_t *best) {
  if (!best) {
    return;
  }
  ZSTD_pthread_mutex_lock(&best->mutex);
  while (best->liveJobs != 0) {
    ZSTD_pthread_cond_wait(&best->cond, &best->mutex);
  }
  ZSTD_pthread_mutex_unlock(&best->mutex);
}
/**
 * Call COVER_best_wait() and then destroy the COVER_best_t.
 */
static void COVER_best_destroy(COVER_best_t *best) {
  if (!best) {
    return;
  }
  COVER_best_wait(best);
  if (best->dict) {
    free(best->dict);
  }
  ZSTD_pthread_mutex_destroy(&best->mutex);
  ZSTD_pthread_cond_destroy(&best->cond);
}
/**
 * Called when a thread is about to be launched.
 * Increments liveJobs.
 */
static void COVER_best_start(COVER_best_t *best) {
  if (!best) {
    return;
  }
  ZSTD_pthread_mutex_lock(&best->mutex);
  ++best->liveJobs;
  ZSTD_pthread_mutex_unlock(&best->mutex);
}
/**
 * Called when a thread finishes executing, both on error and success.
 * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
 * If this dictionary is the best so far save it and its parameters.
 */
static void COVER_best_finish(COVER_best_t *best, size_t compressedSize,
                              ZDICT_cover_params_t parameters, void *dict,
                              size_t dictSize) {
  if (!best) {
    return;
  }
  {
    size_t liveJobs;
    ZSTD_pthread_mutex_lock(&best->mutex);
    --best->liveJobs;
    liveJobs = best->liveJobs;
    /* If the new dictionary is better */
    if (compressedSize < best->compressedSize) {
      /* Allocate space if necessary */
      if (!best->dict || best->dictSize < dictSize) {
        if (best->dict) {
          free(best->dict);
        }
        best->dict = malloc(dictSize);
        if (!best->dict) {
          best->compressedSize = ERROR(GENERIC);
          best->dictSize = 0;
          /* Release the lock before the early return so waiters don't hang */
          ZSTD_pthread_cond_broadcast(&best->cond);
          ZSTD_pthread_mutex_unlock(&best->mutex);
          return;
        }
      }
      /* Save the dictionary, parameters, and size */
      memcpy(best->dict, dict, dictSize);
      best->dictSize = dictSize;
      best->parameters = parameters;
      best->compressedSize = compressedSize;
    }
    ZSTD_pthread_mutex_unlock(&best->mutex);
    if (liveJobs == 0) {
      ZSTD_pthread_cond_broadcast(&best->cond);
    }
  }
}
/**
 * Parameters for COVER_tryParameters().
 */
typedef struct COVER_tryParameters_data_s {
  const COVER_ctx_t *ctx;
  COVER_best_t *best;
  size_t dictBufferCapacity;
  ZDICT_cover_params_t parameters;
} COVER_tryParameters_data_t;
/**
 * Tries a set of parameters and updates the COVER_best_t with the results.
 * This function is thread safe if zstd is compiled with multithreaded support.
 * It takes its parameters as an *OWNING* opaque pointer to support threading.
 */
static void COVER_tryParameters(void *opaque) {
  /* Save parameters as local variables */
  COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t *)opaque;
  const COVER_ctx_t *const ctx = data->ctx;
  const ZDICT_cover_params_t parameters = data->parameters;
  size_t dictBufferCapacity = data->dictBufferCapacity;
  size_t totalCompressedSize = ERROR(GENERIC);
  /* Allocate space for hash table, dict, and freqs */
  COVER_map_t activeDmers;
  BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity);
  U32 *freqs = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
    goto _cleanup;
  }
  if (!dict || !freqs) {
    DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
    goto _cleanup;
  }
  /* Copy the frequencies because we need to modify them */
  memcpy(freqs, ctx->freqs, ctx->suffixSize * sizeof(U32));
  /* Build the dictionary */
  {
    const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict,
                                              dictBufferCapacity, parameters);
    dictBufferCapacity = ZDICT_finalizeDictionary(
        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
        ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbSamples,
        parameters.zParams);
    if (ZDICT_isError(dictBufferCapacity)) {
      DISPLAYLEVEL(1, "Failed to finalize dictionary\n");
      goto _cleanup;
    }
  }
  /* Check total compressed size */
  {
    /* Pointers */
    ZSTD_CCtx *cctx;
    ZSTD_CDict *cdict;
    void *dst;
    /* Local variables */
    size_t dstCapacity;
    size_t i;
    /* Allocate dst with enough space to compress the maximum sized sample */
    {
      size_t maxSampleSize = 0;
      for (i = 0; i < ctx->nbSamples; ++i) {
        maxSampleSize = MAX(ctx->samplesSizes[i], maxSampleSize);
      }
      dstCapacity = ZSTD_compressBound(maxSampleSize);
      dst = malloc(dstCapacity);
    }
    /* Create the cctx and cdict */
    cctx = ZSTD_createCCtx();
    cdict = ZSTD_createCDict(dict, dictBufferCapacity,
                             parameters.zParams.compressionLevel);
    if (!dst || !cctx || !cdict) {
      goto _compressCleanup;
    }
    /* Compress each sample and sum their sizes (or error) */
    totalCompressedSize = dictBufferCapacity;
    for (i = 0; i < ctx->nbSamples; ++i) {
      const size_t size = ZSTD_compress_usingCDict(
          cctx, dst, dstCapacity, ctx->samples + ctx->offsets[i],
          ctx->samplesSizes[i], cdict);
      if (ZSTD_isError(size)) {
        totalCompressedSize = ERROR(GENERIC);
        goto _compressCleanup;
      }
      totalCompressedSize += size;
    }
  _compressCleanup:
    ZSTD_freeCCtx(cctx);
    ZSTD_freeCDict(cdict);
    if (dst) {
      free(dst);
    }
  }

_cleanup:
  COVER_best_finish(data->best, totalCompressedSize, parameters, dict,
                    dictBufferCapacity);
  free(data);
  COVER_map_destroy(&activeDmers);
  if (dict) {
    free(dict);
  }
  if (freqs) {
    free(freqs);
  }
}
ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
    void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer,
    const size_t *samplesSizes, unsigned nbSamples,
    ZDICT_cover_params_t *parameters) {
  const unsigned nbThreads = parameters->nbThreads;
  const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
  const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
  const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
  const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;
  const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;
  const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
  const unsigned kIterations =
      (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
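  /* With the defaults (d, k, and steps unset): d in {6, 8}, k in [50, 2000],
   * kSteps = 40, so kStepSize = MAX((2000 - 50) / 40, 1) = 48 and
   * kIterations = (1 + (8 - 6) / 2) * (1 + (2000 - 50) / 48) = 2 * 41 = 82
   * parameter sets are tried.
   */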
  /* Local variables */
  const int displayLevel = parameters->zParams.notificationLevel;
  unsigned iteration = 1;
  unsigned d;
  unsigned k;
  COVER_best_t best;
  POOL_ctx *pool = NULL;
  /* Checks */
  if (kMinK < kMaxD || kMaxK < kMinK) {
    LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
    return ERROR(GENERIC);
  }
  if (nbSamples == 0) {
    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
    return ERROR(GENERIC);
  }
  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
                 ZDICT_DICTSIZE_MIN);
    return ERROR(dstSize_tooSmall);
  }
  if (nbThreads > 1) {
    pool = POOL_create(nbThreads, 1);
    if (!pool) {
      return ERROR(memory_allocation);
    }
  }
  /* Initialization */
  COVER_best_init(&best);
  /* Turn down global display level to clean up display at level 2 and below */
  g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
  /* Loop through d first because each new value needs a new context */
  LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n",
                    kIterations);
  for (d = kMinD; d <= kMaxD; d += 2) {
    /* Initialize the context for this value of d */
    COVER_ctx_t ctx;
    LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
    if (!COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d)) {
      LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
      COVER_best_destroy(&best);
      POOL_free(pool);
      return ERROR(GENERIC);
    }
    /* Loop through k reusing the same context */
    for (k = kMinK; k <= kMaxK; k += kStepSize) {
      /* Prepare the arguments */
      COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc(
          sizeof(COVER_tryParameters_data_t));
      LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k);
      if (!data) {
        LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n");
        COVER_best_destroy(&best);
        COVER_ctx_destroy(&ctx);
        POOL_free(pool);
        return ERROR(GENERIC);
      }
      data->ctx = &ctx;
      data->best = &best;
      data->dictBufferCapacity = dictBufferCapacity;
      data->parameters = *parameters;
      data->parameters.k = k;
      data->parameters.d = d;
      data->parameters.steps = kSteps;
      data->parameters.zParams.notificationLevel = g_displayLevel;
      /* Check the parameters */
      if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) {
        DISPLAYLEVEL(1, "Cover parameters incorrect\n");
        free(data);
        continue;
      }
      /* Call the function and pass ownership of data to it */
      COVER_best_start(&best);
      if (pool) {
        POOL_add(pool, &COVER_tryParameters, data);
      } else {
        COVER_tryParameters(data);
      }
      /* Print status */
      LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%%       ",
                         (U32)((iteration * 100) / kIterations));
      ++iteration;
    }
    COVER_best_wait(&best);
    COVER_ctx_destroy(&ctx);
  }
  LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", "");
  /* Fill the output buffer and parameters with output of the best parameters */
  {
    const size_t dictSize = best.dictSize;
    if (ZSTD_isError(best.compressedSize)) {
      const size_t compressedSize = best.compressedSize;
      COVER_best_destroy(&best);
      POOL_free(pool);
      return compressedSize;
    }
    *parameters = best.parameters;
    memcpy(dictBuffer, best.dict, dictSize);
    COVER_best_destroy(&best);
    POOL_free(pool);
    return dictSize;
  }
}
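
/*
 * Usage sketch (illustrative only; `samples`, `sampleSizes`, and `nbSamples`
 * are assumed to be provided by the caller). Leaving k, d, and steps at 0
 * makes the optimizer search d in {6, 8} and k in [50, 2000] as above:
 *
 *   ZDICT_cover_params_t params;
 *   memset(&params, 0, sizeof(params));
 *   params.nbThreads = 4;
 *   dictSize = ZDICT_optimizeTrainFromBuffer_cover(
 *       dictBuffer, dictBufferCapacity, samples, sampleSizes, nbSamples,
 *       &params);
 *   On success, params holds the best k and d that were found.
 */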