zstd_compress_internal.h
1/*
2 * Copyright (c) Meta Platforms, Inc. and affiliates.
3 * All rights reserved.
4 *
5 * This source code is licensed under both the BSD-style license (found in the
6 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
7 * in the COPYING file in the root directory of this source tree).
8 * You may select, at your option, one of the above-listed licenses.
9 */
10
11/* This header contains definitions
12 * that shall **only** be used by modules within lib/compress.
13 */
14
15#ifndef ZSTD_COMPRESS_H
16#define ZSTD_COMPRESS_H
17
18/*-*************************************
19* Dependencies
20***************************************/
21#include "../common/zstd_internal.h"
22#include "zstd_cwksp.h"
23#ifdef ZSTD_MULTITHREAD
24# include "zstdmt_compress.h"
25#endif
26#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_NbCommonBytes */
27
28#if defined (__cplusplus)
29extern "C" {
30#endif
31
32/*-*************************************
33* Constants
34***************************************/
35#define kSearchStrength 8
36#define HASH_READ_SIZE 8
37#define ZSTD_DUBT_UNSORTED_MARK 1 /* For btlazy2 strategy, index ZSTD_DUBT_UNSORTED_MARK==1 means "unsorted".
38 It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
39 It's not a big deal though : candidate will just be sorted again.
40 Additionally, candidate position 1 will be lost.
41 But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
42 The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table reuse with a different strategy.
43 This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
45
46/*-*************************************
47* Context memory management
48***************************************/
49typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
50typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage;
51
52typedef struct ZSTD_prefixDict_s {
53 const void* dict;
54 size_t dictSize;
55 ZSTD_dictContentType_e dictContentType;
56} ZSTD_prefixDict;
57
58typedef struct {
59 void* dictBuffer;
60 void const* dict;
61 size_t dictSize;
62 ZSTD_dictContentType_e dictContentType;
63 ZSTD_CDict* cdict;
64} ZSTD_localDict;
65
66typedef struct {
67 HUF_CElt CTable[HUF_CTABLE_SIZE_ST(255)];
68 HUF_repeat repeatMode;
69} ZSTD_hufCTables_t;
70
71typedef struct {
72 FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
73 FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
74 FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
75 FSE_repeat offcode_repeatMode;
76 FSE_repeat matchlength_repeatMode;
77 FSE_repeat litlength_repeatMode;
78} ZSTD_fseCTables_t;
79
80typedef struct {
81 ZSTD_hufCTables_t huf;
82 ZSTD_fseCTables_t fse;
83} ZSTD_entropyCTables_t;
84
85/***********************************************
86* Entropy buffer statistics structs and funcs *
87***********************************************/
93typedef struct {
94 symbolEncodingType_e hType;
95 BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE];
96 size_t hufDesSize;
97} ZSTD_hufCTablesMetadata_t;
98
104typedef struct {
105 symbolEncodingType_e llType;
106 symbolEncodingType_e ofType;
107 symbolEncodingType_e mlType;
108 BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE];
109 size_t fseTablesSize;
110 size_t lastCountSize; /* This is to account for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */
111} ZSTD_fseCTablesMetadata_t;
112
113typedef struct {
114 ZSTD_hufCTablesMetadata_t hufMetadata;
115 ZSTD_fseCTablesMetadata_t fseMetadata;
116} ZSTD_entropyCTablesMetadata_t;
117
121size_t ZSTD_buildBlockEntropyStats(
122 const seqStore_t* seqStorePtr,
123 const ZSTD_entropyCTables_t* prevEntropy,
124 ZSTD_entropyCTables_t* nextEntropy,
125 const ZSTD_CCtx_params* cctxParams,
126 ZSTD_entropyCTablesMetadata_t* entropyMetadata,
127 void* workspace, size_t wkspSize);
129/*********************************
130* Compression internals structs *
131*********************************/
133typedef struct {
134 U32 off; /* Offset sumtype code for the match, using ZSTD_storeSeq() format */
135 U32 len; /* Raw length of match */
136} ZSTD_match_t;
137
138typedef struct {
139 U32 offset; /* Offset of sequence */
140 U32 litLength; /* Length of literals prior to match */
141 U32 matchLength; /* Raw length of match */
142} rawSeq;
144typedef struct {
145 rawSeq* seq; /* The start of the sequences */
146 size_t pos; /* The index in seq where reading stopped. pos <= size. */
147 size_t posInSequence; /* The position within the sequence at seq[pos] where reading
148 stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */
149 size_t size; /* The number of sequences. <= capacity. */
150 size_t capacity; /* The capacity starting from `seq` pointer */
151} rawSeqStore_t;
152
153typedef struct {
154 U32 idx; /* Index in array of ZSTD_Sequence */
155 U32 posInSequence; /* Position within sequence at idx */
156 size_t posInSrc; /* Number of bytes given by sequences provided so far */
157} ZSTD_sequencePosition;
158
159UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0};
161typedef struct {
162 int price; /* price from beginning of segment to this position */
163 U32 off; /* offset of previous match */
164 U32 mlen; /* length of previous match */
165 U32 litlen; /* nb of literals since previous match */
166 U32 rep[ZSTD_REP_NUM]; /* offset history after previous match */
167} ZSTD_optimal_t;
168
169typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e;
170
171#define ZSTD_OPT_SIZE (ZSTD_OPT_NUM+3)
172typedef struct {
173 /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */
174 unsigned* litFreq; /* table of literals statistics, of size 256 */
175 unsigned* litLengthFreq; /* table of litLength statistics, of size (MaxLL+1) */
176 unsigned* matchLengthFreq; /* table of matchLength statistics, of size (MaxML+1) */
177 unsigned* offCodeFreq; /* table of offCode statistics, of size (MaxOff+1) */
178 ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_SIZE */
179 ZSTD_optimal_t* priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_SIZE */
181 U32 litSum; /* nb of literals */
182 U32 litLengthSum; /* nb of litLength codes */
183 U32 matchLengthSum; /* nb of matchLength codes */
184 U32 offCodeSum; /* nb of offset codes */
185 U32 litSumBasePrice; /* to compare to log2(litfreq) */
186 U32 litLengthSumBasePrice; /* to compare to log2(llfreq) */
187 U32 matchLengthSumBasePrice;/* to compare to log2(mlfreq) */
188 U32 offCodeSumBasePrice; /* to compare to log2(offreq) */
189 ZSTD_OptPrice_e priceType; /* prices can be determined dynamically, or follow a pre-defined cost structure */
190 const ZSTD_entropyCTables_t* symbolCosts; /* pre-calculated dictionary statistics */
191 ZSTD_paramSwitch_e literalCompressionMode;
192} optState_t;
194typedef struct {
195 ZSTD_entropyCTables_t entropy;
196 U32 rep[ZSTD_REP_NUM];
197} ZSTD_compressedBlockState_t;
198
199typedef struct {
200 BYTE const* nextSrc; /* next block here to continue on current prefix */
201 BYTE const* base; /* All regular indexes relative to this position */
202 BYTE const* dictBase; /* extDict indexes relative to this position */
203 U32 dictLimit; /* below that point, need extDict */
204 U32 lowLimit; /* below that point, no more valid data */
205 U32 nbOverflowCorrections; /* Number of times overflow correction has run since
206 * ZSTD_window_init(). Useful for debugging coredumps
207 * and for ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY.
208 */
209} ZSTD_window_t;
210
211#define ZSTD_WINDOW_START_INDEX 2
212
213typedef struct ZSTD_matchState_t ZSTD_matchState_t;
214
215#define ZSTD_ROW_HASH_CACHE_SIZE 8 /* Size of prefetching hash cache for row-based matchfinder */
216
217struct ZSTD_matchState_t {
218 ZSTD_window_t window; /* State for window round buffer management */
219 U32 loadedDictEnd; /* index of end of dictionary, within context's referential.
220 * When loadedDictEnd != 0, a dictionary is in use, and still valid.
221 * This relies on a mechanism to set loadedDictEnd=0 when dictionary is no longer within distance.
222 * Such mechanism is provided within ZSTD_window_enforceMaxDist() and ZSTD_checkDictValidity().
223 * When dict referential is copied into active context (i.e. not attached),
224 * loadedDictEnd == dictSize, since referential starts from zero.
225 */
226 U32 nextToUpdate; /* index from which to continue table update */
227 U32 hashLog3; /* dispatch table for matches of len==3 : larger == faster, more memory */
228
229 U32 rowHashLog; /* For row-based matchfinder: Hashlog based on nb of rows in the hashTable.*/
230 BYTE* tagTable; /* For row-based matchFinder: A row-based table containing the hashes and head index. */
231 U32 hashCache[ZSTD_ROW_HASH_CACHE_SIZE]; /* For row-based matchFinder: a cache of hashes to improve speed */
232 U64 hashSalt; /* For row-based matchFinder: salts the hash for reuse of tag table */
233 U32 hashSaltEntropy; /* For row-based matchFinder: collects entropy for salt generation */
234
235 U32* hashTable;
236 U32* hashTable3;
237 U32* chainTable;
238
239 U32 forceNonContiguous; /* Non-zero if we should force non-contiguous load for the next window update. */
241 int dedicatedDictSearch; /* Indicates whether this matchState is using the
242 * dedicated dictionary search structure.
243 */
244 optState_t opt; /* optimal parser state */
245 const ZSTD_matchState_t* dictMatchState;
246 ZSTD_compressionParameters cParams;
247 const rawSeqStore_t* ldmSeqStore;
248
249 /* Controls prefetching in some dictMatchState matchfinders.
250 * This behavior is controlled from the cctx ms.
251 * This parameter has no effect in the cdict ms. */
252 ZSTD_paramSwitch_e prefetchCDictTables;
253
254 /* When == 0, lazy match finders insert every position.
255 * When != 0, lazy match finders only insert positions they search.
256 * This allows them to skip much faster over incompressible data,
257 * at a small cost to compression ratio.
258 */
259 int lazySkipping;
260};
261
262typedef struct {
263 ZSTD_compressedBlockState_t* prevCBlock;
264 ZSTD_compressedBlockState_t* nextCBlock;
265 ZSTD_matchState_t matchState;
266} ZSTD_blockState_t;
267
268typedef struct {
269 U32 offset;
270 U32 checksum;
271} ldmEntry_t;
272
273typedef struct {
274 BYTE const* split;
275 U32 hash;
276 U32 checksum;
277 ldmEntry_t* bucket;
278} ldmMatchCandidate_t;
279
280#define LDM_BATCH_SIZE 64
282typedef struct {
283 ZSTD_window_t window; /* State for the window round buffer management */
284 ldmEntry_t* hashTable;
285 U32 loadedDictEnd;
286 BYTE* bucketOffsets; /* Next position in bucket to insert entry */
287 size_t splitIndices[LDM_BATCH_SIZE];
288 ldmMatchCandidate_t matchCandidates[LDM_BATCH_SIZE];
289} ldmState_t;
290
291typedef struct {
292 ZSTD_paramSwitch_e enableLdm; /* ZSTD_ps_enable to enable LDM. ZSTD_ps_auto by default */
293 U32 hashLog; /* Log size of hashTable */
294 U32 bucketSizeLog; /* Log bucket size for collision resolution, at most 8 */
295 U32 minMatchLength; /* Minimum match length */
296 U32 hashRateLog; /* Log number of entries to skip */
297 U32 windowLog; /* Window log for the LDM */
298} ldmParams_t;
299
300typedef struct {
301 int collectSequences;
302 ZSTD_Sequence* seqStart;
303 size_t seqIndex;
304 size_t maxSequences;
305} SeqCollector;
306
307struct ZSTD_CCtx_params_s {
308 ZSTD_format_e format;
309 ZSTD_compressionParameters cParams;
310 ZSTD_frameParameters fParams;
311
312 int compressionLevel;
313 int forceWindow; /* force back-references to respect limit of
314 * 1<<wLog, even for dictionary */
315 size_t targetCBlockSize; /* Tries to fit compressed block size to be around targetCBlockSize.
316 * No target when targetCBlockSize == 0.
317 * There is no guarantee on compressed block size */
318 int srcSizeHint; /* User's best guess of source size.
319 * Hint is not valid when srcSizeHint == 0.
320 * There is no guarantee that hint is close to actual source size */
322 ZSTD_dictAttachPref_e attachDictPref;
323 ZSTD_paramSwitch_e literalCompressionMode;
324
325 /* Multithreading: used to pass parameters to mtctx */
326 int nbWorkers;
327 size_t jobSize;
328 int overlapLog;
330
331 /* Long distance matching parameters */
332 ldmParams_t ldmParams;
333
334 /* Dedicated dict search algorithm trigger */
335 int enableDedicatedDictSearch;
336
337 /* Input/output buffer modes */
338 ZSTD_bufferMode_e inBufferMode;
339 ZSTD_bufferMode_e outBufferMode;
340
341 /* Sequence compression API */
342 ZSTD_sequenceFormat_e blockDelimiters;
343 int validateSequences;
344
345 /* Block splitting */
346 ZSTD_paramSwitch_e useBlockSplitter;
347
348 /* Param for deciding whether to use row-based matchfinder */
349 ZSTD_paramSwitch_e useRowMatchFinder;
350
351 /* Always load a dictionary in ext-dict mode (not prefix mode)? */
352 int deterministicRefPrefix;
353
354 /* Internal use, for createCCtxParams() and freeCCtxParams() only */
355 ZSTD_customMem customMem;
357 /* Controls prefetching in some dictMatchState matchfinders */
358 ZSTD_paramSwitch_e prefetchCDictTables;
359
360 /* Controls whether zstd will fall back to an internal matchfinder
361 * if the external matchfinder returns an error code. */
362 int enableMatchFinderFallback;
363
364 /* Parameters for the external sequence producer API.
365 * Users set these parameters through ZSTD_registerSequenceProducer().
366 * It is not possible to set these parameters individually through the public API. */
367 void* extSeqProdState;
368 ZSTD_sequenceProducer_F extSeqProdFunc;
369
370 /* Adjust the max block size */
371 size_t maxBlockSize;
372
373 /* Controls repcode search in external sequence parsing */
374 ZSTD_paramSwitch_e searchForExternalRepcodes;
375}; /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
376
377#define COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2))
378#define ENTROPY_WORKSPACE_SIZE (HUF_WORKSPACE_SIZE + COMPRESS_SEQUENCES_WORKSPACE_SIZE)
385typedef enum {
386 ZSTDb_not_buffered,
387 ZSTDb_buffered
388} ZSTD_buffered_policy_e;
389
394#define ZSTD_MAX_NB_BLOCK_SPLITS 196
395typedef struct {
396 seqStore_t fullSeqStoreChunk;
397 seqStore_t firstHalfSeqStore;
398 seqStore_t secondHalfSeqStore;
399 seqStore_t currSeqStore;
400 seqStore_t nextSeqStore;
401
402 U32 partitions[ZSTD_MAX_NB_BLOCK_SPLITS];
403 ZSTD_entropyCTablesMetadata_t entropyMetadata;
404} ZSTD_blockSplitCtx;
405
406struct ZSTD_CCtx_s {
407 ZSTD_compressionStage_e stage;
408 int cParamsChanged; /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
409 int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
410 ZSTD_CCtx_params requestedParams;
411 ZSTD_CCtx_params appliedParams;
412 ZSTD_CCtx_params simpleApiParams; /* Param storage used by the simple API - not sticky. Must only be used in top-level simple API functions for storage. */
413 U32 dictID;
414 size_t dictContentSize;
415
416 ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */
417 size_t blockSize;
418 unsigned long long pledgedSrcSizePlusOne; /* this way, 0 (default) == unknown */
419 unsigned long long consumedSrcSize;
420 unsigned long long producedCSize;
421 XXH64_state_t xxhState;
422 ZSTD_customMem customMem;
423 ZSTD_threadPool* pool;
424 size_t staticSize;
425 SeqCollector seqCollector;
426 int isFirstBlock;
427 int initialized;
428
429 seqStore_t seqStore; /* sequences storage ptrs */
430 ldmState_t ldmState; /* long distance matching state */
431 rawSeq* ldmSequences; /* Storage for the ldm output sequences */
432 size_t maxNbLdmSequences;
433 rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
434 ZSTD_blockState_t blockState;
435 U32* entropyWorkspace; /* entropy workspace of ENTROPY_WORKSPACE_SIZE bytes */
437 /* Whether we are streaming or not */
438 ZSTD_buffered_policy_e bufferedPolicy;
439
440 /* streaming */
441 char* inBuff;
442 size_t inBuffSize;
443 size_t inToCompress;
444 size_t inBuffPos;
445 size_t inBuffTarget;
446 char* outBuff;
447 size_t outBuffSize;
448 size_t outBuffContentSize;
449 size_t outBuffFlushedSize;
450 ZSTD_cStreamStage streamStage;
451 U32 frameEnded;
452
453 /* Stable in/out buffer verification */
454 ZSTD_inBuffer expectedInBuffer;
455 size_t stableIn_notConsumed; /* nb bytes within stable input buffer that are said to be consumed but are not */
456 size_t expectedOutBufferSize;
457
458 /* Dictionary */
459 ZSTD_localDict localDict;
460 const ZSTD_CDict* cdict;
461 ZSTD_prefixDict prefixDict; /* single-usage dictionary */
462
463 /* Multi-threading */
464#ifdef ZSTD_MULTITHREAD
465 ZSTDMT_CCtx* mtctx;
466#endif
467
468 /* Tracing */
469#if ZSTD_TRACE
470 ZSTD_TraceCtx traceCtx;
471#endif
473 /* Workspace for block splitter */
474 ZSTD_blockSplitCtx blockSplitCtx;
475
476 /* Buffer for output from external sequence producer */
477 ZSTD_Sequence* extSeqBuf;
478 size_t extSeqBufCapacity;
479};
480
481typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
482typedef enum { ZSTD_tfp_forCCtx, ZSTD_tfp_forCDict } ZSTD_tableFillPurpose_e;
483
484typedef enum {
485 ZSTD_noDict = 0,
486 ZSTD_extDict = 1,
487 ZSTD_dictMatchState = 2,
488 ZSTD_dedicatedDictSearch = 3
489} ZSTD_dictMode_e;
490
491typedef enum {
492 ZSTD_cpm_noAttachDict = 0, /* Compression with ZSTD_noDict or ZSTD_extDict.
493 * In this mode we use both the srcSize and the dictSize
494 * when selecting and adjusting parameters.
495 */
496 ZSTD_cpm_attachDict = 1, /* Compression with ZSTD_dictMatchState or ZSTD_dedicatedDictSearch.
497 * In this mode we only take the srcSize into account when selecting
498 * and adjusting parameters.
499 */
500 ZSTD_cpm_createCDict = 2, /* Creating a CDict.
501 * In this mode we take both the source size and the dictionary size
502 * into account when selecting and adjusting the parameters.
503 */
504 ZSTD_cpm_unknown = 3 /* ZSTD_getCParams, ZSTD_getParams, ZSTD_adjustParams.
505 * We don't know what these parameters are for. We default to the legacy
506 * behavior of taking both the source size and the dict size into account
507 * when selecting and adjusting parameters.
508 */
509} ZSTD_cParamMode_e;
510
511typedef size_t (*ZSTD_blockCompressor) (
512 ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
513 void const* src, size_t srcSize);
514ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e rowMatchfinderMode, ZSTD_dictMode_e dictMode);
515
516
517MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
518{
519 static const BYTE LL_Code[64] = { 0, 1, 2, 3, 4, 5, 6, 7,
520 8, 9, 10, 11, 12, 13, 14, 15,
521 16, 16, 17, 17, 18, 18, 19, 19,
522 20, 20, 20, 20, 21, 21, 21, 21,
523 22, 22, 22, 22, 22, 22, 22, 22,
524 23, 23, 23, 23, 23, 23, 23, 23,
525 24, 24, 24, 24, 24, 24, 24, 24,
526 24, 24, 24, 24, 24, 24, 24, 24 };
527 static const U32 LL_deltaCode = 19;
528 return (litLength > 63) ? ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
529}
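As a worked illustration of the mapping above, here is a standalone sketch, independent of zstd's headers (`highbit32` is a hypothetical stand-in for ZSTD_highbit32): lengths 0-15 map one-to-one, longer lengths share codes, and lengths above 63 fall back to the logarithmic formula.

/* Standalone sketch of the LL-code mapping above; not zstd code. */
#include <stdio.h>

static unsigned highbit32(unsigned v) /* position of highest set bit, v != 0 */
{
    unsigned r = 0;
    while (v >>= 1) r++;
    return r;
}

int main(void)
{
    static const unsigned char LL_Code[64] = { 0, 1, 2, 3, 4, 5, 6, 7,
            8, 9, 10, 11, 12, 13, 14, 15, 16, 16, 17, 17, 18, 18, 19, 19,
            20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22,
            23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24,
            24, 24, 24, 24, 24, 24, 24, 24 };
    unsigned const samples[] = { 0, 15, 18, 63, 64, 1000 };
    size_t i;
    for (i = 0; i < sizeof(samples)/sizeof(samples[0]); i++) {
        unsigned const ll = samples[i];
        unsigned const code = (ll > 63) ? highbit32(ll) + 19 : LL_Code[ll];
        /* e.g. 18 -> 17 (shared code); 1000 -> highbit32(1000)+19 = 9+19 = 28 */
        printf("litLength %4u -> code %2u\n", ll, code);
    }
    return 0;
}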
530
531/* ZSTD_MLcode() :
532 * note : mlBase = matchLength - MINMATCH;
533 * because it's the format it's stored in seqStore->sequences */
534MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
535{
536 static const BYTE ML_Code[128] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
537 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
538 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
539 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
540 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
541 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
542 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
543 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
544 static const U32 ML_deltaCode = 36;
545 return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
546}
547
548/* ZSTD_cParam_withinBounds:
549 * @return 1 if value is within cParam bounds,
550 * 0 otherwise */
551MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
552{
553 ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
554 if (ZSTD_isError(bounds.error)) return 0;
555 if (value < bounds.lowerBound) return 0;
556 if (value > bounds.upperBound) return 0;
557 return 1;
558}
559
560/* ZSTD_noCompressBlock() :
561 * Writes uncompressed block to dst buffer from given src.
562 * Returns the size of the block */
563MEM_STATIC size_t
564ZSTD_noCompressBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
565{
566 U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
567 DEBUGLOG(5, "ZSTD_noCompressBlock (srcSize=%zu, dstCapacity=%zu)", srcSize, dstCapacity);
568 RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
569 dstSize_tooSmall, "dst buf too small for uncompressed block");
570 MEM_writeLE24(dst, cBlockHeader24);
571 ZSTD_memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
572 return ZSTD_blockHeaderSize + srcSize;
573}
574
575MEM_STATIC size_t
576ZSTD_rleCompressBlock(void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock)
577{
578 BYTE* const op = (BYTE*)dst;
579 U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3);
580 RETURN_ERROR_IF(dstCapacity < 4, dstSize_tooSmall, "");
581 MEM_writeLE24(op, cBlockHeader);
582 op[3] = src;
583 return 4;
584}
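Both helpers above assemble the same 3-byte little-endian block header: bit 0 is lastBlock, bits 1-2 the block type (bt_raw, bt_rle), and the remaining bits the uncompressed size. A minimal standalone sketch of that layout (constants inlined, names illustrative):

/* Standalone sketch of zstd's 3-byte block header, as written by
 * ZSTD_noCompressBlock() / ZSTD_rleCompressBlock() above. */
#include <stdio.h>

int main(void)
{
    unsigned const lastBlock = 1;
    unsigned const blockType = 0;                 /* bt_raw == 0, bt_rle == 1 */
    unsigned const srcSize   = 1000;
    unsigned const header24  = lastBlock + (blockType << 1) + (srcSize << 3);
    unsigned char le[3];                          /* little-endian, as MEM_writeLE24 */
    le[0] = (unsigned char) header24;
    le[1] = (unsigned char)(header24 >> 8);
    le[2] = (unsigned char)(header24 >> 16);
    printf("header 0x%06X -> bytes %02X %02X %02X\n", header24, le[0], le[1], le[2]);
    printf("decoded: last=%u type=%u size=%u\n",
           header24 & 1, (header24 >> 1) & 3, header24 >> 3);
    return 0;
}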
586
587/* ZSTD_minGain() :
588 * minimum compression required
589 * to generate a compress block or a compressed literals section.
590 * note : use same formula for both situations */
591MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
592{
593 U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
594 ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
595 assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, (int)strat));
596 return (srcSize >> minlog) + 2;
597}
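As a worked example of the formula above: for a 4 KB input at a strategy below ZSTD_btultra, minlog is 6, so a compressed block must save at least (4096 >> 6) + 2 = 66 bytes over the raw size to be emitted; for ZSTD_btultra2 (strategy 9), minlog rises to 8 and the threshold drops to (4096 >> 8) + 2 = 18 bytes.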
598
599MEM_STATIC int ZSTD_literalsCompressionIsDisabled(const ZSTD_CCtx_params* cctxParams)
600{
601 switch (cctxParams->literalCompressionMode) {
602 case ZSTD_ps_enable:
603 return 0;
604 case ZSTD_ps_disable:
605 return 1;
606 default:
607 assert(0 /* impossible: pre-validated */);
608 ZSTD_FALLTHROUGH;
609 case ZSTD_ps_auto:
610 return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
611 }
612}
613
614/*! ZSTD_safecopyLiterals() :
615 * memcpy() variant that won't read beyond more than WILDCOPY_OVERLENGTH bytes past ilimit_w.
616 * Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single
617 * large copies.
618 */
619static void
620ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w)
621{
622 assert(iend > ilimit_w);
623 if (ip <= ilimit_w) {
624 ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap);
625 op += ilimit_w - ip;
626 ip = ilimit_w;
627 }
628 while (ip < iend) *op++ = *ip++;
629}
632#define REPCODE1_TO_OFFBASE REPCODE_TO_OFFBASE(1)
633#define REPCODE2_TO_OFFBASE REPCODE_TO_OFFBASE(2)
634#define REPCODE3_TO_OFFBASE REPCODE_TO_OFFBASE(3)
635#define REPCODE_TO_OFFBASE(r) (assert((r)>=1), assert((r)<=ZSTD_REP_NUM), (r)) /* accepts IDs 1,2,3 */
636#define OFFSET_TO_OFFBASE(o) (assert((o)>0), o + ZSTD_REP_NUM)
637#define OFFBASE_IS_OFFSET(o) ((o) > ZSTD_REP_NUM)
638#define OFFBASE_IS_REPCODE(o) ( 1 <= (o) && (o) <= ZSTD_REP_NUM)
639#define OFFBASE_TO_OFFSET(o) (assert(OFFBASE_IS_OFFSET(o)), (o) - ZSTD_REP_NUM)
640#define OFFBASE_TO_REPCODE(o) (assert(OFFBASE_IS_REPCODE(o)), (o)) /* returns ID 1,2,3 */
641
642/*! ZSTD_storeSeq() :
643 * Store a sequence (litlen, litPtr, offBase and matchLength) into seqStore_t.
644 * @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE().
645 * @matchLength : must be >= MINMATCH
646 * Allowed to over-read literals up to litLimit.
647*/
648HINT_INLINE UNUSED_ATTR void
649ZSTD_storeSeq(seqStore_t* seqStorePtr,
650 size_t litLength, const BYTE* literals, const BYTE* litLimit,
651 U32 offBase,
652 size_t matchLength)
653{
654 BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
655 BYTE const* const litEnd = literals + litLength;
656#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6)
657 static const BYTE* g_start = NULL;
658 if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */
659 { U32 const pos = (U32)((const BYTE*)literals - g_start);
660 DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offBase%7u",
661 pos, (U32)litLength, (U32)matchLength, (U32)offBase);
662 }
663#endif
664 assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
665 /* copy Literals */
666 assert(seqStorePtr->maxNbLit <= 128 KB);
667 assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit);
668 assert(literals + litLength <= litLimit);
669 if (litEnd <= litLimit_w) {
670 /* Common case we can use wildcopy.
671 * First copy 16 bytes, because literals are likely short.
672 */
673 assert(WILDCOPY_OVERLENGTH >= 16);
674 ZSTD_copy16(seqStorePtr->lit, literals);
675 if (litLength > 16) {
676 ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap);
677 }
678 } else {
679 ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w);
680 }
681 seqStorePtr->lit += litLength;
682
683 /* literal Length */
684 if (litLength>0xFFFF) {
685 assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
686 seqStorePtr->longLengthType = ZSTD_llt_literalLength;
687 seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
688 }
689 seqStorePtr->sequences[0].litLength = (U16)litLength;
690
691 /* match offset */
692 seqStorePtr->sequences[0].offBase = offBase;
693
694 /* match Length */
695 assert(matchLength >= MINMATCH);
696 { size_t const mlBase = matchLength - MINMATCH;
697 if (mlBase>0xFFFF) {
698 assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
699 seqStorePtr->longLengthType = ZSTD_llt_matchLength;
700 seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
701 }
702 seqStorePtr->sequences[0].mlBase = (U16)mlBase;
703 }
704
705 seqStorePtr->sequences++;
706}
708/* ZSTD_updateRep() :
709 * updates in-place @rep (array of repeat offsets)
710 * @offBase : sum-type, using numeric representation of ZSTD_storeSeq()
711 */
712MEM_STATIC void
713ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0)
714{
715 if (OFFBASE_IS_OFFSET(offBase)) { /* full offset */
716 rep[2] = rep[1];
717 rep[1] = rep[0];
718 rep[0] = OFFBASE_TO_OFFSET(offBase);
719 } else { /* repcode */
720 U32 const repCode = OFFBASE_TO_REPCODE(offBase) - 1 + ll0;
721 if (repCode > 0) { /* note : if repCode==0, no change */
722 U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
723 rep[2] = (repCode >= 2) ? rep[1] : rep[2];
724 rep[1] = rep[0];
725 rep[0] = currentOffset;
726 } else { /* repCode == 0 */
727 /* nothing to do */
728 }
729 }
730}
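To make the offBase sum-type and the update rules concrete, here is a standalone sketch mirroring ZSTD_updateRep() with ZSTD_REP_NUM == 3 (plain C, no zstd headers; offBase <= 3 denotes a repcode, otherwise offBase - 3 is a full offset):

/* Standalone sketch of the repcode-history update above; not zstd code. */
#include <stdio.h>

#define REP_NUM 3

static void updateRep(unsigned rep[REP_NUM], unsigned offBase, unsigned ll0)
{
    if (offBase > REP_NUM) {                  /* full offset */
        rep[2] = rep[1];
        rep[1] = rep[0];
        rep[0] = offBase - REP_NUM;
    } else {                                  /* repcode 1, 2 or 3 */
        unsigned const repCode = offBase - 1 + ll0;
        if (repCode > 0) {
            unsigned const cur = (repCode == REP_NUM) ? rep[0] - 1 : rep[repCode];
            rep[2] = (repCode >= 2) ? rep[1] : rep[2];
            rep[1] = rep[0];
            rep[0] = cur;
        } /* repCode == 0 : most recent offset re-used, nothing to do */
    }
}

int main(void)
{
    unsigned rep[REP_NUM] = { 4, 10, 16 };        /* arbitrary starting history */
    updateRep(rep, 3 + 100, 0);                   /* full offset 100 */
    printf("%u %u %u\n", rep[0], rep[1], rep[2]); /* 100 4 10 */
    updateRep(rep, 2, 0);                         /* repcode 2: promote rep[1] */
    printf("%u %u %u\n", rep[0], rep[1], rep[2]); /* 4 100 10 */
    return 0;
}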
732typedef struct repcodes_s {
733 U32 rep[3];
734} repcodes_t;
735
736MEM_STATIC repcodes_t
737ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0)
738{
739 repcodes_t newReps;
740 ZSTD_memcpy(&newReps, rep, sizeof(newReps));
741 ZSTD_updateRep(newReps.rep, offBase, ll0);
742 return newReps;
743}
744
745
746/*-*************************************
747* Match length counter
748***************************************/
749MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
750{
751 const BYTE* const pStart = pIn;
752 const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);
753
754 if (pIn < pInLoopLimit) {
755 { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
756 if (diff) return ZSTD_NbCommonBytes(diff); }
757 pIn+=sizeof(size_t); pMatch+=sizeof(size_t);
758 while (pIn < pInLoopLimit) {
759 size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
760 if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
761 pIn += ZSTD_NbCommonBytes(diff);
762 return (size_t)(pIn - pStart);
763 } }
764 if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
765 if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
766 if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
767 return (size_t)(pIn - pStart);
768}
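For illustration, a simplified standalone sketch of what ZSTD_count() computes (a plain byte-wise common-prefix count; the version above additionally compares word-sized chunks and applies ZSTD_NbCommonBytes to the XOR of mismatching words):

/* Standalone sketch of common-prefix counting; not zstd code. */
#include <stdio.h>
#include <stddef.h>

static size_t countCommon(const unsigned char* a, const unsigned char* b,
                          const unsigned char* const aLimit)
{
    const unsigned char* const start = a;
    while (a < aLimit && *a == *b) { a++; b++; }
    return (size_t)(a - start);
}

int main(void)
{
    const unsigned char in[]    = "abcdefgh_XYZ";
    const unsigned char match[] = "abcdefgh_ABC";
    printf("match length = %zu\n",
           countCommon(in, match, in + sizeof(in) - 1)); /* 9 */
    return 0;
}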
769
770/** ZSTD_count_2segments() :
771 * can count match length with `ip` & `match` in 2 different segments.
772 * convention : on reaching mEnd, match count continue starting from iStart
773 */
774MEM_STATIC size_t
775ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
776 const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
777{
778 const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
779 size_t const matchLength = ZSTD_count(ip, match, vEnd);
780 if (match + matchLength != mEnd) return matchLength;
781 DEBUGLOG(7, "ZSTD_count_2segments: found a 2-parts match (current length==%zu)", matchLength);
782 DEBUGLOG(7, "distance from match beginning to end dictionary = %zi", mEnd - match);
783 DEBUGLOG(7, "distance from current pos to end buffer = %zi", iEnd - ip);
784 DEBUGLOG(7, "next byte : ip==%02X, istart==%02X", ip[matchLength], *iStart);
785 DEBUGLOG(7, "final match length = %zu", matchLength + ZSTD_count(ip+matchLength, iStart, iEnd));
786 return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
787}
788
790/*-*************************************
791 * Hashes
792 ***************************************/
793static const U32 prime3bytes = 506832829U;
794static U32 ZSTD_hash3(U32 u, U32 h, U32 s) { assert(h <= 32); return (((u << (32-24)) * prime3bytes) ^ s) >> (32-h) ; }
795MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h, 0); } /* only in zstd_opt.h */
796MEM_STATIC size_t ZSTD_hash3PtrS(const void* ptr, U32 h, U32 s) { return ZSTD_hash3(MEM_readLE32(ptr), h, s); }
797
798static const U32 prime4bytes = 2654435761U;
799static U32 ZSTD_hash4(U32 u, U32 h, U32 s) { assert(h <= 32); return ((u * prime4bytes) ^ s) >> (32-h) ; }
800static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_readLE32(ptr), h, 0); }
801static size_t ZSTD_hash4PtrS(const void* ptr, U32 h, U32 s) { return ZSTD_hash4(MEM_readLE32(ptr), h, s); }
802
803static const U64 prime5bytes = 889523592379ULL;
804static size_t ZSTD_hash5(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-40)) * prime5bytes) ^ s) >> (64-h)) ; }
805static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h, 0); }
806static size_t ZSTD_hash5PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash5(MEM_readLE64(p), h, s); }
807
808static const U64 prime6bytes = 227718039650203ULL;
809static size_t ZSTD_hash6(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-48)) * prime6bytes) ^ s) >> (64-h)) ; }
810static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h, 0); }
811static size_t ZSTD_hash6PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash6(MEM_readLE64(p), h, s); }
812
813static const U64 prime7bytes = 58295818150454627ULL;
814static size_t ZSTD_hash7(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-56)) * prime7bytes) ^ s) >> (64-h)) ; }
815static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h, 0); }
816static size_t ZSTD_hash7PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash7(MEM_readLE64(p), h, s); }
817
818static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
819static size_t ZSTD_hash8(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u) * prime8bytes) ^ s) >> (64-h)) ; }
820static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h, 0); }
821static size_t ZSTD_hash8PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash8(MEM_readLE64(p), h, s); }
822
823
824MEM_STATIC FORCE_INLINE_ATTR
825size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
826{
827 /* Although some of these hashes do support hBits up to 64, some do not.
828 * To be on the safe side, always avoid hBits > 32. */
829 assert(hBits <= 32);
830
831 switch(mls)
832 {
833 default:
834 case 4: return ZSTD_hash4Ptr(p, hBits);
835 case 5: return ZSTD_hash5Ptr(p, hBits);
836 case 6: return ZSTD_hash6Ptr(p, hBits);
837 case 7: return ZSTD_hash7Ptr(p, hBits);
838 case 8: return ZSTD_hash8Ptr(p, hBits);
839 }
840}
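A standalone sketch of how such a hash is typically consumed: multiply the input bytes by a prime and keep the top hBits as an index into a (1 << hBits)-slot table. The constant mirrors prime4bytes above; the table and stored value are illustrative:

/* Standalone sketch of multiplicative hashing into a table, in the style of
 * ZSTD_hash4Ptr() above; not zstd code. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static uint32_t hash4(uint32_t u, uint32_t hBits)
{
    static const uint32_t prime4bytes = 2654435761U;
    return (u * prime4bytes) >> (32 - hBits);
}

int main(void)
{
    enum { hBits = 10 };
    uint32_t table[1 << hBits] = { 0 };          /* 1024-slot hash table */
    const char* src = "hello world";
    uint32_t v;
    memcpy(&v, src, 4);                          /* read 4 bytes at ip */
    { uint32_t const h = hash4(v, hBits);
      table[h] = 42;                             /* e.g. store the current position */
      printf("bucket %u -> %u\n", h, table[h]); }
    return 0;
}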
841
842MEM_STATIC FORCE_INLINE_ATTR
843size_t ZSTD_hashPtrSalted(const void* p, U32 hBits, U32 mls, const U64 hashSalt) {
844 /* Although some of these hashes do support hBits up to 64, some do not.
845 * To be on the safe side, always avoid hBits > 32. */
846 assert(hBits <= 32);
847
848 switch(mls)
849 {
850 default:
851 case 4: return ZSTD_hash4PtrS(p, hBits, (U32)hashSalt);
852 case 5: return ZSTD_hash5PtrS(p, hBits, hashSalt);
853 case 6: return ZSTD_hash6PtrS(p, hBits, hashSalt);
854 case 7: return ZSTD_hash7PtrS(p, hBits, hashSalt);
855 case 8: return ZSTD_hash8PtrS(p, hBits, hashSalt);
856 }
857}
858
859
860/** ZSTD_ipow() :
861 * Return base^exponent.
862 */
863static U64 ZSTD_ipow(U64 base, U64 exponent)
864{
865 U64 power = 1;
866 while (exponent) {
867 if (exponent & 1) power *= base;
868 exponent >>= 1;
869 base *= base;
870 }
871 return power;
872}
873
874#define ZSTD_ROLL_HASH_CHAR_OFFSET 10
875
876/** ZSTD_rollingHash_append() :
877 * Add the buffer to the hash value.
878 */
879static U64 ZSTD_rollingHash_append(U64 hash, void const* buf, size_t size)
880{
881 BYTE const* istart = (BYTE const*)buf;
882 size_t pos;
883 for (pos = 0; pos < size; ++pos) {
884 hash *= prime8bytes;
885 hash += istart[pos] + ZSTD_ROLL_HASH_CHAR_OFFSET;
886 }
887 return hash;
888}
889
890/** ZSTD_rollingHash_compute() :
891 * Compute the rolling hash value of the buffer.
892 */
893MEM_STATIC U64 ZSTD_rollingHash_compute(void const* buf, size_t size)
894{
895 return ZSTD_rollingHash_append(0, buf, size);
896}
897
898/** ZSTD_rollingHash_primePower() :
899 * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash
900 * over a window of length bytes.
901 */
902MEM_STATIC U64 ZSTD_rollingHash_primePower(U32 length)
903{
904 return ZSTD_ipow(prime8bytes, length - 1);
905}
906
907/** ZSTD_rollingHash_rotate() :
908 * Rotate the rolling hash by one byte.
909 */
910MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower)
911{
912 hash -= (toRemove + ZSTD_ROLL_HASH_CHAR_OFFSET) * primePower;
913 hash *= prime8bytes;
914 hash += toAdd + ZSTD_ROLL_HASH_CHAR_OFFSET;
915 return hash;
916}
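A standalone sketch checking the rolling-hash invariant implied by the three helpers above: rotating the hash of bytes [0, w) by removing buf[0] (weighted by primePower(w)) and adding buf[w] equals the hash computed directly over bytes [1, w+1). Constants mirror prime8bytes and ZSTD_ROLL_HASH_CHAR_OFFSET:

/* Standalone check of the rolling-hash rotation; not zstd code. */
#include <stdio.h>
#include <stdint.h>

#define PRIME8 0xCF1BBCDCB7A56463ULL
#define CHAR_OFFSET 10

static uint64_t append(uint64_t h, const unsigned char* p, size_t n)
{
    size_t i;
    for (i = 0; i < n; i++) h = h * PRIME8 + p[i] + CHAR_OFFSET;
    return h;
}
static uint64_t ipow(uint64_t base, uint64_t e)  /* square-and-multiply */
{
    uint64_t r = 1;
    while (e) { if (e & 1) r *= base; e >>= 1; base *= base; }
    return r;
}
int main(void)
{
    const unsigned char buf[] = "rolling hash demo";
    size_t const w = 8;                          /* window length */
    uint64_t const pp = ipow(PRIME8, w - 1);     /* primePower for length w */
    uint64_t h = append(0, buf, w);              /* hash of buf[0..w) */
    h -= (buf[0] + CHAR_OFFSET) * pp;            /* remove oldest byte */
    h = h * PRIME8 + buf[w] + CHAR_OFFSET;       /* add next byte */
    printf("%d\n", h == append(0, buf + 1, w));  /* prints 1 */
    return 0;
}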
917
918/*-*************************************
919* Round buffer management
920***************************************/
921#if (ZSTD_WINDOWLOG_MAX_64 > 31)
922# error "ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX"
923#endif
924/* Max current allowed */
925#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX))
926/* Maximum chunk size before overflow correction needs to be called again */
927#define ZSTD_CHUNKSIZE_MAX \
928 ( ((U32)-1) /* Maximum ending current index */ \
929 - ZSTD_CURRENT_MAX) /* Maximum beginning lowLimit */
930
931/**
932 * ZSTD_window_clear():
933 * Clears the window containing the history by simply setting it to empty.
934 */
935MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)
936{
937 size_t const endT = (size_t)(window->nextSrc - window->base);
938 U32 const end = (U32)endT;
939
940 window->lowLimit = end;
941 window->dictLimit = end;
942}
943
944MEM_STATIC U32 ZSTD_window_isEmpty(ZSTD_window_t const window)
945{
946 return window.dictLimit == ZSTD_WINDOW_START_INDEX &&
947 window.lowLimit == ZSTD_WINDOW_START_INDEX &&
948 (window.nextSrc - window.base) == ZSTD_WINDOW_START_INDEX;
949}
950
951/**
952 * ZSTD_window_hasExtDict():
953 * Returns non-zero if the window has a non-empty extDict.
954 */
955MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window)
956{
957 return window.lowLimit < window.dictLimit;
958}
959
960/**
961 * ZSTD_matchState_dictMode():
962 * Inspects the provided matchState and figures out what dictMode should be
963 * passed to the compressor.
964 */
965MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)
966{
967 return ZSTD_window_hasExtDict(ms->window) ?
968 ZSTD_extDict :
969 ms->dictMatchState != NULL ?
970 (ms->dictMatchState->dedicatedDictSearch ? ZSTD_dedicatedDictSearch : ZSTD_dictMatchState) :
971 ZSTD_noDict;
972}
973
974/* Defining this macro to non-zero tells zstd to run the overflow correction
975 * code much more frequently. This is very inefficient, and should only be
976 * used for tests and fuzzers.
977 */
978#ifndef ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY
979# ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
980# define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 1
981# else
982# define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 0
983# endif
984#endif
985
986/**
987 * ZSTD_window_canOverflowCorrect():
988 * Returns non-zero if the indices are large enough for overflow correction
989 * to work correctly without impacting compression ratio.
990 */
991MEM_STATIC U32 ZSTD_window_canOverflowCorrect(ZSTD_window_t const window,
992 U32 cycleLog,
993 U32 maxDist,
994 U32 loadedDictEnd,
995 void const* src)
996{
997 U32 const cycleSize = 1u << cycleLog;
998 U32 const curr = (U32)((BYTE const*)src - window.base);
999 U32 const minIndexToOverflowCorrect = cycleSize
1000 + MAX(maxDist, cycleSize)
1001 + ZSTD_WINDOW_START_INDEX;
1002
1003 /* Adjust the min index to backoff the overflow correction frequency,
1004 * so we don't waste too much CPU in overflow correction. If this
1005 * computation overflows we don't really care, we just need to make
1006 * sure it is at least minIndexToOverflowCorrect.
1007 */
1008 U32 const adjustment = window.nbOverflowCorrections + 1;
1009 U32 const adjustedIndex = MAX(minIndexToOverflowCorrect * adjustment,
1010 minIndexToOverflowCorrect);
1011 U32 const indexLargeEnough = curr > adjustedIndex;
1012
1013 /* Only overflow correct early if the dictionary is invalidated already,
1014 * so we don't hurt compression ratio.
1015 */
1016 U32 const dictionaryInvalidated = curr > maxDist + loadedDictEnd;
1017
1018 return indexLargeEnough && dictionaryInvalidated;
1019}
1020
1021/**
1022 * ZSTD_window_needOverflowCorrection():
1023 * Returns non-zero if the indices are getting too large and need overflow
1024 * protection.
1025 */
1026MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
1027 U32 cycleLog,
1028 U32 maxDist,
1029 U32 loadedDictEnd,
1030 void const* src,
1031 void const* srcEnd)
1032{
1033 U32 const curr = (U32)((BYTE const*)srcEnd - window.base);
1034 if (ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY) {
1035 if (ZSTD_window_canOverflowCorrect(window, cycleLog, maxDist, loadedDictEnd, src)) {
1036 return 1;
1037 }
1038 }
1039 return curr > ZSTD_CURRENT_MAX;
1040}
1041
1042
1043/**
1044 * ZSTD_window_correctOverflow():
1045 * Reduces the indices to protect from index overflow.
1046 * Returns the correction made to the indices, which must be applied to every
1047 * stored index:
1048 *
1049 * newIndex == oldIndex - correction
1050 */
1051MEM_STATIC
1052ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
1053U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
1054 U32 maxDist, void const* src)
1055{
1056 /* preemptive overflow correction:
1057 * 1. correction is large enough:
1058 * lowLimit > (3<<29) ==> current > 3<<29 + 1<<windowLog
1059 * 1<<windowLog <= newCurrent < 1<<chainLog + 1<<windowLog
1060 *
1061 * current - newCurrent
1062 * > (3<<29 + 1<<windowLog) - (1<<windowLog + 1<<chainLog)
1063 * > (3<<29) - (1<<chainLog)
1064 * > (3<<29) - (1<<30) (NOTE: chainLog <= 30)
1065 * > 1<<29
1066 *
1067 * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow:
1068 * After correction, current is less than (1<<chainLog + 1<<windowLog).
1069 * In 64-bit mode we are safe, because we have 64-bit ptrdiff_t.
1070 * In 32-bit mode we are safe, because (chainLog <= 29), so
1071 * ip+ZSTD_CHUNKSIZE_MAX - cctx->base < 1<<32.
1072 * 3. (cctx->lowLimit + 1<<windowLog) < 1<<32:
1073 * windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
1074 */
1075 U32 const cycleSize = 1u << cycleLog;
1076 U32 const cycleMask = cycleSize - 1;
1077 U32 const curr = (U32)((BYTE const*)src - window->base);
1078 U32 const currentCycle = curr & cycleMask;
1079 /* Ensure newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX. */
1080 U32 const currentCycleCorrection = currentCycle < ZSTD_WINDOW_START_INDEX
1081 ? MAX(cycleSize, ZSTD_WINDOW_START_INDEX)
1082 : 0;
1083 U32 const newCurrent = currentCycle
1084 + currentCycleCorrection
1085 + MAX(maxDist, cycleSize);
1086 U32 const correction = curr - newCurrent;
1087 /* maxDist must be a power of two so that:
1088 * (newCurrent & cycleMask) == (curr & cycleMask)
1089 * This is required to not corrupt the chains / binary tree.
1090 */
1091 assert((maxDist & (maxDist - 1)) == 0);
1092 assert((curr & cycleMask) == (newCurrent & cycleMask));
1093 assert(curr > newCurrent);
1094 if (!ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY) {
1095 /* Loose bound, should be around 1<<29 (see above) */
1096 assert(correction > 1<<28);
1097 }
1098
1099 window->base += correction;
1100 window->dictBase += correction;
1101 if (window->lowLimit < correction + ZSTD_WINDOW_START_INDEX) {
1102 window->lowLimit = ZSTD_WINDOW_START_INDEX;
1103 } else {
1104 window->lowLimit -= correction;
1105 }
1106 if (window->dictLimit < correction + ZSTD_WINDOW_START_INDEX) {
1107 window->dictLimit = ZSTD_WINDOW_START_INDEX;
1108 } else {
1109 window->dictLimit -= correction;
1110 }
1111
1112 /* Ensure we can still reference the full window. */
1113 assert(newCurrent >= maxDist);
1114 assert(newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX);
1115 /* Ensure that lowLimit and dictLimit didn't underflow. */
1116 assert(window->lowLimit <= newCurrent);
1117 assert(window->dictLimit <= newCurrent);
1118
1119 ++window->nbOverflowCorrections;
1120
1121 DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction,
1122 window->lowLimit);
1123 return correction;
1124}
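As a worked example of the arithmetic above: with cycleLog = 17 (cycleSize = 0x20000, cycleMask = 0x1FFFF), maxDist = 1 << 17 and curr = 0x30000123, currentCycle = 0x123, no start-index correction is needed, and newCurrent = 0x123 + 0x20000 = 0x20123; the returned correction is 0x30000123 - 0x20123 = 0x2FFE0000, and indeed (curr & cycleMask) == (newCurrent & cycleMask) == 0x123, so chain and tree links remain consistent after subtracting the correction from every stored index.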
1125
1149MEM_STATIC void
1150ZSTD_window_enforceMaxDist(ZSTD_window_t* window,
1151 const void* blockEnd,
1152 U32 maxDist,
1153 U32* loadedDictEndPtr,
1154 const ZSTD_matchState_t** dictMatchStatePtr)
1155{
1156 U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
1157 U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0;
1158 DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
1159 (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
1160
1161 /* - When there is no dictionary : loadedDictEnd == 0.
1162 In which case, the test (blockEndIdx > maxDist) is merely to avoid
1163 overflowing next operation `newLowLimit = blockEndIdx - maxDist`.
1164 - When there is a standard dictionary :
1165 Index referential is copied from the dictionary,
1166 which means it starts from 0.
1167 In which case, loadedDictEnd == dictSize,
1168 and it makes sense to compare `blockEndIdx > maxDist + dictSize`
1169 since `blockEndIdx` also starts from zero.
1170 - When there is an attached dictionary :
1171 loadedDictEnd is expressed within the referential of the context,
1172 so it can be directly compared against blockEndIdx.
1173 */
1174 if (blockEndIdx > maxDist + loadedDictEnd) {
1175 U32 const newLowLimit = blockEndIdx - maxDist;
1176 if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit;
1177 if (window->dictLimit < window->lowLimit) {
1178 DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u",
1179 (unsigned)window->dictLimit, (unsigned)window->lowLimit);
1180 window->dictLimit = window->lowLimit;
1181 }
1182 /* On reaching window size, dictionaries are invalidated */
1183 if (loadedDictEndPtr) *loadedDictEndPtr = 0;
1184 if (dictMatchStatePtr) *dictMatchStatePtr = NULL;
1185 }
1186}
1187
1188/* Similar to ZSTD_window_enforceMaxDist(),
1189 * but only invalidates dictionary
1190 * when input progresses beyond window size.
1191 * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL)
1192 * loadedDictEnd uses same referential as window->base
1193 * maxDist is the window size */
1194MEM_STATIC void
1195ZSTD_checkDictValidity(const ZSTD_window_t* window,
1196 const void* blockEnd,
1197 U32 maxDist,
1198 U32* loadedDictEndPtr,
1199 const ZSTD_matchState_t** dictMatchStatePtr)
1200{
1201 assert(loadedDictEndPtr != NULL);
1202 assert(dictMatchStatePtr != NULL);
1203 { U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base);
1204 U32 const loadedDictEnd = *loadedDictEndPtr;
1205 DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u",
1206 (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
1207 assert(blockEndIdx >= loadedDictEnd);
1208
1209 if (blockEndIdx > loadedDictEnd + maxDist || loadedDictEnd != window->dictLimit) {
1210 /* On reaching window size, dictionaries are invalidated.
1211 * For simplification, if window size is reached anywhere within next block,
1212 * the dictionary is invalidated for the full block.
1213 *
1214 * We also have to invalidate the dictionary if ZSTD_window_update() has detected
1215 * non-contiguous segments, which means that loadedDictEnd != window->dictLimit.
1216 * loadedDictEnd may be 0, if forceWindow is true, but in that case we never use
1217 * dictMatchState, so setting it to NULL is not a problem.
1218 */
1219 DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)");
1220 *loadedDictEndPtr = 0;
1221 *dictMatchStatePtr = NULL;
1222 } else {
1223 if (*loadedDictEndPtr != 0) {
1224 DEBUGLOG(6, "dictionary considered valid for current block");
1225 } } }
1226}
1227
1228MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
1229 ZSTD_memset(window, 0, sizeof(*window));
1230 window->base = (BYTE const*)" ";
1231 window->dictBase = (BYTE const*)" ";
1232 ZSTD_STATIC_ASSERT(ZSTD_DUBT_UNSORTED_MARK < ZSTD_WINDOW_START_INDEX); /* Start above ZSTD_DUBT_UNSORTED_MARK */
1233 window->dictLimit = ZSTD_WINDOW_START_INDEX; /* start from >0, so that 1st position is valid */
1234 window->lowLimit = ZSTD_WINDOW_START_INDEX; /* it ensures first and later CCtx usages compress the same */
1235 window->nextSrc = window->base + ZSTD_WINDOW_START_INDEX; /* see issue #1241 */
1236 window->nbOverflowCorrections = 0;
1237}
1238
1239/**
1240 * ZSTD_window_update():
1241 * Updates the window by appending [src, src + srcSize) to the window.
1242 * If it is not contiguous, the current prefix becomes the extDict, and we
1243 * forget about the extDict. Handles overlap of the prefix and extDict.
1244 * Returns non-zero if the segment is contiguous.
1245 */
1246MEM_STATIC
1247ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
1248U32 ZSTD_window_update(ZSTD_window_t* window,
1249 void const* src, size_t srcSize,
1250 int forceNonContiguous)
1251{
1252 BYTE const* const ip = (BYTE const*)src;
1253 U32 contiguous = 1;
1254 DEBUGLOG(5, "ZSTD_window_update");
1255 if (srcSize == 0)
1256 return contiguous;
1257 assert(window->base != NULL);
1258 assert(window->dictBase != NULL);
1259 /* Check if blocks follow each other */
1260 if (src != window->nextSrc || forceNonContiguous) {
1261 /* not contiguous */
1262 size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
1263 DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit);
1264 window->lowLimit = window->dictLimit;
1265 assert(distanceFromBase == (size_t)(U32)distanceFromBase); /* should never overflow */
1266 window->dictLimit = (U32)distanceFromBase;
1267 window->dictBase = window->base;
1268 window->base = ip - distanceFromBase;
1269 /* ms->nextToUpdate = window->dictLimit; */
1270 if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit; /* too small extDict */
1271 contiguous = 0;
1272 }
1273 window->nextSrc = ip + srcSize;
1274 /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
1275 if ( (ip+srcSize > window->dictBase + window->lowLimit)
1276 & (ip < window->dictBase + window->dictLimit)) {
1277 ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase;
1278 U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx;
1279 window->lowLimit = lowLimitMax;
1280 DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit);
1281 }
1282 return contiguous;
1283}
1284
1285/**
1286 * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix.
1287 */
1288MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
1289{
1290 U32 const maxDistance = 1U << windowLog;
1291 U32 const lowestValid = ms->window.lowLimit;
1292 U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
1293 U32 const isDictionary = (ms->loadedDictEnd != 0);
1294 /* When using a dictionary the entire dictionary is valid if a single byte of the dictionary
1295 * is within the window. We invalidate the dictionary (and set loadedDictEnd to 0) when it isn't
1296 * valid for the entire block. So this check is sufficient to find the lowest valid match index.
1297 */
1298 U32 const matchLowest = isDictionary ? lowestValid : withinWindow;
1299 return matchLowest;
1300}
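For instance (a worked illustration of the clamp above): with windowLog = 17, curr = 1,000,000, lowLimit = 3 and no dictionary loaded, curr - lowestValid = 999,997 exceeds maxDistance = 131,072, so the lowest usable match index is curr - maxDistance = 868,928; with a dictionary still valid (loadedDictEnd != 0), the function returns lowestValid instead.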
1301
1302/**
1303 * Returns the lowest allowed match index in the prefix.
1304 */
1305MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
1306{
1307 U32 const maxDistance = 1U << windowLog;
1308 U32 const lowestValid = ms->window.dictLimit;
1309 U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
1310 U32 const isDictionary = (ms->loadedDictEnd != 0);
1311 /* When computing the lowest prefix index we need to take the dictionary into account to handle
1312 * the edge case where the dictionary and the source are contiguous in memory.
1313 */
1314 U32 const matchLowest = isDictionary ? lowestValid : withinWindow;
1315 return matchLowest;
1316}
1317
1318
1319
1320/* debug functions */
1321#if (DEBUGLEVEL>=2)
1322
1323MEM_STATIC double ZSTD_fWeight(U32 rawStat)
1324{
1325 U32 const fp_accuracy = 8;
1326 U32 const fp_multiplier = (1 << fp_accuracy);
1327 U32 const newStat = rawStat + 1;
1328 U32 const hb = ZSTD_highbit32(newStat);
1329 U32 const BWeight = hb * fp_multiplier;
1330 U32 const FWeight = (newStat << fp_accuracy) >> hb;
1331 U32 const weight = BWeight + FWeight;
1332 assert(hb + fp_accuracy < 31);
1333 return (double)weight / fp_multiplier;
1334}
1335
1336/* display a table content,
1337 * listing each element, its frequency, and its predicted bit cost */
1338MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
1339{
1340 unsigned u, sum;
1341 for (u=0, sum=0; u<=max; u++) sum += table[u];
1342 DEBUGLOG(2, "total nb elts: %u", sum);
1343 for (u=0; u<=max; u++) {
1344 DEBUGLOG(2, "%2u: %5u (%.2f)",
1345 u, table[u], ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) );
1346 }
1347}
1348
1349#endif
1350
1351/* Short Cache */
1352
1353/* Normally, zstd matchfinders follow this flow:
1354 * 1. Compute hash at ip
1355 * 2. Load index from hashTable[hash]
1356 * 3. Check if *ip == *(base + index)
1357 * In dictionary compression, loading *(base + index) is often an L2 or even L3 miss.
1358 *
1359 * Short cache is an optimization which allows us to avoid step 3 most of the time
1360 * when the data doesn't actually match. With short cache, the flow becomes:
1361 * 1. Compute (hash, currentTag) at ip. currentTag is an 8-bit independent hash at ip.
1362 * 2. Load (index, matchTag) from hashTable[hash]. See ZSTD_writeTaggedIndex to understand how this works.
1363 * 3. Only if currentTag == matchTag, check *ip == *(base + index). Otherwise, continue.
1364 *
1365 * Currently, short cache is only implemented in CDict hashtables. Thus, its use is limited to
1366 * dictMatchState matchfinders.
1367 */
1368#define ZSTD_SHORT_CACHE_TAG_BITS 8
1369#define ZSTD_SHORT_CACHE_TAG_MASK ((1u << ZSTD_SHORT_CACHE_TAG_BITS) - 1)
1370
1371/* Helper function for ZSTD_fillHashTable and ZSTD_fillDoubleHashTable.
1372 * Unpacks hashAndTag into (hash, tag), then packs (index, tag) into hashTable[hash]. */
1373MEM_STATIC void ZSTD_writeTaggedIndex(U32* const hashTable, size_t hashAndTag, U32 index) {
1374 size_t const hash = hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS;
1375 U32 const tag = (U32)(hashAndTag & ZSTD_SHORT_CACHE_TAG_MASK);
1376 assert(index >> (32 - ZSTD_SHORT_CACHE_TAG_BITS) == 0);
1377 hashTable[hash] = (index << ZSTD_SHORT_CACHE_TAG_BITS) | tag;
1378}
1379
1380/* Helper function for short cache matchfinders.
1381 * Unpacks tag1 and tag2 from lower bits of packedTag1 and packedTag2, then checks if the tags match. */
1382MEM_STATIC int ZSTD_comparePackedTags(size_t packedTag1, size_t packedTag2) {
1383 U32 const tag1 = packedTag1 & ZSTD_SHORT_CACHE_TAG_MASK;
1384 U32 const tag2 = packedTag2 & ZSTD_SHORT_CACHE_TAG_MASK;
1385 return tag1 == tag2;
1386}
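A standalone sketch of the tagged-index scheme described above: pack (index, 8-bit tag) into one 32-bit slot on write, and compare tags before paying for the match-byte load on read (names and table size illustrative):

/* Standalone sketch of the short-cache tag packing above; not zstd code. */
#include <stdio.h>
#include <stdint.h>

#define TAG_BITS 8
#define TAG_MASK ((1u << TAG_BITS) - 1)

int main(void)
{
    uint32_t hashTable[1 << 10] = { 0 };
    /* Suppose (hash, tag) were computed at some position `index`: */
    uint32_t const hash = 123, tag = 0xAB, index = 55555;
    hashTable[hash] = (index << TAG_BITS) | tag;   /* pack, as ZSTD_writeTaggedIndex */

    /* Later, probing with a fresh (hash, currentTag): */
    { uint32_t const packed = hashTable[hash];
      uint32_t const currentTag = 0xAB;
      if ((packed & TAG_MASK) == currentTag) {     /* tags match: worth dereferencing */
          uint32_t const matchIndex = packed >> TAG_BITS;
          printf("candidate at index %u\n", matchIndex);  /* 55555 */
      } else {
          printf("tag miss: skip the memory load\n");
      } }
    return 0;
}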
1387
1388#if defined (__cplusplus)
1389}
1390#endif
1391
1392/* ===============================================================
1393 * Shared internal declarations
1394 * These prototypes may be called from sources not in lib/compress
1395 * =============================================================== */
1396
1397/* ZSTD_loadCEntropy() :
1398 * dict : must point at beginning of a valid zstd dictionary.
1399 * return : size of dictionary header (size of magic number + dict ID + entropy tables)
1400 * assumptions : magic number supposed already checked
1401 * and dictSize >= 8 */
1402size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
1403 const void* const dict, size_t dictSize);
1404
1406
1407/* ==============================================================
1408 * Private declarations
1409 * These prototypes shall only be called from within lib/compress
1410 * ============================================================== */
1411
1412/* ZSTD_getCParamsFromCCtxParams() :
1413 * cParams are built depending on compressionLevel, src size hints,
1414 * LDM and manually set compression parameters.
1415 * Note: srcSizeHint == 0 means 0!
1416 */
1417ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
1418 const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
1419
1420/*! ZSTD_initCStream_internal() :
1421 * Private use only. Init streaming operation.
1422 * expects params to be valid.
1423 * must receive dict, or cdict, or none, but not both.
1424 * @return : 0, or an error code */
1425size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
1426 const void* dict, size_t dictSize,
1427 const ZSTD_CDict* cdict,
1428 const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize);
1429
1430void ZSTD_resetSeqStore(seqStore_t* ssPtr);
1431
1432/*! ZSTD_getCParamsFromCDict() :
1433 * as the name implies */
1434ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);
1435
1436/* ZSTD_compressBegin_advanced_internal() :
1437 * Private use only. To be called from zstdmt_compress.c. */
1438size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
1439 const void* dict, size_t dictSize,
1440 ZSTD_dictContentType_e dictContentType,
1441 ZSTD_dictTableLoadMethod_e dtlm,
1442 const ZSTD_CDict* cdict,
1443 const ZSTD_CCtx_params* params,
1444 unsigned long long pledgedSrcSize);
1445
1446/* ZSTD_compress_advanced_internal() :
1447 * Private use only. To be called from zstdmt_compress.c. */
1448size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
1449 void* dst, size_t dstCapacity,
1450 const void* src, size_t srcSize,
1451 const void* dict,size_t dictSize,
1452 const ZSTD_CCtx_params* params);
1453
1454
1455/* ZSTD_writeLastEmptyBlock() :
1456 * output an empty Block with end-of-frame mark to complete a frame
1457 * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
1458 * or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
1459 */
1460size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
1461
1462
1463/* ZSTD_referenceExternalSequences() :
1464 * Must be called before starting a compression operation.
1465 * seqs must parse a prefix of the source.
1466 * This cannot be used when long range matching is enabled.
1467 * Zstd will use these sequences, and pass the literals to a secondary block
1468 * compressor.
1469 * NOTE: seqs are not verified! Invalid sequences can cause out-of-bounds memory
1470 * access and data corruption.
1471 */
1472void ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq);
1473
1474/** ZSTD_cycleLog() :
1475 * condition for correct operation : hashLog > 1 */
1476U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat);
1477
1478/** ZSTD_CCtx_trace() :
1479 * Trace the end of a compression call.
1480 */
1481void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize);
1482
1483/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
1484 * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
1485 * Note that the block delimiter must include the last literals of the block.
1486 */
1487size_t
1488ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
1489 ZSTD_sequencePosition* seqPos,
1490 const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
1491 const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch);
1492
1493/* Returns the number of bytes to move the current read position back by.
1494 * Only non-zero if we ended up splitting a sequence.
1495 * Otherwise, it may return a ZSTD error if something went wrong.
1496 *
1497 * This function will attempt to scan through blockSize bytes
1498 * represented by the sequences in @inSeqs,
1499 * storing any (partial) sequences.
1500 *
1501 * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to
1502 * avoid splitting a match, or to avoid splitting a match such that it would produce a match
1503 * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
1504 */
1505size_t
1506ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
1507 const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
1508 const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch);
1509
1510/* Returns 1 if an external sequence producer is registered, otherwise returns 0. */
1511MEM_STATIC int ZSTD_hasExtSeqProd(const ZSTD_CCtx_params* params) {
1512 return params->extSeqProdFunc != NULL;
1513}
1514
1515/* ===============================================================
1516 * Deprecated definitions that are still used internally to avoid
1517 * deprecation warnings. These functions are exactly equivalent to
1518 * their public variants, but avoid the deprecation warnings.
1519 * =============================================================== */
1520
1521size_t ZSTD_compressBegin_usingCDict_deprecated(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
1522
1523size_t ZSTD_compressContinue_public(ZSTD_CCtx* cctx,
1524 void* dst, size_t dstCapacity,
1525 const void* src, size_t srcSize);
1526
1527size_t ZSTD_compressEnd_public(ZSTD_CCtx* cctx,
1528 void* dst, size_t dstCapacity,
1529 const void* src, size_t srcSize);
1530
1531size_t ZSTD_compressBlock_deprecated(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
1532
1533
1534#endif /* ZSTD_COMPRESS_H */