This source file includes the following definitions:
- ZSTD_compressBound
- ZSTD_resetSeqStore
- ZSTD_CCtxWorkspaceBound
- ZSTD_createCCtx_advanced
- ZSTD_initCCtx
- ZSTD_freeCCtx
- ZSTD_getSeqStore
- ZSTD_getParamsFromCCtx
- ZSTD_checkCParams
- ZSTD_cycleLog
- ZSTD_adjustCParams
- ZSTD_equivalentParams
- ZSTD_continueCCtx
- ZSTD_resetCCtx_advanced
- ZSTD_invalidateRepCodes
- ZSTD_copyCCtx
- ZSTD_reduceTable
- ZSTD_reduceIndex
- ZSTD_noCompressBlock
- ZSTD_noCompressLiterals
- ZSTD_compressRleLiteralsBlock
- ZSTD_minGain
- ZSTD_compressLiterals
- ZSTD_seqToCodes
- ZSTD_compressSequences_internal
- ZSTD_compressSequences
- ZSTD_storeSeq
- ZSTD_NbCommonBytes
- ZSTD_count
- ZSTD_count_2segments
- ZSTD_hash3
- ZSTD_hash3Ptr
- ZSTD_hash4
- ZSTD_hash4Ptr
- ZSTD_hash5
- ZSTD_hash5Ptr
- ZSTD_hash6
- ZSTD_hash6Ptr
- ZSTD_hash7
- ZSTD_hash7Ptr
- ZSTD_hash8
- ZSTD_hash8Ptr
- ZSTD_hashPtr
- ZSTD_fillHashTable
- ZSTD_compressBlock_fast_generic
- ZSTD_compressBlock_fast
- ZSTD_compressBlock_fast_extDict_generic
- ZSTD_compressBlock_fast_extDict
- ZSTD_fillDoubleHashTable
- ZSTD_compressBlock_doubleFast_generic
- ZSTD_compressBlock_doubleFast
- ZSTD_compressBlock_doubleFast_extDict_generic
- ZSTD_compressBlock_doubleFast_extDict
- ZSTD_insertBt1
- ZSTD_insertBtAndFindBestMatch
- ZSTD_updateTree
- ZSTD_BtFindBestMatch
- ZSTD_BtFindBestMatch_selectMLS
- ZSTD_updateTree_extDict
- ZSTD_BtFindBestMatch_extDict
- ZSTD_BtFindBestMatch_selectMLS_extDict
- ZSTD_insertAndFindFirstIndex
- ZSTD_HcFindBestMatch_generic
- ZSTD_HcFindBestMatch_selectMLS
- ZSTD_HcFindBestMatch_extDict_selectMLS
- ZSTD_compressBlock_lazy_generic
- ZSTD_compressBlock_btlazy2
- ZSTD_compressBlock_lazy2
- ZSTD_compressBlock_lazy
- ZSTD_compressBlock_greedy
- ZSTD_compressBlock_lazy_extDict_generic
- ZSTD_compressBlock_greedy_extDict
- ZSTD_compressBlock_lazy_extDict
- ZSTD_compressBlock_lazy2_extDict
- ZSTD_compressBlock_btlazy2_extDict
- ZSTD_compressBlock_btopt
- ZSTD_compressBlock_btopt2
- ZSTD_compressBlock_btopt_extDict
- ZSTD_compressBlock_btopt2_extDict
- ZSTD_selectBlockCompressor
- ZSTD_compressBlock_internal
- ZSTD_compress_generic
- ZSTD_writeFrameHeader
- ZSTD_compressContinue_internal
- ZSTD_compressContinue
- ZSTD_getBlockSizeMax
- ZSTD_compressBlock
- ZSTD_loadDictionaryContent
- ZSTD_checkDictNCount
- ZSTD_loadZstdDictionary
- ZSTD_compress_insertDictionary
- ZSTD_compressBegin_internal
- ZSTD_compressBegin_advanced
- ZSTD_compressBegin_usingDict
- ZSTD_compressBegin
- ZSTD_writeEpilogue
- ZSTD_compressEnd
- ZSTD_compress_internal
- ZSTD_compress_usingDict
- ZSTD_compressCCtx
- ZSTD_CDictWorkspaceBound
- ZSTD_createCDict_advanced
- ZSTD_initCDict
- ZSTD_freeCDict
- ZSTD_getParamsFromCDict
- ZSTD_compressBegin_usingCDict
- ZSTD_compress_usingCDict
- ZSTD_CStreamWorkspaceBound
- ZSTD_createCStream_advanced
- ZSTD_freeCStream
- ZSTD_CStreamInSize
- ZSTD_CStreamOutSize
- ZSTD_resetCStream_internal
- ZSTD_resetCStream
- ZSTD_initCStream_advanced
- ZSTD_initCStream
- ZSTD_initCStream_usingCDict
- ZSTD_limitCopy
- ZSTD_compressStream_generic
- ZSTD_compressStream
- ZSTD_flushStream
- ZSTD_endStream
- ZSTD_maxCLevel
- ZSTD_getCParams
- ZSTD_getParams
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20 #include "fse.h"
21 #include "huf.h"
22 #include "mem.h"
23 #include "zstd_internal.h"
24 #include <linux/kernel.h>
25 #include <linux/module.h>
26 #include <linux/string.h>
27
28
29
30
static const U32 g_searchStrength = 8; /* search-skip aggressiveness; consumed by the block compressors -- see match finders below */
#define HASH_READ_SIZE 8               /* number of bytes read when hashing a position */
/* Lifecycle stage of a ZSTD_CCtx : created -> init (header pending) -> ongoing -> ending. */
typedef enum { ZSTDcs_created = 0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
34
35
36
37
38 size_t ZSTD_compressBound(size_t srcSize) { return FSE_compressBound(srcSize) + 12; }
39
40
41
42
43 static void ZSTD_resetSeqStore(seqStore_t *ssPtr)
44 {
45 ssPtr->lit = ssPtr->litStart;
46 ssPtr->sequences = ssPtr->sequencesStart;
47 ssPtr->longLengthID = 0;
48 }
49
50
51
52
/* Compression context : all state needed to compress one frame.
 * Large position tables live in the separately allocated workSpace;
 * the CTables and tmpCounters arrays are inline and persist across blocks. */
struct ZSTD_CCtx_s {
	const BYTE *nextSrc;  /* next block continues here on the current segment */
	const BYTE *base;     /* regular indexes are relative to this position */
	const BYTE *dictBase; /* extDict indexes are relative to this position */
	U32 dictLimit;	      /* below this index, data belongs to extDict */
	U32 lowLimit;	      /* below this index, no data is available */
	U32 nextToUpdate;     /* index from which to continue table update */
	U32 nextToUpdate3;    /* same, for the 3-byte hash table */
	U32 hashLog3;	      /* log2 size of the 3-byte hash table (0 when unused) */
	U32 loadedDictEnd;    /* index one past the last byte loaded from a dictionary */
	U32 forceWindow;      /* force back-references to respect the window limit, even for dictionary matches */
	U32 forceRawDict;     /* load dictionaries as raw content, skipping header analysis -- TODO confirm against callers */
	ZSTD_compressionStage_e stage;
	U32 rep[ZSTD_REP_NUM];		/* confirmed repeat-offset history */
	U32 repToConfirm[ZSTD_REP_NUM]; /* candidate repeat offsets, promoted by ZSTD_compressSequences() */
	U32 dictID;
	ZSTD_parameters params;
	void *workSpace;	/* tables + token space; layout carved by ZSTD_resetCCtx_advanced() */
	size_t workSpaceSize;
	size_t blockSize;
	U64 frameContentSize;
	struct xxh64_state xxhState; /* running frame-content checksum */
	ZSTD_customMem customMem;

	seqStore_t seqStore; /* pointers into the workSpace sequence/literal areas */
	U32 *hashTable;
	U32 *hashTable3;
	U32 *chainTable;
	HUF_CElt *hufTable;
	U32 flagStaticTables;	       /* nonzero => the three FSE CTables below are valid for repeat use */
	HUF_repeat flagStaticHufTable; /* whether hufTable may be reused for the next block */
	FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
	FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
	FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
	unsigned tmpCounters[HUF_COMPRESS_WORKSPACE_SIZE_U32]; /* scratch for entropy counting / table building */
};
89
/*! ZSTD_CCtxWorkspaceBound() :
 *  Worst-case memory needed to host a CCtx for the given `cParams`,
 *  including the stack/context headers and the internal workspace.
 *  NOTE : the sizing arithmetic must stay in sync with ZSTD_resetCCtx_advanced(). */
size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams)
{
	size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << cParams.windowLog);
	U32 const divider = (cParams.searchLength == 3) ? 3 : 4; /* minimum match length bounds the max number of sequences */
	size_t const maxNbSeq = blockSize / divider;
	size_t const tokenSpace = blockSize + 11 * maxNbSeq; /* literals + seqDef (8B) + ll/ml/of code bytes (3B) per sequence */
	size_t const chainSize = (cParams.strategy == ZSTD_fast) ? 0 : (1 << cParams.chainLog); /* fast strategy has no chain table */
	size_t const hSize = ((size_t)1) << cParams.hashLog;
	U32 const hashLog3 = (cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog);
	size_t const h3Size = ((size_t)1) << hashLog3;
	size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
	/* frequency + match/price tables used only by the btopt strategies */
	size_t const optSpace =
	    ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) + (ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
	size_t const workspaceSize = tableSpace + (256 * sizeof(U32)) + tokenSpace +
				     (((cParams.strategy == ZSTD_btopt) || (cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);

	return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_CCtx)) + ZSTD_ALIGN(workspaceSize);
}
108
109 static ZSTD_CCtx *ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
110 {
111 ZSTD_CCtx *cctx;
112 if (!customMem.customAlloc || !customMem.customFree)
113 return NULL;
114 cctx = (ZSTD_CCtx *)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
115 if (!cctx)
116 return NULL;
117 memset(cctx, 0, sizeof(ZSTD_CCtx));
118 cctx->customMem = customMem;
119 return cctx;
120 }
121
122 ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize)
123 {
124 ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
125 ZSTD_CCtx *cctx = ZSTD_createCCtx_advanced(stackMem);
126 if (cctx) {
127 cctx->workSpace = ZSTD_stackAllocAll(cctx->customMem.opaque, &cctx->workSpaceSize);
128 }
129 return cctx;
130 }
131
132 size_t ZSTD_freeCCtx(ZSTD_CCtx *cctx)
133 {
134 if (cctx == NULL)
135 return 0;
136 ZSTD_free(cctx->workSpace, cctx->customMem);
137 ZSTD_free(cctx, cctx->customMem);
138 return 0;
139 }
140
141 const seqStore_t *ZSTD_getSeqStore(const ZSTD_CCtx *ctx) { return &(ctx->seqStore); }
142
143 static ZSTD_parameters ZSTD_getParamsFromCCtx(const ZSTD_CCtx *cctx) { return cctx->params; }
144
145
146
147
/*! ZSTD_checkCParams() :
 *  Validate that every field of `cParams` lies within its supported range.
 *  @return : 0 on success, or compressionParameter_unsupported on the first
 *            out-of-range field. */
size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
{
/* Early-returns from the enclosing function on range violation. */
#define CLAMPCHECK(val, min, max) \
	{ \
		if ((val < min) | (val > max)) \
			return ERROR(compressionParameter_unsupported); \
	}
	CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
	CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
	CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
	CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
	CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
	CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
	/* strategy is an enum : anything past ZSTD_btopt2 is unknown */
	if ((U32)(cParams.strategy) > (U32)ZSTD_btopt2)
		return ERROR(compressionParameter_unsupported);
	return 0;
}
165
166
167
168 static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
169 {
170 U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
171 return hashLog - btScale;
172 }
173
174
175
176
177
178
179
/*! ZSTD_adjustCParams() :
 *  Shrink `cPar` so the window / hash / chain logs do not exceed what the
 *  (srcSize + dictSize) working set can actually exploit.
 *  Returned unmodified when both sizes are 0 (sizes unknown). */
ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
{
	if (srcSize + dictSize == 0)
		return cPar; /* no size information available : no adjustment */

	/* Resize window down to the input's working-set size. */
	{
		U32 const minSrcSize = (srcSize == 0) ? 500 : 0; /* dictionary-only : assume a small margin of future input */
		U64 const rSize = srcSize + dictSize + minSrcSize;
		if (rSize < ((U64)1 << ZSTD_WINDOWLOG_MAX)) {
			U32 const srcLog = MAX(ZSTD_HASHLOG_MIN, ZSTD_highbit32((U32)(rSize)-1) + 1); /* smallest log covering rSize */
			if (cPar.windowLog > srcLog)
				cPar.windowLog = srcLog;
		}
	}
	if (cPar.hashLog > cPar.windowLog)
		cPar.hashLog = cPar.windowLog; /* hash table never needs to address beyond the window */
	{
		/* Keep the chain/bt table cycle within the (possibly reduced) window. */
		U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
		if (cycleLog > cPar.windowLog)
			cPar.chainLog -= (cycleLog - cPar.windowLog);
	}

	if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
		cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum the format supports */

	return cPar;
}
208
209 static U32 ZSTD_equivalentParams(ZSTD_parameters param1, ZSTD_parameters param2)
210 {
211 return (param1.cParams.hashLog == param2.cParams.hashLog) & (param1.cParams.chainLog == param2.cParams.chainLog) &
212 (param1.cParams.strategy == param2.cParams.strategy) & ((param1.cParams.searchLength == 3) == (param2.cParams.searchLength == 3));
213 }
214
215
216
/*! ZSTD_continueCCtx() :
 *  Reuse an already-sized context for a new frame : keep the match-finder
 *  tables and growing indexes, but reset all frame-level state (limits,
 *  rep codes, checksum, dictID, stage). Always returns 0. */
static size_t ZSTD_continueCCtx(ZSTD_CCtx *cctx, ZSTD_parameters params, U64 frameContentSize)
{
	U32 const end = (U32)(cctx->nextSrc - cctx->base); /* current index of the end of known data */
	cctx->params = params;
	cctx->frameContentSize = frameContentSize;
	cctx->lowLimit = end;
	cctx->dictLimit = end;
	cctx->nextToUpdate = end + 1; /* table updates resume past the current end -- NOTE(review): +1 offset, confirm rationale upstream */
	cctx->stage = ZSTDcs_init;
	cctx->dictID = 0;
	cctx->loadedDictEnd = 0;
	{
		/* new frame : rep codes return to their standard starting values */
		int i;
		for (i = 0; i < ZSTD_REP_NUM; i++)
			cctx->rep[i] = repStartValue[i];
	}
	cctx->seqStore.litLengthSum = 0; /* force stats reinitialization in the btopt path */
	xxh64_reset(&cctx->xxhState, 0);
	return 0;
}
237
238 typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset, ZSTDcrp_fullReset } ZSTD_compResetPolicy_e;
239
240
241
/*! ZSTD_resetCCtx_advanced() :
 *  Prepare `zc` for a new compression job with `params`.
 *  With ZSTDcrp_continue and equivalent parameters, the context is recycled
 *  in place via ZSTD_continueCCtx(). Otherwise the workspace is (re)sized if
 *  needed, then carved into hash / chain / hash3 / huf / (opt) / sequence /
 *  literal areas, and all state is reinitialized.
 *  @return : 0, or memory_allocation if the workspace could not be grown. */
static size_t ZSTD_resetCCtx_advanced(ZSTD_CCtx *zc, ZSTD_parameters params, U64 frameContentSize, ZSTD_compResetPolicy_e const crp)
{
	if (crp == ZSTDcrp_continue)
		if (ZSTD_equivalentParams(params, zc->params)) {
			/* same table layout : keep tables, just invalidate entropy state */
			zc->flagStaticTables = 0;
			zc->flagStaticHufTable = HUF_repeat_none;
			return ZSTD_continueCCtx(zc, params, frameContentSize);
		}

	{
		/* Sizing : must stay in sync with ZSTD_CCtxWorkspaceBound(). */
		size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog);
		U32 const divider = (params.cParams.searchLength == 3) ? 3 : 4; /* minimum match length bounds max sequence count */
		size_t const maxNbSeq = blockSize / divider;
		size_t const tokenSpace = blockSize + 11 * maxNbSeq; /* literals + seqDef + ll/ml/of code bytes */
		size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ? 0 : (1 << params.cParams.chainLog);
		size_t const hSize = ((size_t)1) << params.cParams.hashLog;
		U32 const hashLog3 = (params.cParams.searchLength > 3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog);
		size_t const h3Size = ((size_t)1) << hashLog3;
		size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
		void *ptr;

		/* Grow the workspace if the new parameters need more room. */
		{
			size_t const optSpace = ((MaxML + 1) + (MaxLL + 1) + (MaxOff + 1) + (1 << Litbits)) * sizeof(U32) +
						(ZSTD_OPT_NUM + 1) * (sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
			size_t const neededSpace = tableSpace + (256 * sizeof(U32)) + tokenSpace +
						   (((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);
			if (zc->workSpaceSize < neededSpace) {
				ZSTD_free(zc->workSpace, zc->customMem);
				zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
				if (zc->workSpace == NULL)
					return ERROR(memory_allocation);
				zc->workSpaceSize = neededSpace;
			}
		}

		if (crp != ZSTDcrp_noMemset)
			memset(zc->workSpace, 0, tableSpace); /* zero only the match-finder tables */
		xxh64_reset(&zc->xxhState, 0);
		/* Carve the workspace : [hashTable][chainTable][hashTable3][hufTable]... */
		zc->hashLog3 = hashLog3;
		zc->hashTable = (U32 *)(zc->workSpace);
		zc->chainTable = zc->hashTable + hSize;
		zc->hashTable3 = zc->chainTable + chainSize;
		ptr = zc->hashTable3 + h3Size;
		zc->hufTable = (HUF_CElt *)ptr;
		zc->flagStaticTables = 0;
		zc->flagStaticHufTable = HUF_repeat_none;
		ptr = ((U32 *)ptr) + 256; /* hufTable occupies 256 U32 -- must match the 256*sizeof(U32) in the sizing above */

		zc->nextToUpdate = 1;
		zc->nextSrc = NULL;
		zc->base = NULL;
		zc->dictBase = NULL;
		zc->dictLimit = 0;
		zc->lowLimit = 0;
		zc->params = params;
		zc->blockSize = blockSize;
		zc->frameContentSize = frameContentSize;
		{
			int i;
			for (i = 0; i < ZSTD_REP_NUM; i++)
				zc->rep[i] = repStartValue[i];
		}

		/* btopt strategies additionally need frequency + match/price tables. */
		if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) {
			zc->seqStore.litFreq = (U32 *)ptr;
			zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1 << Litbits);
			zc->seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL + 1);
			zc->seqStore.offCodeFreq = zc->seqStore.matchLengthFreq + (MaxML + 1);
			ptr = zc->seqStore.offCodeFreq + (MaxOff + 1);
			zc->seqStore.matchTable = (ZSTD_match_t *)ptr;
			ptr = zc->seqStore.matchTable + ZSTD_OPT_NUM + 1;
			zc->seqStore.priceTable = (ZSTD_optimal_t *)ptr;
			ptr = zc->seqStore.priceTable + ZSTD_OPT_NUM + 1;
			zc->seqStore.litLengthSum = 0; /* stats start from scratch */
		}
		/* Remaining space : sequence definitions, then the three code arrays, then literals. */
		zc->seqStore.sequencesStart = (seqDef *)ptr;
		ptr = zc->seqStore.sequencesStart + maxNbSeq;
		zc->seqStore.llCode = (BYTE *)ptr;
		zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
		zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
		zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;

		zc->stage = ZSTDcs_init;
		zc->dictID = 0;
		zc->loadedDictEnd = 0;

		return 0;
	}
}
332
333
334
335
336
337 void ZSTD_invalidateRepCodes(ZSTD_CCtx *cctx)
338 {
339 int i;
340 for (i = 0; i < ZSTD_REP_NUM; i++)
341 cctx->rep[i] = 0;
342 }
343
344
345
346
347
/*! ZSTD_copyCCtx() :
 *  Duplicate `srcCCtx` into `dstCCtx` : reset dst with src's parameters
 *  (tables not zeroed), then copy the match-finder tables, segment pointers,
 *  and any reusable entropy tables. Only valid while src is still in the
 *  init stage (no data compressed yet).
 *  @return : 0, or stage_wrong / an error from the internal reset. */
size_t ZSTD_copyCCtx(ZSTD_CCtx *dstCCtx, const ZSTD_CCtx *srcCCtx, unsigned long long pledgedSrcSize)
{
	if (srcCCtx->stage != ZSTDcs_init)
		return ERROR(stage_wrong);

	memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
	{
		ZSTD_parameters params = srcCCtx->params;
		params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
		/* noMemset : tables are fully overwritten by the copy below */
		ZSTD_resetCCtx_advanced(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset);
	}

	/* Copy the match-finder tables as one contiguous region (same layout as reset). */
	{
		size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog);
		size_t const hSize = ((size_t)1) << srcCCtx->params.cParams.hashLog;
		size_t const h3Size = (size_t)1 << srcCCtx->hashLog3;
		size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
		memcpy(dstCCtx->workSpace, srcCCtx->workSpace, tableSpace);
	}

	/* Mirror the segment / dictionary pointers and indexes. */
	dstCCtx->nextToUpdate = srcCCtx->nextToUpdate;
	dstCCtx->nextToUpdate3 = srcCCtx->nextToUpdate3;
	dstCCtx->nextSrc = srcCCtx->nextSrc;
	dstCCtx->base = srcCCtx->base;
	dstCCtx->dictBase = srcCCtx->dictBase;
	dstCCtx->dictLimit = srcCCtx->dictLimit;
	dstCCtx->lowLimit = srcCCtx->lowLimit;
	dstCCtx->loadedDictEnd = srcCCtx->loadedDictEnd;
	dstCCtx->dictID = srcCCtx->dictID;

	/* Carry over reusable entropy tables (e.g. loaded from a dictionary). */
	dstCCtx->flagStaticTables = srcCCtx->flagStaticTables;
	dstCCtx->flagStaticHufTable = srcCCtx->flagStaticHufTable;
	if (srcCCtx->flagStaticTables) {
		memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, sizeof(dstCCtx->litlengthCTable));
		memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, sizeof(dstCCtx->matchlengthCTable));
		memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, sizeof(dstCCtx->offcodeCTable));
	}
	if (srcCCtx->flagStaticHufTable) {
		memcpy(dstCCtx->hufTable, srcCCtx->hufTable, 256 * 4); /* hufTable spans 256 U32 -- see workspace carving */
	}

	return 0;
}
394
395
396
397 static void ZSTD_reduceTable(U32 *const table, U32 const size, U32 const reducerValue)
398 {
399 U32 u;
400 for (u = 0; u < size; u++) {
401 if (table[u] < reducerValue)
402 table[u] = 0;
403 else
404 table[u] -= reducerValue;
405 }
406 }
407
408
409
410 static void ZSTD_reduceIndex(ZSTD_CCtx *zc, const U32 reducerValue)
411 {
412 {
413 U32 const hSize = 1 << zc->params.cParams.hashLog;
414 ZSTD_reduceTable(zc->hashTable, hSize, reducerValue);
415 }
416
417 {
418 U32 const chainSize = (zc->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << zc->params.cParams.chainLog);
419 ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue);
420 }
421
422 {
423 U32 const h3Size = (zc->hashLog3) ? 1 << zc->hashLog3 : 0;
424 ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue);
425 }
426 }
427
428
429
430
431
432
433
434 size_t ZSTD_noCompressBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
435 {
436 if (srcSize + ZSTD_blockHeaderSize > dstCapacity)
437 return ERROR(dstSize_tooSmall);
438 memcpy((BYTE *)dst + ZSTD_blockHeaderSize, src, srcSize);
439 ZSTD_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw);
440 return ZSTD_blockHeaderSize + srcSize;
441 }
442
443 static size_t ZSTD_noCompressLiterals(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
444 {
445 BYTE *const ostart = (BYTE * const)dst;
446 U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095);
447
448 if (srcSize + flSize > dstCapacity)
449 return ERROR(dstSize_tooSmall);
450
451 switch (flSize) {
452 case 1: ostart[0] = (BYTE)((U32)set_basic + (srcSize << 3)); break;
453 case 2: ZSTD_writeLE16(ostart, (U16)((U32)set_basic + (1 << 2) + (srcSize << 4))); break;
454 default:
455 case 3: ZSTD_writeLE32(ostart, (U32)((U32)set_basic + (3 << 2) + (srcSize << 4))); break;
456 }
457
458 memcpy(ostart + flSize, src, srcSize);
459 return srcSize + flSize;
460 }
461
462 static size_t ZSTD_compressRleLiteralsBlock(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
463 {
464 BYTE *const ostart = (BYTE * const)dst;
465 U32 const flSize = 1 + (srcSize > 31) + (srcSize > 4095);
466
467 (void)dstCapacity;
468
469 switch (flSize) {
470 case 1: ostart[0] = (BYTE)((U32)set_rle + (srcSize << 3)); break;
471 case 2: ZSTD_writeLE16(ostart, (U16)((U32)set_rle + (1 << 2) + (srcSize << 4))); break;
472 default:
473 case 3: ZSTD_writeLE32(ostart, (U32)((U32)set_rle + (3 << 2) + (srcSize << 4))); break;
474 }
475
476 ostart[flSize] = *(const BYTE *)src;
477 return flSize + 1;
478 }
479
480 static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; }
481
/*! ZSTD_compressLiterals() :
 *  Huffman-compress the literals section into `dst`, falling back to raw
 *  (set_basic) or RLE storage when compression is not worthwhile.
 *  Writes the 3..5-byte literals-section header, then the compressed stream(s).
 *  @return : total bytes written, or an error code. */
static size_t ZSTD_compressLiterals(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
	size_t const minGain = ZSTD_minGain(srcSize);
	size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB); /* literals header size */
	BYTE *const ostart = (BYTE *)dst;
	U32 singleStream = srcSize < 256; /* small inputs use one Huffman stream instead of four */
	symbolEncodingType_e hType = set_compressed;
	size_t cLitSize;

/* below this size, entropy coding is not expected to pay off */
#define LITERAL_NOENTROPY 63
	{
		/* a reusable Huffman table lowers the profitability threshold */
		size_t const minLitSize = zc->flagStaticHufTable == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
		if (srcSize <= minLitSize)
			return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
	}

	if (dstCapacity < lhSize + 1)
		return ERROR(dstSize_tooSmall); /* not enough space for compressed literals */
	{
		HUF_repeat repeat = zc->flagStaticHufTable;
		int const preferRepeat = zc->params.cParams.strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
		if (repeat == HUF_repeat_valid && lhSize == 3)
			singleStream = 1; /* reusing a valid table with the short header : force one stream */
		cLitSize = singleStream ? HUF_compress1X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters,
								sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat)
					: HUF_compress4X_repeat(ostart + lhSize, dstCapacity - lhSize, src, srcSize, 255, 11, zc->tmpCounters,
								sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat);
		if (repeat != HUF_repeat_none) {
			hType = set_repeat; /* reused the previous table : no table description in the stream */
		}
		else {
			zc->flagStaticHufTable = HUF_repeat_check; /* a new table was built : validate before next reuse */
		}
	}

	if ((cLitSize == 0) | (cLitSize >= srcSize - minGain)) {
		/* compression failed or gain too small : store raw */
		zc->flagStaticHufTable = HUF_repeat_none;
		return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
	}
	if (cLitSize == 1) {
		/* everything collapsed to one byte : RLE */
		zc->flagStaticHufTable = HUF_repeat_none;
		return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
	}

	/* Build the literals-section header : type, stream count, sizes. */
	switch (lhSize) {
	case 3: /* 2 - 2 - 10 - 10 */
	{
		U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 14);
		ZSTD_writeLE24(ostart, lhc);
		break;
	}
	case 4: /* 2 - 2 - 14 - 14 */
	{
		U32 const lhc = hType + (2 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 18);
		ZSTD_writeLE32(ostart, lhc);
		break;
	}
	default: /* should not be necessary, lhSize is in {3,4,5} */
	case 5:	 /* 2 - 2 - 18 - 18 */
	{
		U32 const lhc = hType + (3 << 2) + ((U32)srcSize << 4) + ((U32)cLitSize << 22);
		ZSTD_writeLE32(ostart, lhc);
		ostart[4] = (BYTE)(cLitSize >> 10);
		break;
	}
	}
	return lhSize + cLitSize;
}
552
/* Literal-length (0..63) to length-code lookup; longer lengths are derived
 * from highbit32 in ZSTD_seqToCodes(). */
static const BYTE LL_Code[64] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 16, 17, 17, 18, 18,
				 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23,
				 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24};

/* Stored match-length value (0..127) to match-length-code lookup -- the stored
 * value's base offset is fixed by the callers of ZSTD_storeSeq(); TODO confirm. */
static const BYTE ML_Code[128] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
				  26, 27, 28, 29, 30, 31, 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38,
				  38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
				  40, 40, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42,
				  42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42};
562
/*! ZSTD_seqToCodes() :
 *  Translate every stored sequence into its three FSE symbol codes
 *  (llCode / ofCode / mlCode tables). Small lengths go through the lookup
 *  tables; larger ones use highbit32 plus a delta. Offsets are always coded
 *  by their highbit. A length that overflowed U16 (longLengthID set) is
 *  forced to the maximum code so the decoder reads the long form. */
void ZSTD_seqToCodes(const seqStore_t *seqStorePtr)
{
	BYTE const LL_deltaCode = 19; /* code of the first literal length > 63 */
	BYTE const ML_deltaCode = 36; /* code of the first match-length value > 127 */
	const seqDef *const sequences = seqStorePtr->sequencesStart;
	BYTE *const llCodeTable = seqStorePtr->llCode;
	BYTE *const ofCodeTable = seqStorePtr->ofCode;
	BYTE *const mlCodeTable = seqStorePtr->mlCode;
	U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
	U32 u;
	for (u = 0; u < nbSeq; u++) {
		U32 const llv = sequences[u].litLength;
		U32 const mlv = sequences[u].matchLength;
		llCodeTable[u] = (llv > 63) ? (BYTE)ZSTD_highbit32(llv) + LL_deltaCode : LL_Code[llv];
		ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
		mlCodeTable[u] = (mlv > 127) ? (BYTE)ZSTD_highbit32(mlv) + ML_deltaCode : ML_Code[mlv];
	}
	/* lengths truncated to U16 in the seqStore : patch in the max code */
	if (seqStorePtr->longLengthID == 1)
		llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
	if (seqStorePtr->longLengthID == 2)
		mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
}
585
/*! ZSTD_compressSequences_internal() :
 *  Entropy-encode the seqStore of `zc` into `dst` :
 *  1) compress the literals section,
 *  2) write the sequences header (count + symbol-encoding modes),
 *  3) build or reuse one FSE table per symbol stream (LL / Off / ML),
 *  4) write the interleaved FSE/raw-bits bitstream, last sequence first.
 *  @return : total bytes written, or an error code. */
ZSTD_STATIC size_t ZSTD_compressSequences_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity)
{
	const int longOffsets = zc->params.cParams.windowLog > STREAM_ACCUMULATOR_MIN; /* offsets may exceed the bit accumulator */
	const seqStore_t *seqStorePtr = &(zc->seqStore);
	FSE_CTable *CTable_LitLength = zc->litlengthCTable;
	FSE_CTable *CTable_OffsetBits = zc->offcodeCTable;
	FSE_CTable *CTable_MatchLength = zc->matchlengthCTable;
	U32 LLtype, Offtype, MLtype; /* symbolEncodingType_e per stream */
	const seqDef *const sequences = seqStorePtr->sequencesStart;
	const BYTE *const ofCodeTable = seqStorePtr->ofCode;
	const BYTE *const llCodeTable = seqStorePtr->llCode;
	const BYTE *const mlCodeTable = seqStorePtr->mlCode;
	BYTE *const ostart = (BYTE *)dst;
	BYTE *const oend = ostart + dstCapacity;
	BYTE *op = ostart;
	size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
	BYTE *seqHead;

	U32 *count;
	S16 *norm;
	U32 *workspace;
	size_t workspaceSize = sizeof(zc->tmpCounters);
	{
		/* Carve tmpCounters into count[], norm[], and FSE build workspace. */
		size_t spaceUsed32 = 0;
		count = (U32 *)zc->tmpCounters + spaceUsed32;
		spaceUsed32 += MaxSeq + 1;
		norm = (S16 *)((U32 *)zc->tmpCounters + spaceUsed32);
		spaceUsed32 += ALIGN(sizeof(S16) * (MaxSeq + 1), sizeof(U32)) >> 2;

		workspace = (U32 *)zc->tmpCounters + spaceUsed32;
		workspaceSize -= (spaceUsed32 << 2);
	}

	/* Compress literals */
	{
		const BYTE *const literals = seqStorePtr->litStart;
		size_t const litSize = seqStorePtr->lit - literals;
		size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize);
		if (ZSTD_isError(cSize))
			return cSize;
		op += cSize;
	}

	/* Sequences Header : 1-3 bytes of count, then 1 byte of encoding modes. */
	if ((oend - op) < 3 /* max nbSeq size */ + 1 /* seqHead */)
		return ERROR(dstSize_tooSmall);
	if (nbSeq < 0x7F)
		*op++ = (BYTE)nbSeq;
	else if (nbSeq < LONGNBSEQ)
		op[0] = (BYTE)((nbSeq >> 8) + 0x80), op[1] = (BYTE)nbSeq, op += 2;
	else
		op[0] = 0xFF, ZSTD_writeLE16(op + 1, (U16)(nbSeq - LONGNBSEQ)), op += 3;
	if (nbSeq == 0)
		return op - ostart; /* no sequences : literals only */

	/* seqHead : flags for FSE encoding type, filled in after the modes are chosen */
	seqHead = op++;

/* thresholds steering the rle / repeat / default / compressed choice below */
#define MIN_SEQ_FOR_DYNAMIC_FSE 64
#define MAX_SEQ_FOR_STATIC_FSE 1000

	/* convert length/distances into codes */
	ZSTD_seqToCodes(seqStorePtr);

	/* CTable for Literal Lengths */
	{
		U32 max = MaxLL;
		size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, workspace);
		if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
			/* single symbol : RLE mode, symbol stored directly */
			*op++ = llCodeTable[0];
			FSE_buildCTable_rle(CTable_LitLength, (BYTE)max);
			LLtype = set_rle;
		} else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
			/* reuse the previously transmitted table */
			LLtype = set_repeat;
		} else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog - 1)))) {
			/* too few samples or too flat : predefined distribution */
			FSE_buildCTable_wksp(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog, workspace, workspaceSize);
			LLtype = set_basic;
		} else {
			/* build and transmit a dedicated table */
			size_t nbSeq_1 = nbSeq;
			const U32 tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max);
			if (count[llCodeTable[nbSeq - 1]] > 1) {
				count[llCodeTable[nbSeq - 1]]--; /* last symbol is implied by stream end */
				nbSeq_1--;
			}
			FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
			{
				size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog);
				if (FSE_isError(NCountSize))
					return NCountSize;
				op += NCountSize;
			}
			FSE_buildCTable_wksp(CTable_LitLength, norm, max, tableLog, workspace, workspaceSize);
			LLtype = set_compressed;
		}
	}

	/* CTable for Offsets : same four-way choice as literal lengths */
	{
		U32 max = MaxOff;
		size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, workspace);
		if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
			*op++ = ofCodeTable[0];
			FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max);
			Offtype = set_rle;
		} else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
			Offtype = set_repeat;
		} else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog - 1)))) {
			FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, workspace, workspaceSize);
			Offtype = set_basic;
		} else {
			size_t nbSeq_1 = nbSeq;
			const U32 tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max);
			if (count[ofCodeTable[nbSeq - 1]] > 1) {
				count[ofCodeTable[nbSeq - 1]]--;
				nbSeq_1--;
			}
			FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
			{
				size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog);
				if (FSE_isError(NCountSize))
					return NCountSize;
				op += NCountSize;
			}
			FSE_buildCTable_wksp(CTable_OffsetBits, norm, max, tableLog, workspace, workspaceSize);
			Offtype = set_compressed;
		}
	}

	/* CTable for MatchLengths : same four-way choice */
	{
		U32 max = MaxML;
		size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, workspace);
		if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
			*op++ = *mlCodeTable;
			FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max);
			MLtype = set_rle;
		} else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
			MLtype = set_repeat;
		} else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog - 1)))) {
			FSE_buildCTable_wksp(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog, workspace, workspaceSize);
			MLtype = set_basic;
		} else {
			size_t nbSeq_1 = nbSeq;
			const U32 tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max);
			if (count[mlCodeTable[nbSeq - 1]] > 1) {
				count[mlCodeTable[nbSeq - 1]]--;
				nbSeq_1--;
			}
			FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
			{
				size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog);
				if (FSE_isError(NCountSize))
					return NCountSize;
				op += NCountSize;
			}
			FSE_buildCTable_wksp(CTable_MatchLength, norm, max, tableLog, workspace, workspaceSize);
			MLtype = set_compressed;
		}
	}

	*seqHead = (BYTE)((LLtype << 6) + (Offtype << 4) + (MLtype << 2)); /* 2 bits per stream mode */
	zc->flagStaticTables = 0; /* tables consumed for this block */

	/* Encoding Sequences : FSE states + raw extra bits, last sequence first. */
	{
		BIT_CStream_t blockStream;
		FSE_CState_t stateMatchLength;
		FSE_CState_t stateOffsetBits;
		FSE_CState_t stateLitLength;

		CHECK_E(BIT_initCStream(&blockStream, op, oend - op), dstSize_tooSmall); /* not enough space remaining */

		/* first symbols initialize the FSE states */
		FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq - 1]);
		FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq - 1]);
		FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq - 1]);
		BIT_addBits(&blockStream, sequences[nbSeq - 1].litLength, LL_bits[llCodeTable[nbSeq - 1]]);
		if (ZSTD_32bits())
			BIT_flushBits(&blockStream); /* 32-bit accumulator fills faster */
		BIT_addBits(&blockStream, sequences[nbSeq - 1].matchLength, ML_bits[mlCodeTable[nbSeq - 1]]);
		if (ZSTD_32bits())
			BIT_flushBits(&blockStream);
		if (longOffsets) {
			/* split wide offsets so each BIT_addBits stays within the accumulator */
			U32 const ofBits = ofCodeTable[nbSeq - 1];
			int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1);
			if (extraBits) {
				BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, extraBits);
				BIT_flushBits(&blockStream);
			}
			BIT_addBits(&blockStream, sequences[nbSeq - 1].offset >> extraBits, ofBits - extraBits);
		} else {
			BIT_addBits(&blockStream, sequences[nbSeq - 1].offset, ofCodeTable[nbSeq - 1]);
		}
		BIT_flushBits(&blockStream);

		{
			size_t n;
			for (n = nbSeq - 2; n < nbSeq; n--) { /* intentional underflow : loop ends when n wraps past 0 */
				BYTE const llCode = llCodeTable[n];
				BYTE const ofCode = ofCodeTable[n];
				BYTE const mlCode = mlCodeTable[n];
				U32 const llBits = LL_bits[llCode];
				U32 const ofBits = ofCode; /* offset code == number of extra bits */
				U32 const mlBits = ML_bits[mlCode];

				/* encode in the reverse of decode order : of, ml, ll states, then raw bits */
				FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);
				FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode);
				if (ZSTD_32bits())
					BIT_flushBits(&blockStream);
				FSE_encodeSymbol(&blockStream, &stateLitLength, llCode);
				if (ZSTD_32bits() || (ofBits + mlBits + llBits >= 64 - 7 - (LLFSELog + MLFSELog + OffFSELog)))
					BIT_flushBits(&blockStream); /* accumulator might not hold everything */
				BIT_addBits(&blockStream, sequences[n].litLength, llBits);
				if (ZSTD_32bits() && ((llBits + mlBits) > 24))
					BIT_flushBits(&blockStream);
				BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
				if (ZSTD_32bits())
					BIT_flushBits(&blockStream);
				if (longOffsets) {
					int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN - 1);
					if (extraBits) {
						BIT_addBits(&blockStream, sequences[n].offset, extraBits);
						BIT_flushBits(&blockStream);
					}
					BIT_addBits(&blockStream, sequences[n].offset >> extraBits, ofBits - extraBits);
				} else {
					BIT_addBits(&blockStream, sequences[n].offset, ofBits);
				}
				BIT_flushBits(&blockStream);
			}
		}

		FSE_flushCState(&blockStream, &stateMatchLength);
		FSE_flushCState(&blockStream, &stateOffsetBits);
		FSE_flushCState(&blockStream, &stateLitLength);

		{
			size_t const streamSize = BIT_closeCStream(&blockStream);
			if (streamSize == 0)
				return ERROR(dstSize_tooSmall); /* not enough space */
			op += streamSize;
		}
	}
	return op - ostart;
}
831
/*! ZSTD_compressSequences() :
 *  Entropy-compress the seqStore of `zc` into `dst`, then decide whether the
 *  result is worth keeping.
 *  @return : compressed size, 0 when the block should be stored raw instead
 *            (insufficient gain or dst too small for the compressed form but
 *            large enough for raw), or an error code. */
ZSTD_STATIC size_t ZSTD_compressSequences(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, size_t srcSize)
{
	size_t const cSize = ZSTD_compressSequences_internal(zc, dst, dstCapacity);
	size_t const minGain = ZSTD_minGain(srcSize);
	size_t const maxCSize = srcSize - minGain;

	/* dstSize_tooSmall is recoverable when a raw block would fit :
	 * report "uncompressible" (0) instead of an error. */
	int const uncompressibleError = cSize == ERROR(dstSize_tooSmall) && srcSize <= dstCapacity;
	int i;

	if (ZSTD_isError(cSize) && !uncompressibleError)
		return cSize;
	if (cSize >= maxCSize || uncompressibleError) {
		zc->flagStaticHufTable = HUF_repeat_none; /* the built Huffman table was not transmitted : drop it */
		return 0;				  /* block not compressed */
	}

	/* compression succeeded : promote the candidate rep codes */
	for (i = 0; i < ZSTD_REP_NUM; i++)
		zc->rep[i] = zc->repToConfirm[i];
	return cSize;
}
855
856
857
858
859
860
861 ZSTD_STATIC void ZSTD_storeSeq(seqStore_t *seqStorePtr, size_t litLength, const void *literals, U32 offsetCode, size_t matchCode)
862 {
863
864 ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
865 seqStorePtr->lit += litLength;
866
867
868 if (litLength > 0xFFFF) {
869 seqStorePtr->longLengthID = 1;
870 seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
871 }
872 seqStorePtr->sequences[0].litLength = (U16)litLength;
873
874
875 seqStorePtr->sequences[0].offset = offsetCode + 1;
876
877
878 if (matchCode > 0xFFFF) {
879 seqStorePtr->longLengthID = 2;
880 seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
881 }
882 seqStorePtr->sequences[0].matchLength = (U16)matchCode;
883
884 seqStorePtr->sequences++;
885 }
886
887
888
889
890 static unsigned ZSTD_NbCommonBytes(register size_t val)
891 {
892 if (ZSTD_isLittleEndian()) {
893 if (ZSTD_64bits()) {
894 return (__builtin_ctzll((U64)val) >> 3);
895 } else {
896 return (__builtin_ctz((U32)val) >> 3);
897 }
898 } else {
899 if (ZSTD_64bits()) {
900 return (__builtin_clzll(val) >> 3);
901 } else {
902 return (__builtin_clz((U32)val) >> 3);
903 }
904 }
905 }
906
907 static size_t ZSTD_count(const BYTE *pIn, const BYTE *pMatch, const BYTE *const pInLimit)
908 {
909 const BYTE *const pStart = pIn;
910 const BYTE *const pInLoopLimit = pInLimit - (sizeof(size_t) - 1);
911
912 while (pIn < pInLoopLimit) {
913 size_t const diff = ZSTD_readST(pMatch) ^ ZSTD_readST(pIn);
914 if (!diff) {
915 pIn += sizeof(size_t);
916 pMatch += sizeof(size_t);
917 continue;
918 }
919 pIn += ZSTD_NbCommonBytes(diff);
920 return (size_t)(pIn - pStart);
921 }
922 if (ZSTD_64bits())
923 if ((pIn < (pInLimit - 3)) && (ZSTD_read32(pMatch) == ZSTD_read32(pIn))) {
924 pIn += 4;
925 pMatch += 4;
926 }
927 if ((pIn < (pInLimit - 1)) && (ZSTD_read16(pMatch) == ZSTD_read16(pIn))) {
928 pIn += 2;
929 pMatch += 2;
930 }
931 if ((pIn < pInLimit) && (*pMatch == *pIn))
932 pIn++;
933 return (size_t)(pIn - pStart);
934 }
935
936
937
938
939
940 static size_t ZSTD_count_2segments(const BYTE *ip, const BYTE *match, const BYTE *iEnd, const BYTE *mEnd, const BYTE *iStart)
941 {
942 const BYTE *const vEnd = MIN(ip + (mEnd - match), iEnd);
943 size_t const matchLength = ZSTD_count(ip, match, vEnd);
944 if (match + matchLength != mEnd)
945 return matchLength;
946 return matchLength + ZSTD_count(ip + matchLength, iStart, iEnd);
947 }
948
949
950
951
/*
 * Multiply-shift hashing, one variant per match-length:
 * keep only the n lowest bytes of the input word (via the left shift),
 * multiply by a large odd constant, and take the top `h` bits as the hash.
 */

static const U32 prime3bytes = 506832829U;
/* hash the 3 low bytes of u into h bits */
static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32 - 24)) * prime3bytes) >> (32 - h); }
ZSTD_STATIC size_t ZSTD_hash3Ptr(const void *ptr, U32 h) { return ZSTD_hash3(ZSTD_readLE32(ptr), h); }

static const U32 prime4bytes = 2654435761U;
/* 4-byte variant; reads in native order (byte identity is irrelevant, only
 * consistency between insert and lookup matters) */
static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32 - h); }
static size_t ZSTD_hash4Ptr(const void *ptr, U32 h) { return ZSTD_hash4(ZSTD_read32(ptr), h); }

static const U64 prime5bytes = 889523592379ULL;
/* 5..8 byte variants work on a little-endian 64-bit read */
static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64 - 40)) * prime5bytes) >> (64 - h)); }
static size_t ZSTD_hash5Ptr(const void *p, U32 h) { return ZSTD_hash5(ZSTD_readLE64(p), h); }

static const U64 prime6bytes = 227718039650203ULL;
static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64 - 48)) * prime6bytes) >> (64 - h)); }
static size_t ZSTD_hash6Ptr(const void *p, U32 h) { return ZSTD_hash6(ZSTD_readLE64(p), h); }

static const U64 prime7bytes = 58295818150454627ULL;
static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64 - 56)) * prime7bytes) >> (64 - h)); }
static size_t ZSTD_hash7Ptr(const void *p, U32 h) { return ZSTD_hash7(ZSTD_readLE64(p), h); }

static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
/* 8-byte variant uses the whole word, no shift needed */
static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u)*prime8bytes) >> (64 - h)); }
static size_t ZSTD_hash8Ptr(const void *p, U32 h) { return ZSTD_hash8(ZSTD_readLE64(p), h); }
975
976 static size_t ZSTD_hashPtr(const void *p, U32 hBits, U32 mls)
977 {
978 switch (mls) {
979
980 default:
981 case 4: return ZSTD_hash4Ptr(p, hBits);
982 case 5: return ZSTD_hash5Ptr(p, hBits);
983 case 6: return ZSTD_hash6Ptr(p, hBits);
984 case 7: return ZSTD_hash7Ptr(p, hBits);
985 case 8: return ZSTD_hash8Ptr(p, hBits);
986 }
987 }
988
989
990
991
992 static void ZSTD_fillHashTable(ZSTD_CCtx *zc, const void *end, const U32 mls)
993 {
994 U32 *const hashTable = zc->hashTable;
995 U32 const hBits = zc->params.cParams.hashLog;
996 const BYTE *const base = zc->base;
997 const BYTE *ip = base + zc->nextToUpdate;
998 const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE;
999 const size_t fastHashFillStep = 3;
1000
1001 while (ip <= iend) {
1002 hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
1003 ip += fastHashFillStep;
1004 }
1005 }
1006
/*
 * ZSTD_compressBlock_fast_generic() :
 * Single-segment "fast" compressor: one hash table mapping an mls-byte hash
 * to the most recent position, plus repcode checks. Emits sequences into the
 * cctx seqStore; final repcodes go to repToConfirm (committed only if the
 * block is kept, see ZSTD_compressSequences).
 */
FORCE_INLINE
void ZSTD_compressBlock_fast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls)
{
	U32 *const hashTable = cctx->hashTable;
	U32 const hBits = cctx->params.cParams.hashLog;
	seqStore_t *seqStorePtr = &(cctx->seqStore);
	const BYTE *const base = cctx->base;
	const BYTE *const istart = (const BYTE *)src;
	const BYTE *ip = istart;
	const BYTE *anchor = istart; /* start of not-yet-emitted literals */
	const U32 lowestIndex = cctx->dictLimit;
	const BYTE *const lowest = base + lowestIndex;
	const BYTE *const iend = istart + srcSize;
	const BYTE *const ilimit = iend - HASH_READ_SIZE;
	U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1];
	U32 offsetSaved = 0;

	/* init: skip the very first position; disable (and save) any repcode
	 * that would reach before the window start */
	ip += (ip == lowest);
	{
		U32 const maxRep = (U32)(ip - lowest);
		if (offset_2 > maxRep)
			offsetSaved = offset_2, offset_2 = 0;
		if (offset_1 > maxRep)
			offsetSaved = offset_1, offset_1 = 0;
	}

	/* Main search loop */
	while (ip < ilimit) {
		size_t mLength;
		size_t const h = ZSTD_hashPtr(ip, hBits, mls);
		U32 const curr = (U32)(ip - base);
		U32 const matchIndex = hashTable[h];
		const BYTE *match = base + matchIndex;
		hashTable[h] = curr; /* update hash table */

		/* repcode first: check offset_1 at ip+1 */
		if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) {
			mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4;
			ip++;
			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
		} else {
			U32 offset;
			if ((matchIndex <= lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) {
				/* no match: accelerate forward the longer we go without one */
				ip += ((ip - anchor) >> g_searchStrength) + 1;
				continue;
			}
			mLength = ZSTD_count(ip + 4, match + 4, iend) + 4;
			offset = (U32)(ip - match);
			/* extend the match backward over pending literals */
			while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) {
				ip--;
				match--;
				mLength++;
			}
			offset_2 = offset_1;
			offset_1 = offset;

			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
		}

		/* match found */
		ip += mLength;
		anchor = ip;

		if (ip <= ilimit) {
			/* fill the table at the positions the match skipped over */
			hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2;
			hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base);
			/* greedily chain repcode matches (offset_2) right after the match */
			while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
				/* store sequence */
				size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4;
				{
					/* swap repcodes */
					U32 const tmpOff = offset_2;
					offset_2 = offset_1;
					offset_1 = tmpOff;
				}
				hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
				ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH);
				ip += rLength;
				anchor = ip;
				continue; /* faster when present ... (?) */
			}
		}
	}

	/* save tentative repcodes; restore the saved one if a slot stayed unused */
	cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
	cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;

	/* flush the trailing literals */
	{
		size_t const lastLLSize = iend - anchor;
		memcpy(seqStorePtr->lit, anchor, lastLLSize);
		seqStorePtr->lit += lastLLSize;
	}
}
1103
1104 static void ZSTD_compressBlock_fast(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
1105 {
1106 const U32 mls = ctx->params.cParams.searchLength;
1107 switch (mls) {
1108 default:
1109 case 4: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); return;
1110 case 5: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5); return;
1111 case 6: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6); return;
1112 case 7: ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7); return;
1113 }
1114 }
1115
/*
 * ZSTD_compressBlock_fast_extDict_generic() :
 * extDict variant of the fast compressor: indices below dictLimit live in
 * dictBase (the external/previous segment), indices at or above it in base
 * (the current prefix). Matches may start in the dict segment and extend
 * into the prefix (ZSTD_count_2segments).
 */
static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls)
{
	U32 *hashTable = ctx->hashTable;
	const U32 hBits = ctx->params.cParams.hashLog;
	seqStore_t *seqStorePtr = &(ctx->seqStore);
	const BYTE *const base = ctx->base;
	const BYTE *const dictBase = ctx->dictBase;
	const BYTE *const istart = (const BYTE *)src;
	const BYTE *ip = istart;
	const BYTE *anchor = istart;
	const U32 lowestIndex = ctx->lowLimit;
	const BYTE *const dictStart = dictBase + lowestIndex;
	const U32 dictLimit = ctx->dictLimit;
	const BYTE *const lowPrefixPtr = base + dictLimit;
	const BYTE *const dictEnd = dictBase + dictLimit;
	const BYTE *const iend = istart + srcSize;
	const BYTE *const ilimit = iend - 8;
	U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];

	/* Search Loop */
	while (ip < ilimit) {
		const size_t h = ZSTD_hashPtr(ip, hBits, mls);
		const U32 matchIndex = hashTable[h];
		/* pick the right segment for the candidate match */
		const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base;
		const BYTE *match = matchBase + matchIndex;
		const U32 curr = (U32)(ip - base);
		const U32 repIndex = curr + 1 - offset_1;
		const BYTE *repBase = repIndex < dictLimit ? dictBase : base;
		const BYTE *repMatch = repBase + repIndex;
		size_t mLength;
		hashTable[h] = curr; /* update hash table */

		/* repcode check: the "(dictLimit-1) - repIndex >= 3" test rejects
		 * repIndex values straddling the segment boundary */
		if ((((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) &&
		    (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) {
			const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
			mLength = ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repMatchEnd, lowPrefixPtr) + EQUAL_READ32;
			ip++;
			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
		} else {
			if ((matchIndex < lowestIndex) || (ZSTD_read32(match) != ZSTD_read32(ip))) {
				/* no match: skip ahead, faster the longer we fail */
				ip += ((ip - anchor) >> g_searchStrength) + 1;
				continue;
			}
			{
				const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend;
				const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
				U32 offset;
				mLength = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32;
				/* extend backward, but not past the match's own segment start */
				while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) {
					ip--;
					match--;
					mLength++;
				}
				offset = curr - matchIndex;
				offset_2 = offset_1;
				offset_1 = offset;
				ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
			}
		}

		/* found a match : try to repeat it right away */
		ip += mLength;
		anchor = ip;

		if (ip <= ilimit) {
			/* fill the table at the skipped positions */
			hashTable[ZSTD_hashPtr(base + curr + 2, hBits, mls)] = curr + 2;
			hashTable[ZSTD_hashPtr(ip - 2, hBits, mls)] = (U32)(ip - 2 - base);
			/* chain offset_2 repcode matches after the match */
			while (ip <= ilimit) {
				U32 const curr2 = (U32)(ip - base);
				U32 const repIndex2 = curr2 - offset_2;
				const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
				if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex))
				    && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) {
					const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
					size_t repLength2 =
					    ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
					U32 tmpOffset = offset_2; /* swap offset_2 <=> offset_1 */
					offset_2 = offset_1;
					offset_1 = tmpOffset;
					ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH);
					hashTable[ZSTD_hashPtr(ip, hBits, mls)] = curr2;
					ip += repLength2;
					anchor = ip;
					continue;
				}
				break;
			}
		}
	}

	/* save tentative repcodes */
	ctx->repToConfirm[0] = offset_1;
	ctx->repToConfirm[1] = offset_2;

	/* flush the trailing literals */
	{
		size_t const lastLLSize = iend - anchor;
		memcpy(seqStorePtr->lit, anchor, lastLLSize);
		seqStorePtr->lit += lastLLSize;
	}
}
1219
1220 static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
1221 {
1222 U32 const mls = ctx->params.cParams.searchLength;
1223 switch (mls) {
1224 default:
1225 case 4: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); return;
1226 case 5: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5); return;
1227 case 6: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6); return;
1228 case 7: ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7); return;
1229 }
1230 }
1231
1232
1233
1234
1235 static void ZSTD_fillDoubleHashTable(ZSTD_CCtx *cctx, const void *end, const U32 mls)
1236 {
1237 U32 *const hashLarge = cctx->hashTable;
1238 U32 const hBitsL = cctx->params.cParams.hashLog;
1239 U32 *const hashSmall = cctx->chainTable;
1240 U32 const hBitsS = cctx->params.cParams.chainLog;
1241 const BYTE *const base = cctx->base;
1242 const BYTE *ip = base + cctx->nextToUpdate;
1243 const BYTE *const iend = ((const BYTE *)end) - HASH_READ_SIZE;
1244 const size_t fastHashFillStep = 3;
1245
1246 while (ip <= iend) {
1247 hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
1248 hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
1249 ip += fastHashFillStep;
1250 }
1251 }
1252
/*
 * ZSTD_compressBlock_doubleFast_generic() :
 * Single-segment doubleFast compressor: two hash tables, one keyed on 8
 * bytes (long matches, preferred) and one on mls bytes (short matches).
 * A short-match hit also probes the long table at ip+1 before settling.
 */
FORCE_INLINE
void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx *cctx, const void *src, size_t srcSize, const U32 mls)
{
	U32 *const hashLong = cctx->hashTable;
	const U32 hBitsL = cctx->params.cParams.hashLog;
	U32 *const hashSmall = cctx->chainTable;
	const U32 hBitsS = cctx->params.cParams.chainLog;
	seqStore_t *seqStorePtr = &(cctx->seqStore);
	const BYTE *const base = cctx->base;
	const BYTE *const istart = (const BYTE *)src;
	const BYTE *ip = istart;
	const BYTE *anchor = istart;
	const U32 lowestIndex = cctx->dictLimit;
	const BYTE *const lowest = base + lowestIndex;
	const BYTE *const iend = istart + srcSize;
	const BYTE *const ilimit = iend - HASH_READ_SIZE;
	U32 offset_1 = cctx->rep[0], offset_2 = cctx->rep[1];
	U32 offsetSaved = 0;

	/* init: skip first position; disable (and save) out-of-window repcodes */
	ip += (ip == lowest);
	{
		U32 const maxRep = (U32)(ip - lowest);
		if (offset_2 > maxRep)
			offsetSaved = offset_2, offset_2 = 0;
		if (offset_1 > maxRep)
			offsetSaved = offset_1, offset_1 = 0;
	}

	/* Main search loop */
	while (ip < ilimit) {
		size_t mLength;
		size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
		size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
		U32 const curr = (U32)(ip - base);
		U32 const matchIndexL = hashLong[h2];
		U32 const matchIndexS = hashSmall[h];
		const BYTE *matchLong = base + matchIndexL;
		const BYTE *match = base + matchIndexS;
		hashLong[h2] = hashSmall[h] = curr; /* update both tables */

		/* repcode check first */
		if ((offset_1 > 0) & (ZSTD_read32(ip + 1 - offset_1) == ZSTD_read32(ip + 1))) {
			mLength = ZSTD_count(ip + 1 + 4, ip + 1 + 4 - offset_1, iend) + 4;
			ip++;
			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
		} else {
			U32 offset;
			if ((matchIndexL > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) {
				/* long (8-byte) match */
				mLength = ZSTD_count(ip + 8, matchLong + 8, iend) + 8;
				offset = (U32)(ip - matchLong);
				while (((ip > anchor) & (matchLong > lowest)) && (ip[-1] == matchLong[-1])) {
					ip--;
					matchLong--;
					mLength++;
				} /* catch up */
			} else if ((matchIndexS > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) {
				/* short match found: try for a long match at ip+1 first */
				size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8);
				U32 const matchIndex3 = hashLong[h3];
				const BYTE *match3 = base + matchIndex3;
				hashLong[h3] = curr + 1;
				if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) {
					mLength = ZSTD_count(ip + 9, match3 + 8, iend) + 8;
					ip++;
					offset = (U32)(ip - match3);
					while (((ip > anchor) & (match3 > lowest)) && (ip[-1] == match3[-1])) {
						ip--;
						match3--;
						mLength++;
					} /* catch up */
				} else {
					/* settle for the short match */
					mLength = ZSTD_count(ip + 4, match + 4, iend) + 4;
					offset = (U32)(ip - match);
					while (((ip > anchor) & (match > lowest)) && (ip[-1] == match[-1])) {
						ip--;
						match--;
						mLength++;
					} /* catch up */
				}
			} else {
				/* no match: accelerate forward */
				ip += ((ip - anchor) >> g_searchStrength) + 1;
				continue;
			}

			offset_2 = offset_1;
			offset_1 = offset;

			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);
		}

		/* match found */
		ip += mLength;
		anchor = ip;

		if (ip <= ilimit) {
			/* fill both tables at skipped positions */
			hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] =
			    curr + 2;
			hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base);

			/* chain offset_2 repcode matches after the match */
			while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
				/* store sequence */
				size_t const rLength = ZSTD_count(ip + 4, ip + 4 - offset_2, iend) + 4;
				{
					/* swap offset_2 <=> offset_1 */
					U32 const tmpOff = offset_2;
					offset_2 = offset_1;
					offset_1 = tmpOff;
				}
				hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
				hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
				ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength - MINMATCH);
				ip += rLength;
				anchor = ip;
				continue; /* faster when present ... (?) */
			}
		}
	}

	/* save tentative repcodes; restore the saved one if a slot stayed unused */
	cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
	cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;

	/* flush the trailing literals */
	{
		size_t const lastLLSize = iend - anchor;
		memcpy(seqStorePtr->lit, anchor, lastLLSize);
		seqStorePtr->lit += lastLLSize;
	}
}
1382
1383 static void ZSTD_compressBlock_doubleFast(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
1384 {
1385 const U32 mls = ctx->params.cParams.searchLength;
1386 switch (mls) {
1387 default:
1388 case 4: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); return;
1389 case 5: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5); return;
1390 case 6: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6); return;
1391 case 7: ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7); return;
1392 }
1393 }
1394
/*
 * ZSTD_compressBlock_doubleFast_extDict_generic() :
 * extDict variant of the doubleFast compressor: indices below dictLimit
 * resolve into dictBase (external segment), others into base (prefix).
 * Match priority: repcode, then 8-byte long match, then short match with
 * a long-match retry at ip+1.
 */
static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 mls)
{
	U32 *const hashLong = ctx->hashTable;
	U32 const hBitsL = ctx->params.cParams.hashLog;
	U32 *const hashSmall = ctx->chainTable;
	U32 const hBitsS = ctx->params.cParams.chainLog;
	seqStore_t *seqStorePtr = &(ctx->seqStore);
	const BYTE *const base = ctx->base;
	const BYTE *const dictBase = ctx->dictBase;
	const BYTE *const istart = (const BYTE *)src;
	const BYTE *ip = istart;
	const BYTE *anchor = istart;
	const U32 lowestIndex = ctx->lowLimit;
	const BYTE *const dictStart = dictBase + lowestIndex;
	const U32 dictLimit = ctx->dictLimit;
	const BYTE *const lowPrefixPtr = base + dictLimit;
	const BYTE *const dictEnd = dictBase + dictLimit;
	const BYTE *const iend = istart + srcSize;
	const BYTE *const ilimit = iend - 8;
	U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];

	/* Search Loop */
	while (ip < ilimit) {
		const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
		const U32 matchIndex = hashSmall[hSmall];
		const BYTE *matchBase = matchIndex < dictLimit ? dictBase : base;
		const BYTE *match = matchBase + matchIndex;

		const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
		const U32 matchLongIndex = hashLong[hLong];
		const BYTE *matchLongBase = matchLongIndex < dictLimit ? dictBase : base;
		const BYTE *matchLong = matchLongBase + matchLongIndex;

		const U32 curr = (U32)(ip - base);
		const U32 repIndex = curr + 1 - offset_1;
		const BYTE *repBase = repIndex < dictLimit ? dictBase : base;
		const BYTE *repMatch = repBase + repIndex;
		size_t mLength;
		hashSmall[hSmall] = hashLong[hLong] = curr; /* update both tables */

		/* repcode check; the "(dictLimit-1) - repIndex >= 3" test rejects
		 * repIndex values straddling the segment boundary */
		if ((((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex)) &&
		    (ZSTD_read32(repMatch) == ZSTD_read32(ip + 1))) {
			const BYTE *repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
			mLength = ZSTD_count_2segments(ip + 1 + 4, repMatch + 4, iend, repMatchEnd, lowPrefixPtr) + 4;
			ip++;
			ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, 0, mLength - MINMATCH);
		} else {
			if ((matchLongIndex > lowestIndex) && (ZSTD_read64(matchLong) == ZSTD_read64(ip))) {
				/* long (8-byte) match */
				const BYTE *matchEnd = matchLongIndex < dictLimit ? dictEnd : iend;
				const BYTE *lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr;
				U32 offset;
				mLength = ZSTD_count_2segments(ip + 8, matchLong + 8, iend, matchEnd, lowPrefixPtr) + 8;
				offset = curr - matchLongIndex;
				/* extend backward, within the match's segment */
				while (((ip > anchor) & (matchLong > lowMatchPtr)) && (ip[-1] == matchLong[-1])) {
					ip--;
					matchLong--;
					mLength++;
				} /* catch up */
				offset_2 = offset_1;
				offset_1 = offset;
				ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);

			} else if ((matchIndex > lowestIndex) && (ZSTD_read32(match) == ZSTD_read32(ip))) {
				/* short match found: retry the long table at ip+1 first */
				size_t const h3 = ZSTD_hashPtr(ip + 1, hBitsL, 8);
				U32 const matchIndex3 = hashLong[h3];
				const BYTE *const match3Base = matchIndex3 < dictLimit ? dictBase : base;
				const BYTE *match3 = match3Base + matchIndex3;
				U32 offset;
				hashLong[h3] = curr + 1;
				if ((matchIndex3 > lowestIndex) && (ZSTD_read64(match3) == ZSTD_read64(ip + 1))) {
					const BYTE *matchEnd = matchIndex3 < dictLimit ? dictEnd : iend;
					const BYTE *lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr;
					mLength = ZSTD_count_2segments(ip + 9, match3 + 8, iend, matchEnd, lowPrefixPtr) + 8;
					ip++;
					offset = curr + 1 - matchIndex3;
					while (((ip > anchor) & (match3 > lowMatchPtr)) && (ip[-1] == match3[-1])) {
						ip--;
						match3--;
						mLength++;
					} /* catch up */
				} else {
					/* settle for the short match */
					const BYTE *matchEnd = matchIndex < dictLimit ? dictEnd : iend;
					const BYTE *lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
					mLength = ZSTD_count_2segments(ip + 4, match + 4, iend, matchEnd, lowPrefixPtr) + 4;
					offset = curr - matchIndex;
					while (((ip > anchor) & (match > lowMatchPtr)) && (ip[-1] == match[-1])) {
						ip--;
						match--;
						mLength++;
					} /* catch up */
				}
				offset_2 = offset_1;
				offset_1 = offset;
				ZSTD_storeSeq(seqStorePtr, ip - anchor, anchor, offset + ZSTD_REP_MOVE, mLength - MINMATCH);

			} else {
				/* no match: accelerate forward */
				ip += ((ip - anchor) >> g_searchStrength) + 1;
				continue;
			}
		}

		/* found a match : try to repeat it right away */
		ip += mLength;
		anchor = ip;

		if (ip <= ilimit) {
			/* fill both tables at skipped positions */
			hashSmall[ZSTD_hashPtr(base + curr + 2, hBitsS, mls)] = curr + 2;
			hashLong[ZSTD_hashPtr(base + curr + 2, hBitsL, 8)] = curr + 2;
			hashSmall[ZSTD_hashPtr(ip - 2, hBitsS, mls)] = (U32)(ip - 2 - base);
			hashLong[ZSTD_hashPtr(ip - 2, hBitsL, 8)] = (U32)(ip - 2 - base);

			/* chain offset_2 repcode matches after the match */
			while (ip <= ilimit) {
				U32 const curr2 = (U32)(ip - base);
				U32 const repIndex2 = curr2 - offset_2;
				const BYTE *repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
				if ((((U32)((dictLimit - 1) - repIndex2) >= 3) & (repIndex2 > lowestIndex))
				    && (ZSTD_read32(repMatch2) == ZSTD_read32(ip))) {
					const BYTE *const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
					size_t const repLength2 =
					    ZSTD_count_2segments(ip + EQUAL_READ32, repMatch2 + EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
					U32 tmpOffset = offset_2; /* swap offset_2 <=> offset_1 */
					offset_2 = offset_1;
					offset_1 = tmpOffset;
					ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2 - MINMATCH);
					hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = curr2;
					hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = curr2;
					ip += repLength2;
					anchor = ip;
					continue;
				}
				break;
			}
		}
	}

	/* save tentative repcodes */
	ctx->repToConfirm[0] = offset_1;
	ctx->repToConfirm[1] = offset_2;

	/* flush the trailing literals */
	{
		size_t const lastLLSize = iend - anchor;
		memcpy(seqStorePtr->lit, anchor, lastLLSize);
		seqStorePtr->lit += lastLLSize;
	}
}
1542
1543 static void ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
1544 {
1545 U32 const mls = ctx->params.cParams.searchLength;
1546 switch (mls) {
1547 default:
1548 case 4: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); return;
1549 case 5: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5); return;
1550 case 6: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6); return;
1551 case 7: ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7); return;
1552 }
1553 }
1554
1555
1556
1557
1558
1559
1560
/*
 * ZSTD_insertBt1() :
 * Insert position `ip` into the binary-tree match structure (bt = chainTable,
 * two slots per position: [smaller, larger]), rebalancing the subtree rooted
 * at the previous hash-table entry. Used for tree maintenance only: no match
 * is returned.
 * Returns how many positions forward the caller may skip before the next
 * insertion (>= 1; larger when a long match proves the next positions are
 * already well covered).
 * `extDict` selects whether candidate indices below dictLimit are resolved
 * through dictBase.
 */
static U32 ZSTD_insertBt1(ZSTD_CCtx *zc, const BYTE *const ip, const U32 mls, const BYTE *const iend, U32 nbCompares, U32 extDict)
{
	U32 *const hashTable = zc->hashTable;
	U32 const hashLog = zc->params.cParams.hashLog;
	size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
	U32 *const bt = zc->chainTable;
	U32 const btLog = zc->params.cParams.chainLog - 1; /* 2 entries per position */
	U32 const btMask = (1 << btLog) - 1;
	U32 matchIndex = hashTable[h];
	size_t commonLengthSmaller = 0, commonLengthLarger = 0;
	const BYTE *const base = zc->base;
	const BYTE *const dictBase = zc->dictBase;
	const U32 dictLimit = zc->dictLimit;
	const BYTE *const dictEnd = dictBase + dictLimit;
	const BYTE *const prefixStart = base + dictLimit;
	const BYTE *match;
	const U32 curr = (U32)(ip - base);
	const U32 btLow = btMask >= curr ? 0 : curr - btMask; /* oldest index still inside the tree table */
	U32 *smallerPtr = bt + 2 * (curr & btMask);
	U32 *largerPtr = smallerPtr + 1;
	U32 dummy32; /* to be nullified at the end */
	U32 const windowLow = zc->lowLimit;
	U32 matchEndIdx = curr + 8;
	size_t bestLength = 8;

	hashTable[h] = curr; /* this position becomes the new tree root */

	while (nbCompares-- && (matchIndex > windowLow)) {
		U32 *const nextPtr = bt + 2 * (matchIndex & btMask);
		/* both children share at least this prefix with ip */
		size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);

		if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
			match = base + matchIndex;
			if (match[matchLength] == ip[matchLength])
				matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1;
		} else {
			match = dictBase + matchIndex;
			matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart);
			if (matchIndex + matchLength >= dictLimit)
				match = base + matchIndex; /* preparation for next read of match[matchLength] */
		}

		if (matchLength > bestLength) {
			bestLength = matchLength;
			if (matchLength > matchEndIdx - matchIndex)
				matchEndIdx = matchIndex + (U32)matchLength;
		}

		if (ip + matchLength == iend) /* equal : no way to know if inf or sup */
			break;		      /* drop, to guarantee consistency (miss a bit of compression) */

		if (match[matchLength] < ip[matchLength]) {
			/* match is smaller than ip: becomes the "smaller" child,
			 * continue down its larger subtree */
			*smallerPtr = matchIndex;
			commonLengthSmaller = matchLength;
			if (matchIndex <= btLow) {
				smallerPtr = &dummy32; /* beyond tree size, stop the chain */
				break;
			}
			smallerPtr = nextPtr + 1;
			matchIndex = nextPtr[1];
		} else {
			/* match is larger than or equal to ip: symmetric case */
			*largerPtr = matchIndex;
			commonLengthLarger = matchLength;
			if (matchIndex <= btLow) {
				largerPtr = &dummy32;
				break;
			}
			largerPtr = nextPtr;
			matchIndex = nextPtr[0];
		}
	}

	*smallerPtr = *largerPtr = 0; /* terminate both chains */
	/* skip distance heuristic: long matches allow skipping ahead */
	if (bestLength > 384)
		return MIN(192, (U32)(bestLength - 384));
	if (matchEndIdx > curr + 8)
		return matchEndIdx - curr - 8;
	return 1;
}
1642
/*
 * ZSTD_insertBtAndFindBestMatch() :
 * Insert `ip` into the binary tree AND search it for the best match.
 * Writes the winning offset (biased by ZSTD_REP_MOVE) into *offsetPtr and
 * returns its length (0 if nothing beat the incoming *offsetPtr per the
 * length-vs-offset-cost comparison). Also advances zc->nextToUpdate past
 * positions already covered by a long match.
 */
static size_t ZSTD_insertBtAndFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, size_t *offsetPtr, U32 nbCompares, const U32 mls,
					    U32 extDict)
{
	U32 *const hashTable = zc->hashTable;
	U32 const hashLog = zc->params.cParams.hashLog;
	size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
	U32 *const bt = zc->chainTable;
	U32 const btLog = zc->params.cParams.chainLog - 1; /* 2 entries per position */
	U32 const btMask = (1 << btLog) - 1;
	U32 matchIndex = hashTable[h];
	size_t commonLengthSmaller = 0, commonLengthLarger = 0;
	const BYTE *const base = zc->base;
	const BYTE *const dictBase = zc->dictBase;
	const U32 dictLimit = zc->dictLimit;
	const BYTE *const dictEnd = dictBase + dictLimit;
	const BYTE *const prefixStart = base + dictLimit;
	const U32 curr = (U32)(ip - base);
	const U32 btLow = btMask >= curr ? 0 : curr - btMask;
	const U32 windowLow = zc->lowLimit;
	U32 *smallerPtr = bt + 2 * (curr & btMask);
	U32 *largerPtr = bt + 2 * (curr & btMask) + 1;
	U32 matchEndIdx = curr + 8;
	U32 dummy32; /* to be nullified at the end */
	size_t bestLength = 0;

	hashTable[h] = curr; /* this position becomes the new tree root */

	while (nbCompares-- && (matchIndex > windowLow)) {
		U32 *const nextPtr = bt + 2 * (matchIndex & btMask);
		/* both children share at least this prefix with ip */
		size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);
		const BYTE *match;

		if ((!extDict) || (matchIndex + matchLength >= dictLimit)) {
			match = base + matchIndex;
			if (match[matchLength] == ip[matchLength])
				matchLength += ZSTD_count(ip + matchLength + 1, match + matchLength + 1, iend) + 1;
		} else {
			match = dictBase + matchIndex;
			matchLength += ZSTD_count_2segments(ip + matchLength, match + matchLength, iend, dictEnd, prefixStart);
			if (matchIndex + matchLength >= dictLimit)
				match = base + matchIndex; /* preparation for next read of match[matchLength] */
		}

		if (matchLength > bestLength) {
			if (matchLength > matchEndIdx - matchIndex)
				matchEndIdx = matchIndex + (U32)matchLength;
			/* accept only if the extra length pays for the (log-scale) extra offset cost */
			if ((4 * (int)(matchLength - bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)offsetPtr[0] + 1)))
				bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
			if (ip + matchLength == iend) /* equal : no way to know if inf or sup */
				break;		      /* drop, to guarantee consistency (miss a bit of compression) */
		}

		if (match[matchLength] < ip[matchLength]) {
			/* match is smaller than ip: link as "smaller" child, descend its larger subtree */
			*smallerPtr = matchIndex;
			commonLengthSmaller = matchLength;
			if (matchIndex <= btLow) {
				smallerPtr = &dummy32; /* beyond tree size, stop the chain */
				break;
			}
			smallerPtr = nextPtr + 1;
			matchIndex = nextPtr[1];
		} else {
			/* match is larger than or equal to ip: symmetric case */
			*largerPtr = matchIndex;
			commonLengthLarger = matchLength;
			if (matchIndex <= btLow) {
				largerPtr = &dummy32;
				break;
			}
			largerPtr = nextPtr;
			matchIndex = nextPtr[0];
		}
	}

	*smallerPtr = *largerPtr = 0; /* terminate both chains */

	/* positions up to matchEndIdx-8 are already well represented in the tree */
	zc->nextToUpdate = (matchEndIdx > curr + 8) ? matchEndIdx - 8 : curr + 1;
	return bestLength;
}
1723
1724 static void ZSTD_updateTree(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls)
1725 {
1726 const BYTE *const base = zc->base;
1727 const U32 target = (U32)(ip - base);
1728 U32 idx = zc->nextToUpdate;
1729
1730 while (idx < target)
1731 idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 0);
1732 }
1733
1734
1735 static size_t ZSTD_BtFindBestMatch(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls)
1736 {
1737 if (ip < zc->base + zc->nextToUpdate)
1738 return 0;
1739 ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
1740 return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0);
1741 }
1742
1743 static size_t ZSTD_BtFindBestMatch_selectMLS(ZSTD_CCtx *zc,
1744 const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 matchLengthSearch)
1745 {
1746 switch (matchLengthSearch) {
1747 default:
1748 case 4: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
1749 case 5: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
1750 case 7:
1751 case 6: return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
1752 }
1753 }
1754
1755 static void ZSTD_updateTree_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iend, const U32 nbCompares, const U32 mls)
1756 {
1757 const BYTE *const base = zc->base;
1758 const U32 target = (U32)(ip - base);
1759 U32 idx = zc->nextToUpdate;
1760
1761 while (idx < target)
1762 idx += ZSTD_insertBt1(zc, base + idx, mls, iend, nbCompares, 1);
1763 }
1764
1765
1766 static size_t ZSTD_BtFindBestMatch_extDict(ZSTD_CCtx *zc, const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
1767 const U32 mls)
1768 {
1769 if (ip < zc->base + zc->nextToUpdate)
1770 return 0;
1771 ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
1772 return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1);
1773 }
1774
1775 static size_t ZSTD_BtFindBestMatch_selectMLS_extDict(ZSTD_CCtx *zc,
1776 const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
1777 const U32 matchLengthSearch)
1778 {
1779 switch (matchLengthSearch) {
1780 default:
1781 case 4: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
1782 case 5: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
1783 case 7:
1784 case 6: return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
1785 }
1786 }
1787
1788
1789
1790
1791 #define NEXT_IN_CHAIN(d, mask) chainTable[(d)&mask]
1792
1793
1794
/* Insert every not-yet-indexed position up to ip into the hash chain,
 * then return the head of the chain for ip's hash — i.e. the most recent
 * candidate match index.  Used by the hash-chain (lazy/greedy) searchers. */
FORCE_INLINE
U32 ZSTD_insertAndFindFirstIndex(ZSTD_CCtx *zc, const BYTE *ip, U32 mls)
{
	U32 *const hashTable = zc->hashTable;
	const U32 hashLog = zc->params.cParams.hashLog;
	U32 *const chainTable = zc->chainTable;
	const U32 chainMask = (1 << zc->params.cParams.chainLog) - 1;
	const BYTE *const base = zc->base;
	const U32 target = (U32)(ip - base); /* index of ip itself (not inserted) */
	U32 idx = zc->nextToUpdate;

	while (idx < target) {
		size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls);
		/* link the new entry to the previous chain head, then make it the head */
		NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
		hashTable[h] = idx;
		idx++;
	}

	zc->nextToUpdate = target;
	return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
}
1816
1817
/* Hash-chain best-match search.  Walks the chain starting at the newest
 * candidate, keeping the longest match found; returns its length (or
 * EQUAL_READ32-1 when nothing at least EQUAL_READ32 long was found) and
 * writes the REP_MOVE-encoded offset through offsetPtr.
 * extDict is a compile-time flag (function is FORCE_INLINE) selecting
 * whether candidates may live in the external-dictionary segment. */
FORCE_INLINE
size_t ZSTD_HcFindBestMatch_generic(ZSTD_CCtx *zc,
				    const BYTE *const ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts, const U32 mls,
				    const U32 extDict)
{
	U32 *const chainTable = zc->chainTable;
	const U32 chainSize = (1 << zc->params.cParams.chainLog);
	const U32 chainMask = chainSize - 1;
	const BYTE *const base = zc->base;
	const BYTE *const dictBase = zc->dictBase;
	const U32 dictLimit = zc->dictLimit;
	const BYTE *const prefixStart = base + dictLimit;
	const BYTE *const dictEnd = dictBase + dictLimit;
	const U32 lowLimit = zc->lowLimit;
	const U32 curr = (U32)(ip - base);
	/* indices below minChain have been evicted from the (circular) chain table */
	const U32 minChain = curr > chainSize ? curr - chainSize : 0;
	int nbAttempts = maxNbAttempts;
	size_t ml = EQUAL_READ32 - 1; /* current best length; start just below the minimum useful length */

	/* HC4 match finder: index everything up to ip, get newest candidate */
	U32 matchIndex = ZSTD_insertAndFindFirstIndex(zc, ip, mls);

	for (; (matchIndex > lowLimit) & (nbAttempts > 0); nbAttempts--) {
		const BYTE *match;
		size_t currMl = 0;
		if ((!extDict) || matchIndex >= dictLimit) {
			match = base + matchIndex;
			/* cheap pre-check: candidate can only beat ml if it agrees at position ml */
			if (match[ml] == ip[ml])
				currMl = ZSTD_count(ip, match, iLimit);
		} else {
			match = dictBase + matchIndex;
			if (ZSTD_read32(match) == ZSTD_read32(ip))
				currMl = ZSTD_count_2segments(ip + EQUAL_READ32, match + EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32;
		}

		/* save best solution */
		if (currMl > ml) {
			ml = currMl;
			*offsetPtr = curr - matchIndex + ZSTD_REP_MOVE;
			if (ip + currMl == iLimit)
				break; /* best possible: reached end of input, stop searching */
		}

		if (matchIndex <= minChain)
			break; /* older links are no longer stored */
		matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
	}

	return ml;
}
1868
1869 FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
1870 const U32 matchLengthSearch)
1871 {
1872 switch (matchLengthSearch) {
1873 default:
1874 case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0);
1875 case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0);
1876 case 7:
1877 case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0);
1878 }
1879 }
1880
1881 FORCE_INLINE size_t ZSTD_HcFindBestMatch_extDict_selectMLS(ZSTD_CCtx *zc, const BYTE *ip, const BYTE *const iLimit, size_t *offsetPtr, const U32 maxNbAttempts,
1882 const U32 matchLengthSearch)
1883 {
1884 switch (matchLengthSearch) {
1885 default:
1886 case 4: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1);
1887 case 5: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1);
1888 case 7:
1889 case 6: return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1);
1890 }
1891 }
1892
1893
1894
1895
/* Lazy match parser (prefix mode, no external dictionary).
 * Scans the block, at each position comparing the repeat-offset match
 * against the best match from the configured searcher, optionally looking
 * one or two positions ahead (depth) for a better trade-off before
 * emitting a sequence into ctx->seqStore.
 * searchMethod selects binary-tree (1) vs hash-chain (0) search;
 * depth 0 = greedy, 1 = lazy, 2 = lazy2/btlazy2. */
FORCE_INLINE
void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth)
{
	seqStore_t *seqStorePtr = &(ctx->seqStore);
	const BYTE *const istart = (const BYTE *)src;
	const BYTE *ip = istart;
	const BYTE *anchor = istart;
	const BYTE *const iend = istart + srcSize;
	const BYTE *const ilimit = iend - 8;
	const BYTE *const base = ctx->base + ctx->dictLimit;

	U32 const maxSearches = 1 << ctx->params.cParams.searchLog;
	U32 const mls = ctx->params.cParams.searchLength;

	typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch);
	searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
	U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1], savedOffset = 0;

	/* init: skip the very first byte of the window */
	ip += (ip == base);
	ctx->nextToUpdate3 = ctx->nextToUpdate;
	{
		/* a repcode larger than the data seen so far cannot be valid yet:
		 * park it in savedOffset and disable it (0) for this block */
		U32 const maxRep = (U32)(ip - base);
		if (offset_2 > maxRep)
			savedOffset = offset_2, offset_2 = 0;
		if (offset_1 > maxRep)
			savedOffset = offset_1, offset_1 = 0;
	}

	/* Match Loop */
	while (ip < ilimit) {
		size_t matchLength = 0;
		size_t offset = 0; /* 0 means "repcode match"; otherwise REP_MOVE-encoded offset */
		const BYTE *start = ip + 1;

		/* check repCode at ip+1 */
		if ((offset_1 > 0) & (ZSTD_read32(ip + 1) == ZSTD_read32(ip + 1 - offset_1))) {

			matchLength = ZSTD_count(ip + 1 + EQUAL_READ32, ip + 1 + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
			if (depth == 0)
				goto _storeSequence; /* greedy: take the repcode immediately */
		}

		/* first search (depth 0) */
		{
			size_t offsetFound = 99999999;
			size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
			if (ml2 > matchLength)
				matchLength = ml2, start = ip, offset = offsetFound;
		}

		if (matchLength < EQUAL_READ32) {
			/* no useful match: skip faster the longer we've gone without one */
			ip += ((ip - anchor) >> g_searchStrength) + 1;
			continue;
		}

		/* let's try to find a better solution */
		if (depth >= 1)
			while (ip < ilimit) {
				ip++;
				/* repcode at ip+1: gains weighted to prefer the cheap offset-0 encoding */
				if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
					size_t const mlRep = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
					int const gain2 = (int)(mlRep * 3);
					int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1);
					if ((mlRep >= EQUAL_READ32) && (gain2 > gain1))
						matchLength = mlRep, offset = 0, start = ip;
				}
				{
					size_t offset2 = 99999999;
					size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
					int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1));
					int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4);
					if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
						matchLength = ml2, offset = offset2, start = ip;
						continue; /* search a better one at the new position */
					}
				}

				/* second look-ahead (depth 2 only), with a higher bar (+7) */
				if ((depth == 2) && (ip < ilimit)) {
					ip++;
					if ((offset) && ((offset_1 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_1)))) {
						size_t const ml2 = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_1, iend) + EQUAL_READ32;
						int const gain2 = (int)(ml2 * 4);
						int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1);
						if ((ml2 >= EQUAL_READ32) && (gain2 > gain1))
							matchLength = ml2, offset = 0, start = ip;
					}
					{
						size_t offset2 = 99999999;
						size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
						int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1));
						int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7);
						if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
							matchLength = ml2, offset = offset2, start = ip;
							continue;
						}
					}
				}
				break; /* nothing found : store previous solution */
			}

		/* NOTE:
		 * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
		 * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
		 * overflows the pointer, which is undefined behavior.
		 */
		/* catch up: extend the chosen match backwards while the preceding
		 * bytes also match, converting literals into match length */
		if (offset) {
			while ((start > anchor) && (start > base + offset - ZSTD_REP_MOVE) &&
			       (start[-1] == (start-offset+ZSTD_REP_MOVE)[-1])) /* only search for offset within prefix */
			{
				start--;
				matchLength++;
			}
			offset_2 = offset_1;
			offset_1 = (U32)(offset - ZSTD_REP_MOVE);
		}

	/* store sequence */
_storeSequence:
	{
		size_t const litLength = start - anchor;
		ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH);
		anchor = ip = start + matchLength;
	}

		/* check immediate repcode: keep emitting zero-literal repcode
		 * sequences while offset_2 keeps matching */
		while ((ip <= ilimit) && ((offset_2 > 0) & (ZSTD_read32(ip) == ZSTD_read32(ip - offset_2)))) {
			/* store sequence */
			matchLength = ZSTD_count(ip + EQUAL_READ32, ip + EQUAL_READ32 - offset_2, iend) + EQUAL_READ32;
			offset = offset_2;
			offset_2 = offset_1;
			offset_1 = (U32)offset; /* swap repcodes */
			ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH);
			ip += matchLength;
			anchor = ip;
			continue; /* faster when present ... (?) */
		}
	}

	/* Save reps for next block; restore a parked repcode if one went unused */
	ctx->repToConfirm[0] = offset_1 ? offset_1 : savedOffset;
	ctx->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;

	/* Last Literals */
	{
		size_t const lastLLSize = iend - anchor;
		memcpy(seqStorePtr->lit, anchor, lastLLSize);
		seqStorePtr->lit += lastLLSize;
	}
}
2048
2049 static void ZSTD_compressBlock_btlazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2); }
2050
2051 static void ZSTD_compressBlock_lazy2(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2); }
2052
2053 static void ZSTD_compressBlock_lazy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1); }
2054
2055 static void ZSTD_compressBlock_greedy(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0); }
2056
/* Lazy match parser, external-dictionary variant.
 * Same parsing strategy as ZSTD_compressBlock_lazy_generic, but every
 * repcode candidate must be mapped into either the dictionary segment
 * (dictBase) or the current prefix (base), and validated against
 * lowestIndex before its bytes may be read. */
FORCE_INLINE
void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx *ctx, const void *src, size_t srcSize, const U32 searchMethod, const U32 depth)
{
	seqStore_t *seqStorePtr = &(ctx->seqStore);
	const BYTE *const istart = (const BYTE *)src;
	const BYTE *ip = istart;
	const BYTE *anchor = istart;
	const BYTE *const iend = istart + srcSize;
	const BYTE *const ilimit = iend - 8;
	const BYTE *const base = ctx->base;
	const U32 dictLimit = ctx->dictLimit;
	const U32 lowestIndex = ctx->lowLimit;
	const BYTE *const prefixStart = base + dictLimit;
	const BYTE *const dictBase = ctx->dictBase;
	const BYTE *const dictEnd = dictBase + dictLimit;
	const BYTE *const dictStart = dictBase + ctx->lowLimit;

	const U32 maxSearches = 1 << ctx->params.cParams.searchLog;
	const U32 mls = ctx->params.cParams.searchLength;

	typedef size_t (*searchMax_f)(ZSTD_CCtx * zc, const BYTE *ip, const BYTE *iLimit, size_t *offsetPtr, U32 maxNbAttempts, U32 matchLengthSearch);
	searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;

	U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];

	/* init */
	ctx->nextToUpdate3 = ctx->nextToUpdate;
	ip += (ip == prefixStart);

	/* Match Loop */
	while (ip < ilimit) {
		size_t matchLength = 0;
		size_t offset = 0; /* 0 means repcode; otherwise REP_MOVE-encoded offset */
		const BYTE *start = ip + 1;
		U32 curr = (U32)(ip - base);

		/* check repCode at ip+1 */
		{
			const U32 repIndex = (U32)(curr + 1 - offset_1);
			const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
			const BYTE *const repMatch = repBase + repIndex;
			/* intentional underflow trick: rejects repIndex within 3 bytes of dictLimit
			 * (would straddle the dict/prefix boundary) and repIndex <= lowestIndex */
			if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex))
				if (ZSTD_read32(ip + 1) == ZSTD_read32(repMatch)) {
					/* repcode detected: we should take it */
					const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
					matchLength =
					    ZSTD_count_2segments(ip + 1 + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
					if (depth == 0)
						goto _storeSequence; /* greedy: take it immediately */
				}
		}

		/* first search (depth 0) */
		{
			size_t offsetFound = 99999999;
			size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
			if (ml2 > matchLength)
				matchLength = ml2, start = ip, offset = offsetFound;
		}

		if (matchLength < EQUAL_READ32) {
			/* no useful match: accelerate skipping through incompressible data */
			ip += ((ip - anchor) >> g_searchStrength) + 1;
			continue;
		}

		/* let's try to find a better solution */
		if (depth >= 1)
			while (ip < ilimit) {
				ip++;
				curr++;
				/* check repCode */
				if (offset) {
					const U32 repIndex = (U32)(curr - offset_1);
					const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
					const BYTE *const repMatch = repBase + repIndex;
					if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex))
						if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
							/* repcode detected */
							const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
							size_t const repLength =
							    ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) +
							    EQUAL_READ32;
							int const gain2 = (int)(repLength * 3);
							int const gain1 = (int)(matchLength * 3 - ZSTD_highbit32((U32)offset + 1) + 1);
							if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
								matchLength = repLength, offset = 0, start = ip;
						}
				}

				/* search match, depth 1 */
				{
					size_t offset2 = 99999999;
					size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
					int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1));
					int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 4);
					if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
						matchLength = ml2, offset = offset2, start = ip;
						continue; /* search a better one at the new position */
					}
				}

				/* let's find an even better one (depth 2 only, higher bar) */
				if ((depth == 2) && (ip < ilimit)) {
					ip++;
					curr++;
					/* check repCode */
					if (offset) {
						const U32 repIndex = (U32)(curr - offset_1);
						const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
						const BYTE *const repMatch = repBase + repIndex;
						if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex))
							if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
								/* repcode detected */
								const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
								size_t repLength = ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend,
													repEnd, prefixStart) +
										   EQUAL_READ32;
								int gain2 = (int)(repLength * 4);
								int gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 1);
								if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
									matchLength = repLength, offset = 0, start = ip;
							}
					}

					/* search match, depth 2 */
					{
						size_t offset2 = 99999999;
						size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
						int const gain2 = (int)(ml2 * 4 - ZSTD_highbit32((U32)offset2 + 1));
						int const gain1 = (int)(matchLength * 4 - ZSTD_highbit32((U32)offset + 1) + 7);
						if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
							matchLength = ml2, offset = offset2, start = ip;
							continue;
						}
					}
				}
				break; /* nothing found : store previous solution */
			}

		/* catch up: extend backwards, staying within the segment the match lives in */
		if (offset) {
			U32 const matchIndex = (U32)((start - base) - (offset - ZSTD_REP_MOVE));
			const BYTE *match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
			const BYTE *const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
			while ((start > anchor) && (match > mStart) && (start[-1] == match[-1])) {
				start--;
				match--;
				matchLength++;
			}
			offset_2 = offset_1;
			offset_1 = (U32)(offset - ZSTD_REP_MOVE);
		}

	/* store sequence */
	_storeSequence : {
		size_t const litLength = start - anchor;
		ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength - MINMATCH);
		anchor = ip = start + matchLength;
	}

		/* check immediate repcode: emit zero-literal sequences while offset_2 matches */
		while (ip <= ilimit) {
			const U32 repIndex = (U32)((ip - base) - offset_2);
			const BYTE *const repBase = repIndex < dictLimit ? dictBase : base;
			const BYTE *const repMatch = repBase + repIndex;
			if (((U32)((dictLimit - 1) - repIndex) >= 3) & (repIndex > lowestIndex))
				if (ZSTD_read32(ip) == ZSTD_read32(repMatch)) {
					/* repcode detected we should take it */
					const BYTE *const repEnd = repIndex < dictLimit ? dictEnd : iend;
					matchLength =
					    ZSTD_count_2segments(ip + EQUAL_READ32, repMatch + EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
					offset = offset_2;
					offset_2 = offset_1;
					offset_1 = (U32)offset; /* swap offset history */
					ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength - MINMATCH);
					ip += matchLength;
					anchor = ip;
					continue; /* faster when present ... (?) */
				}
			break;
		}
	}

	/* Save reps for next block */
	ctx->repToConfirm[0] = offset_1;
	ctx->repToConfirm[1] = offset_2;

	/* Last Literals */
	{
		size_t const lastLLSize = iend - anchor;
		memcpy(seqStorePtr->lit, anchor, lastLLSize);
		seqStorePtr->lit += lastLLSize;
	}
}
2251
2252 void ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize) { ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0); }
2253
2254 static void ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
2255 {
2256 ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1);
2257 }
2258
2259 static void ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
2260 {
2261 ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2);
2262 }
2263
2264 static void ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
2265 {
2266 ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2);
2267 }
2268
2269
2270 #include "zstd_opt.h"
2271
2272 static void ZSTD_compressBlock_btopt(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
2273 {
2274 #ifdef ZSTD_OPT_H_91842398743
2275 ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0);
2276 #else
2277 (void)ctx;
2278 (void)src;
2279 (void)srcSize;
2280 return;
2281 #endif
2282 }
2283
2284 static void ZSTD_compressBlock_btopt2(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
2285 {
2286 #ifdef ZSTD_OPT_H_91842398743
2287 ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1);
2288 #else
2289 (void)ctx;
2290 (void)src;
2291 (void)srcSize;
2292 return;
2293 #endif
2294 }
2295
2296 static void ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
2297 {
2298 #ifdef ZSTD_OPT_H_91842398743
2299 ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0);
2300 #else
2301 (void)ctx;
2302 (void)src;
2303 (void)srcSize;
2304 return;
2305 #endif
2306 }
2307
2308 static void ZSTD_compressBlock_btopt2_extDict(ZSTD_CCtx *ctx, const void *src, size_t srcSize)
2309 {
2310 #ifdef ZSTD_OPT_H_91842398743
2311 ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1);
2312 #else
2313 (void)ctx;
2314 (void)src;
2315 (void)srcSize;
2316 return;
2317 #endif
2318 }
2319
2320 typedef void (*ZSTD_blockCompressor)(ZSTD_CCtx *ctx, const void *src, size_t srcSize);
2321
2322 static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
2323 {
2324 static const ZSTD_blockCompressor blockCompressor[2][8] = {
2325 {ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2,
2326 ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt, ZSTD_compressBlock_btopt2},
2327 {ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict,
2328 ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btopt2_extDict}};
2329
2330 return blockCompressor[extDict][(U32)strat];
2331 }
2332
/* Compress a single block: run the selected match parser to fill the
 * sequence store, then entropy-encode the sequences into dst.
 * Returns 0 when the block is too small to be worth compressing
 * (caller then emits a raw block), else the compressed size or an error. */
static size_t ZSTD_compressBlock_internal(ZSTD_CCtx *zc, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
{
	ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->params.cParams.strategy, zc->lowLimit < zc->dictLimit);
	const BYTE *const base = zc->base;
	const BYTE *const istart = (const BYTE *)src;
	const U32 curr = (U32)(istart - base);
	if (srcSize < MIN_CBLOCK_SIZE + ZSTD_blockHeaderSize + 1)
		return 0; /* don't even attempt compression below a certain srcSize */
	ZSTD_resetSeqStore(&(zc->seqStore));
	/* When the table indices lag far behind curr, pull nextToUpdate forward
	 * so the parser does not spend unbounded time catching up; the constants
	 * 384/192 bound that catch-up work (tuning values — see upstream zstd). */
	if (curr > zc->nextToUpdate + 384)
		zc->nextToUpdate = curr - MIN(192, (U32)(curr - zc->nextToUpdate - 384));
	blockCompressor(zc, src, srcSize);
	return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize);
}
2347
2348
2349
2350
2351
2352
2353
2354
/* Split the input into blocks and compress each one, writing a 3-byte
 * block header (lastBlock bit, block type, size) before every block.
 * Handles index-overflow correction and sliding-window enforcement
 * between blocks.  Returns the number of bytes written to dst. */
static size_t ZSTD_compress_generic(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 lastFrameChunk)
{
	size_t blockSize = cctx->blockSize;
	size_t remaining = srcSize;
	const BYTE *ip = (const BYTE *)src;
	BYTE *const ostart = (BYTE *)dst;
	BYTE *op = ostart;
	U32 const maxDist = 1 << cctx->params.cParams.windowLog;

	if (cctx->params.fParams.checksumFlag && srcSize)
		xxh64_update(&cctx->xxhState, src, srcSize);

	while (remaining) {
		U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
		size_t cSize;

		if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
			return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */
		if (remaining < blockSize)
			blockSize = remaining;

		/* preemptive overflow correction: when indices approach the top of
		 * the U32 range, rebase everything so they stay representable */
		if (cctx->lowLimit > (3U << 29)) {
			U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->params.cParams.hashLog, cctx->params.cParams.strategy)) - 1;
			U32 const curr = (U32)(ip - cctx->base);
			U32 const newCurr = (curr & cycleMask) + (1 << cctx->params.cParams.windowLog);
			U32 const correction = curr - newCurr;
			ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30);
			ZSTD_reduceIndex(cctx, correction);
			cctx->base += correction;
			cctx->dictBase += correction;
			cctx->lowLimit -= correction;
			cctx->dictLimit -= correction;
			if (cctx->nextToUpdate < correction)
				cctx->nextToUpdate = 0;
			else
				cctx->nextToUpdate -= correction;
		}

		if ((U32)(ip + blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) {
			/* enforce maxDist: advance lowLimit so matches cannot reach
			 * further back than the window allows */
			U32 const newLowLimit = (U32)(ip + blockSize - cctx->base) - maxDist;
			if (cctx->lowLimit < newLowLimit)
				cctx->lowLimit = newLowLimit;
			if (cctx->dictLimit < cctx->lowLimit)
				cctx->dictLimit = cctx->lowLimit;
		}

		cSize = ZSTD_compressBlock_internal(cctx, op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize, ip, blockSize);
		if (ZSTD_isError(cSize))
			return cSize;

		if (cSize == 0) { /* block is not compressible: store it raw */
			U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw) << 1) + (U32)(blockSize << 3);
			if (blockSize + ZSTD_blockHeaderSize > dstCapacity)
				return ERROR(dstSize_tooSmall);
			/* writes 4 bytes, but the 4th is immediately overwritten by the memcpy below */
			ZSTD_writeLE32(op, cBlockHeader24);
			memcpy(op + ZSTD_blockHeaderSize, ip, blockSize);
			cSize = ZSTD_blockHeaderSize + blockSize;
		} else {
			U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed) << 1) + (U32)(cSize << 3);
			ZSTD_writeLE24(op, cBlockHeader24);
			cSize += ZSTD_blockHeaderSize;
		}

		remaining -= blockSize;
		dstCapacity -= cSize;
		ip += blockSize;
		op += cSize;
	}

	if (lastFrameChunk && (op > ostart))
		cctx->stage = ZSTDcs_ending;
	return op - ostart;
}
2430
/* Write the zstd frame header (magic number, frame-header-descriptor byte,
 * optional window descriptor, optional dictionary ID, optional frame
 * content size) into dst.  Returns the number of bytes written, or
 * dstSize_tooSmall when dstCapacity cannot hold the maximum header size.
 * Field layout follows the zstd frame format (RFC 8478). */
static size_t ZSTD_writeFrameHeader(void *dst, size_t dstCapacity, ZSTD_parameters params, U64 pledgedSrcSize, U32 dictID)
{
	BYTE *const op = (BYTE *)dst;
	/* 0-3: how many bytes the dictionary ID occupies (0, 1, 2 or 4) */
	U32 const dictIDSizeCode = (dictID > 0) + (dictID >= 256) + (dictID >= 65536);
	U32 const checksumFlag = params.fParams.checksumFlag > 0;
	U32 const windowSize = 1U << params.cParams.windowLog;
	/* single-segment: whole content fits the window, so the window descriptor byte is omitted */
	U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
	BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
	/* 0-3: how many bytes encode the frame content size (1, 2, 4 or 8) */
	U32 const fcsCode =
	    params.fParams.contentSizeFlag ? (pledgedSrcSize >= 256) + (pledgedSrcSize >= 65536 + 256) + (pledgedSrcSize >= 0xFFFFFFFFU) : 0;
	BYTE const frameHeaderDecriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag << 2) + (singleSegment << 5) + (fcsCode << 6));
	size_t pos;

	if (dstCapacity < ZSTD_frameHeaderSize_max)
		return ERROR(dstSize_tooSmall);

	ZSTD_writeLE32(dst, ZSTD_MAGICNUMBER);
	op[4] = frameHeaderDecriptionByte;
	pos = 5;
	if (!singleSegment)
		op[pos++] = windowLogByte;
	/* dictionary ID, sized per dictIDSizeCode */
	switch (dictIDSizeCode) {
	default: /* impossible: dictIDSizeCode is in [0,3] */
	case 0: break;
	case 1:
		op[pos] = (BYTE)(dictID);
		pos++;
		break;
	case 2:
		ZSTD_writeLE16(op + pos, (U16)dictID);
		pos += 2;
		break;
	case 3:
		ZSTD_writeLE32(op + pos, dictID);
		pos += 4;
		break;
	}
	/* frame content size, sized per fcsCode; the 2-byte form stores value-256 */
	switch (fcsCode) {
	default: /* impossible: fcsCode is in [0,3] */
	case 0:
		if (singleSegment)
			op[pos++] = (BYTE)(pledgedSrcSize);
		break;
	case 1:
		ZSTD_writeLE16(op + pos, (U16)(pledgedSrcSize - 256));
		pos += 2;
		break;
	case 2:
		ZSTD_writeLE32(op + pos, (U32)(pledgedSrcSize));
		pos += 4;
		break;
	case 3:
		ZSTD_writeLE64(op + pos, (U64)(pledgedSrcSize));
		pos += 8;
		break;
	}
	return pos;
}
2489
/* Shared driver for streaming and single-block compression.
 * Writes the frame header on the first call (frame mode), maintains the
 * base/dictBase segment bookkeeping when the input is not contiguous with
 * the previous call, then compresses via ZSTD_compress_generic (frame) or
 * ZSTD_compressBlock_internal (raw block).  Returns bytes written. */
static size_t ZSTD_compressContinue_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, U32 frame, U32 lastFrameChunk)
{
	const BYTE *const ip = (const BYTE *)src;
	size_t fhSize = 0;

	if (cctx->stage == ZSTDcs_created)
		return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */

	if (frame && (cctx->stage == ZSTDcs_init)) {
		fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, cctx->frameContentSize, cctx->dictID);
		if (ZSTD_isError(fhSize))
			return fhSize;
		dstCapacity -= fhSize;
		dst = (char *)dst + fhSize;
		cctx->stage = ZSTDcs_ongoing;
	}

	/* Check if blocks follow each other */
	if (src != cctx->nextSrc) {
		/* not contiguous: the previous segment becomes the "dictionary",
		 * and base is rebased so ip keeps a consistent index */
		ptrdiff_t const delta = cctx->nextSrc - ip;
		cctx->lowLimit = cctx->dictLimit;
		cctx->dictLimit = (U32)(cctx->nextSrc - cctx->base);
		cctx->dictBase = cctx->base;
		cctx->base -= delta;
		cctx->nextToUpdate = cctx->dictLimit;
		if (cctx->dictLimit - cctx->lowLimit < HASH_READ_SIZE)
			cctx->lowLimit = cctx->dictLimit; /* dict segment too small to be useful */
	}

	/* if input and dictionary overlap: reduce dictionary (area presumed modified by input) */
	if ((ip + srcSize > cctx->dictBase + cctx->lowLimit) & (ip < cctx->dictBase + cctx->dictLimit)) {
		ptrdiff_t const highInputIdx = (ip + srcSize) - cctx->dictBase;
		U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)cctx->dictLimit) ? cctx->dictLimit : (U32)highInputIdx;
		cctx->lowLimit = lowLimitMax;
	}

	cctx->nextSrc = ip + srcSize;

	if (srcSize) {
		size_t const cSize = frame ? ZSTD_compress_generic(cctx, dst, dstCapacity, src, srcSize, lastFrameChunk)
					   : ZSTD_compressBlock_internal(cctx, dst, dstCapacity, src, srcSize);
		if (ZSTD_isError(cSize))
			return cSize;
		return cSize + fhSize;
	} else
		return fhSize;
}
2538
2539 size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
2540 {
2541 return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0);
2542 }
2543
2544 size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx) { return MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, 1 << cctx->params.cParams.windowLog); }
2545
2546 size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
2547 {
2548 size_t const blockSizeMax = ZSTD_getBlockSizeMax(cctx);
2549 if (srcSize > blockSizeMax)
2550 return ERROR(srcSize_wrong);
2551 return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0, 0);
2552 }
2553
2554
2555
2556
/* Index dictionary content into the match-finding structures without
 * producing any output.  The previous segment (if any) is demoted to
 * dictBase, the dictionary becomes the new prefix, and the structure
 * matching the configured strategy is filled.  Returns 0 or an error. */
static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx *zc, const void *src, size_t srcSize)
{
	const BYTE *const ip = (const BYTE *)src;
	const BYTE *const iend = ip + srcSize;

	/* input becomes the current prefix; rebase so indices stay consistent */
	zc->lowLimit = zc->dictLimit;
	zc->dictLimit = (U32)(zc->nextSrc - zc->base);
	zc->dictBase = zc->base;
	zc->base += ip - zc->nextSrc;
	zc->nextToUpdate = zc->dictLimit;
	/* forceWindow disables the loadedDictEnd window extension */
	zc->loadedDictEnd = zc->forceWindow ? 0 : (U32)(iend - zc->base);

	zc->nextSrc = iend;
	if (srcSize <= HASH_READ_SIZE)
		return 0; /* too small to index anything */

	/* fill the structure used by the strategy's match finder */
	switch (zc->params.cParams.strategy) {
	case ZSTD_fast: ZSTD_fillHashTable(zc, iend, zc->params.cParams.searchLength); break;

	case ZSTD_dfast: ZSTD_fillDoubleHashTable(zc, iend, zc->params.cParams.searchLength); break;

	case ZSTD_greedy:
	case ZSTD_lazy:
	case ZSTD_lazy2:
		if (srcSize >= HASH_READ_SIZE)
			ZSTD_insertAndFindFirstIndex(zc, iend - HASH_READ_SIZE, zc->params.cParams.searchLength);
		break;

	case ZSTD_btlazy2:
	case ZSTD_btopt:
	case ZSTD_btopt2:
		if (srcSize >= HASH_READ_SIZE)
			ZSTD_updateTree(zc, iend - HASH_READ_SIZE, iend, 1 << zc->params.cParams.searchLog, zc->params.cParams.searchLength);
		break;

	default:
		return ERROR(GENERIC); /* strategy doesn't exist; impossible */
	}

	zc->nextToUpdate = (U32)(iend - zc->base);
	return 0;
}
2600
2601
2602
2603
2604
2605 static size_t ZSTD_checkDictNCount(short *normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
2606 {
2607 U32 s;
2608 if (dictMaxSymbolValue < maxSymbolValue)
2609 return ERROR(dictionary_corrupted);
2610 for (s = 0; s <= maxSymbolValue; ++s) {
2611 if (normalizedCounter[s] == 0)
2612 return ERROR(dictionary_corrupted);
2613 }
2614 return 0;
2615 }
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
/* Load a structured zstd dictionary into the compression context.
 * Layout (magic already verified by the caller, ZSTD_compress_insertDictionary):
 *   magic (4) | dictID (4) | HUF CTable | FSE offcode NCount |
 *   FSE matchlength NCount | FSE litlength NCount | 3 rep offsets (12) |
 *   raw dictionary content.
 * On success the context's entropy tables, rep codes and dictionary content
 * are primed; returns the result of loading the content, or
 * ERROR(dictionary_corrupted) on any malformed section. */
static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize)
{
	const BYTE *dictPtr = (const BYTE *)dict;
	const BYTE *const dictEnd = dictPtr + dictSize;
	short offcodeNCount[MaxOff + 1];
	unsigned offcodeMaxValue = MaxOff;

	dictPtr += 4; /* skip magic number */
	cctx->dictID = cctx->params.fParams.noDictIDFlag ? 0 : ZSTD_readLE32(dictPtr);
	dictPtr += 4; /* skip dictID */

	/* Literal Huffman table */
	{
		size_t const hufHeaderSize = HUF_readCTable_wksp(cctx->hufTable, 255, dictPtr, dictEnd - dictPtr, cctx->tmpCounters, sizeof(cctx->tmpCounters));
		if (HUF_isError(hufHeaderSize))
			return ERROR(dictionary_corrupted);
		dictPtr += hufHeaderSize;
	}

	/* Offset-code FSE table. Its NCount validation is deferred: the maximum
	 * meaningful offset code depends on dictContentSize, known only below. */
	{
		unsigned offcodeLog;
		size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd - dictPtr);
		if (FSE_isError(offcodeHeaderSize))
			return ERROR(dictionary_corrupted);
		if (offcodeLog > OffFSELog)
			return ERROR(dictionary_corrupted);
		/* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
		CHECK_E(FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
			dictionary_corrupted);
		dictPtr += offcodeHeaderSize;
	}

	/* Match-length FSE table */
	{
		short matchlengthNCount[MaxML + 1];
		unsigned matchlengthMaxValue = MaxML, matchlengthLog;
		size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd - dictPtr);
		if (FSE_isError(matchlengthHeaderSize))
			return ERROR(dictionary_corrupted);
		if (matchlengthLog > MLFSELog)
			return ERROR(dictionary_corrupted);
		/* Every match length code must be represented in the dictionary */
		CHECK_F(ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
		CHECK_E(
		    FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
		    dictionary_corrupted);
		dictPtr += matchlengthHeaderSize;
	}

	/* Literal-length FSE table */
	{
		short litlengthNCount[MaxLL + 1];
		unsigned litlengthMaxValue = MaxLL, litlengthLog;
		size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd - dictPtr);
		if (FSE_isError(litlengthHeaderSize))
			return ERROR(dictionary_corrupted);
		if (litlengthLog > LLFSELog)
			return ERROR(dictionary_corrupted);
		/* Every literal length code must be represented in the dictionary */
		CHECK_F(ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
		CHECK_E(FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, cctx->tmpCounters, sizeof(cctx->tmpCounters)),
			dictionary_corrupted);
		dictPtr += litlengthHeaderSize;
	}

	/* Three starting repeat offsets, little-endian */
	if (dictPtr + 12 > dictEnd)
		return ERROR(dictionary_corrupted);
	cctx->rep[0] = ZSTD_readLE32(dictPtr + 0);
	cctx->rep[1] = ZSTD_readLE32(dictPtr + 4);
	cctx->rep[2] = ZSTD_readLE32(dictPtr + 8);
	dictPtr += 12;

	{
		size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
		U32 offcodeMax = MaxOff;
		if (dictContentSize <= ((U32)-1) - 128 KB) {
			U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
			offcodeMax = ZSTD_highbit32(maxOffset);		     /* Calculate minimum offset code required to represent maxOffset */
		}
		/* All offset values <= dictContentSize + 128 KB must be representable */
		CHECK_F(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));

		/* All repCodes must be <= dictContentSize and != 0 */
		{
			U32 u;
			for (u = 0; u < 3; u++) {
				if (cctx->rep[u] == 0)
					return ERROR(dictionary_corrupted);
				if (cctx->rep[u] > dictContentSize)
					return ERROR(dictionary_corrupted);
			}
		}

		cctx->flagStaticTables = 1;
		cctx->flagStaticHufTable = HUF_repeat_valid;
		return ZSTD_loadDictionaryContent(cctx, dictPtr, dictContentSize);
	}
}
2720
2721
2722
2723 static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx *cctx, const void *dict, size_t dictSize)
2724 {
2725 if ((dict == NULL) || (dictSize <= 8))
2726 return 0;
2727
2728
2729 if ((ZSTD_readLE32(dict) != ZSTD_DICT_MAGIC) || (cctx->forceRawDict))
2730 return ZSTD_loadDictionaryContent(cctx, dict, dictSize);
2731
2732
2733 return ZSTD_loadZstdDictionary(cctx, dict, dictSize);
2734 }
2735
2736
2737
2738 static size_t ZSTD_compressBegin_internal(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, U64 pledgedSrcSize)
2739 {
2740 ZSTD_compResetPolicy_e const crp = dictSize ? ZSTDcrp_fullReset : ZSTDcrp_continue;
2741 CHECK_F(ZSTD_resetCCtx_advanced(cctx, params, pledgedSrcSize, crp));
2742 return ZSTD_compress_insertDictionary(cctx, dict, dictSize);
2743 }
2744
2745
2746
/* Begin a new frame with caller-supplied parameters and optional dictionary.
 * The compression parameters are validated before use.
 * Returns 0 on success or an error code. */
size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
	/* compression parameters verification */
	CHECK_F(ZSTD_checkCParams(params.cParams));
	return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, pledgedSrcSize);
}
2753
2754 size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict, size_t dictSize, int compressionLevel)
2755 {
2756 ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
2757 return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, 0);
2758 }
2759
2760 size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel) { return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); }
2761
2762
2763
2764
/* Write the closing bytes of a zstd frame: a frame header if nothing was
 * written yet (empty-frame case), the final empty raw block, and the optional
 * 4-byte xxhash64 checksum. Returns bytes written or an error code. */
static size_t ZSTD_writeEpilogue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity)
{
	BYTE *const ostart = (BYTE *)dst;
	BYTE *op = ostart;
	size_t fhSize = 0;

	if (cctx->stage == ZSTDcs_created)
		return ERROR(stage_wrong); /* init missing */

	/* special case : empty frame — emit the header now */
	if (cctx->stage == ZSTDcs_init) {
		fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, 0, 0);
		if (ZSTD_isError(fhSize))
			return fhSize;
		dstCapacity -= fhSize;
		op += fhSize;
		cctx->stage = ZSTDcs_ongoing;
	}

	if (cctx->stage != ZSTDcs_ending) {
		/* write one last empty block, marked "last": lastBlock=1, type=bt_raw, size=0 */
		U32 const cBlockHeader24 = 1 + (((U32)bt_raw) << 1) + 0;
		if (dstCapacity < 4) /* the LE32 write below touches 4 bytes even though the header is only 3 */
			return ERROR(dstSize_tooSmall);
		ZSTD_writeLE32(op, cBlockHeader24);
		op += ZSTD_blockHeaderSize; /* advance by the 3-byte header only; the 4th byte is scratch */
		dstCapacity -= ZSTD_blockHeaderSize;
	}

	if (cctx->params.fParams.checksumFlag) {
		U32 const checksum = (U32)xxh64_digest(&cctx->xxhState);
		if (dstCapacity < 4)
			return ERROR(dstSize_tooSmall);
		ZSTD_writeLE32(op, checksum);
		op += 4;
	}

	cctx->stage = ZSTDcs_created; /* return to "created but not initialized" state */
	return op - ostart;
}
2805
2806 size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize)
2807 {
2808 size_t endResult;
2809 size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 1);
2810 if (ZSTD_isError(cSize))
2811 return cSize;
2812 endResult = ZSTD_writeEpilogue(cctx, (char *)dst + cSize, dstCapacity - cSize);
2813 if (ZSTD_isError(endResult))
2814 return endResult;
2815 return cSize + endResult;
2816 }
2817
2818 static size_t ZSTD_compress_internal(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
2819 ZSTD_parameters params)
2820 {
2821 CHECK_F(ZSTD_compressBegin_internal(cctx, dict, dictSize, params, srcSize));
2822 return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
2823 }
2824
2825 size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const void *dict, size_t dictSize,
2826 ZSTD_parameters params)
2827 {
2828 return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
2829 }
2830
2831 size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, ZSTD_parameters params)
2832 {
2833 return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, NULL, 0, params);
2834 }
2835
2836
2837
/* A digested dictionary, reusable across many compressions. */
struct ZSTD_CDict_s {
	void *dictBuffer;	 /* internal copy of the dictionary; NULL when referencing caller memory */
	const void *dictContent; /* dictionary bytes actually used (dictBuffer or caller's buffer) */
	size_t dictContentSize;
	ZSTD_CCtx *refContext;	 /* compression context pre-loaded with the dictionary */
};
2844
2845 size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams) { return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CDict)); }
2846
2847 static ZSTD_CDict *ZSTD_createCDict_advanced(const void *dictBuffer, size_t dictSize, unsigned byReference, ZSTD_parameters params, ZSTD_customMem customMem)
2848 {
2849 if (!customMem.customAlloc || !customMem.customFree)
2850 return NULL;
2851
2852 {
2853 ZSTD_CDict *const cdict = (ZSTD_CDict *)ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
2854 ZSTD_CCtx *const cctx = ZSTD_createCCtx_advanced(customMem);
2855
2856 if (!cdict || !cctx) {
2857 ZSTD_free(cdict, customMem);
2858 ZSTD_freeCCtx(cctx);
2859 return NULL;
2860 }
2861
2862 if ((byReference) || (!dictBuffer) || (!dictSize)) {
2863 cdict->dictBuffer = NULL;
2864 cdict->dictContent = dictBuffer;
2865 } else {
2866 void *const internalBuffer = ZSTD_malloc(dictSize, customMem);
2867 if (!internalBuffer) {
2868 ZSTD_free(cctx, customMem);
2869 ZSTD_free(cdict, customMem);
2870 return NULL;
2871 }
2872 memcpy(internalBuffer, dictBuffer, dictSize);
2873 cdict->dictBuffer = internalBuffer;
2874 cdict->dictContent = internalBuffer;
2875 }
2876
2877 {
2878 size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0);
2879 if (ZSTD_isError(errorCode)) {
2880 ZSTD_free(cdict->dictBuffer, customMem);
2881 ZSTD_free(cdict, customMem);
2882 ZSTD_freeCCtx(cctx);
2883 return NULL;
2884 }
2885 }
2886
2887 cdict->refContext = cctx;
2888 cdict->dictContentSize = dictSize;
2889 return cdict;
2890 }
2891 }
2892
2893 ZSTD_CDict *ZSTD_initCDict(const void *dict, size_t dictSize, ZSTD_parameters params, void *workspace, size_t workspaceSize)
2894 {
2895 ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
2896 return ZSTD_createCDict_advanced(dict, dictSize, 1, params, stackMem);
2897 }
2898
2899 size_t ZSTD_freeCDict(ZSTD_CDict *cdict)
2900 {
2901 if (cdict == NULL)
2902 return 0;
2903 {
2904 ZSTD_customMem const cMem = cdict->refContext->customMem;
2905 ZSTD_freeCCtx(cdict->refContext);
2906 ZSTD_free(cdict->dictBuffer, cMem);
2907 ZSTD_free(cdict, cMem);
2908 return 0;
2909 }
2910 }
2911
2912 static ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict *cdict) { return ZSTD_getParamsFromCCtx(cdict->refContext); }
2913
2914 size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize)
2915 {
2916 if (cdict->dictContentSize)
2917 CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize))
2918 else {
2919 ZSTD_parameters params = cdict->refContext->params;
2920 params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
2921 CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, params, pledgedSrcSize));
2922 }
2923 return 0;
2924 }
2925
2926
2927
2928
2929
2930 size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, const void *src, size_t srcSize, const ZSTD_CDict *cdict)
2931 {
2932 CHECK_F(ZSTD_compressBegin_usingCDict(cctx, cdict, srcSize));
2933
2934 if (cdict->refContext->params.fParams.contentSizeFlag == 1) {
2935 cctx->params.fParams.contentSizeFlag = 1;
2936 cctx->frameContentSize = srcSize;
2937 } else {
2938 cctx->params.fParams.contentSizeFlag = 0;
2939 }
2940
2941 return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
2942 }
2943
2944
2945
2946
2947
/* Stream lifecycle: zcss_init = not yet initialized (rejected by the state
 * machine), zcss_load = gathering input, zcss_flush = draining outBuff,
 * zcss_final = epilogue written, only flushing remains. */
typedef enum { zcss_init, zcss_load, zcss_flush, zcss_final } ZSTD_cStreamStage;
2949
/* Streaming compression state. */
struct ZSTD_CStream_s {
	ZSTD_CCtx *cctx;	   /* underlying compression context */
	ZSTD_CDict *cdictLocal;	   /* CDict owned by this stream (freed with it) */
	const ZSTD_CDict *cdict;   /* active CDict: cdictLocal or a caller-owned one */
	char *inBuff;		   /* input staging buffer */
	size_t inBuffSize;
	size_t inToCompress;	   /* start of the not-yet-compressed region in inBuff */
	size_t inBuffPos;	   /* end of loaded data in inBuff */
	size_t inBuffTarget;	   /* fill up to here before compressing a block */
	size_t blockSize;
	char *outBuff;		   /* compressed-output staging buffer */
	size_t outBuffSize;
	size_t outBuffContentSize; /* bytes currently held in outBuff */
	size_t outBuffFlushedSize; /* bytes of outBuff already handed to the caller */
	ZSTD_cStreamStage stage;
	U32 checksum;		   /* non-zero when frames carry an xxhash checksum */
	U32 frameEnded;
	U64 pledgedSrcSize;	   /* promised total input size; 0 = unchecked/unknown */
	U64 inputProcessed;	   /* total input consumed so far */
	ZSTD_parameters params;
	ZSTD_customMem customMem;
};
2972
2973 size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams)
2974 {
2975 size_t const inBuffSize = (size_t)1 << cParams.windowLog;
2976 size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, inBuffSize);
2977 size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
2978
2979 return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize);
2980 }
2981
2982 ZSTD_CStream *ZSTD_createCStream_advanced(ZSTD_customMem customMem)
2983 {
2984 ZSTD_CStream *zcs;
2985
2986 if (!customMem.customAlloc || !customMem.customFree)
2987 return NULL;
2988
2989 zcs = (ZSTD_CStream *)ZSTD_malloc(sizeof(ZSTD_CStream), customMem);
2990 if (zcs == NULL)
2991 return NULL;
2992 memset(zcs, 0, sizeof(ZSTD_CStream));
2993 memcpy(&zcs->customMem, &customMem, sizeof(ZSTD_customMem));
2994 zcs->cctx = ZSTD_createCCtx_advanced(customMem);
2995 if (zcs->cctx == NULL) {
2996 ZSTD_freeCStream(zcs);
2997 return NULL;
2998 }
2999 return zcs;
3000 }
3001
3002 size_t ZSTD_freeCStream(ZSTD_CStream *zcs)
3003 {
3004 if (zcs == NULL)
3005 return 0;
3006 {
3007 ZSTD_customMem const cMem = zcs->customMem;
3008 ZSTD_freeCCtx(zcs->cctx);
3009 zcs->cctx = NULL;
3010 ZSTD_freeCDict(zcs->cdictLocal);
3011 zcs->cdictLocal = NULL;
3012 ZSTD_free(zcs->inBuff, cMem);
3013 zcs->inBuff = NULL;
3014 ZSTD_free(zcs->outBuff, cMem);
3015 zcs->outBuff = NULL;
3016 ZSTD_free(zcs, cMem);
3017 return 0;
3018 }
3019 }
3020
3021
3022
3023 size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
3024 size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_ABSOLUTEMAX) + ZSTD_blockHeaderSize + 4 ; }
3025
3026 static size_t ZSTD_resetCStream_internal(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize)
3027 {
3028 if (zcs->inBuffSize == 0)
3029 return ERROR(stage_wrong);
3030
3031 if (zcs->cdict)
3032 CHECK_F(ZSTD_compressBegin_usingCDict(zcs->cctx, zcs->cdict, pledgedSrcSize))
3033 else
3034 CHECK_F(ZSTD_compressBegin_advanced(zcs->cctx, NULL, 0, zcs->params, pledgedSrcSize));
3035
3036 zcs->inToCompress = 0;
3037 zcs->inBuffPos = 0;
3038 zcs->inBuffTarget = zcs->blockSize;
3039 zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
3040 zcs->stage = zcss_load;
3041 zcs->frameEnded = 0;
3042 zcs->pledgedSrcSize = pledgedSrcSize;
3043 zcs->inputProcessed = 0;
3044 return 0;
3045 }
3046
3047 size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize)
3048 {
3049
3050 zcs->params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
3051
3052 return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
3053 }
3054
/* (Re)allocate the stream's staging buffers for `params`, digest the optional
 * dictionary into a stream-owned CDict, then reset for a new frame of
 * `pledgedSrcSize` bytes (0 = unknown). Buffers are only grown, never shrunk,
 * so a stream can be re-initialized cheaply with equal or smaller params. */
static size_t ZSTD_initCStream_advanced(ZSTD_CStream *zcs, const void *dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize)
{
	/* allocate buffers */
	{
		size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog;
		if (zcs->inBuffSize < neededInBuffSize) {
			zcs->inBuffSize = neededInBuffSize;
			ZSTD_free(zcs->inBuff, zcs->customMem); /* ZSTD_free(NULL) is a no-op */
			zcs->inBuff = (char *)ZSTD_malloc(neededInBuffSize, zcs->customMem);
			if (zcs->inBuff == NULL)
				return ERROR(memory_allocation);
		}
		zcs->blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, neededInBuffSize);
	}
	if (zcs->outBuffSize < ZSTD_compressBound(zcs->blockSize) + 1) {
		zcs->outBuffSize = ZSTD_compressBound(zcs->blockSize) + 1;
		ZSTD_free(zcs->outBuff, zcs->customMem);
		zcs->outBuff = (char *)ZSTD_malloc(zcs->outBuffSize, zcs->customMem);
		if (zcs->outBuff == NULL)
			return ERROR(memory_allocation);
	}

	/* Dictionaries need at least 8 bytes to be usable (shorter ones are
	 * ignored by ZSTD_compress_insertDictionary as well). */
	if (dict && dictSize >= 8) {
		ZSTD_freeCDict(zcs->cdictLocal);
		/* byReference = 0: copy the dict so the caller may free it */
		zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, 0, params, zcs->customMem);
		if (zcs->cdictLocal == NULL)
			return ERROR(memory_allocation);
		zcs->cdict = zcs->cdictLocal;
	} else
		zcs->cdict = NULL;

	zcs->checksum = params.fParams.checksumFlag > 0;
	zcs->params = params;

	return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
}
3091
3092 ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize)
3093 {
3094 ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
3095 ZSTD_CStream *const zcs = ZSTD_createCStream_advanced(stackMem);
3096 if (zcs) {
3097 size_t const code = ZSTD_initCStream_advanced(zcs, NULL, 0, params, pledgedSrcSize);
3098 if (ZSTD_isError(code)) {
3099 return NULL;
3100 }
3101 }
3102 return zcs;
3103 }
3104
3105 ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict, unsigned long long pledgedSrcSize, void *workspace, size_t workspaceSize)
3106 {
3107 ZSTD_parameters const params = ZSTD_getParamsFromCDict(cdict);
3108 ZSTD_CStream *const zcs = ZSTD_initCStream(params, pledgedSrcSize, workspace, workspaceSize);
3109 if (zcs) {
3110 zcs->cdict = cdict;
3111 if (ZSTD_isError(ZSTD_resetCStream_internal(zcs, pledgedSrcSize))) {
3112 return NULL;
3113 }
3114 }
3115 return zcs;
3116 }
3117
3118
3119
/* Flush directive for ZSTD_compressStream_generic(): zsf_gather = buffer
 * input and compress only full blocks, zsf_flush = compress whatever is
 * buffered, zsf_end = compress and finish the frame (write the epilogue). */
typedef enum { zsf_gather, zsf_flush, zsf_end } ZSTD_flush_e;
3121
3122 ZSTD_STATIC size_t ZSTD_limitCopy(void *dst, size_t dstCapacity, const void *src, size_t srcSize)
3123 {
3124 size_t const length = MIN(dstCapacity, srcSize);
3125 memcpy(dst, src, length);
3126 return length;
3127 }
3128
/* Core streaming state machine. Consumes up to *srcSizePtr bytes from `src`
 * and writes up to *dstCapacityPtr bytes to `dst`, updating both with the
 * amounts actually used. Returns a size hint for the next input chunk,
 * 0 once the frame is complete, or an error code. */
static size_t ZSTD_compressStream_generic(ZSTD_CStream *zcs, void *dst, size_t *dstCapacityPtr, const void *src, size_t *srcSizePtr, ZSTD_flush_e const flush)
{
	U32 someMoreWork = 1;
	const char *const istart = (const char *)src;
	const char *const iend = istart + *srcSizePtr;
	const char *ip = istart;
	char *const ostart = (char *)dst;
	char *const oend = ostart + *dstCapacityPtr;
	char *op = ostart;

	while (someMoreWork) {
		switch (zcs->stage) {
		case zcss_init:
			return ERROR(init_missing); /* call ZSTD_initCStream() first */

		case zcss_load:
			/* fill inBuff up to inBuffTarget */
			{
				size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
				size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend - ip);
				zcs->inBuffPos += loaded;
				ip += loaded;
				/* stop if there is nothing new to compress, or if we are only
				 * gathering and could not fill a whole block */
				if ((zcs->inBuffPos == zcs->inToCompress) || (!flush && (toLoad != loaded))) {
					someMoreWork = 0;
					break;
				}
			}
			/* compress the buffered region */
			{
				void *cDst;
				size_t cSize;
				size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
				size_t oSize = oend - op;
				/* compress straight into the caller's buffer when it is
				 * provably large enough, otherwise stage through outBuff */
				if (oSize >= ZSTD_compressBound(iSize))
					cDst = op;
				else
					cDst = zcs->outBuff, oSize = zcs->outBuffSize;
				cSize = (flush == zsf_end) ? ZSTD_compressEnd(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize)
							   : ZSTD_compressContinue(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize);
				if (ZSTD_isError(cSize))
					return cSize;
				if (flush == zsf_end)
					zcs->frameEnded = 1;
				/* prepare the next block; wrap to the start of inBuff when
				 * the next block would overrun it */
				zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
				if (zcs->inBuffTarget > zcs->inBuffSize)
					zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
				zcs->inToCompress = zcs->inBuffPos;
				if (cDst == op) { /* wrote directly to dst: no flush stage needed */
					op += cSize;
					break;
				}
				zcs->outBuffContentSize = cSize;
				zcs->outBuffFlushedSize = 0;
				zcs->stage = zcss_flush;
			}
			/* fallthrough */
		case zcss_flush: {
			/* hand buffered compressed bytes to the caller */
			size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
			size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
			op += flushed;
			zcs->outBuffFlushedSize += flushed;
			if (toFlush != flushed) { /* dst is full */
				someMoreWork = 0;
				break;
			}
			zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
			zcs->stage = zcss_load;
			break;
		}

		case zcss_final:
			someMoreWork = 0; /* do nothing */
			break;

		default:
			return ERROR(GENERIC); /* impossible */
		}
	}

	*srcSizePtr = ip - istart;
	*dstCapacityPtr = op - ostart;
	zcs->inputProcessed += *srcSizePtr;
	if (zcs->frameEnded)
		return 0;
	{
		/* hint: amount of input that would complete the current block */
		size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
		if (hintInSize == 0)
			hintInSize = zcs->blockSize;
		return hintInSize;
	}
}
3222
3223 size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output, ZSTD_inBuffer *input)
3224 {
3225 size_t sizeRead = input->size - input->pos;
3226 size_t sizeWritten = output->size - output->pos;
3227 size_t const result =
3228 ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, (const char *)(input->src) + input->pos, &sizeRead, zsf_gather);
3229 input->pos += sizeRead;
3230 output->pos += sizeWritten;
3231 return result;
3232 }
3233
3234
3235
3236
3237
/* Compress and emit whatever the stream has buffered, without ending the
 * frame. Returns the number of bytes still buffered after the call (0 when
 * fully flushed), or an error code. */
size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output)
{
	size_t srcSize = 0;
	size_t sizeWritten = output->size - output->pos;
	size_t const result = ZSTD_compressStream_generic(zcs, (char *)(output->dst) + output->pos, &sizeWritten, &srcSize,
							  &srcSize, /* use a valid src address instead of NULL: with srcSize == 0 it is never read */
							  zsf_flush);
	output->pos += sizeWritten;
	if (ZSTD_isError(result))
		return result;
	return zcs->outBuffContentSize - zcs->outBuffFlushedSize; /* remaining to flush */
}
3250
/* Finish the frame: compress remaining buffered input, write the epilogue,
 * and flush as much as fits into `output`. Returns the number of bytes still
 * to flush (0 = frame complete), or an error code. Fails with srcSize_wrong
 * if a non-zero pledged size does not match the input actually consumed. */
size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output)
{
	BYTE *const ostart = (BYTE *)(output->dst) + output->pos;
	BYTE *const oend = (BYTE *)(output->dst) + output->size;
	BYTE *op = ostart;

	if ((zcs->pledgedSrcSize) && (zcs->inputProcessed != zcs->pledgedSrcSize))
		return ERROR(srcSize_wrong);

	if (zcs->stage != zcss_final) {
		/* flush whatever remains */
		size_t srcSize = 0;
		size_t sizeWritten = output->size - output->pos;
		size_t const notEnded =
		    ZSTD_compressStream_generic(zcs, ostart, &sizeWritten, &srcSize, &srcSize, zsf_end); /* use a valid src address instead of NULL */
		size_t const remainingToFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
		op += sizeWritten;
		if (remainingToFlush) {
			output->pos += sizeWritten;
			/* output still buffered: report it plus the worst-case
			 * epilogue (block header + optional 4-byte checksum) */
			return remainingToFlush + ZSTD_BLOCKHEADERSIZE + (zcs->checksum * 4);
		}
		/* create epilogue */
		zcs->stage = zcss_final;
		zcs->outBuffContentSize = !notEnded ? 0 : ZSTD_compressEnd(zcs->cctx, zcs->outBuff, zcs->outBuffSize, NULL,
									   0); /* write epilogue into outBuff */
	}

	/* flush epilogue */
	{
		size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
		size_t const flushed = ZSTD_limitCopy(op, oend - op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
		op += flushed;
		zcs->outBuffFlushedSize += flushed;
		output->pos += op - ostart;
		if (toFlush == flushed)
			zcs->stage = zcss_init; /* end reached */
		return toFlush - flushed;
	}
}
3290
3291
3292
#define ZSTD_DEFAULT_CLEVEL 1 /* level used when the caller passes <= 0 */
#define ZSTD_MAX_CLEVEL 22    /* highest supported compression level */
/* Maximum compression level supported by this build. */
int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
3296
/* Pre-tuned compression parameters, indexed as [tableID][compressionLevel].
 * tableID is chosen in ZSTD_getCParams() from the estimated source size:
 *   0 = > 256 KB, 1 = <= 256 KB, 2 = <= 128 KB, 3 = <= 16 KB.
 * Columns (ZSTD_compressionParameters field order): { windowLog, chainLog,
 * hashLog, searchLog, searchLength, targetLength, strategy }. */
static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL + 1] = {
    {
	/* table 0: "default" sizes (> 256 KB); row index == compression level,
	 * row 0 is unused (level 0 maps to ZSTD_DEFAULT_CLEVEL) */
	{18, 12, 12, 1, 7, 16, ZSTD_fast},
	{19, 13, 14, 1, 7, 16, ZSTD_fast},
	{19, 15, 16, 1, 6, 16, ZSTD_fast},
	{20, 16, 17, 1, 5, 16, ZSTD_dfast},
	{20, 18, 18, 1, 5, 16, ZSTD_dfast},
	{20, 15, 18, 3, 5, 16, ZSTD_greedy},
	{21, 16, 19, 2, 5, 16, ZSTD_lazy},
	{21, 17, 20, 3, 5, 16, ZSTD_lazy},
	{21, 18, 20, 3, 5, 16, ZSTD_lazy2},
	{21, 20, 20, 3, 5, 16, ZSTD_lazy2},
	{21, 19, 21, 4, 5, 16, ZSTD_lazy2},
	{22, 20, 22, 4, 5, 16, ZSTD_lazy2},
	{22, 20, 22, 5, 5, 16, ZSTD_lazy2},
	{22, 21, 22, 5, 5, 16, ZSTD_lazy2},
	{22, 21, 22, 6, 5, 16, ZSTD_lazy2},
	{22, 21, 21, 5, 5, 16, ZSTD_btlazy2},
	{23, 22, 22, 5, 5, 16, ZSTD_btlazy2},
	{23, 21, 22, 4, 5, 24, ZSTD_btopt},
	{23, 23, 22, 6, 5, 32, ZSTD_btopt},
	{23, 23, 22, 6, 3, 48, ZSTD_btopt},
	{25, 25, 23, 7, 3, 64, ZSTD_btopt2},
	{26, 26, 23, 7, 3, 256, ZSTD_btopt2},
	{27, 27, 25, 9, 3, 512, ZSTD_btopt2},
    },
    {
	/* table 1: sources <= 256 KB */
	{0, 0, 0, 0, 0, 0, ZSTD_fast},
	{18, 13, 14, 1, 6, 8, ZSTD_fast},
	{18, 14, 13, 1, 5, 8, ZSTD_dfast},
	{18, 16, 15, 1, 5, 8, ZSTD_dfast},
	{18, 15, 17, 1, 5, 8, ZSTD_greedy},
	{18, 16, 17, 4, 5, 8, ZSTD_greedy},
	{18, 16, 17, 3, 5, 8, ZSTD_lazy},
	{18, 17, 17, 4, 4, 8, ZSTD_lazy},
	{18, 17, 17, 4, 4, 8, ZSTD_lazy2},
	{18, 17, 17, 5, 4, 8, ZSTD_lazy2},
	{18, 17, 17, 6, 4, 8, ZSTD_lazy2},
	{18, 18, 17, 6, 4, 8, ZSTD_lazy2},
	{18, 18, 17, 7, 4, 8, ZSTD_lazy2},
	{18, 19, 17, 6, 4, 8, ZSTD_btlazy2},
	{18, 18, 18, 4, 4, 16, ZSTD_btopt},
	{18, 18, 18, 4, 3, 16, ZSTD_btopt},
	{18, 19, 18, 6, 3, 32, ZSTD_btopt},
	{18, 19, 18, 8, 3, 64, ZSTD_btopt},
	{18, 19, 18, 9, 3, 128, ZSTD_btopt},
	{18, 19, 18, 10, 3, 256, ZSTD_btopt},
	{18, 19, 18, 11, 3, 512, ZSTD_btopt2},
	{18, 19, 18, 12, 3, 512, ZSTD_btopt2},
	{18, 19, 18, 13, 3, 512, ZSTD_btopt2},
    },
    {
	/* table 2: sources <= 128 KB */
	{17, 12, 12, 1, 7, 8, ZSTD_fast},
	{17, 12, 13, 1, 6, 8, ZSTD_fast},
	{17, 13, 16, 1, 5, 8, ZSTD_fast},
	{17, 16, 16, 2, 5, 8, ZSTD_dfast},
	{17, 13, 15, 3, 4, 8, ZSTD_greedy},
	{17, 15, 17, 4, 4, 8, ZSTD_greedy},
	{17, 16, 17, 3, 4, 8, ZSTD_lazy},
	{17, 15, 17, 4, 4, 8, ZSTD_lazy2},
	{17, 17, 17, 4, 4, 8, ZSTD_lazy2},
	{17, 17, 17, 5, 4, 8, ZSTD_lazy2},
	{17, 17, 17, 6, 4, 8, ZSTD_lazy2},
	{17, 17, 17, 7, 4, 8, ZSTD_lazy2},
	{17, 17, 17, 8, 4, 8, ZSTD_lazy2},
	{17, 18, 17, 6, 4, 8, ZSTD_btlazy2},
	{17, 17, 17, 7, 3, 8, ZSTD_btopt},
	{17, 17, 17, 7, 3, 16, ZSTD_btopt},
	{17, 18, 17, 7, 3, 32, ZSTD_btopt},
	{17, 18, 17, 7, 3, 64, ZSTD_btopt},
	{17, 18, 17, 7, 3, 256, ZSTD_btopt},
	{17, 18, 17, 8, 3, 256, ZSTD_btopt},
	{17, 18, 17, 9, 3, 256, ZSTD_btopt2},
	{17, 18, 17, 10, 3, 256, ZSTD_btopt2},
	{17, 18, 17, 11, 3, 512, ZSTD_btopt2},
    },
    {
	/* table 3: sources <= 16 KB */
	{14, 12, 12, 1, 7, 6, ZSTD_fast},
	{14, 14, 14, 1, 6, 6, ZSTD_fast},
	{14, 14, 14, 1, 4, 6, ZSTD_fast},
	{14, 14, 14, 1, 4, 6, ZSTD_dfast},
	{14, 14, 14, 4, 4, 6, ZSTD_greedy},
	{14, 14, 14, 3, 4, 6, ZSTD_lazy},
	{14, 14, 14, 4, 4, 6, ZSTD_lazy2},
	{14, 14, 14, 5, 4, 6, ZSTD_lazy2},
	{14, 14, 14, 6, 4, 6, ZSTD_lazy2},
	{14, 15, 14, 6, 4, 6, ZSTD_btlazy2},
	{14, 15, 14, 3, 3, 6, ZSTD_btopt},
	{14, 15, 14, 6, 3, 8, ZSTD_btopt},
	{14, 15, 14, 6, 3, 16, ZSTD_btopt},
	{14, 15, 14, 6, 3, 24, ZSTD_btopt},
	{14, 15, 15, 6, 3, 48, ZSTD_btopt},
	{14, 15, 15, 6, 3, 64, ZSTD_btopt},
	{14, 15, 15, 6, 3, 96, ZSTD_btopt},
	{14, 15, 15, 6, 3, 128, ZSTD_btopt},
	{14, 15, 15, 6, 3, 256, ZSTD_btopt},
	{14, 15, 15, 7, 3, 256, ZSTD_btopt},
	{14, 15, 15, 8, 3, 256, ZSTD_btopt2},
	{14, 15, 15, 9, 3, 256, ZSTD_btopt2},
	{14, 15, 15, 10, 3, 256, ZSTD_btopt2},
    },
};
3407
3408
3409
3410
/* Select pre-tuned compression parameters for a level and estimated source
 * size. `srcSize == 0` means "unknown"; the result is clamped for 32-bit
 * targets and finally adapted to the actual sizes by ZSTD_adjustCParams(). */
ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
{
	ZSTD_compressionParameters cp;
	size_t const addedSize = srcSize ? 0 : 500; /* bias the estimate when srcSize is unknown */
	/* srcSize == 0 && dictSize == 0 => totally unknown => use the largest table */
	U64 const rSize = srcSize + dictSize ? srcSize + dictSize + addedSize : (U64)-1;
	U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); /* 0..3, smaller sources pick later tables */
	if (compressionLevel <= 0)
		compressionLevel = ZSTD_DEFAULT_CLEVEL; /* 0 == default */
	if (compressionLevel > ZSTD_MAX_CLEVEL)
		compressionLevel = ZSTD_MAX_CLEVEL;
	cp = ZSTD_defaultCParameters[tableID][compressionLevel];
	if (ZSTD_32bits()) { /* auto-correction, for 32-bit targets */
		if (cp.windowLog > ZSTD_WINDOWLOG_MAX)
			cp.windowLog = ZSTD_WINDOWLOG_MAX;
		if (cp.chainLog > ZSTD_CHAINLOG_MAX)
			cp.chainLog = ZSTD_CHAINLOG_MAX;
		if (cp.hashLog > ZSTD_HASHLOG_MAX)
			cp.hashLog = ZSTD_HASHLOG_MAX;
	}
	cp = ZSTD_adjustCParams(cp, srcSize, dictSize);
	return cp;
}
3433
3434
3435
3436
3437 ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
3438 {
3439 ZSTD_parameters params;
3440 ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSize, dictSize);
3441 memset(¶ms, 0, sizeof(params));
3442 params.cParams = cParams;
3443 return params;
3444 }
3445
/* Level bounds and output sizing */
EXPORT_SYMBOL(ZSTD_maxCLevel);
EXPORT_SYMBOL(ZSTD_compressBound);

/* One-shot compression contexts */
EXPORT_SYMBOL(ZSTD_CCtxWorkspaceBound);
EXPORT_SYMBOL(ZSTD_initCCtx);
EXPORT_SYMBOL(ZSTD_compressCCtx);
EXPORT_SYMBOL(ZSTD_compress_usingDict);

/* Digested dictionaries */
EXPORT_SYMBOL(ZSTD_CDictWorkspaceBound);
EXPORT_SYMBOL(ZSTD_initCDict);
EXPORT_SYMBOL(ZSTD_compress_usingCDict);

/* Streaming compression */
EXPORT_SYMBOL(ZSTD_CStreamWorkspaceBound);
EXPORT_SYMBOL(ZSTD_initCStream);
EXPORT_SYMBOL(ZSTD_initCStream_usingCDict);
EXPORT_SYMBOL(ZSTD_resetCStream);
EXPORT_SYMBOL(ZSTD_compressStream);
EXPORT_SYMBOL(ZSTD_flushStream);
EXPORT_SYMBOL(ZSTD_endStream);
EXPORT_SYMBOL(ZSTD_CStreamInSize);
EXPORT_SYMBOL(ZSTD_CStreamOutSize);

/* Parameter selection and validation */
EXPORT_SYMBOL(ZSTD_getCParams);
EXPORT_SYMBOL(ZSTD_getParams);
EXPORT_SYMBOL(ZSTD_checkCParams);
EXPORT_SYMBOL(ZSTD_adjustCParams);

/* Advanced / multi-segment frame API */
EXPORT_SYMBOL(ZSTD_compressBegin);
EXPORT_SYMBOL(ZSTD_compressBegin_usingDict);
EXPORT_SYMBOL(ZSTD_compressBegin_advanced);
EXPORT_SYMBOL(ZSTD_copyCCtx);
EXPORT_SYMBOL(ZSTD_compressBegin_usingCDict);
EXPORT_SYMBOL(ZSTD_compressContinue);
EXPORT_SYMBOL(ZSTD_compressEnd);

/* Block-level API */
EXPORT_SYMBOL(ZSTD_getBlockSizeMax);
EXPORT_SYMBOL(ZSTD_compressBlock);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Zstd Compressor");