zstd_fast.c
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"  /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
#include "zstd_fast.h"

static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms,
                        const void* const end,
                        ZSTD_dictTableLoadMethod_e dtlm)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32  const hBits = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
    U32  const mls = cParams->minMatch;
    const BYTE* const base = ms->window.base;
    const BYTE* ip = base + ms->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const U32 fastHashFillStep = 3;

    /* Currently, we always use ZSTD_dtlm_full for filling CDict tables.
     * Feel free to remove this assert if there's a good reason! */
    assert(dtlm == ZSTD_dtlm_full);

    /* Always insert every fastHashFillStep position into the hash table.
     * Insert the other positions if their hash entry is empty.
     */
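    /* Illustration (hypothetical indices): with fastHashFillStep == 3,
     * positions 0, 3, 6, ... are always (re)written, while positions
     * 1, 2, 4, 5, ... are written only when their hash slot is still empty,
     * so the earlier (smaller-index) entry wins for those slots. */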
    for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
        U32 const curr = (U32)(ip - base);
        {   size_t const hashAndTag = ZSTD_hashPtr(ip, hBits, mls);
            ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr);   }

        if (dtlm == ZSTD_dtlm_fast) continue;
        /* Only load extra positions for ZSTD_dtlm_full */
        {   U32 p;
            for (p = 1; p < fastHashFillStep; ++p) {
                size_t const hashAndTag = ZSTD_hashPtr(ip + p, hBits, mls);
                if (hashTable[hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) {  /* not yet filled */
                    ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p);
    }   }   }   }
}
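
/* A note on the tagged layout used above (a sketch based on
 * ZSTD_writeTaggedIndex and ZSTD_SHORT_CACHE_TAG_BITS from
 * zstd_compress_internal.h): ZSTD_hashPtr is asked for
 * hashLog + ZSTD_SHORT_CACHE_TAG_BITS bits, the top hashLog bits select the
 * table slot, and the low bits are packed next to the index, roughly
 *     entry = (index << ZSTD_SHORT_CACHE_TAG_BITS) | tag;
 * so a later lookup can reject most non-matching slots by comparing tags
 * before touching the (cold) CDict content. */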

static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms,
                        const void* const end,
                        ZSTD_dictTableLoadMethod_e dtlm)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32  const hBits = cParams->hashLog;
    U32  const mls = cParams->minMatch;
    const BYTE* const base = ms->window.base;
    const BYTE* ip = base + ms->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const U32 fastHashFillStep = 3;

    /* Currently, we always use ZSTD_dtlm_fast for filling CCtx tables.
     * Feel free to remove this assert if there's a good reason! */
    assert(dtlm == ZSTD_dtlm_fast);

    /* Always insert every fastHashFillStep position into the hash table.
     * Insert the other positions if their hash entry is empty.
     */
    for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
        U32 const curr = (U32)(ip - base);
        size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
        hashTable[hash0] = curr;
        if (dtlm == ZSTD_dtlm_fast) continue;
        /* Only load extra positions for ZSTD_dtlm_full */
        {   U32 p;
            for (p = 1; p < fastHashFillStep; ++p) {
                size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
                if (hashTable[hash] == 0) {  /* not yet filled */
                    hashTable[hash] = curr + p;
    }   }   }   }
}

void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
                        const void* const end,
                        ZSTD_dictTableLoadMethod_e dtlm,
                        ZSTD_tableFillPurpose_e tfp)
{
    if (tfp == ZSTD_tfp_forCDict) {
        ZSTD_fillHashTableForCDict(ms, end, dtlm);
    } else {
        ZSTD_fillHashTableForCCtx(ms, end, dtlm);
    }
}

FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_fast_noDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const mls, U32 const hasStep)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    size_t const stepSize = hasStep ? (cParams->targetLength + !(cParams->targetLength) + 1) : 2;
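    /* e.g. hasStep == 0 -> stepSize = 2; hasStep == 1 with targetLength == 0
     * -> stepSize = 0 + 1 + 1 = 2; with targetLength == 3 -> 3 + 0 + 1 = 4. */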
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32 prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;

    const BYTE* anchor = istart;
    const BYTE* ip0 = istart;
    const BYTE* ip1;
    const BYTE* ip2;
    const BYTE* ip3;
    U32 current0;

    U32 rep_offset1 = rep[0];
    U32 rep_offset2 = rep[1];
    U32 offsetSaved1 = 0, offsetSaved2 = 0;

    size_t hash0; /* hash for ip0 */
    size_t hash1; /* hash for ip1 */
    U32 idx; /* match idx for ip0 */
    U32 mval; /* src value at match idx */

    U32 offcode;
    const BYTE* match0;
    size_t mLength;

    /* ip0 and ip1 are always adjacent. The targetLength skipping and
     * uncompressibility acceleration is applied to every other position,
     * matching the behavior of #1562. step therefore represents the gap
     * between pairs of positions, from ip0 to ip2 or ip1 to ip3. */
    size_t step;
    const BYTE* nextStep;
    const size_t kStepIncr = (1 << (kSearchStrength - 1));
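    /* Illustration: with step == 2 the four cursors are consecutive
     * (ip0, ip0+1, ip0+2, ip0+3); with step == 3 they form two adjacent
     * pairs separated by a gap (ip0, ip0+1, ip0+3, ip0+4). */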

    DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
    ip0 += (ip0 == prefixStart);
    {   U32 const curr = (U32)(ip0 - base);
        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
        U32 const maxRep = curr - windowLow;
        if (rep_offset2 > maxRep) offsetSaved2 = rep_offset2, rep_offset2 = 0;
        if (rep_offset1 > maxRep) offsetSaved1 = rep_offset1, rep_offset1 = 0;
    }

    /* start each op */
_start: /* Requires: ip0 */

    step = stepSize;
    nextStep = ip0 + kStepIncr;

    /* calculate positions, ip0 - anchor == 0, so we skip step calc */
    ip1 = ip0 + 1;
    ip2 = ip0 + step;
    ip3 = ip2 + 1;

    if (ip3 >= ilimit) {
        goto _cleanup;
    }

    hash0 = ZSTD_hashPtr(ip0, hlog, mls);
    hash1 = ZSTD_hashPtr(ip1, hlog, mls);

    idx = hashTable[hash0];

    do {
        /* load repcode match for ip[2] */
        const U32 rval = MEM_read32(ip2 - rep_offset1);

        /* write back hash table entry */
        current0 = (U32)(ip0 - base);
        hashTable[hash0] = current0;

        /* check repcode at ip[2] */
        if ((MEM_read32(ip2) == rval) & (rep_offset1 > 0)) {
            ip0 = ip2;
            match0 = ip0 - rep_offset1;
            mLength = ip0[-1] == match0[-1];
            ip0 -= mLength;
            match0 -= mLength;
            offcode = REPCODE1_TO_OFFBASE;
            mLength += 4;

            /* First write next hash table entry; we've already calculated it.
             * This write is known to be safe because the ip1 is before the
             * repcode (ip2). */
            hashTable[hash1] = (U32)(ip1 - base);

            goto _match;
        }

        /* load match for ip[0] */
        if (idx >= prefixStartIndex) {
            mval = MEM_read32(base + idx);
        } else {
            mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
        }

        /* check match at ip[0] */
        if (MEM_read32(ip0) == mval) {
            /* found a match! */

            /* First write next hash table entry; we've already calculated it.
             * This write is known to be safe because the ip1 == ip0 + 1, so
             * we know we will resume searching after ip1 */
            hashTable[hash1] = (U32)(ip1 - base);

            goto _offset;
        }

        /* lookup ip[1] */
        idx = hashTable[hash1];

        /* hash ip[2] */
        hash0 = hash1;
        hash1 = ZSTD_hashPtr(ip2, hlog, mls);

        /* advance to next positions */
        ip0 = ip1;
        ip1 = ip2;
        ip2 = ip3;

        /* write back hash table entry */
        current0 = (U32)(ip0 - base);
        hashTable[hash0] = current0;

        /* load match for ip[0] */
        if (idx >= prefixStartIndex) {
            mval = MEM_read32(base + idx);
        } else {
            mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
        }

        /* check match at ip[0] */
        if (MEM_read32(ip0) == mval) {
            /* found a match! */

            /* first write next hash table entry; we've already calculated it */
            if (step <= 4) {
                /* We need to avoid writing an index into the hash table >= the
                 * position at which we will pick up our searching after we've
                 * taken this match.
                 *
                 * The minimum possible match has length 4, so the earliest ip0
                 * can be after we take this match will be the current ip0 + 4.
                 * ip1 is ip0 + step - 1. If ip1 is >= ip0 + 4, we can't safely
                 * write this position.
                 */
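                /* e.g. step == 4 gives ip1 == ip0 + 3 < ip0 + 4: safe to
                 * write; step == 5 gives ip1 == ip0 + 4, which could equal
                 * the resume position, so the write would be skipped. */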
                hashTable[hash1] = (U32)(ip1 - base);
            }

            goto _offset;
        }

        /* lookup ip[1] */
        idx = hashTable[hash1];

        /* hash ip[2] */
        hash0 = hash1;
        hash1 = ZSTD_hashPtr(ip2, hlog, mls);

        /* advance to next positions */
        ip0 = ip1;
        ip1 = ip2;
        ip2 = ip0 + step;
        ip3 = ip1 + step;

        /* calculate step */
        if (ip2 >= nextStep) {
            step++;
            PREFETCH_L1(ip1 + 64);
            PREFETCH_L1(ip1 + 128);
            nextStep += kStepIncr;
        }
    } while (ip3 < ilimit);

_cleanup:
    /* Note that there are probably still a couple positions we could search.
     * However, it seems to be a meaningful performance hit to try to search
     * them. So let's not. */

    /* When the repcodes are outside of the prefix, we set them to zero before the loop.
     * When the offsets are still zero, we need to restore them after the block to have a correct
     * repcode history. If only one offset was invalid, it is easy. The tricky case is when both
     * offsets were invalid. We need to figure out which offset to refill with.
     *     - If both offsets are zero they are in the same order.
     *     - If both offsets are non-zero, we won't restore the offsets from `offsetSaved[12]`.
     *     - If only one is zero, we need to decide which offset to restore.
     *         - If rep_offset1 is non-zero, then rep_offset2 must be offsetSaved1.
     *         - It is impossible for rep_offset2 to be non-zero.
     *
     * So if rep_offset1 started invalid (offsetSaved1 != 0) and became valid (rep_offset1 != 0), then
     * set rep[0] = rep_offset1 and rep[1] = offsetSaved1.
     */
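    /* Worked example (hypothetical values): incoming history {24, 32} with
     * maxRep == 10 zeroes both offsets (offsetSaved1 = 24, offsetSaved2 = 32).
     * If a match later sets rep_offset1 = 5 while rep_offset2 is still 0, the
     * block exits with rep[0] = 5 and rep[1] = offsetSaved1 = 24. */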
    offsetSaved2 = ((offsetSaved1 != 0) && (rep_offset1 != 0)) ? offsetSaved1 : offsetSaved2;

    /* save reps for next block */
    rep[0] = rep_offset1 ? rep_offset1 : offsetSaved1;
    rep[1] = rep_offset2 ? rep_offset2 : offsetSaved2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);

_offset: /* Requires: ip0, idx */

    /* Compute the offset code. */
    match0 = base + idx;
    rep_offset2 = rep_offset1;
    rep_offset1 = (U32)(ip0-match0);
    offcode = OFFSET_TO_OFFBASE(rep_offset1);
    mLength = 4;

    /* Count the backwards match length. */
    while (((ip0>anchor) & (match0>prefixStart)) && (ip0[-1] == match0[-1])) {
        ip0--;
        match0--;
        mLength++;
    }

_match: /* Requires: ip0, match0, offcode */

    /* Count the forward length. */
    mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend);

    ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);

    ip0 += mLength;
    anchor = ip0;

    /* Fill table and check for immediate repcode. */
    if (ip0 <= ilimit) {
        /* Fill Table */
        assert(base+current0+2 > istart);  /* check base overflow */
        hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
        hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);

        if (rep_offset2 > 0) { /* rep_offset2==0 means rep_offset2 is invalidated */
            while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) ) {
                /* store sequence */
                size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4;
                { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */
                hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
                ip0 += rLength;
                ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, REPCODE1_TO_OFFBASE, rLength);
                anchor = ip0;
                continue;   /* faster when present (confirmed on gcc-8) ... (?) */
    }   }   }
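
    /* Note on the swap above: the repcode that was just used is promoted to
     * rep_offset1, so a run of identical repeats keeps matching as repcode 1,
     * the cheapest offset to encode. */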

    goto _start;
}

#define ZSTD_GEN_FAST_FN(dictMode, mls, step)                                                            \
    static size_t ZSTD_compressBlock_fast_##dictMode##_##mls##_##step(                                   \
            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],                          \
            void const* src, size_t srcSize)                                                             \
    {                                                                                                    \
        return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls, step); \
    }
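
/* For example, ZSTD_GEN_FAST_FN(noDict, 4, 1) defines a wrapper
 * ZSTD_compressBlock_fast_noDict_4_1() that forwards to
 * ZSTD_compressBlock_fast_noDict_generic() with mls == 4 and hasStep == 1,
 * letting the compiler specialize the force-inlined generic body per
 * parameter set. */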

ZSTD_GEN_FAST_FN(noDict, 4, 1)
ZSTD_GEN_FAST_FN(noDict, 5, 1)
ZSTD_GEN_FAST_FN(noDict, 6, 1)
ZSTD_GEN_FAST_FN(noDict, 7, 1)

ZSTD_GEN_FAST_FN(noDict, 4, 0)
ZSTD_GEN_FAST_FN(noDict, 5, 0)
ZSTD_GEN_FAST_FN(noDict, 6, 0)
ZSTD_GEN_FAST_FN(noDict, 7, 0)

size_t ZSTD_compressBlock_fast(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    assert(ms->dictMatchState == NULL);
    if (ms->cParams.targetLength > 1) {
        switch(mls)
        {
        default: /* includes case 3 */
        case 4 :
            return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize);
        case 5 :
            return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize);
        case 6 :
            return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize);
        case 7 :
            return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize);
        }
    } else {
        switch(mls)
        {
        default: /* includes case 3 */
        case 4 :
            return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize);
        case 5 :
            return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize);
        case 6 :
            return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize);
        case 7 :
            return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize);
        }
    }
}

FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_fast_dictMatchState_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip0 = istart;
    const BYTE* ip1 = ip0 + stepSize; /* we assert below that stepSize >= 1 */
    const BYTE* anchor = istart;
    const U32 prefixStartIndex = ms->window.dictLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];

    const ZSTD_matchState_t* const dms = ms->dictMatchState;
    const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
    const U32* const dictHashTable = dms->hashTable;
    const U32 dictStartIndex = dms->window.dictLimit;
    const BYTE* const dictBase = dms->window.base;
    const BYTE* const dictStart = dictBase + dictStartIndex;
    const BYTE* const dictEnd = dms->window.nextSrc;
    const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase);
    const U32 dictAndPrefixLength = (U32)(istart - prefixStart + dictEnd - dictStart);
    const U32 dictHBits = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;

    /* if a dictionary is still attached, it necessarily means that
     * it is within window size. So we just check it. */
    const U32 maxDistance = 1U << cParams->windowLog;
    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
    assert(endIndex - prefixStartIndex <= maxDistance);
    (void)maxDistance; (void)endIndex;  /* these variables are not used when assert() is disabled */

    (void)hasStep; /* not currently specialized on whether it's accelerated */

    /* ensure there will be no underflow
     * when translating a dict index into a local index */
    assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
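    /* Index translation sketch: a dict-space index d corresponds to local
     * index d + dictIndexDelta. E.g. with a 1000-byte dict (dictEnd - dictBase
     * == 1000) attached below prefixStartIndex == 1000, dictIndexDelta == 0;
     * if prefixStartIndex were 1500 instead, dictIndexDelta == 500. */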

    if (ms->prefetchCDictTables) {
        size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32);
        PREFETCH_AREA(dictHashTable, hashTableBytes);
    }

    /* init */
    DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
    ip0 += (dictAndPrefixLength == 0);
    /* dictMatchState repCode checks don't currently handle repCode == 0
     * disabling. */
    assert(offset_1 <= dictAndPrefixLength);
    assert(offset_2 <= dictAndPrefixLength);

    /* Outer search loop */
    assert(stepSize >= 1);
    while (ip1 <= ilimit) {   /* repcode check at (ip0 + 1) is safe because ip0 < ip1 */
        size_t mLength;
        size_t hash0 = ZSTD_hashPtr(ip0, hlog, mls);

        size_t const dictHashAndTag0 = ZSTD_hashPtr(ip0, dictHBits, mls);
        U32 dictMatchIndexAndTag = dictHashTable[dictHashAndTag0 >> ZSTD_SHORT_CACHE_TAG_BITS];
        int dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag0);

        U32 matchIndex = hashTable[hash0];
        U32 curr = (U32)(ip0 - base);
        size_t step = stepSize;
        const size_t kStepIncr = 1 << kSearchStrength;
        const BYTE* nextStep = ip0 + kStepIncr;

        /* Inner search loop */
        while (1) {
            const BYTE* match = base + matchIndex;
            const U32 repIndex = curr + 1 - offset_1;
            const BYTE* repMatch = (repIndex < prefixStartIndex) ?
                                   dictBase + (repIndex - dictIndexDelta) :
                                   base + repIndex;
            const size_t hash1 = ZSTD_hashPtr(ip1, hlog, mls);
            size_t const dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls);
            hashTable[hash0] = curr;   /* update hash table */

            if ( ((U32)((prefixStartIndex - 1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
              && (MEM_read32(repMatch) == MEM_read32(ip0 + 1)) ) {
                const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
                mLength = ZSTD_count_2segments(ip0 + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4;
                ip0++;
                ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
                break;
            }
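            /* About the check above: when repIndex >= prefixStartIndex the U32
             * subtraction wraps to a huge value and the test passes (a plain
             * prefix match); it only rejects repIndex values in the last 3
             * bytes of the dict, where a 4-byte read would straddle the
             * dict/prefix boundary. */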

            if (dictTagsMatch) {
                /* Found a possible dict match */
                const U32 dictMatchIndex = dictMatchIndexAndTag >> ZSTD_SHORT_CACHE_TAG_BITS;
                const BYTE* dictMatch = dictBase + dictMatchIndex;
                if (dictMatchIndex > dictStartIndex &&
                    MEM_read32(dictMatch) == MEM_read32(ip0)) {
                    /* To replicate extDict parse behavior, we only use dict matches when the normal matchIndex is invalid */
                    if (matchIndex <= prefixStartIndex) {
                        U32 const offset = (U32)(curr - dictMatchIndex - dictIndexDelta);
                        mLength = ZSTD_count_2segments(ip0 + 4, dictMatch + 4, iend, dictEnd, prefixStart) + 4;
                        while (((ip0 > anchor) & (dictMatch > dictStart))
                             && (ip0[-1] == dictMatch[-1])) {
                            ip0--;
                            dictMatch--;
                            mLength++;
                        } /* catch up */
                        offset_2 = offset_1;
                        offset_1 = offset;
                        ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
                        break;
                    }
                }
            }

            if (matchIndex > prefixStartIndex && MEM_read32(match) == MEM_read32(ip0)) {
                /* found a regular match */
                U32 const offset = (U32)(ip0 - match);
                mLength = ZSTD_count(ip0 + 4, match + 4, iend) + 4;
                while (((ip0 > anchor) & (match > prefixStart))
                     && (ip0[-1] == match[-1])) {
                    ip0--;
                    match--;
                    mLength++;
                } /* catch up */
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
                break;
            }

            /* Prepare for next iteration */
            dictMatchIndexAndTag = dictHashTable[dictHashAndTag1 >> ZSTD_SHORT_CACHE_TAG_BITS];
            dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag1);
            matchIndex = hashTable[hash1];

            if (ip1 >= nextStep) {
                step++;
                nextStep += kStepIncr;
            }
            ip0 = ip1;
            ip1 = ip1 + step;
            if (ip1 > ilimit) goto _cleanup;

            curr = (U32)(ip0 - base);
            hash0 = hash1;
        }   /* end inner search loop */

        /* match found */
        assert(mLength);
        ip0 += mLength;
        anchor = ip0;

        if (ip0 <= ilimit) {
            /* Fill Table */
            assert(base+curr+2 > istart);  /* check base overflow */
            hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;  /* here because curr+2 could be > iend-8 */
            hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);

            /* check immediate repcode */
            while (ip0 <= ilimit) {
                U32 const current2 = (U32)(ip0-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
                        dictBase - dictIndexDelta + repIndex2 :
                        base + repIndex2;
                if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
                   && (MEM_read32(repMatch2) == MEM_read32(ip0))) {
                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
                    hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = current2;
                    ip0 += repLength2;
                    anchor = ip0;
                    continue;
                }
                break;
            }
        }

        /* Prepare for next iteration */
        assert(ip0 == anchor);
        ip1 = ip0 + stepSize;
    }

_cleanup:
    /* save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}


ZSTD_GEN_FAST_FN(dictMatchState, 4, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 5, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 6, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 7, 0)

size_t ZSTD_compressBlock_fast_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    assert(ms->dictMatchState != NULL);
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize);
    }
}

static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_fast_extDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* anchor = istart;
    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
    const U32   dictStartIndex = lowLimit;
    const BYTE* const dictStart = dictBase + dictStartIndex;
    const U32   dictLimit = ms->window.dictLimit;
    const U32   prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const dictEnd = dictBase + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    U32 offset_1=rep[0], offset_2=rep[1];
    U32 offsetSaved1 = 0, offsetSaved2 = 0;

    const BYTE* ip0 = istart;
    const BYTE* ip1;
    const BYTE* ip2;
    const BYTE* ip3;
    U32 current0;

    size_t hash0; /* hash for ip0 */
    size_t hash1; /* hash for ip1 */
    U32 idx; /* match idx for ip0 */
    const BYTE* idxBase; /* base pointer for idx */

    U32 offcode;
    const BYTE* match0;
    size_t mLength;
    const BYTE* matchEnd = 0; /* initialize to avoid warning, assert != 0 later */

    size_t step;
    const BYTE* nextStep;
    const size_t kStepIncr = (1 << (kSearchStrength - 1));

    (void)hasStep; /* not currently specialized on whether it's accelerated */

    DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);

    /* switch to "regular" variant if extDict is invalidated due to maxDistance */
    if (prefixStartIndex == dictStartIndex)
        return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize);

    {   U32 const curr = (U32)(ip0 - base);
        U32 const maxRep = curr - dictStartIndex;
        if (offset_2 >= maxRep) offsetSaved2 = offset_2, offset_2 = 0;
        if (offset_1 >= maxRep) offsetSaved1 = offset_1, offset_1 = 0;
    }

    /* start each op */
_start: /* Requires: ip0 */

    step = stepSize;
    nextStep = ip0 + kStepIncr;

    /* calculate positions, ip0 - anchor == 0, so we skip step calc */
    ip1 = ip0 + 1;
    ip2 = ip0 + step;
    ip3 = ip2 + 1;

    if (ip3 >= ilimit) {
        goto _cleanup;
    }

    hash0 = ZSTD_hashPtr(ip0, hlog, mls);
    hash1 = ZSTD_hashPtr(ip1, hlog, mls);

    idx = hashTable[hash0];
    idxBase = idx < prefixStartIndex ? dictBase : base;

    do {
        {   /* load repcode match for ip[2] */
            U32 const current2 = (U32)(ip2 - base);
            U32 const repIndex = current2 - offset_1;
            const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
            U32 rval;
            if ( ((U32)(prefixStartIndex - repIndex) >= 4) /* intentional underflow */
                 & (offset_1 > 0) ) {
                rval = MEM_read32(repBase + repIndex);
            } else {
                rval = MEM_read32(ip2) ^ 1; /* guaranteed to not match. */
            }

            /* write back hash table entry */
            current0 = (U32)(ip0 - base);
            hashTable[hash0] = current0;

            /* check repcode at ip[2] */
            if (MEM_read32(ip2) == rval) {
                ip0 = ip2;
                match0 = repBase + repIndex;
                matchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
                assert((match0 != prefixStart) & (match0 != dictStart));
                mLength = ip0[-1] == match0[-1];
                ip0 -= mLength;
                match0 -= mLength;
                offcode = REPCODE1_TO_OFFBASE;
                mLength += 4;
                goto _match;
        }   }

        {   /* load match for ip[0] */
            U32 const mval = idx >= dictStartIndex ?
                    MEM_read32(idxBase + idx) :
                    MEM_read32(ip0) ^ 1; /* guaranteed not to match */

            /* check match at ip[0] */
            if (MEM_read32(ip0) == mval) {
                /* found a match! */
                goto _offset;
        }   }

        /* lookup ip[1] */
        idx = hashTable[hash1];
        idxBase = idx < prefixStartIndex ? dictBase : base;

        /* hash ip[2] */
        hash0 = hash1;
        hash1 = ZSTD_hashPtr(ip2, hlog, mls);

        /* advance to next positions */
        ip0 = ip1;
        ip1 = ip2;
        ip2 = ip3;

        /* write back hash table entry */
        current0 = (U32)(ip0 - base);
        hashTable[hash0] = current0;

        {   /* load match for ip[0] */
            U32 const mval = idx >= dictStartIndex ?
                    MEM_read32(idxBase + idx) :
                    MEM_read32(ip0) ^ 1; /* guaranteed not to match */

            /* check match at ip[0] */
            if (MEM_read32(ip0) == mval) {
                /* found a match! */
                goto _offset;
        }   }

        /* lookup ip[1] */
        idx = hashTable[hash1];
        idxBase = idx < prefixStartIndex ? dictBase : base;

        /* hash ip[2] */
        hash0 = hash1;
        hash1 = ZSTD_hashPtr(ip2, hlog, mls);

        /* advance to next positions */
        ip0 = ip1;
        ip1 = ip2;
        ip2 = ip0 + step;
        ip3 = ip1 + step;

        /* calculate step */
        if (ip2 >= nextStep) {
            step++;
            PREFETCH_L1(ip1 + 64);
            PREFETCH_L1(ip1 + 128);
            nextStep += kStepIncr;
        }
    } while (ip3 < ilimit);

_cleanup:
    /* Note that there are probably still a couple positions we could search.
     * However, it seems to be a meaningful performance hit to try to search
     * them. So let's not. */

    /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
     * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
    offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;

    /* save reps for next block */
    rep[0] = offset_1 ? offset_1 : offsetSaved1;
    rep[1] = offset_2 ? offset_2 : offsetSaved2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);

_offset: /* Requires: ip0, idx, idxBase */

    /* Compute the offset code. */
    {   U32 const offset = current0 - idx;
        const BYTE* const lowMatchPtr = idx < prefixStartIndex ? dictStart : prefixStart;
        matchEnd = idx < prefixStartIndex ? dictEnd : iend;
        match0 = idxBase + idx;
        offset_2 = offset_1;
        offset_1 = offset;
        offcode = OFFSET_TO_OFFBASE(offset);
        mLength = 4;

        /* Count the backwards match length. */
        while (((ip0>anchor) & (match0>lowMatchPtr)) && (ip0[-1] == match0[-1])) {
            ip0--;
            match0--;
            mLength++;
    }   }

_match: /* Requires: ip0, match0, offcode, matchEnd */

    /* Count the forward length. */
    assert(matchEnd != 0);
    mLength += ZSTD_count_2segments(ip0 + mLength, match0 + mLength, iend, matchEnd, prefixStart);

    ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);

    ip0 += mLength;
    anchor = ip0;

    /* write next hash table entry */
    if (ip1 < ip0) {
        hashTable[hash1] = (U32)(ip1 - base);
    }

    /* Fill table and check for immediate repcode. */
    if (ip0 <= ilimit) {
        /* Fill Table */
        assert(base+current0+2 > istart);  /* check base overflow */
        hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
        hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);

        while (ip0 <= ilimit) {
            U32 const repIndex2 = (U32)(ip0-base) - offset_2;
            const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
            if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 > 0))  /* intentional underflow */
                 && (MEM_read32(repMatch2) == MEM_read32(ip0)) ) {
                const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */
                ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
                hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
                ip0 += repLength2;
                anchor = ip0;
                continue;
            }
            break;
    }   }

    goto _start;
}

ZSTD_GEN_FAST_FN(extDict, 4, 0)
ZSTD_GEN_FAST_FN(extDict, 5, 0)
ZSTD_GEN_FAST_FN(extDict, 6, 0)
ZSTD_GEN_FAST_FN(extDict, 7, 0)

size_t ZSTD_compressBlock_fast_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    assert(ms->dictMatchState == NULL);
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize);
    }
}