Project Alice
xxhash.h
1/*
2 * xxHash - Extremely Fast Hash algorithm
3 * Header File
4 * Copyright (c) Yann Collet - Meta Platforms, Inc
5 *
6 * This source code is licensed under both the BSD-style license (found in the
7 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
8 * in the COPYING file in the root directory of this source tree).
9 * You may select, at your option, one of the above-listed licenses.
10 */
11
12/* Local adaptations for Zstandard */
13
14#ifndef XXH_NO_XXH3
15# define XXH_NO_XXH3
16#endif
17
18#ifndef XXH_NAMESPACE
19# define XXH_NAMESPACE ZSTD_
20#endif
21
230#if defined (__cplusplus)
231extern "C" {
232#endif
233
234/* ****************************
235 * INLINE mode
236 ******************************/
242#ifdef XXH_DOXYGEN
254# define XXH_STATIC_LINKING_ONLY
255/* Do not undef XXH_STATIC_LINKING_ONLY for Doxygen */
256
267# define XXH_IMPLEMENTATION
268/* Do not undef XXH_IMPLEMENTATION for Doxygen */
269
288# define XXH_INLINE_ALL
289# undef XXH_INLINE_ALL
293# define XXH_PRIVATE_API
294# undef XXH_PRIVATE_API
308# define XXH_NAMESPACE /* YOUR NAME HERE */
309# undef XXH_NAMESPACE
310#endif
311
312#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
313 && !defined(XXH_INLINE_ALL_31684351384)
314 /* this section should be traversed only once */
315# define XXH_INLINE_ALL_31684351384
316 /* give access to the advanced API, required to compile implementations */
317# undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */
318# define XXH_STATIC_LINKING_ONLY
319 /* make all functions private */
320# undef XXH_PUBLIC_API
321# if defined(__GNUC__)
322# define XXH_PUBLIC_API static __inline __attribute__((unused))
323# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
324# define XXH_PUBLIC_API static inline
325# elif defined(_MSC_VER)
326# define XXH_PUBLIC_API static __inline
327# else
328 /* note: this version may generate warnings for unused static functions */
329# define XXH_PUBLIC_API static
330# endif
331
332 /*
333 * This part deals with the special case where a unit wants to inline xxHash,
334 * but "xxhash.h" has previously been included without XXH_INLINE_ALL,
335 * such as part of some previously included *.h header file.
336 * Without further action, the new include would just be ignored,
337 * and functions would effectively _not_ be inlined (silent failure).
338 * The following macros solve this situation by prefixing all inlined names,
339 * avoiding naming collision with previous inclusions.
340 */
341 /* Before that, we unconditionally #undef all symbols,
342 * in case they were already defined with XXH_NAMESPACE.
343 * They will then be redefined for XXH_INLINE_ALL
344 */
345# undef XXH_versionNumber
346 /* XXH32 */
347# undef XXH32
348# undef XXH32_createState
349# undef XXH32_freeState
350# undef XXH32_reset
351# undef XXH32_update
352# undef XXH32_digest
353# undef XXH32_copyState
354# undef XXH32_canonicalFromHash
355# undef XXH32_hashFromCanonical
356 /* XXH64 */
357# undef XXH64
358# undef XXH64_createState
359# undef XXH64_freeState
360# undef XXH64_reset
361# undef XXH64_update
362# undef XXH64_digest
363# undef XXH64_copyState
364# undef XXH64_canonicalFromHash
365# undef XXH64_hashFromCanonical
366 /* XXH3_64bits */
367# undef XXH3_64bits
368# undef XXH3_64bits_withSecret
369# undef XXH3_64bits_withSeed
370# undef XXH3_64bits_withSecretandSeed
371# undef XXH3_createState
372# undef XXH3_freeState
373# undef XXH3_copyState
374# undef XXH3_64bits_reset
375# undef XXH3_64bits_reset_withSeed
376# undef XXH3_64bits_reset_withSecret
377# undef XXH3_64bits_update
378# undef XXH3_64bits_digest
379# undef XXH3_generateSecret
380 /* XXH3_128bits */
381# undef XXH128
382# undef XXH3_128bits
383# undef XXH3_128bits_withSeed
384# undef XXH3_128bits_withSecret
385# undef XXH3_128bits_reset
386# undef XXH3_128bits_reset_withSeed
387# undef XXH3_128bits_reset_withSecret
388# undef XXH3_128bits_reset_withSecretandSeed
389# undef XXH3_128bits_update
390# undef XXH3_128bits_digest
391# undef XXH128_isEqual
392# undef XXH128_cmp
393# undef XXH128_canonicalFromHash
394# undef XXH128_hashFromCanonical
395 /* Finally, free the namespace itself */
396# undef XXH_NAMESPACE
397
398 /* employ the namespace for XXH_INLINE_ALL */
399# define XXH_NAMESPACE XXH_INLINE_
400 /*
401 * Some identifiers (enums, type names) are not symbols,
402 * but they must nonetheless be renamed to avoid redeclaration.
403 * Alternative solution: do not redeclare them.
404 * However, this requires some #ifdefs, and has a more dispersed impact.
405 * Meanwhile, renaming can be achieved in a single place.
406 */
407# define XXH_IPREF(Id) XXH_NAMESPACE ## Id
408# define XXH_OK XXH_IPREF(XXH_OK)
409# define XXH_ERROR XXH_IPREF(XXH_ERROR)
410# define XXH_errorcode XXH_IPREF(XXH_errorcode)
411# define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t)
412# define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t)
413# define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
414# define XXH32_state_s XXH_IPREF(XXH32_state_s)
415# define XXH32_state_t XXH_IPREF(XXH32_state_t)
416# define XXH64_state_s XXH_IPREF(XXH64_state_s)
417# define XXH64_state_t XXH_IPREF(XXH64_state_t)
418# define XXH3_state_s XXH_IPREF(XXH3_state_s)
419# define XXH3_state_t XXH_IPREF(XXH3_state_t)
420# define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
421 /* Ensure the header is parsed again, even if it was previously included */
422# undef XXHASH_H_5627135585666179
423# undef XXHASH_H_STATIC_13879238742
424#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
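/* Illustrative usage sketch (not part of the original header): how a translation unit would
 * typically request the inlined, private build handled above. The macro must be defined before
 * the first inclusion of "xxhash.h" in that unit; the plain names keep working because they are
 * remapped onto the XXH_INLINE_ prefixed symbols.
 *
 *     #define XXH_INLINE_ALL          // consumer-side define: inline everything, keep symbols private
 *     #include "xxhash.h"
 *
 *     XXH32_hash_t hash_example(const void* p, size_t n) { return XXH32(p, n, 0); }
 */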
425
426/* ****************************************************************
427 * Stable API
428 *****************************************************************/
429#ifndef XXHASH_H_5627135585666179
430#define XXHASH_H_5627135585666179 1
431
433#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
434# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
435# ifdef XXH_EXPORT
436# define XXH_PUBLIC_API __declspec(dllexport)
437# elif XXH_IMPORT
438# define XXH_PUBLIC_API __declspec(dllimport)
439# endif
440# else
441# define XXH_PUBLIC_API /* do nothing */
442# endif
443#endif
444
445#ifdef XXH_NAMESPACE
446# define XXH_CAT(A,B) A##B
447# define XXH_NAME2(A,B) XXH_CAT(A,B)
448# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
449/* XXH32 */
450# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
451# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
452# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
453# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
454# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
455# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
456# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
457# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
458# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
459/* XXH64 */
460# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
461# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
462# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
463# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
464# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
465# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
466# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
467# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
468# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
469/* XXH3_64bits */
470# define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
471# define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
472# define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
473# define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
474# define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
475# define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
476# define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
477# define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
478# define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
479# define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
480# define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
481# define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
482# define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
483# define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
484# define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
485/* XXH3_128bits */
486# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
487# define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
488# define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
489# define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
490# define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
491# define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
492# define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
493# define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
494# define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
495# define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
496# define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
497# define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
498# define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
499# define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
500# define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
501#endif
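/* Illustrative note (not part of the original header): with the local adaptation at the top of
 * this file (XXH_NAMESPACE defined as ZSTD_), every public symbol is emitted with that prefix
 * while call sites keep using the plain names, e.g.:
 *
 *     XXH64_hash_t h = XXH64(buf, size, 0);   // actually resolves to ZSTD_XXH64()
 *
 * This is intended to let Zstandard embed xxHash without clashing with another copy of the
 * library linked into the same binary.
 */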
502
503
504/* *************************************
505* Compiler specifics
506***************************************/
507
508/* specific declaration modes for Windows */
509#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
510# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
511# ifdef XXH_EXPORT
512# define XXH_PUBLIC_API __declspec(dllexport)
513# elif XXH_IMPORT
514# define XXH_PUBLIC_API __declspec(dllimport)
515# endif
516# else
517# define XXH_PUBLIC_API /* do nothing */
518# endif
519#endif
520
521#if defined (__GNUC__)
522# define XXH_CONSTF __attribute__((const))
523# define XXH_PUREF __attribute__((pure))
524# define XXH_MALLOCF __attribute__((malloc))
525#else
526# define XXH_CONSTF /* disable */
527# define XXH_PUREF
528# define XXH_MALLOCF
529#endif
530
531/* *************************************
532* Version
533***************************************/
534#define XXH_VERSION_MAJOR 0
535#define XXH_VERSION_MINOR 8
536#define XXH_VERSION_RELEASE 2
538#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
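/* Worked example (illustrative, not part of the original header): with the values above,
 * XXH_VERSION_NUMBER evaluates to 0*100*100 + 8*100 + 2 == 802, i.e. version 0.8.2. */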
539
549
550
551/* ****************************
552* Common basic types
553******************************/
554#include <stddef.h> /* size_t */
558typedef enum {
559 XXH_OK = 0,
560 XXH_ERROR
561} XXH_errorcode;
562
563
564/*-**********************************************************************
565* 32-bit hash
566************************************************************************/
567#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
573typedef uint32_t XXH32_hash_t;
574
575#elif !defined (__VMS) \
576 && (defined (__cplusplus) \
577 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
578# ifdef _AIX
579# include <inttypes.h>
580# else
581# include <stdint.h>
582# endif
583 typedef uint32_t XXH32_hash_t;
584
585#else
586# include <limits.h>
587# if UINT_MAX == 0xFFFFFFFFUL
588 typedef unsigned int XXH32_hash_t;
589# elif ULONG_MAX == 0xFFFFFFFFUL
590 typedef unsigned long XXH32_hash_t;
591# else
592# error "unsupported platform: need a 32-bit type"
593# endif
594#endif
595
629XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);
630
631#ifndef XXH_NO_STREAM
638typedef struct XXH32_state_s XXH32_state_t;
639
669
685
705XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
706
721XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
722#endif /* !XXH_NO_STREAM */
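/* Illustrative streaming sketch (not part of the original header), assuming the usual
 * create/reset/update/digest/free cycle of the XXH32 streaming API:
 *
 *     XXH32_state_t* const st = XXH32_createState();
 *     if (st != NULL && XXH32_reset(st, 0) == XXH_OK) {      // seed 0
 *         XXH32_update(st, chunk1, chunk1Size);              // feed data in any number of chunks
 *         XXH32_update(st, chunk2, chunk2Size);
 *         {   XXH32_hash_t const h = XXH32_digest(st);       // same result as one-shot XXH32()
 *             (void)h;
 *         }
 *     }
 *     XXH32_freeState(st);
 *
 * chunk1/chunk2 and their sizes are hypothetical caller-provided buffers.
 */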
723
724/******* Canonical representation *******/
725
729typedef struct {
730 unsigned char digest[4];
731} XXH32_canonical_t;
732
745
759
760
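/* Illustrative sketch (not part of the original header): the canonical form is a big-endian
 * byte image of the hash, suitable for storage or transmission independently of host endianness:
 *
 *     XXH32_canonical_t c;
 *     XXH32_canonicalFromHash(&c, XXH32(data, size, 0));     // serialize
 *     XXH32_hash_t const h = XXH32_hashFromCanonical(&c);    // deserialize, identical value
 *
 * data/size are hypothetical caller-provided values; the two conversion helpers are defined
 * later in this file.
 */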
762#ifdef __has_attribute
763# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
764#else
765# define XXH_HAS_ATTRIBUTE(x) 0
766#endif
770/*
771 * C23 __STDC_VERSION__ number hasn't been specified yet. For now
772 * leave as `201711L` (C17 + 1).
773 * TODO: Update to the correct value when it's been specified.
774 */
775#define XXH_C23_VN 201711L
779/* C-language Attributes are added in C23. */
780#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) && defined(__has_c_attribute)
781# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
782#else
783# define XXH_HAS_C_ATTRIBUTE(x) 0
784#endif
788#if defined(__cplusplus) && defined(__has_cpp_attribute)
789# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
790#else
791# define XXH_HAS_CPP_ATTRIBUTE(x) 0
792#endif
796/*
797 * Define XXH_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute
798 * introduced in C++17 and C23.
799 * CPP17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
800 * C23 : https://en.cppreference.com/w/c/language/attributes/fallthrough
801 */
802#if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough)
803# define XXH_FALLTHROUGH [[fallthrough]]
804#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
805# define XXH_FALLTHROUGH __attribute__ ((__fallthrough__))
806#else
807# define XXH_FALLTHROUGH /* fallthrough */
808#endif
812/*
813 * Define XXH_NOESCAPE for annotated pointers in public API.
814 * https://clang.llvm.org/docs/AttributeReference.html#noescape
815 * As of writing this, only supported by clang.
816 */
817#if XXH_HAS_ATTRIBUTE(noescape)
818# define XXH_NOESCAPE __attribute__((noescape))
819#else
820# define XXH_NOESCAPE
821#endif
831#ifndef XXH_NO_LONG_LONG
832/*-**********************************************************************
833* 64-bit hash
834************************************************************************/
835#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
841typedef uint64_t XXH64_hash_t;
842#elif !defined (__VMS) \
843 && (defined (__cplusplus) \
844 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
845# ifdef _AIX
846# include <inttypes.h>
847# else
848# include <stdint.h>
849# endif
850 typedef uint64_t XXH64_hash_t;
851#else
852# include <limits.h>
853# if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
854 /* LP64 ABI says uint64_t is unsigned long */
855 typedef unsigned long XXH64_hash_t;
856# else
857 /* the following type must have a width of 64-bit */
858 typedef unsigned long long XXH64_hash_t;
859# endif
860#endif
861
892XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);
893
894/******* Streaming *******/
895#ifndef XXH_NO_STREAM
901typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
902
912
923
932XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dst_state, const XXH64_state_t* src_state);
933
949
969XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH_NOESCAPE XXH64_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
970
986#endif /* !XXH_NO_STREAM */
987/******* Canonical representation *******/
988
992typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t;
993
1006
1020
1021#ifndef XXH_NO_XXH3
1022
1077/*-**********************************************************************
1078* XXH3 64-bit variant
1079************************************************************************/
1080
1103XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length);
1104
1129XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);
1130
1138#define XXH3_SECRET_SIZE_MIN 136
1139
1172XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
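/* Illustrative sketch (not part of the original header): the three one-shot XXH3_64bits entry
 * points declared above. Note that the local Zstandard adaptation defines XXH_NO_XXH3, so this
 * whole section is compiled out in that configuration.
 *
 *     XXH64_hash_t h0 = XXH3_64bits(data, size);                     // default secret
 *     XXH64_hash_t h1 = XXH3_64bits_withSeed(data, size, 1234);      // secret derived from a 64-bit seed
 *     XXH64_hash_t h2 = XXH3_64bits_withSecret(data, size,
 *                               secret, secretSize);                 // secretSize >= XXH3_SECRET_SIZE_MIN
 *
 * data/size and secret/secretSize are hypothetical caller-provided buffers.
 */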
1173
1174
1175/******* Streaming *******/
1176#ifndef XXH_NO_STREAM
1177/*
1178 * Streaming requires state maintenance.
1179 * This operation costs memory and CPU.
1180 * As a consequence, streaming is slower than one-shot hashing.
1181 * For better performance, prefer one-shot functions whenever applicable.
1182 */
1183
1189typedef struct XXH3_state_s XXH3_state_t;
1190XXH_PUBLIC_API XXH_MALLOCF XXH3_state_t* XXH3_createState(void);
1191XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
1192
1201XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state);
1202
1220XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
1221
1240XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
1241
1264XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
1265
1285XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
1286
1301XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
1302#endif /* !XXH_NO_STREAM */
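/* Illustrative streaming sketch (not part of the original header), mirroring the XXH32/XXH64
 * streaming pattern with the XXH3 state declared above:
 *
 *     XXH3_state_t* const st = XXH3_createState();
 *     if (st != NULL && XXH3_64bits_reset_withSeed(st, 42) == XXH_OK) {
 *         XXH3_64bits_update(st, chunk, chunkSize);              // repeat once per chunk
 *         {   XXH64_hash_t const h = XXH3_64bits_digest(st);     // should equal XXH3_64bits_withSeed()
 *             (void)h;                                           // over the whole input
 *         }
 *     }
 *     XXH3_freeState(st);
 *
 * chunk/chunkSize are hypothetical caller-provided values.
 */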
1303
1304/* note : canonical representation of XXH3 is the same as XXH64
1305 * since they both produce XXH64_hash_t values */
1306
1307
1308/*-**********************************************************************
1309* XXH3 128-bit variant
1310************************************************************************/
1311
1318typedef struct {
1319 XXH64_hash_t low64;
1320 XXH64_hash_t high64;
1321} XXH128_hash_t;
1322
1341XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* data, size_t len);
1361XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSeed(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
1389XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
1390
1391/******* Streaming *******/
1392#ifndef XXH_NO_STREAM
1393/*
1394 * Streaming requires state maintenance.
1395 * This operation costs memory and CPU.
1396 * As a consequence, streaming is slower than one-shot hashing.
1397 * For better performance, prefer one-shot functions whenever applicable.
1398 *
1399 * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
1400 * Use already declared XXH3_createState() and XXH3_freeState().
1401 *
1402 * All reset and streaming functions have the same meaning as their 64-bit counterparts.
1403 */
1404
1421XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
1422
1440XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
1461XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
1462
1484XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
1485
1501XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
1502#endif /* !XXH_NO_STREAM */
1503
1504/* The following helper functions make it possible to compare XXH128_hash_t values.
1505 * Since XXH128_hash_t is a structure, this capability is not offered by the language.
1506 * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */
1507
1517XXH_PUBLIC_API XXH_PUREF int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
1518
1531XXH_PUBLIC_API XXH_PUREF int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2);
1532
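/* Illustrative sketch (not part of the original header): comparing 128-bit results, since C
 * offers no built-in equality or ordering for struct types:
 *
 *     XXH128_hash_t const a = XXH3_128bits(data1, size1);
 *     XXH128_hash_t const b = XXH3_128bits(data2, size2);
 *     int const same  = XXH128_isEqual(a, b);                 // non-zero when equal
 *     int const order = XXH128_cmp(&a, &b);                   // <0, 0 or >0, memcmp-style
 *
 * data1/data2 and their sizes are hypothetical caller-provided buffers.
 */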
1533
1534/******* Canonical representation *******/
1535typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
1536
1537
1548XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash);
1549
1561XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src);
1562
1563
1564#endif /* !XXH_NO_XXH3 */
1565#endif /* XXH_NO_LONG_LONG */
1566
1570#endif /* XXHASH_H_5627135585666179 */
1571
1572
1573
1574#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
1575#define XXHASH_H_STATIC_13879238742
1576/* ****************************************************************************
1577 * This section contains declarations which are not guaranteed to remain stable.
1578 * They may change in future versions, becoming incompatible with a different
1579 * version of the library.
1580 * These declarations should only be used with static linking.
1581 * Never use them in association with dynamic linking!
1582 ***************************************************************************** */
1583
1584/*
1585 * These definitions are only present to allow static allocation
1586 * of XXH states, on stack or in a struct, for example.
1587 * Never **ever** access their members directly.
1588 */
1589
1602struct XXH32_state_s {
1603 XXH32_hash_t total_len_32;
1604 XXH32_hash_t large_len;
1605 XXH32_hash_t v[4];
1606 XXH32_hash_t mem32[4];
1607 XXH32_hash_t memsize;
1608 XXH32_hash_t reserved;
1609}; /* typedef'd to XXH32_state_t */
1610
1611
1612#ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */
1613
1626struct XXH64_state_s {
1627 XXH64_hash_t total_len;
1628 XXH64_hash_t v[4];
1629 XXH64_hash_t mem64[4];
1630 XXH32_hash_t memsize;
1631 XXH32_hash_t reserved32;
1632 XXH64_hash_t reserved64;
1633}; /* typedef'd to XXH64_state_t */
1634
1635#ifndef XXH_NO_XXH3
1636
1637#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */
1638# include <stdalign.h>
1639# define XXH_ALIGN(n) alignas(n)
1640#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
1641/* In C++ alignas() is a keyword */
1642# define XXH_ALIGN(n) alignas(n)
1643#elif defined(__GNUC__)
1644# define XXH_ALIGN(n) __attribute__ ((aligned(n)))
1645#elif defined(_MSC_VER)
1646# define XXH_ALIGN(n) __declspec(align(n))
1647#else
1648# define XXH_ALIGN(n) /* disabled */
1649#endif
1650
1651/* Old GCC versions only accept the attribute after the type in structures. */
1652#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) /* C11+ */ \
1653 && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \
1654 && defined(__GNUC__)
1655# define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
1656#else
1657# define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
1658#endif
1659
1667#define XXH3_INTERNALBUFFER_SIZE 256
1668
1677#define XXH3_SECRET_DEFAULT_SIZE 192
1678
1701struct XXH3_state_s {
1702 XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
1704 XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
1706 XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
1708 XXH32_hash_t bufferedSize;
1710 XXH32_hash_t useSeed;
1712 size_t nbStripesSoFar;
1714 XXH64_hash_t totalLen;
1716 size_t nbStripesPerBlock;
1718 size_t secretLimit;
1720 XXH64_hash_t seed;
1722 XXH64_hash_t reserved64;
1724 const unsigned char* extSecret;
1727 /* note: there may be some padding at the end due to alignment on 64 bytes */
1728}; /* typedef'd to XXH3_state_t */
1729
1730#undef XXH_ALIGN_MEMBER
1731
1743#define XXH3_INITSTATE(XXH3_state_ptr) \
1744 do { \
1745 XXH3_state_t* tmp_xxh3_state_ptr = (XXH3_state_ptr); \
1746 tmp_xxh3_state_ptr->seed = 0; \
1747 tmp_xxh3_state_ptr->extSecret = NULL; \
1748 } while(0)
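/* Illustrative sketch (not part of the original header): statically allocating a state on the
 * stack with the definition above, then initializing it before its first reset:
 *
 *     XXH3_state_t st;                        // no XXH3_createState(), no heap
 *     XXH3_INITSTATE(&st);                    // clear seed/extSecret before first use
 *     XXH3_64bits_reset(&st);
 *     XXH3_64bits_update(&st, chunk, chunkSize);
 *     // ... XXH3_64bits_digest(&st) when done; nothing to free.
 *
 * chunk/chunkSize are hypothetical caller-provided values.
 */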
1749
1750
1767XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
1768
1769
1770/* === Experimental API === */
1771/* Symbols defined below must be considered tied to a specific library version. */
1772
1831XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize);
1832
1871XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed);
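/* Illustrative sketch (not part of the original header): deriving a custom secret and feeding it
 * back into the _withSecret() variants declared earlier. The destination buffer is assumed here
 * to be XXH3_SECRET_DEFAULT_SIZE bytes, which is the size this generator fills:
 *
 *     unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
 *     XXH3_generateSecret_fromSeed(secret, 0x9E3779B1U);          // fill from a 64-bit seed
 *     {   XXH64_hash_t const h = XXH3_64bits_withSecret(data, size, secret, sizeof(secret));
 *         (void)h;
 *     }
 *
 * data/size are hypothetical and the seed value is arbitrary; XXH3_SECRET_DEFAULT_SIZE (192)
 * comfortably exceeds XXH3_SECRET_SIZE_MIN (136).
 */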
1872
1907XXH_PUBLIC_API XXH_PUREF XXH64_hash_t
1908XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* data, size_t len,
1909 XXH_NOESCAPE const void* secret, size_t secretSize,
1910 XXH64_hash_t seed);
1925XXH_PUBLIC_API XXH_PUREF XXH128_hash_t
1926XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length,
1927 XXH_NOESCAPE const void* secret, size_t secretSize,
1928 XXH64_hash_t seed64);
1929#ifndef XXH_NO_STREAM
1943XXH_PUBLIC_API XXH_errorcode
1944XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
1945 XXH_NOESCAPE const void* secret, size_t secretSize,
1946 XXH64_hash_t seed64);
1960XXH_PUBLIC_API XXH_errorcode
1961XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
1962 XXH_NOESCAPE const void* secret, size_t secretSize,
1963 XXH64_hash_t seed64);
1964#endif /* !XXH_NO_STREAM */
1965
1966#endif /* !XXH_NO_XXH3 */
1967#endif /* XXH_NO_LONG_LONG */
1968#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
1969# define XXH_IMPLEMENTATION
1970#endif
1971
1972#endif /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */
1973
1974
1975/* ======================================================================== */
1976/* ======================================================================== */
1977/* ======================================================================== */
1978
1979
1980/*-**********************************************************************
1981 * xxHash implementation
1982 *-**********************************************************************
1983 * xxHash's implementation used to be hosted inside xxhash.c.
1984 *
1985 * However, inlining requires the implementation to be visible to the compiler,
1986 * hence it must be included alongside the header.
1987 * Previously, implementation was hosted inside xxhash.c,
1988 * which was then #included when inlining was activated.
1989 * This construction created issues with a few build and install systems,
1990 * as it required xxhash.c to be stored in the /include directory.
1991 *
1992 * xxHash implementation is now directly integrated within xxhash.h.
1993 * As a consequence, xxhash.c is no longer needed in /include.
1994 *
1995 * xxhash.c is still available and is still useful.
1996 * In a "normal" setup, when xxhash is not inlined,
1997 * xxhash.h only exposes the prototypes and public symbols,
1998 * while xxhash.c can be built into an object file xxhash.o
1999 * which can then be linked into the final binary.
2000 ************************************************************************/
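/* Illustrative note (not part of the original header): a project that wants a single translation
 * unit to carry the implementation, without inlining everything, can do:
 *
 *     #define XXH_STATIC_LINKING_ONLY   // optional: also expose the state definitions
 *     #define XXH_IMPLEMENTATION        // emit the function bodies in this unit only
 *     #include "xxhash.h"
 *
 * Every other unit simply includes "xxhash.h" with no extra defines, matching the xxhash.c-based
 * setup described above.
 */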
2001
2002#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
2003 || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
2004# define XXH_IMPLEM_13a8737387
2005
2006/* *************************************
2007* Tuning parameters
2008***************************************/
2009
2016#ifdef XXH_DOXYGEN
2022# define XXH_NO_LONG_LONG
2023# undef XXH_NO_LONG_LONG /* don't actually */
2074# define XXH_FORCE_MEMORY_ACCESS 0
2075
2102# define XXH_SIZE_OPT 0
2103
2132# define XXH_FORCE_ALIGN_CHECK 0
2133
2154# define XXH_NO_INLINE_HINTS 0
2155
2171# define XXH3_INLINE_SECRET 0
2172
2183# define XXH32_ENDJMP 0
2184
2192# define XXH_OLD_NAMES
2193# undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
2194
2203# define XXH_NO_STREAM
2204# undef XXH_NO_STREAM /* don't actually */
2205#endif /* XXH_DOXYGEN */
2210#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
2211    /* prefer __packed__ structures (method 1) for GCC;
2212     * on < ARMv7 with unaligned access (e.g. Raspbian armhf), __packed__ still compiles to byte shifting,
2213     * so we fall back to memcpy, which for some reason does generate unaligned loads. */
2214# if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && defined(__ARM_FEATURE_UNALIGNED))
2215# define XXH_FORCE_MEMORY_ACCESS 1
2216# endif
2217#endif
2218
2219#ifndef XXH_SIZE_OPT
2220 /* default to 1 for -Os or -Oz */
2221# if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__)
2222# define XXH_SIZE_OPT 1
2223# else
2224# define XXH_SIZE_OPT 0
2225# endif
2226#endif
2227
2228#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
2229 /* don't check on sizeopt, x86, aarch64, or arm when unaligned access is available */
2230# if XXH_SIZE_OPT >= 1 || \
2231 defined(__i386) || defined(__x86_64__) || defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) \
2232 || defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) || defined(_M_ARM) /* visual */
2233# define XXH_FORCE_ALIGN_CHECK 0
2234# else
2235# define XXH_FORCE_ALIGN_CHECK 1
2236# endif
2237#endif
2238
2239#ifndef XXH_NO_INLINE_HINTS
2240# if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__) /* -O0, -fno-inline */
2241# define XXH_NO_INLINE_HINTS 1
2242# else
2243# define XXH_NO_INLINE_HINTS 0
2244# endif
2245#endif
2246
2247#ifndef XXH3_INLINE_SECRET
2248# if (defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12) \
2249 || !defined(XXH_INLINE_ALL)
2250# define XXH3_INLINE_SECRET 0
2251# else
2252# define XXH3_INLINE_SECRET 1
2253# endif
2254#endif
2255
2256#ifndef XXH32_ENDJMP
2257/* generally preferable for performance */
2258# define XXH32_ENDJMP 0
2259#endif
2260
2267/* *************************************
2268* Includes & Memory related functions
2269***************************************/
2270#if defined(XXH_NO_STREAM)
2271/* nothing */
2272#elif defined(XXH_NO_STDLIB)
2273
2274/* When requesting to disable any mention of stdlib,
2275 * the library loses the ability to invoke malloc() / free().
2276 * In practice, it means that functions like `XXH*_createState()`
2277 * will always fail, and return NULL.
2278 * This flag is useful in situations where
2279 * xxhash.h is integrated into some kernel, embedded or limited environment
2280 * without access to dynamic allocation.
2281 */
2282
2283static XXH_CONSTF void* XXH_malloc(size_t s) { (void)s; return NULL; }
2284static void XXH_free(void* p) { (void)p; }
2285
2286#else
2287
2288/*
2289 * Modify the local functions below should you wish to use
2290 * different memory routines for malloc() and free()
2291 */
2292#include <stdlib.h>
2293
2298static XXH_MALLOCF void* XXH_malloc(size_t s) { return malloc(s); }
2299
2304static void XXH_free(void* p) { free(p); }
2305
2306#endif /* XXH_NO_STDLIB */
2307
2308#include <string.h>
2309
2314static void* XXH_memcpy(void* dest, const void* src, size_t size)
2315{
2316 return memcpy(dest,src,size);
2317}
2318
2319#include <limits.h> /* ULLONG_MAX */
2320
2321
2322/* *************************************
2323* Compiler Specific Options
2324***************************************/
2325#ifdef _MSC_VER /* Visual Studio warning fix */
2326# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
2327#endif
2328
2329#if XXH_NO_INLINE_HINTS /* disable inlining hints */
2330# if defined(__GNUC__) || defined(__clang__)
2331# define XXH_FORCE_INLINE static __attribute__((unused))
2332# else
2333# define XXH_FORCE_INLINE static
2334# endif
2335# define XXH_NO_INLINE static
2336/* enable inlining hints */
2337#elif defined(__GNUC__) || defined(__clang__)
2338# define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline))
2339# define XXH_NO_INLINE static __attribute__((noinline))
2340#elif defined(_MSC_VER) /* Visual Studio */
2341# define XXH_FORCE_INLINE static __forceinline
2342# define XXH_NO_INLINE static __declspec(noinline)
2343#elif defined (__cplusplus) \
2344 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */
2345# define XXH_FORCE_INLINE static inline
2346# define XXH_NO_INLINE static
2347#else
2348# define XXH_FORCE_INLINE static
2349# define XXH_NO_INLINE static
2350#endif
2351
2352#if XXH3_INLINE_SECRET
2353# define XXH3_WITH_SECRET_INLINE XXH_FORCE_INLINE
2354#else
2355# define XXH3_WITH_SECRET_INLINE XXH_NO_INLINE
2356#endif
2357
2358
2359/* *************************************
2360* Debug
2361***************************************/
2370#ifndef XXH_DEBUGLEVEL
2371# ifdef DEBUGLEVEL /* backwards compat */
2372# define XXH_DEBUGLEVEL DEBUGLEVEL
2373# else
2374# define XXH_DEBUGLEVEL 0
2375# endif
2376#endif
2377
2378#if (XXH_DEBUGLEVEL>=1)
2379# include <assert.h> /* note: can still be disabled with NDEBUG */
2380# define XXH_ASSERT(c) assert(c)
2381#else
2382# if defined(__INTEL_COMPILER)
2383# define XXH_ASSERT(c) XXH_ASSUME((unsigned char) (c))
2384# else
2385# define XXH_ASSERT(c) XXH_ASSUME(c)
2386# endif
2387#endif
2388
2389/* note: use after variable declarations */
2390#ifndef XXH_STATIC_ASSERT
2391# if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */
2392# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { _Static_assert((c),m); } while(0)
2393# elif defined(__cplusplus) && (__cplusplus >= 201103L) /* C++11 */
2394# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
2395# else
2396# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
2397# endif
2398# define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
2399#endif
2400
2417#if defined(__GNUC__) || defined(__clang__)
2418# define XXH_COMPILER_GUARD(var) __asm__("" : "+r" (var))
2419#else
2420# define XXH_COMPILER_GUARD(var) ((void)0)
2421#endif
2422
2423/* Specifically for NEON vectors which use the "w" constraint, on
2424 * Clang. */
2425#if defined(__clang__) && defined(__ARM_ARCH) && !defined(__wasm__)
2426# define XXH_COMPILER_GUARD_CLANG_NEON(var) __asm__("" : "+w" (var))
2427#else
2428# define XXH_COMPILER_GUARD_CLANG_NEON(var) ((void)0)
2429#endif
2430
2431/* *************************************
2432* Basic Types
2433***************************************/
2434#if !defined (__VMS) \
2435 && (defined (__cplusplus) \
2436 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
2437# ifdef _AIX
2438# include <inttypes.h>
2439# else
2440# include <stdint.h>
2441# endif
2442 typedef uint8_t xxh_u8;
2443#else
2444 typedef unsigned char xxh_u8;
2445#endif
2446typedef XXH32_hash_t xxh_u32;
2447
2448#ifdef XXH_OLD_NAMES
2449# warning "XXH_OLD_NAMES is planned to be removed starting v0.9. If the program depends on it, consider moving away from it by employing newer type names directly"
2450# define BYTE xxh_u8
2451# define U8 xxh_u8
2452# define U32 xxh_u32
2453#endif
2454
2455/* *** Memory access *** */
2456
2507#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
2508/*
2509 * Manual byteshift. Best for old compilers which don't inline memcpy.
2510 * We actually directly use XXH_readLE32 and XXH_readBE32.
2511 */
2512#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
2513
2514/*
2515 * Force direct memory access. Only works on CPU which support unaligned memory
2516 * access in hardware.
2517 */
2518static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }
2519
2520#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
2521
2522/*
2523 * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
2524 * documentation claimed that it only increased the alignment, but actually it
2525 * can decrease it on gcc, clang, and icc:
2526 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
2527 * https://gcc.godbolt.org/z/xYez1j67Y.
2528 */
2529#ifdef XXH_OLD_NAMES
2530typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
2531#endif
2532static xxh_u32 XXH_read32(const void* ptr)
2533{
2534 typedef __attribute__((aligned(1))) xxh_u32 xxh_unalign32;
2535 return *((const xxh_unalign32*)ptr);
2536}
2537
2538#else
2539
2540/*
2541 * Portable and safe solution. Generally efficient.
2542 * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
2543 */
2544static xxh_u32 XXH_read32(const void* memPtr)
2545{
2546 xxh_u32 val;
2547 XXH_memcpy(&val, memPtr, sizeof(val));
2548 return val;
2549}
2550
2551#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
2552
2553
2554/* *** Endianness *** */
2555
2572#ifndef XXH_CPU_LITTLE_ENDIAN
2573/*
2574 * Try to detect endianness automatically, to avoid the nonstandard behavior
2575 * in `XXH_isLittleEndian()`
2576 */
2577# if defined(_WIN32) /* Windows is always little endian */ \
2578 || defined(__LITTLE_ENDIAN__) \
2579 || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
2580# define XXH_CPU_LITTLE_ENDIAN 1
2581# elif defined(__BIG_ENDIAN__) \
2582 || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
2583# define XXH_CPU_LITTLE_ENDIAN 0
2584# else
2591static int XXH_isLittleEndian(void)
2592{
2593 /*
2594 * Portable and well-defined behavior.
2595 * Don't use static: it is detrimental to performance.
2596 */
2597 const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
2598 return one.c[0];
2599}
2600# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
2601# endif
2602#endif
2603
2604
2605
2606
2607/* ****************************************
2608* Compiler-specific Functions and Macros
2609******************************************/
2610#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
2611
2612#ifdef __has_builtin
2613# define XXH_HAS_BUILTIN(x) __has_builtin(x)
2614#else
2615# define XXH_HAS_BUILTIN(x) 0
2616#endif
2617
2618
2619
2620/*
2621 * C23 and future versions have standard "unreachable()".
2622 * Once it has been implemented reliably we can add it as an
2623 * additional case:
2624 *
2625 * ```
2626 * #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN)
2627 * # include <stddef.h>
2628 * # ifdef unreachable
2629 * # define XXH_UNREACHABLE() unreachable()
2630 * # endif
2631 * #endif
2632 * ```
2633 *
2634 * Note C++23 also has std::unreachable() which can be detected
2635 * as follows:
2636 * ```
2637 * #if defined(__cpp_lib_unreachable) && (__cpp_lib_unreachable >= 202202L)
2638 * # include <utility>
2639 * # define XXH_UNREACHABLE() std::unreachable()
2640 * #endif
2641 * ```
2642 * NB: `__cpp_lib_unreachable` is defined in the `<version>` header.
2643 * We don't use that as including `<utility>` in `extern "C"` blocks
2644 * doesn't work on GCC12
2645 */
2646
2647#if XXH_HAS_BUILTIN(__builtin_unreachable)
2648# define XXH_UNREACHABLE() __builtin_unreachable()
2649
2650#elif defined(_MSC_VER)
2651# define XXH_UNREACHABLE() __assume(0)
2652
2653#else
2654# define XXH_UNREACHABLE()
2655#endif
2656
2657#if XXH_HAS_BUILTIN(__builtin_assume)
2658# define XXH_ASSUME(c) __builtin_assume(c)
2659#else
2660# define XXH_ASSUME(c) if (!(c)) { XXH_UNREACHABLE(); }
2661#endif
2662
2676#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
2677 && XXH_HAS_BUILTIN(__builtin_rotateleft64)
2678# define XXH_rotl32 __builtin_rotateleft32
2679# define XXH_rotl64 __builtin_rotateleft64
2680/* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */
2681#elif defined(_MSC_VER)
2682# define XXH_rotl32(x,r) _rotl(x,r)
2683# define XXH_rotl64(x,r) _rotl64(x,r)
2684#else
2685# define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
2686# define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
2687#endif
2688
2697#if defined(_MSC_VER) /* Visual Studio */
2698# define XXH_swap32 _byteswap_ulong
2699#elif XXH_GCC_VERSION >= 403
2700# define XXH_swap32 __builtin_bswap32
2701#else
2702static xxh_u32 XXH_swap32 (xxh_u32 x)
2703{
2704 return ((x << 24) & 0xff000000 ) |
2705 ((x << 8) & 0x00ff0000 ) |
2706 ((x >> 8) & 0x0000ff00 ) |
2707 ((x >> 24) & 0x000000ff );
2708}
2709#endif
2710
2711
2712/* ***************************
2713* Memory reads
2714*****************************/
2715
2720typedef enum {
2721 XXH_aligned,
2722 XXH_unaligned
2723} XXH_alignment;
2724
2725/*
2726 * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
2727 *
2728 * This is ideal for older compilers which don't inline memcpy.
2729 */
2730#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
2731
2732XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
2733{
2734 const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
2735 return bytePtr[0]
2736 | ((xxh_u32)bytePtr[1] << 8)
2737 | ((xxh_u32)bytePtr[2] << 16)
2738 | ((xxh_u32)bytePtr[3] << 24);
2739}
2740
2741XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
2742{
2743 const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
2744 return bytePtr[3]
2745 | ((xxh_u32)bytePtr[2] << 8)
2746 | ((xxh_u32)bytePtr[1] << 16)
2747 | ((xxh_u32)bytePtr[0] << 24);
2748}
2749
2750#else
2751XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
2752{
2753 return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
2754}
2755
2756static xxh_u32 XXH_readBE32(const void* ptr)
2757{
2758 return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
2759}
2760#endif
2761
2762XXH_FORCE_INLINE xxh_u32
2763XXH_readLE32_align(const void* ptr, XXH_alignment align)
2764{
2765 if (align==XXH_unaligned) {
2766 return XXH_readLE32(ptr);
2767 } else {
2768 return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
2769 }
2770}
2771
2772
2773/* *************************************
2774* Misc
2775***************************************/
2777XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
2778
2779
2780/* *******************************************************************
2781* 32-bit hash functions
2782*********************************************************************/
2791 /* #define instead of static const, to be used as initializers */
2792#define XXH_PRIME32_1 0x9E3779B1U
2793#define XXH_PRIME32_2 0x85EBCA77U
2794#define XXH_PRIME32_3 0xC2B2AE3DU
2795#define XXH_PRIME32_4 0x27D4EB2FU
2796#define XXH_PRIME32_5 0x165667B1U
2798#ifdef XXH_OLD_NAMES
2799# define PRIME32_1 XXH_PRIME32_1
2800# define PRIME32_2 XXH_PRIME32_2
2801# define PRIME32_3 XXH_PRIME32_3
2802# define PRIME32_4 XXH_PRIME32_4
2803# define PRIME32_5 XXH_PRIME32_5
2804#endif
2805
2817static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
2818{
2819 acc += input * XXH_PRIME32_2;
2820 acc = XXH_rotl32(acc, 13);
2821 acc *= XXH_PRIME32_1;
2822#if (defined(__SSE4_1__) || defined(__aarch64__) || defined(__wasm_simd128__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
2823 /*
2824 * UGLY HACK:
2825 * A compiler fence is the only thing that prevents GCC and Clang from
2826 * autovectorizing the XXH32 loop (pragmas and attributes don't work for some
2827 * reason) without globally disabling SSE4.1.
2828 *
2829 * The reason we want to avoid vectorization is because despite working on
2830 * 4 integers at a time, there are multiple factors slowing XXH32 down on
2831 * SSE4:
2832 * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on
2833 * newer chips!) making it slightly slower to multiply four integers at
2834 * once compared to four integers independently. Even when pmulld was
2835 * fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE
2836 * just to multiply unless doing a long operation.
2837 *
2838 * - Four instructions are required to rotate,
2839 * movdqa tmp, v // not required with VEX encoding
2840 * pslld tmp, 13 // tmp <<= 13
2841 * psrld v, 19 // x >>= 19
2842 * por v, tmp // x |= tmp
2843 * compared to one for scalar:
2844 * roll v, 13 // reliably fast across the board
2845 * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason
2846 *
2847 * - Instruction level parallelism is actually more beneficial here because
2848 * the SIMD actually serializes this operation: While v1 is rotating, v2
2849 * can load data, while v3 can multiply. SSE forces them to operate
2850 * together.
2851 *
2852 * This is also enabled on AArch64, as Clang is *very aggressive* in vectorizing
2853 * the loop. NEON is only faster on the A53, and with the newer cores, it is less
2854 * than half the speed.
2855 *
2856 * Additionally, this is used on WASM SIMD128 because it JITs to the same
2857 * SIMD instructions and has the same issue.
2858 */
2859 XXH_COMPILER_GUARD(acc);
2860#endif
2861 return acc;
2862}
2863
2874static xxh_u32 XXH32_avalanche(xxh_u32 hash)
2875{
2876 hash ^= hash >> 15;
2877 hash *= XXH_PRIME32_2;
2878 hash ^= hash >> 13;
2879 hash *= XXH_PRIME32_3;
2880 hash ^= hash >> 16;
2881 return hash;
2882}
2883
2884#define XXH_get32bits(p) XXH_readLE32_align(p, align)
2885
2901static XXH_PUREF xxh_u32
2902XXH32_finalize(xxh_u32 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
2903{
2904#define XXH_PROCESS1 do { \
2905 hash += (*ptr++) * XXH_PRIME32_5; \
2906 hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1; \
2907} while (0)
2908
2909#define XXH_PROCESS4 do { \
2910 hash += XXH_get32bits(ptr) * XXH_PRIME32_3; \
2911 ptr += 4; \
2912 hash = XXH_rotl32(hash, 17) * XXH_PRIME32_4; \
2913} while (0)
2914
2915 if (ptr==NULL) XXH_ASSERT(len == 0);
2916
2917 /* Compact rerolled version; generally faster */
2918 if (!XXH32_ENDJMP) {
2919 len &= 15;
2920 while (len >= 4) {
2921 XXH_PROCESS4;
2922 len -= 4;
2923 }
2924 while (len > 0) {
2925 XXH_PROCESS1;
2926 --len;
2927 }
2928 return XXH32_avalanche(hash);
2929 } else {
2930 switch(len&15) /* or switch(bEnd - p) */ {
2931 case 12: XXH_PROCESS4;
2932 XXH_FALLTHROUGH; /* fallthrough */
2933 case 8: XXH_PROCESS4;
2934 XXH_FALLTHROUGH; /* fallthrough */
2935 case 4: XXH_PROCESS4;
2936 return XXH32_avalanche(hash);
2937
2938 case 13: XXH_PROCESS4;
2939 XXH_FALLTHROUGH; /* fallthrough */
2940 case 9: XXH_PROCESS4;
2941 XXH_FALLTHROUGH; /* fallthrough */
2942 case 5: XXH_PROCESS4;
2943 XXH_PROCESS1;
2944 return XXH32_avalanche(hash);
2945
2946 case 14: XXH_PROCESS4;
2947 XXH_FALLTHROUGH; /* fallthrough */
2948 case 10: XXH_PROCESS4;
2949 XXH_FALLTHROUGH; /* fallthrough */
2950 case 6: XXH_PROCESS4;
2951 XXH_PROCESS1;
2952 XXH_PROCESS1;
2953 return XXH32_avalanche(hash);
2954
2955 case 15: XXH_PROCESS4;
2956 XXH_FALLTHROUGH; /* fallthrough */
2957 case 11: XXH_PROCESS4;
2958 XXH_FALLTHROUGH; /* fallthrough */
2959 case 7: XXH_PROCESS4;
2960 XXH_FALLTHROUGH; /* fallthrough */
2961 case 3: XXH_PROCESS1;
2962 XXH_FALLTHROUGH; /* fallthrough */
2963 case 2: XXH_PROCESS1;
2964 XXH_FALLTHROUGH; /* fallthrough */
2965 case 1: XXH_PROCESS1;
2966 XXH_FALLTHROUGH; /* fallthrough */
2967 case 0: return XXH32_avalanche(hash);
2968 }
2969 XXH_ASSERT(0);
2970 return hash; /* reaching this point is deemed impossible */
2971 }
2972}
2973
2974#ifdef XXH_OLD_NAMES
2975# define PROCESS1 XXH_PROCESS1
2976# define PROCESS4 XXH_PROCESS4
2977#else
2978# undef XXH_PROCESS1
2979# undef XXH_PROCESS4
2980#endif
2981
2990XXH_FORCE_INLINE XXH_PUREF xxh_u32
2991XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
2992{
2993 xxh_u32 h32;
2994
2995 if (input==NULL) XXH_ASSERT(len == 0);
2996
2997 if (len>=16) {
2998 const xxh_u8* const bEnd = input + len;
2999 const xxh_u8* const limit = bEnd - 15;
3000 xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
3001 xxh_u32 v2 = seed + XXH_PRIME32_2;
3002 xxh_u32 v3 = seed + 0;
3003 xxh_u32 v4 = seed - XXH_PRIME32_1;
3004
3005 do {
3006 v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
3007 v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
3008 v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
3009 v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
3010 } while (input < limit);
3011
3012 h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7)
3013 + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
3014 } else {
3015 h32 = seed + XXH_PRIME32_5;
3016 }
3017
3018 h32 += (xxh_u32)len;
3019
3020 return XXH32_finalize(h32, input, len&15, align);
3021}
3022
3024XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
3025{
3026#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
3027    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
3028    XXH32_state_t state;
3029 XXH32_reset(&state, seed);
3030 XXH32_update(&state, (const xxh_u8*)input, len);
3031 return XXH32_digest(&state);
3032#else
3033 if (XXH_FORCE_ALIGN_CHECK) {
3034 if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */
3035 return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
3036 } }
3037
3038 return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
3039#endif
3040}
3041
3042
3043
3044/******* Hash streaming *******/
3045#ifndef XXH_NO_STREAM
3047XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
3048{
3049 return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
3050}
3052XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
3053{
3054 XXH_free(statePtr);
3055 return XXH_OK;
3056}
3057
3059XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
3060{
3061 XXH_memcpy(dstState, srcState, sizeof(*dstState));
3062}
3063
3065XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
3066{
3067 XXH_ASSERT(statePtr != NULL);
3068 memset(statePtr, 0, sizeof(*statePtr));
3069 statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
3070 statePtr->v[1] = seed + XXH_PRIME32_2;
3071 statePtr->v[2] = seed + 0;
3072 statePtr->v[3] = seed - XXH_PRIME32_1;
3073 return XXH_OK;
3074}
3075
3076
3078XXH_PUBLIC_API XXH_errorcode
3079XXH32_update(XXH32_state_t* state, const void* input, size_t len)
3080{
3081 if (input==NULL) {
3082 XXH_ASSERT(len == 0);
3083 return XXH_OK;
3084 }
3085
3086 { const xxh_u8* p = (const xxh_u8*)input;
3087 const xxh_u8* const bEnd = p + len;
3088
3089 state->total_len_32 += (XXH32_hash_t)len;
3090 state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
3091
3092 if (state->memsize + len < 16) { /* fill in tmp buffer */
3093 XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
3094 state->memsize += (XXH32_hash_t)len;
3095 return XXH_OK;
3096 }
3097
3098 if (state->memsize) { /* some data left from previous update */
3099 XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
3100 { const xxh_u32* p32 = state->mem32;
3101 state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++;
3102 state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++;
3103 state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++;
3104 state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32));
3105 }
3106 p += 16-state->memsize;
3107 state->memsize = 0;
3108 }
3109
3110 if (p <= bEnd-16) {
3111 const xxh_u8* const limit = bEnd - 16;
3112
3113 do {
3114 state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4;
3115 state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4;
3116 state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4;
3117 state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4;
3118 } while (p<=limit);
3119
3120 }
3121
3122 if (p < bEnd) {
3123 XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
3124 state->memsize = (unsigned)(bEnd-p);
3125 }
3126 }
3127
3128 return XXH_OK;
3129}
3130
3131
3133XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state)
3134{
3135 xxh_u32 h32;
3136
3137 if (state->large_len) {
3138 h32 = XXH_rotl32(state->v[0], 1)
3139 + XXH_rotl32(state->v[1], 7)
3140 + XXH_rotl32(state->v[2], 12)
3141 + XXH_rotl32(state->v[3], 18);
3142 } else {
3143 h32 = state->v[2] /* == seed */ + XXH_PRIME32_5;
3144 }
3145
3146 h32 += state->total_len_32;
3147
3148 return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
3149}
3150#endif /* !XXH_NO_STREAM */
3151
3152/******* Canonical representation *******/
3153
3155XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
3156{
3157 XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
3158 if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
3159 XXH_memcpy(dst, &hash, sizeof(*dst));
3160}
3162XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
3163{
3164 return XXH_readBE32(src);
3165}
3166
3167
3168#ifndef XXH_NO_LONG_LONG
3169
3170/* *******************************************************************
3171* 64-bit hash functions
3172*********************************************************************/
3178/******* Memory access *******/
3179
3180typedef XXH64_hash_t xxh_u64;
3181
3182#ifdef XXH_OLD_NAMES
3183# define U64 xxh_u64
3184#endif
3185
3186#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
3187/*
3188 * Manual byteshift. Best for old compilers which don't inline memcpy.
3189 * We actually directly use XXH_readLE64 and XXH_readBE64.
3190 */
3191#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
3192
3193/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
3194static xxh_u64 XXH_read64(const void* memPtr)
3195{
3196 return *(const xxh_u64*) memPtr;
3197}
3198
3199#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
3200
3201/*
3202 * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
3203 * documentation claimed that it only increased the alignment, but actually it
3204 * can decrease it on gcc, clang, and icc:
3205 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
3206 * https://gcc.godbolt.org/z/xYez1j67Y.
3207 */
3208#ifdef XXH_OLD_NAMES
3209typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
3210#endif
3211static xxh_u64 XXH_read64(const void* ptr)
3212{
3213 typedef __attribute__((aligned(1))) xxh_u64 xxh_unalign64;
3214 return *((const xxh_unalign64*)ptr);
3215}
3216
3217#else
3218
3219/*
3220 * Portable and safe solution. Generally efficient.
3221 * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
3222 */
3223static xxh_u64 XXH_read64(const void* memPtr)
3224{
3225 xxh_u64 val;
3226 XXH_memcpy(&val, memPtr, sizeof(val));
3227 return val;
3228}
3229
3230#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
3231
3232#if defined(_MSC_VER) /* Visual Studio */
3233# define XXH_swap64 _byteswap_uint64
3234#elif XXH_GCC_VERSION >= 403
3235# define XXH_swap64 __builtin_bswap64
3236#else
3237static xxh_u64 XXH_swap64(xxh_u64 x)
3238{
3239 return ((x << 56) & 0xff00000000000000ULL) |
3240 ((x << 40) & 0x00ff000000000000ULL) |
3241 ((x << 24) & 0x0000ff0000000000ULL) |
3242 ((x << 8) & 0x000000ff00000000ULL) |
3243 ((x >> 8) & 0x00000000ff000000ULL) |
3244 ((x >> 24) & 0x0000000000ff0000ULL) |
3245 ((x >> 40) & 0x000000000000ff00ULL) |
3246 ((x >> 56) & 0x00000000000000ffULL);
3247}
3248#endif
3249
3250
3251/* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
3252#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
3253
3254XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
3255{
3256 const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
3257 return bytePtr[0]
3258 | ((xxh_u64)bytePtr[1] << 8)
3259 | ((xxh_u64)bytePtr[2] << 16)
3260 | ((xxh_u64)bytePtr[3] << 24)
3261 | ((xxh_u64)bytePtr[4] << 32)
3262 | ((xxh_u64)bytePtr[5] << 40)
3263 | ((xxh_u64)bytePtr[6] << 48)
3264 | ((xxh_u64)bytePtr[7] << 56);
3265}
3266
3267XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
3268{
3269 const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
3270 return bytePtr[7]
3271 | ((xxh_u64)bytePtr[6] << 8)
3272 | ((xxh_u64)bytePtr[5] << 16)
3273 | ((xxh_u64)bytePtr[4] << 24)
3274 | ((xxh_u64)bytePtr[3] << 32)
3275 | ((xxh_u64)bytePtr[2] << 40)
3276 | ((xxh_u64)bytePtr[1] << 48)
3277 | ((xxh_u64)bytePtr[0] << 56);
3278}
3279
3280#else
3281XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
3282{
3283 return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
3284}
3285
3286static xxh_u64 XXH_readBE64(const void* ptr)
3287{
3288 return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
3289}
3290#endif
3291
3292XXH_FORCE_INLINE xxh_u64
3293XXH_readLE64_align(const void* ptr, XXH_alignment align)
3294{
3295 if (align==XXH_unaligned)
3296 return XXH_readLE64(ptr);
3297 else
3298 return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
3299}
3300
3301
3302/******* xxh64 *******/
3311/* #define rather than static const, to be used as initializers */
3312#define XXH_PRIME64_1 0x9E3779B185EBCA87ULL
3313#define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL
3314#define XXH_PRIME64_3 0x165667B19E3779F9ULL
3315#define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL
3316#define XXH_PRIME64_5 0x27D4EB2F165667C5ULL
3318#ifdef XXH_OLD_NAMES
3319# define PRIME64_1 XXH_PRIME64_1
3320# define PRIME64_2 XXH_PRIME64_2
3321# define PRIME64_3 XXH_PRIME64_3
3322# define PRIME64_4 XXH_PRIME64_4
3323# define PRIME64_5 XXH_PRIME64_5
3324#endif
3325
3327static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
3328{
3329 acc += input * XXH_PRIME64_2;
3330 acc = XXH_rotl64(acc, 31);
3331 acc *= XXH_PRIME64_1;
3332#if (defined(__AVX512F__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
3333 /*
3334 * DISABLE AUTOVECTORIZATION:
3335 * A compiler fence is used to prevent GCC and Clang from
3336 * autovectorizing the XXH64 loop (pragmas and attributes don't work for some
3337 * reason) without globally disabling AVX512.
3338 *
3339 * Autovectorization of XXH64 tends to be detrimental,
3340 * though the exact outcome depends on the specific CPU and compiler version.
3341 * For reference, it has been reported as detrimental on Skylake-X,
3342 * but possibly beneficial on Zen4.
3343 *
3344 * The default is to disable auto-vectorization;
3345 * it can be re-enabled by defining the `XXH_ENABLE_AUTOVECTORIZE` build macro.
3346 */
3347 XXH_COMPILER_GUARD(acc);
3348#endif
3349 return acc;
3350}
3351
3352static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
3353{
3354 val = XXH64_round(0, val);
3355 acc ^= val;
3356 acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
3357 return acc;
3358}
3359
3361static xxh_u64 XXH64_avalanche(xxh_u64 hash)
3362{
3363 hash ^= hash >> 33;
3364 hash *= XXH_PRIME64_2;
3365 hash ^= hash >> 29;
3366 hash *= XXH_PRIME64_3;
3367 hash ^= hash >> 32;
3368 return hash;
3369}
3370
3371
3372#define XXH_get64bits(p) XXH_readLE64_align(p, align)
3373
3389static XXH_PUREF xxh_u64
3390XXH64_finalize(xxh_u64 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
3391{
3392 if (ptr==NULL) XXH_ASSERT(len == 0);
3393 len &= 31;
3394 while (len >= 8) {
3395 xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
3396 ptr += 8;
3397 hash ^= k1;
3398 hash = XXH_rotl64(hash,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
3399 len -= 8;
3400 }
3401 if (len >= 4) {
3402 hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
3403 ptr += 4;
3404 hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
3405 len -= 4;
3406 }
3407 while (len > 0) {
3408 hash ^= (*ptr++) * XXH_PRIME64_5;
3409 hash = XXH_rotl64(hash, 11) * XXH_PRIME64_1;
3410 --len;
3411 }
3412 return XXH64_avalanche(hash);
3413}
3414
3415#ifdef XXH_OLD_NAMES
3416# define PROCESS1_64 XXH_PROCESS1_64
3417# define PROCESS4_64 XXH_PROCESS4_64
3418# define PROCESS8_64 XXH_PROCESS8_64
3419#else
3420# undef XXH_PROCESS1_64
3421# undef XXH_PROCESS4_64
3422# undef XXH_PROCESS8_64
3423#endif
3424
3433XXH_FORCE_INLINE XXH_PUREF xxh_u64
3434XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
3435{
3436 xxh_u64 h64;
3437 if (input==NULL) XXH_ASSERT(len == 0);
3438
3439 if (len>=32) {
3440 const xxh_u8* const bEnd = input + len;
3441 const xxh_u8* const limit = bEnd - 31;
3442 xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
3443 xxh_u64 v2 = seed + XXH_PRIME64_2;
3444 xxh_u64 v3 = seed + 0;
3445 xxh_u64 v4 = seed - XXH_PRIME64_1;
3446
3447 do {
3448 v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
3449 v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
3450 v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
3451 v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
3452 } while (input<limit);
3453
3454 h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
3455 h64 = XXH64_mergeRound(h64, v1);
3456 h64 = XXH64_mergeRound(h64, v2);
3457 h64 = XXH64_mergeRound(h64, v3);
3458 h64 = XXH64_mergeRound(h64, v4);
3459
3460 } else {
3461 h64 = seed + XXH_PRIME64_5;
3462 }
3463
3464 h64 += (xxh_u64) len;
3465
3466 return XXH64_finalize(h64, input, len, align);
3467}
3468
3469
3471XXH_PUBLIC_API XXH64_hash_t XXH64 (XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
3472{
3473#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
3474 /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
3475 XXH64_state_t state;
3476 XXH64_reset(&state, seed);
3477 XXH64_update(&state, (const xxh_u8*)input, len);
3478 return XXH64_digest(&state);
3479#else
3480 if (XXH_FORCE_ALIGN_CHECK) {
3481 if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
3482 return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
3483 } }
3484
3485 return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
3486
3487#endif
3488}
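/*
 * Usage sketch (illustrative, not part of the library): one-shot hashing of a
 * memory buffer with an explicit seed. Assumes <string.h> and <stdio.h> are
 * included by the calling unit.
 *
 *   const char msg[] = "sample data";
 *   XXH64_hash_t const h = XXH64(msg, strlen(msg), 0);   // seed = 0
 *   printf("%016llx\n", (unsigned long long)h);
 */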
3489
3490/******* Hash Streaming *******/
3491#ifndef XXH_NO_STREAM
3493XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
3494{
3495 return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
3496}
3498XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH_NOESCAPE XXH64_state_t* statePtr)
3499{
3500 XXH_free(statePtr);
3501 return XXH_OK;
3502}
3503
3505XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dstState, const XXH64_state_t* srcState)
3506{
3507 XXH_memcpy(dstState, srcState, sizeof(*dstState));
3508}
3509
3511XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed)
3512{
3513 XXH_ASSERT(statePtr != NULL);
3514 memset(statePtr, 0, sizeof(*statePtr));
3515 statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
3516 statePtr->v[1] = seed + XXH_PRIME64_2;
3517 statePtr->v[2] = seed + 0;
3518 statePtr->v[3] = seed - XXH_PRIME64_1;
3519 return XXH_OK;
3520}
3521
3523XXH_PUBLIC_API XXH_errorcode
3524XXH64_update (XXH_NOESCAPE XXH64_state_t* state, XXH_NOESCAPE const void* input, size_t len)
3525{
3526 if (input==NULL) {
3527 XXH_ASSERT(len == 0);
3528 return XXH_OK;
3529 }
3530
3531 { const xxh_u8* p = (const xxh_u8*)input;
3532 const xxh_u8* const bEnd = p + len;
3533
3534 state->total_len += len;
3535
3536 if (state->memsize + len < 32) { /* fill in tmp buffer */
3537 XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
3538 state->memsize += (xxh_u32)len;
3539 return XXH_OK;
3540 }
3541
3542 if (state->memsize) { /* tmp buffer is full */
3543 XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
3544 state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0));
3545 state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1));
3546 state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2));
3547 state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3));
3548 p += 32 - state->memsize;
3549 state->memsize = 0;
3550 }
3551
3552 if (p+32 <= bEnd) {
3553 const xxh_u8* const limit = bEnd - 32;
3554
3555 do {
3556 state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8;
3557 state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8;
3558 state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8;
3559 state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8;
3560 } while (p<=limit);
3561
3562 }
3563
3564 if (p < bEnd) {
3565 XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
3566 state->memsize = (unsigned)(bEnd-p);
3567 }
3568 }
3569
3570 return XXH_OK;
3571}
3572
3573
3575XXH_PUBLIC_API XXH64_hash_t XXH64_digest(XXH_NOESCAPE const XXH64_state_t* state)
3576{
3577 xxh_u64 h64;
3578
3579 if (state->total_len >= 32) {
3580 h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18);
3581 h64 = XXH64_mergeRound(h64, state->v[0]);
3582 h64 = XXH64_mergeRound(h64, state->v[1]);
3583 h64 = XXH64_mergeRound(h64, state->v[2]);
3584 h64 = XXH64_mergeRound(h64, state->v[3]);
3585 } else {
3586 h64 = state->v[2] /*seed*/ + XXH_PRIME64_5;
3587 }
3588
3589 h64 += (xxh_u64) state->total_len;
3590
3591 return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
3592}
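/*
 * Streaming usage sketch (illustrative, not part of the library): feeding the
 * same bytes through several XXH64_update() calls produces the same digest as
 * a single one-shot XXH64() over the concatenated buffer. `chunk1`,
 * `chunk1Size`, `chunk2` and `chunk2Size` are hypothetical caller variables.
 *
 *   XXH64_state_t* const st = XXH64_createState();
 *   if (st != NULL) {
 *       (void)XXH64_reset(st, 0);                 // seed = 0
 *       (void)XXH64_update(st, chunk1, chunk1Size);
 *       (void)XXH64_update(st, chunk2, chunk2Size);
 *       {   XXH64_hash_t const h = XXH64_digest(st);
 *           (void)h;                              // use the hash
 *       }
 *       (void)XXH64_freeState(st);
 *   }
 */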
3593#endif /* !XXH_NO_STREAM */
3594
3595/******* Canonical representation *******/
3596
3598XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash)
3599{
3600 XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
3601 if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
3602 XXH_memcpy(dst, &hash, sizeof(*dst));
3603}
3604
3606XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src)
3607{
3608 return XXH_readBE64(src);
3609}
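/*
 * Canonical form sketch (illustrative, not part of the library): the canonical
 * representation stores the hash big-endian, so it can be written to disk or
 * sent over a network and read back identically on any platform.
 *
 *   XXH64_canonical_t canon;
 *   XXH64_canonicalFromHash(&canon, h);           // canon.digest[] is big-endian
 *   {   XXH64_hash_t const h2 = XXH64_hashFromCanonical(&canon);
 *       (void)h2;                                 // h2 == h, round-trips exactly
 *   }
 */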
3610
3611#ifndef XXH_NO_XXH3
3612
3613/* *********************************************************************
3614* XXH3
3615* New generation hash designed for speed on small keys and vectorization
3616************************************************************************ */
3624/* === Compiler specifics === */
3625
3626#if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
3627# define XXH_RESTRICT /* disable */
3628#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */
3629# define XXH_RESTRICT restrict
3630#elif (defined (__GNUC__) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))) \
3631 || (defined (__clang__)) \
3632 || (defined (_MSC_VER) && (_MSC_VER >= 1400)) \
3633 || (defined (__INTEL_COMPILER) && (__INTEL_COMPILER >= 1300))
3634/*
3635 * There are a LOT more compilers that recognize __restrict but this
3636 * covers the major ones.
3637 */
3638# define XXH_RESTRICT __restrict
3639#else
3640# define XXH_RESTRICT /* disable */
3641#endif
3642
3643#if (defined(__GNUC__) && (__GNUC__ >= 3)) \
3644 || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
3645 || defined(__clang__)
3646# define XXH_likely(x) __builtin_expect(x, 1)
3647# define XXH_unlikely(x) __builtin_expect(x, 0)
3648#else
3649# define XXH_likely(x) (x)
3650# define XXH_unlikely(x) (x)
3651#endif
3652
3653#ifndef XXH_HAS_INCLUDE
3654# ifdef __has_include
3655/*
3656 * Not defined as XXH_HAS_INCLUDE(x) (function-like) because
3657 * this causes segfaults in Apple Clang 4.2 (on Mac OS X 10.7 Lion)
3658 */
3659# define XXH_HAS_INCLUDE __has_include
3660# else
3661# define XXH_HAS_INCLUDE(x) 0
3662# endif
3663#endif
3664
3665#if defined(__GNUC__) || defined(__clang__)
3666# if defined(__ARM_FEATURE_SVE)
3667# include <arm_sve.h>
3668# endif
3669# if defined(__ARM_NEON__) || defined(__ARM_NEON) \
3670 || (defined(_M_ARM) && _M_ARM >= 7) \
3671 || defined(_M_ARM64) || defined(_M_ARM64EC) \
3672 || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* WASM SIMD128 via SIMDe */
3673# define inline __inline__ /* circumvent a clang bug */
3674# include <arm_neon.h>
3675# undef inline
3676# elif defined(__AVX2__)
3677# include <immintrin.h>
3678# elif defined(__SSE2__)
3679# include <emmintrin.h>
3680# endif
3681#endif
3682
3683#if defined(_MSC_VER)
3684# include <intrin.h>
3685#endif
3686
3687/*
3688 * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
3689 * remaining a true 64-bit/128-bit hash function.
3690 *
3691 * This is done by prioritizing a subset of 64-bit operations that can be
3692 * emulated without too many steps on the average 32-bit machine.
3693 *
3694 * For example, these two lines seem similar, and run equally fast on 64-bit:
3695 *
3696 * xxh_u64 x;
3697 * x ^= (x >> 47); // good
3698 * x ^= (x >> 13); // bad
3699 *
3700 * However, to a 32-bit machine, there is a major difference.
3701 *
3702 * x ^= (x >> 47) looks like this:
3703 *
3704 * x.lo ^= (x.hi >> (47 - 32));
3705 *
3706 * while x ^= (x >> 13) looks like this:
3707 *
3708 * // note: funnel shifts are not usually cheap.
3709 * x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
3710 * x.hi ^= (x.hi >> 13);
3711 *
3712 * The first one is significantly faster than the second, simply because the
3713 * shift is larger than 32. This means:
3714 * - All the bits we need are in the upper 32 bits, so we can ignore the lower
3715 * 32 bits in the shift.
3716 * - The shift result will always fit in the lower 32 bits, and therefore,
3717 * we can ignore the upper 32 bits in the xor.
3718 *
3719 * Thanks to this optimization, XXH3 only requires these features to be efficient:
3720 *
3721 * - Usable unaligned access
3722 * - A 32-bit or 64-bit ALU
3723 * - If 32-bit, a decent ADC instruction
3724 * - A 32 or 64-bit multiply with a 64-bit result
3725 * - For the 128-bit variant, a decent byteswap helps short inputs.
3726 *
3727 * The first two are already required by XXH32, and almost all 32-bit and 64-bit
3728 * platforms which can run XXH32 can run XXH3 efficiently.
3729 *
3730 * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
3731 * notable exception.
3732 *
3733 * First of all, Thumb-1 lacks support for the UMULL instruction which
3734 * performs the important long multiply. This means numerous __aeabi_lmul
3735 * calls.
3736 *
3737 * Second of all, the 8 functional registers are just not enough.
3738 * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need
3739 * Lo registers, and this shuffling results in thousands more MOVs than A32.
3740 *
3741 * A32 and T32 don't have this limitation. They can access all 14 registers,
3742 * do a 32->64 multiply with UMULL, and the flexible operand allowing free
3743 * shifts is helpful, too.
3744 *
3745 * Therefore, we do a quick sanity check.
3746 *
3747 * If compiling Thumb-1 for a target which supports ARM instructions, we will
3748 * emit a warning, as it is not a "sane" platform to compile for.
3749 *
3750 * Usually, if this happens, it is because of an accident and you probably need
3751 * to specify -march, as you likely meant to compile for a newer architecture.
3752 *
3753 * Credit: large sections of the vectorial and asm source code paths
3754 * have been contributed by @easyaspi314
3755 */
3756#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
3757# warning "XXH3 is highly inefficient without ARM or Thumb-2."
3758#endif
3759
3760/* ==========================================
3761 * Vectorization detection
3762 * ========================================== */
3763
3764#ifdef XXH_DOXYGEN
3775# define XXH_VECTOR XXH_SCALAR
3785enum XXH_VECTOR_TYPE /* fake enum */ {
3786 XXH_SCALAR = 0,
3787 XXH_SSE2 = 1,
3793 XXH_AVX2 = 2,
3794 XXH_AVX512 = 3,
3795 XXH_NEON = 4,
3800 XXH_VSX = 5,
3801 XXH_SVE = 6,
3802};
3812# define XXH_ACC_ALIGN 8
3813#endif
3814
3815/* Actual definition */
3816#ifndef XXH_DOXYGEN
3817# define XXH_SCALAR 0
3818# define XXH_SSE2 1
3819# define XXH_AVX2 2
3820# define XXH_AVX512 3
3821# define XXH_NEON 4
3822# define XXH_VSX 5
3823# define XXH_SVE 6
3824#endif
3825
3826#ifndef XXH_VECTOR /* can be defined on command line */
3827# if defined(__ARM_FEATURE_SVE)
3828# define XXH_VECTOR XXH_SVE
3829# elif ( \
3830 defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
3831 || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \
3832 || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* wasm simd128 via SIMDe */ \
3833 ) && ( \
3834 defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
3835 || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
3836 )
3837# define XXH_VECTOR XXH_NEON
3838# elif defined(__AVX512F__)
3839# define XXH_VECTOR XXH_AVX512
3840# elif defined(__AVX2__)
3841# define XXH_VECTOR XXH_AVX2
3842# elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
3843# define XXH_VECTOR XXH_SSE2
3844# elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
3845 || (defined(__s390x__) && defined(__VEC__)) \
3846 && defined(__GNUC__) /* TODO: IBM XL */
3847# define XXH_VECTOR XXH_VSX
3848# else
3849# define XXH_VECTOR XXH_SCALAR
3850# endif
3851#endif
3852
3853/* __ARM_FEATURE_SVE is only supported by GCC & Clang. */
3854#if (XXH_VECTOR == XXH_SVE) && !defined(__ARM_FEATURE_SVE)
3855# ifdef _MSC_VER
3856# pragma warning(once : 4606)
3857# else
3858# warning "__ARM_FEATURE_SVE isn't supported. Use SCALAR instead."
3859# endif
3860# undef XXH_VECTOR
3861# define XXH_VECTOR XXH_SCALAR
3862#endif
3863
3864/*
3865 * Controls the alignment of the accumulator,
3866 * for compatibility with aligned vector loads, which are usually faster.
3867 */
3868#ifndef XXH_ACC_ALIGN
3869# if defined(XXH_X86DISPATCH)
3870# define XXH_ACC_ALIGN 64 /* for compatibility with avx512 */
3871# elif XXH_VECTOR == XXH_SCALAR /* scalar */
3872# define XXH_ACC_ALIGN 8
3873# elif XXH_VECTOR == XXH_SSE2 /* sse2 */
3874# define XXH_ACC_ALIGN 16
3875# elif XXH_VECTOR == XXH_AVX2 /* avx2 */
3876# define XXH_ACC_ALIGN 32
3877# elif XXH_VECTOR == XXH_NEON /* neon */
3878# define XXH_ACC_ALIGN 16
3879# elif XXH_VECTOR == XXH_VSX /* vsx */
3880# define XXH_ACC_ALIGN 16
3881# elif XXH_VECTOR == XXH_AVX512 /* avx512 */
3882# define XXH_ACC_ALIGN 64
3883# elif XXH_VECTOR == XXH_SVE /* sve */
3884# define XXH_ACC_ALIGN 64
3885# endif
3886#endif
3887
3888#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
3889 || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
3890# define XXH_SEC_ALIGN XXH_ACC_ALIGN
3891#elif XXH_VECTOR == XXH_SVE
3892# define XXH_SEC_ALIGN XXH_ACC_ALIGN
3893#else
3894# define XXH_SEC_ALIGN 8
3895#endif
3896
3897#if defined(__GNUC__) || defined(__clang__)
3898# define XXH_ALIASING __attribute__((may_alias))
3899#else
3900# define XXH_ALIASING /* nothing */
3901#endif
3902
3903/*
3904 * UGLY HACK:
3905 * GCC usually generates the best code with -O3 for xxHash.
3906 *
3907 * However, when targeting AVX2, it is overzealous in its unrolling resulting
3908 * in code roughly 3/4 the speed of Clang.
3909 *
3910 * There are other issues, such as GCC splitting _mm256_loadu_si256 into
3911 * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
3912 * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
3913 *
3914 * That is why when compiling the AVX2 version, it is recommended to use either
3915 * -O2 -mavx2 -march=haswell
3916 * or
3917 * -O2 -mavx2 -mno-avx256-split-unaligned-load
3918 * for decent performance, or to use Clang instead.
3919 *
3920 * Fortunately, we can control the first one with a pragma that forces GCC into
3921 * -O2, but the other one we can't control without "failed to inline always
3922 * inline function due to target mismatch" warnings.
3923 */
3924#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
3925 && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
3926 && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
3927# pragma GCC push_options
3928# pragma GCC optimize("-O2")
3929#endif
3930
3931#if XXH_VECTOR == XXH_NEON
3932
3933/*
3934 * UGLY HACK: While AArch64 GCC on Linux does not seem to care, on macOS, GCC -O3
3935 * optimizes out the entire hashLong loop because of the aliasing violation.
3936 *
3937 * However, GCC is also inefficient at load-store optimization with vld1q/vst1q,
3938 * so the only option is to mark it as aliasing.
3939 */
3940typedef uint64x2_t xxh_aliasing_uint64x2_t XXH_ALIASING;
3941
3955#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__)
3956XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) /* silence -Wcast-align */
3957{
3958 return *(xxh_aliasing_uint64x2_t const *)ptr;
3959}
3960#else
3961XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr)
3962{
3963 return vreinterpretq_u64_u8(vld1q_u8((uint8_t const*)ptr));
3964}
3965#endif
3966
3975#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 11
3976XXH_FORCE_INLINE uint64x2_t
3977XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
3978{
3979 /* Inline assembly is the only way */
3980 __asm__("umlal %0.2d, %1.2s, %2.2s" : "+w" (acc) : "w" (lhs), "w" (rhs));
3981 return acc;
3982}
3983XXH_FORCE_INLINE uint64x2_t
3984XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
3985{
3986 /* This intrinsic works as expected */
3987 return vmlal_high_u32(acc, lhs, rhs);
3988}
3989#else
3990/* Portable intrinsic versions */
3991XXH_FORCE_INLINE uint64x2_t
3992XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
3993{
3994 return vmlal_u32(acc, vget_low_u32(lhs), vget_low_u32(rhs));
3995}
3998XXH_FORCE_INLINE uint64x2_t
3999XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
4000{
4001 return vmlal_u32(acc, vget_high_u32(lhs), vget_high_u32(rhs));
4002}
4003#endif
4004
4043# ifndef XXH3_NEON_LANES
4044# if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \
4045 && !defined(__APPLE__) && XXH_SIZE_OPT <= 0
4046# define XXH3_NEON_LANES 6
4047# else
4048# define XXH3_NEON_LANES XXH_ACC_NB
4049# endif
4050# endif
4051#endif /* XXH_VECTOR == XXH_NEON */
4052
4053/*
4054 * VSX and Z Vector helpers.
4055 *
4056 * This is very messy, and any pull requests to clean this up are welcome.
4057 *
4058 * There are a lot of problems with supporting VSX and s390x, due to
4059 * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
4060 */
4061#if XXH_VECTOR == XXH_VSX
4062/* Annoyingly, these headers _may_ define three macros: `bool`, `vector`,
4063 * and `pixel`. This is a problem for obvious reasons.
4064 *
4065 * These keywords are unnecessary; the spec literally says they are
4066 * equivalent to `__bool`, `__vector`, and `__pixel` and may be undef'd
4067 * after including the header.
4068 *
4069 * We use pragma push_macro/pop_macro to keep the namespace clean. */
4070# pragma push_macro("bool")
4071# pragma push_macro("vector")
4072# pragma push_macro("pixel")
4073/* silence potential macro redefined warnings */
4074# undef bool
4075# undef vector
4076# undef pixel
4077
4078# if defined(__s390x__)
4079# include <s390intrin.h>
4080# else
4081# include <altivec.h>
4082# endif
4083
4084/* Restore the original macro values, if applicable. */
4085# pragma pop_macro("pixel")
4086# pragma pop_macro("vector")
4087# pragma pop_macro("bool")
4088
4089typedef __vector unsigned long long xxh_u64x2;
4090typedef __vector unsigned char xxh_u8x16;
4091typedef __vector unsigned xxh_u32x4;
4092
4093/*
4094 * UGLY HACK: Similar to aarch64 macOS GCC, s390x GCC has the same aliasing issue.
4095 */
4096typedef xxh_u64x2 xxh_aliasing_u64x2 XXH_ALIASING;
4097
4098# ifndef XXH_VSX_BE
4099# if defined(__BIG_ENDIAN__) \
4100 || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
4101# define XXH_VSX_BE 1
4102# elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
4103# warning "-maltivec=be is not recommended. Please use native endianness."
4104# define XXH_VSX_BE 1
4105# else
4106# define XXH_VSX_BE 0
4107# endif
4108# endif /* !defined(XXH_VSX_BE) */
4109
4110# if XXH_VSX_BE
4111# if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
4112# define XXH_vec_revb vec_revb
4113# else
4117XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
4118{
4119 xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
4120 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
4121 return vec_perm(val, val, vByteSwap);
4122}
4123# endif
4124# endif /* XXH_VSX_BE */
4125
4129XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
4130{
4131 xxh_u64x2 ret;
4132 XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
4133# if XXH_VSX_BE
4134 ret = XXH_vec_revb(ret);
4135# endif
4136 return ret;
4137}
4138
4139/*
4140 * vec_mulo and vec_mule are very problematic intrinsics on PowerPC
4141 *
4142 * These intrinsics weren't added until GCC 8, despite existing for a while,
4143 * and they are endian dependent. Also, their meanings swap depending on the version.
4144 * */
4145# if defined(__s390x__)
4146 /* s390x is always big endian, no issue on this platform */
4147# define XXH_vec_mulo vec_mulo
4148# define XXH_vec_mule vec_mule
4149# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) && !defined(__ibmxl__)
4150/* Clang has a better way to control this: we can just use the builtin, which doesn't swap. */
4151 /* The IBM XL Compiler (which defines __clang__) only implements the vec_* operations */
4152# define XXH_vec_mulo __builtin_altivec_vmulouw
4153# define XXH_vec_mule __builtin_altivec_vmuleuw
4154# else
4155/* gcc needs inline assembly */
4156/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
4157XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
4158{
4159 xxh_u64x2 result;
4160 __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
4161 return result;
4162}
4163XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
4164{
4165 xxh_u64x2 result;
4166 __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
4167 return result;
4168}
4169# endif /* XXH_vec_mulo, XXH_vec_mule */
4170#endif /* XXH_VECTOR == XXH_VSX */
4171
4172#if XXH_VECTOR == XXH_SVE
4173#define ACCRND(acc, offset) \
4174do { \
4175 svuint64_t input_vec = svld1_u64(mask, xinput + offset); \
4176 svuint64_t secret_vec = svld1_u64(mask, xsecret + offset); \
4177 svuint64_t mixed = sveor_u64_x(mask, secret_vec, input_vec); \
4178 svuint64_t swapped = svtbl_u64(input_vec, kSwap); \
4179 svuint64_t mixed_lo = svextw_u64_x(mask, mixed); \
4180 svuint64_t mixed_hi = svlsr_n_u64_x(mask, mixed, 32); \
4181 svuint64_t mul = svmad_u64_x(mask, mixed_lo, mixed_hi, swapped); \
4182 acc = svadd_u64_x(mask, acc, mul); \
4183} while (0)
4184#endif /* XXH_VECTOR == XXH_SVE */
4185
4186/* prefetch
4187 * can be disabled, by declaring XXH_NO_PREFETCH build macro */
4188#if defined(XXH_NO_PREFETCH)
4189# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
4190#else
4191# if XXH_SIZE_OPT >= 1
4192# define XXH_PREFETCH(ptr) (void)(ptr)
4193# elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */
4194# include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
4195# define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
4196# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
4197# define XXH_PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
4198# else
4199# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
4200# endif
4201#endif /* XXH_NO_PREFETCH */
4202
4203
4204/* ==========================================
4205 * XXH3 default settings
4206 * ========================================== */
4207
4208#define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */
4209
4210#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
4211# error "default keyset is not large enough"
4212#endif
4213
4215XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
4216 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
4217 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
4218 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
4219 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
4220 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
4221 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
4222 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
4223 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
4224 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
4225 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
4226 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
4227 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
4228};
4229
4230static const xxh_u64 PRIME_MX1 = 0x165667919E3779F9ULL;
4231static const xxh_u64 PRIME_MX2 = 0x9FB21C651E98DF25ULL;
4233#ifdef XXH_OLD_NAMES
4234# define kSecret XXH3_kSecret
4235#endif
4236
4237#ifdef XXH_DOXYGEN
4254XXH_FORCE_INLINE xxh_u64
4255XXH_mult32to64(xxh_u64 x, xxh_u64 y)
4256{
4257 return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
4258}
4259#elif defined(_MSC_VER) && defined(_M_IX86)
4260# define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
4261#else
4262/*
4263 * Downcast + upcast is usually better than masking on older compilers like
4264 * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers.
4265 *
4266 * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands
4267 * and perform a full 64x64 multiply -- entirely redundant on 32-bit.
4268 */
4269# define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
4270#endif
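/*
 * Illustrative note (not part of the library): only the low 32 bits of each
 * argument participate, and the result is a full 64-bit product, e.g.
 *   XXH_mult32to64(0xFFFFFFFFU, 0xFFFFFFFFU) == 0xFFFFFFFE00000001ULL
 */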
4271
4281static XXH128_hash_t
4282XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
4283{
4284 /*
4285 * GCC/Clang __uint128_t method.
4286 *
4287 * On most 64-bit targets, GCC and Clang define a __uint128_t type.
4288 * This is usually the best way as it usually uses a native long 64-bit
4289 * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
4290 *
4291 * Usually.
4292 *
4293 * However, Clang (and Emscripten) define this type even on 32-bit platforms
4294 * that lack the native arithmetic for it. This results in a slow
4295 * compiler builtin call which calculates a full 128-bit multiply.
4296 * In that case it is best to use the portable one.
4297 * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
4298 */
4299#if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \
4300 && defined(__SIZEOF_INT128__) \
4301 || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
4302
4303 __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
4304 XXH128_hash_t r128;
4305 r128.low64 = (xxh_u64)(product);
4306 r128.high64 = (xxh_u64)(product >> 64);
4307 return r128;
4308
4309 /*
4310 * MSVC for x64's _umul128 method.
4311 *
4312 * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct);
4313 *
4314 * This compiles to single operand MUL on x64.
4315 */
4316#elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC)
4317
4318#ifndef _MSC_VER
4319# pragma intrinsic(_umul128)
4320#endif
4321 xxh_u64 product_high;
4322 xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
4323 XXH128_hash_t r128;
4324 r128.low64 = product_low;
4325 r128.high64 = product_high;
4326 return r128;
4327
4328 /*
4329 * MSVC for ARM64's __umulh method.
4330 *
4331 * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method.
4332 */
4333#elif defined(_M_ARM64) || defined(_M_ARM64EC)
4334
4335#ifndef _MSC_VER
4336# pragma intrinsic(__umulh)
4337#endif
4338 XXH128_hash_t r128;
4339 r128.low64 = lhs * rhs;
4340 r128.high64 = __umulh(lhs, rhs);
4341 return r128;
4342
4343#else
4344 /*
4345 * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
4346 *
4347 * This is a fast and simple grade school multiply, which is shown below
4348 * with base 10 arithmetic instead of base 0x100000000.
4349 *
4350 * 9 3 // D2 lhs = 93
4351 * x 7 5 // D2 rhs = 75
4352 * ----------
4353 * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
4354 * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
4355 * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
4356 * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
4357 * ---------
4358 * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27
4359 * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67
4360 * ---------
4361 * 6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975
4362 *
4363 * The reasons for adding the products like this are:
4364 * 1. It avoids manual carry tracking. Just like how
4365 * (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
4366 * This avoids a lot of complexity.
4367 *
4368 * 2. It hints for, and on Clang, compiles to, the powerful UMAAL
4369 * instruction available in ARM's Digital Signal Processing extension
4370 * in 32-bit ARMv6 and later, which is shown below:
4371 *
4372 * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
4373 * {
4374 * xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
4375 * *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
4376 * *RdHi = (xxh_u32)(product >> 32);
4377 * }
4378 *
4379 * This instruction was designed for efficient long multiplication, and
4380 * allows this to be calculated in only 4 instructions at speeds
4381 * comparable to some 64-bit ALUs.
4382 *
4383 * 3. It isn't terrible on other platforms. Usually this will be a couple
4384 * of 32-bit ADD/ADCs.
4385 */
4386
4387 /* First calculate all of the cross products. */
4388 xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
4389 xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF);
4390 xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
4391 xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32);
4392
4393 /* Now add the products together. These will never overflow. */
4394 xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
4395 xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
4396 xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);
4397
4398 XXH128_hash_t r128;
4399 r128.low64 = lower;
4400 r128.high64 = upper;
4401 return r128;
4402#endif
4403}
4404
4415static xxh_u64
4416XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
4417{
4418 XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
4419 return product.low64 ^ product.high64;
4420}
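/*
 * Illustrative note (not part of the library): the fold XORs the two halves of
 * the 128-bit product. For example, with lhs == rhs == (1ULL << 32), the
 * product is 2^64, i.e. high64 == 1 and low64 == 0, so the fold returns 1.
 */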
4421
4423XXH_FORCE_INLINE XXH_CONSTF xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift)
4424{
4425 XXH_ASSERT(0 <= shift && shift < 64);
4426 return v64 ^ (v64 >> shift);
4427}
4428
4429/*
4430 * This is a fast avalanche stage,
4431 * suitable when input bits are already partially mixed
4432 */
4433static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
4434{
4435 h64 = XXH_xorshift64(h64, 37);
4436 h64 *= PRIME_MX1;
4437 h64 = XXH_xorshift64(h64, 32);
4438 return h64;
4439}
4440
4441/*
4442 * This is a stronger avalanche,
4443 * inspired by Pelle Evensen's rrmxmx
4444 * preferable when input has not been previously mixed
4445 */
4446static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len)
4447{
4448 /* this mix is inspired by Pelle Evensen's rrmxmx */
4449 h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24);
4450 h64 *= PRIME_MX2;
4451 h64 ^= (h64 >> 35) + len ;
4452 h64 *= PRIME_MX2;
4453 return XXH_xorshift64(h64, 28);
4454}
4455
4456
4457/* ==========================================
4458 * Short keys
4459 * ==========================================
4460 * One of the shortcomings of XXH32 and XXH64 was that their performance was
4461 * sub-optimal on short lengths. They used an iterative algorithm which strongly
4462 * favored lengths that were a multiple of 4 or 8.
4463 *
4464 * Instead of iterating over individual inputs, we use a set of single shot
4465 * functions which piece together a range of lengths and operate in constant time.
4466 *
4467 * Additionally, the number of multiplies has been significantly reduced. This
4468 * reduces latency, especially when emulating 64-bit multiplies on 32-bit.
4469 *
4470 * Depending on the platform, this may or may not be faster than XXH32, but it
4471 * is almost guaranteed to be faster than XXH64.
4472 */
4473
4474/*
4475 * At very short lengths, there isn't enough input to fully hide secrets, or use
4476 * the entire secret.
4477 *
4478 * There is also only a limited amount of mixing we can do before significantly
4479 * impacting performance.
4480 *
4481 * Therefore, we use different sections of the secret and always mix two secret
4482 * samples with an XOR. This should have no effect on performance on the
4483 * seedless or withSeed variants because everything _should_ be constant folded
4484 * by modern compilers.
4485 *
4486 * The XOR mixing hides individual parts of the secret and increases entropy.
4487 *
4488 * This adds an extra layer of strength for custom secrets.
4489 */
4490XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
4491XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
4492{
4493 XXH_ASSERT(input != NULL);
4494 XXH_ASSERT(1 <= len && len <= 3);
4495 XXH_ASSERT(secret != NULL);
4496 /*
4497 * len = 1: combined = { input[0], 0x01, input[0], input[0] }
4498 * len = 2: combined = { input[1], 0x02, input[0], input[1] }
4499 * len = 3: combined = { input[2], 0x03, input[0], input[1] }
4500 */
4501 { xxh_u8 const c1 = input[0];
4502 xxh_u8 const c2 = input[len >> 1];
4503 xxh_u8 const c3 = input[len - 1];
4504 xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24)
4505 | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
4506 xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
4507 xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
4508 return XXH64_avalanche(keyed);
4509 }
4510}
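/*
 * Worked example (illustrative, not part of the library): for len == 2 and
 * input == { 0xAA, 0xBB }: c1 == 0xAA, c2 == 0xBB, c3 == 0xBB, so
 * combined == 0xBBAA02BB, whose little-endian bytes are
 * { input[1], 0x02, input[0], input[1] }, matching the table above.
 */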
4511
4512XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
4513XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
4514{
4515 XXH_ASSERT(input != NULL);
4516 XXH_ASSERT(secret != NULL);
4517 XXH_ASSERT(4 <= len && len <= 8);
4518 seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
4519 { xxh_u32 const input1 = XXH_readLE32(input);
4520 xxh_u32 const input2 = XXH_readLE32(input + len - 4);
4521 xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
4522 xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
4523 xxh_u64 const keyed = input64 ^ bitflip;
4524 return XXH3_rrmxmx(keyed, len);
4525 }
4526}
4527
4528XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
4529XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
4530{
4531 XXH_ASSERT(input != NULL);
4532 XXH_ASSERT(secret != NULL);
4533 XXH_ASSERT(9 <= len && len <= 16);
4534 { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
4535 xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
4536 xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1;
4537 xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
4538 xxh_u64 const acc = len
4539 + XXH_swap64(input_lo) + input_hi
4540 + XXH3_mul128_fold64(input_lo, input_hi);
4541 return XXH3_avalanche(acc);
4542 }
4543}
4544
4545XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
4546XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
4547{
4548 XXH_ASSERT(len <= 16);
4549 { if (XXH_likely(len > 8)) return XXH3_len_9to16_64b(input, len, secret, seed);
4550 if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed);
4551 if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
4552 return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64)));
4553 }
4554}
4555
4556/*
4557 * DISCLAIMER: There are known *seed-dependent* multicollisions here due to
4558 * multiplication by zero, affecting hashes of lengths 17 to 240.
4559 *
4560 * However, they are very unlikely.
4561 *
4562 * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all
4563 * unseeded non-cryptographic hashes, it does not attempt to defend itself
4564 * against specially crafted inputs, only random inputs.
4565 *
4566 * Compared to classic UMAC, where a 1 in 2^31 chance of 4 consecutive bytes
4567 * cancelling out the secret is taken an arbitrary number of times (addressed
4568 * in XXH3_accumulate_512), this collision is very unlikely with random inputs
4569 * and/or proper seeding:
4570 *
4571 * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a
4572 * function that is only called up to 16 times per hash with up to 240 bytes of
4573 * input.
4574 *
4575 * This is not too bad for a non-cryptographic hash function, especially with
4576 * only 64 bit outputs.
4577 *
4578 * The 128-bit variant (which trades some speed for strength) is NOT affected
4579 * by this, although it is always a good idea to use a proper seed if you care
4580 * about strength.
4581 */
4582XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
4583 const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
4584{
4585#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
4586 && defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */ \
4587 && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable like XXH32 hack */
4588 /*
4589 * UGLY HACK:
4590 * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
4591 * slower code.
4592 *
4593 * By forcing seed64 into a register, we disrupt the cost model and
4594 * cause it to scalarize. See `XXH32_round()`
4595 *
4596 * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
4597 * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
4598 * GCC 9.2, despite both emitting scalar code.
4599 *
4600 * GCC generates much better scalar code than Clang for the rest of XXH3,
4601 * which is why finding a more optimal codepath is of interest.
4602 */
4603 XXH_COMPILER_GUARD(seed64);
4604#endif
4605 { xxh_u64 const input_lo = XXH_readLE64(input);
4606 xxh_u64 const input_hi = XXH_readLE64(input+8);
4607 return XXH3_mul128_fold64(
4608 input_lo ^ (XXH_readLE64(secret) + seed64),
4609 input_hi ^ (XXH_readLE64(secret+8) - seed64)
4610 );
4611 }
4612}
4613
4614/* For mid range keys, XXH3 uses a Mum-hash variant. */
4615XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
4616XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
4617 const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
4618 XXH64_hash_t seed)
4619{
4620 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
4621 XXH_ASSERT(16 < len && len <= 128);
4622
4623 { xxh_u64 acc = len * XXH_PRIME64_1;
4624#if XXH_SIZE_OPT >= 1
4625 /* Smaller and cleaner, but slightly slower. */
4626 unsigned int i = (unsigned int)(len - 1) / 32;
4627 do {
4628 acc += XXH3_mix16B(input+16 * i, secret+32*i, seed);
4629 acc += XXH3_mix16B(input+len-16*(i+1), secret+32*i+16, seed);
4630 } while (i-- != 0);
4631#else
4632 if (len > 32) {
4633 if (len > 64) {
4634 if (len > 96) {
4635 acc += XXH3_mix16B(input+48, secret+96, seed);
4636 acc += XXH3_mix16B(input+len-64, secret+112, seed);
4637 }
4638 acc += XXH3_mix16B(input+32, secret+64, seed);
4639 acc += XXH3_mix16B(input+len-48, secret+80, seed);
4640 }
4641 acc += XXH3_mix16B(input+16, secret+32, seed);
4642 acc += XXH3_mix16B(input+len-32, secret+48, seed);
4643 }
4644 acc += XXH3_mix16B(input+0, secret+0, seed);
4645 acc += XXH3_mix16B(input+len-16, secret+16, seed);
4646#endif
4647 return XXH3_avalanche(acc);
4648 }
4649}
4650
4654#define XXH3_MIDSIZE_MAX 240
4655
4656XXH_NO_INLINE XXH_PUREF XXH64_hash_t
4657XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
4658 const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
4659 XXH64_hash_t seed)
4660{
4661 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
4662 XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
4663
4664 #define XXH3_MIDSIZE_STARTOFFSET 3
4665 #define XXH3_MIDSIZE_LASTOFFSET 17
4666
4667 { xxh_u64 acc = len * XXH_PRIME64_1;
4668 xxh_u64 acc_end;
4669 unsigned int const nbRounds = (unsigned int)len / 16;
4670 unsigned int i;
4671 XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
4672 for (i=0; i<8; i++) {
4673 acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
4674 }
4675 /* last bytes */
4676 acc_end = XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
4677 XXH_ASSERT(nbRounds >= 8);
4678 acc = XXH3_avalanche(acc);
4679#if defined(__clang__) /* Clang */ \
4680 && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
4681 && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
4682 /*
4683 * UGLY HACK:
4684 * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
4685 * Everywhere else, it uses scalar code.
4686 *
4687 * For 64->128-bit multiplies, even if the NEON was 100% optimal, it
4688 * would still be slower than UMAAL (see XXH_mult64to128).
4689 *
4690 * Unfortunately, Clang doesn't handle the long multiplies properly and
4691 * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
4692 * scalarized into an ugly mess of VMOV.32 instructions.
4693 *
4694 * This mess is difficult to avoid without turning autovectorization
4695 * off completely, but such cases are usually relatively minor and/or not
4696 * worth fixing.
4697 *
4698 * This loop is the easiest to fix, as unlike XXH32, this pragma
4699 * _actually works_ because it is a loop vectorization instead of an
4700 * SLP vectorization.
4701 */
4702 #pragma clang loop vectorize(disable)
4703#endif
4704 for (i=8 ; i < nbRounds; i++) {
4705 /*
4706 * Prevents clang from unrolling the acc loop and interleaving it with this one.
4707 */
4708 XXH_COMPILER_GUARD(acc);
4709 acc_end += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
4710 }
4711 return XXH3_avalanche(acc + acc_end);
4712 }
4713}
4714
4715
4716/* ======= Long Keys ======= */
4717
4718#define XXH_STRIPE_LEN 64
4719#define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */
4720#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))
4721
4722#ifdef XXH_OLD_NAMES
4723# define STRIPE_LEN XXH_STRIPE_LEN
4724# define ACC_NB XXH_ACC_NB
4725#endif
4726
4727#ifndef XXH_PREFETCH_DIST
4728# ifdef __clang__
4729# define XXH_PREFETCH_DIST 320
4730# else
4731# if (XXH_VECTOR == XXH_AVX512)
4732# define XXH_PREFETCH_DIST 512
4733# else
4734# define XXH_PREFETCH_DIST 384
4735# endif
4736# endif /* __clang__ */
4737#endif /* XXH_PREFETCH_DIST */
4738
4739/*
4740 * These macros are to generate an XXH3_accumulate() function.
4741 * The two arguments select the name suffix and target attribute.
4742 *
4743 * The name of this symbol is XXH3_accumulate_<name>() and it calls
4744 * XXH3_accumulate_512_<name>().
4745 *
4746 * It may be useful to hand implement this function if the compiler fails to
4747 * optimize the inline function.
4748 */
4749#define XXH3_ACCUMULATE_TEMPLATE(name) \
4750void \
4751XXH3_accumulate_##name(xxh_u64* XXH_RESTRICT acc, \
4752 const xxh_u8* XXH_RESTRICT input, \
4753 const xxh_u8* XXH_RESTRICT secret, \
4754 size_t nbStripes) \
4755{ \
4756 size_t n; \
4757 for (n = 0; n < nbStripes; n++ ) { \
4758 const xxh_u8* const in = input + n*XXH_STRIPE_LEN; \
4759 XXH_PREFETCH(in + XXH_PREFETCH_DIST); \
4760 XXH3_accumulate_512_##name( \
4761 acc, \
4762 in, \
4763 secret + n*XXH_SECRET_CONSUME_RATE); \
4764 } \
4765}
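/*
 * Illustrative note (not part of the library): for example, the
 * XXH3_ACCUMULATE_TEMPLATE(avx2) instantiation below generates
 *
 *   void XXH3_accumulate_avx2(xxh_u64* acc, const xxh_u8* input,
 *                             const xxh_u8* secret, size_t nbStripes);
 *
 * which prefetches ahead and calls XXH3_accumulate_512_avx2() once per
 * 64-byte stripe, advancing the secret by XXH_SECRET_CONSUME_RATE each time.
 */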
4766
4767
4768XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
4769{
4770 if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
4771 XXH_memcpy(dst, &v64, sizeof(v64));
4772}
4773
4774/* Several intrinsic functions below are supposed to accept __int64 as an argument,
4775 * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
4776 * However, several environments do not define the __int64 type,
4777 * requiring a workaround.
4778 */
4779#if !defined (__VMS) \
4780 && (defined (__cplusplus) \
4781 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
4782 typedef int64_t xxh_i64;
4783#else
4784 /* the following type must have a width of 64-bit */
4785 typedef long long xxh_i64;
4786#endif
4787
4788
4789/*
4790 * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
4791 *
4792 * It is a hardened version of UMAC, based off of FARSH's implementation.
4793 *
4794 * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
4795 * implementations, and it is ridiculously fast.
4796 *
4797 * We harden it by mixing the original input to the accumulators as well as the product.
4798 *
4799 * This means that in the (relatively likely) case of a multiply by zero, the
4800 * original input is preserved.
4801 *
4802 * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
4803 * cross-pollination, as otherwise the upper and lower halves would be
4804 * essentially independent.
4805 *
4806 * This doesn't matter on 64-bit hashes since they all get merged together in
4807 * the end, so we skip the extra step.
4808 *
4809 * Both XXH3_64bits and XXH3_128bits use this subroutine.
4810 */
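/*
 * Scalar sketch of one accumulation lane (illustrative, not the compiled code;
 * the vectorized implementations below are SIMD equivalents of this idea):
 *
 *   // for each lane i in [0, XXH_ACC_NB):
 *   xxh_u64 const data_val = XXH_readLE64(input  + 8*i);
 *   xxh_u64 const data_key = data_val ^ XXH_readLE64(secret + 8*i);
 *   acc[i ^ 1] += data_val;                          // mix original input into the swapped lane
 *   acc[i]     += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
 */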
4811
4812#if (XXH_VECTOR == XXH_AVX512) \
4813 || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)
4814
4815#ifndef XXH_TARGET_AVX512
4816# define XXH_TARGET_AVX512 /* disable attribute target */
4817#endif
4818
4819XXH_FORCE_INLINE XXH_TARGET_AVX512 void
4820XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc,
4821 const void* XXH_RESTRICT input,
4822 const void* XXH_RESTRICT secret)
4823{
4824 __m512i* const xacc = (__m512i *) acc;
4825 XXH_ASSERT((((size_t)acc) & 63) == 0);
4826 XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
4827
4828 {
4829 /* data_vec = input[0]; */
4830 __m512i const data_vec = _mm512_loadu_si512 (input);
4831 /* key_vec = secret[0]; */
4832 __m512i const key_vec = _mm512_loadu_si512 (secret);
4833 /* data_key = data_vec ^ key_vec; */
4834 __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec);
4835 /* data_key_lo = data_key >> 32; */
4836 __m512i const data_key_lo = _mm512_srli_epi64 (data_key, 32);
4837 /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
4838 __m512i const product = _mm512_mul_epu32 (data_key, data_key_lo);
4839 /* xacc[0] += swap(data_vec); */
4840 __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
4841 __m512i const sum = _mm512_add_epi64(*xacc, data_swap);
4842 /* xacc[0] += product; */
4843 *xacc = _mm512_add_epi64(product, sum);
4844 }
4845}
4846XXH_FORCE_INLINE XXH_TARGET_AVX512 XXH3_ACCUMULATE_TEMPLATE(avx512)
4847
4848/*
4849 * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing.
4850 *
4851 * Multiplication isn't perfect, as explained by Google in HighwayHash:
4852 *
4853 * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to
4854 * // varying degrees. In descending order of goodness, bytes
4855 * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32.
4856 * // As expected, the upper and lower bytes are much worse.
4857 *
4858 * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
4859 *
4860 * Since our algorithm uses a pseudorandom secret to add some variance into the
4861 * mix, we don't need to (or want to) mix as often or as much as HighwayHash does.
4862 *
4863 * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid
4864 * extraction.
4865 *
4866 * Both XXH3_64bits and XXH3_128bits use this subroutine.
4867 */
4868
4869XXH_FORCE_INLINE XXH_TARGET_AVX512 void
4870XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4871{
4872 XXH_ASSERT((((size_t)acc) & 63) == 0);
4873 XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
4874 { __m512i* const xacc = (__m512i*) acc;
4875 const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);
4876
4877 /* xacc[0] ^= (xacc[0] >> 47) */
4878 __m512i const acc_vec = *xacc;
4879 __m512i const shifted = _mm512_srli_epi64 (acc_vec, 47);
4880 /* xacc[0] ^= secret; */
4881 __m512i const key_vec = _mm512_loadu_si512 (secret);
4882 __m512i const data_key = _mm512_ternarylogic_epi32(key_vec, acc_vec, shifted, 0x96 /* key_vec ^ acc_vec ^ shifted */);
4883
4884 /* xacc[0] *= XXH_PRIME32_1; */
4885 __m512i const data_key_hi = _mm512_srli_epi64 (data_key, 32);
4886 __m512i const prod_lo = _mm512_mul_epu32 (data_key, prime32);
4887 __m512i const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32);
4888 *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
4889 }
4890}
4891
4892XXH_FORCE_INLINE XXH_TARGET_AVX512 void
4893XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
4894{
4895 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
4896 XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
4897 XXH_ASSERT(((size_t)customSecret & 63) == 0);
4898 (void)(&XXH_writeLE64);
4899 { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
4900 __m512i const seed_pos = _mm512_set1_epi64((xxh_i64)seed64);
4901 __m512i const seed = _mm512_mask_sub_epi64(seed_pos, 0xAA, _mm512_set1_epi8(0), seed_pos);
4902
4903 const __m512i* const src = (const __m512i*) ((const void*) XXH3_kSecret);
4904 __m512i* const dest = ( __m512i*) customSecret;
4905 int i;
4906 XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */
4907 XXH_ASSERT(((size_t)dest & 63) == 0);
4908 for (i=0; i < nbRounds; ++i) {
4909 dest[i] = _mm512_add_epi64(_mm512_load_si512(src + i), seed);
4910 } }
4911}
4912
4913#endif
4914
4915#if (XXH_VECTOR == XXH_AVX2) \
4916 || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)
4917
4918#ifndef XXH_TARGET_AVX2
4919# define XXH_TARGET_AVX2 /* disable attribute target */
4920#endif
4921
4922XXH_FORCE_INLINE XXH_TARGET_AVX2 void
4923XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc,
4924 const void* XXH_RESTRICT input,
4925 const void* XXH_RESTRICT secret)
4926{
4927 XXH_ASSERT((((size_t)acc) & 31) == 0);
4928 { __m256i* const xacc = (__m256i *) acc;
4929 /* Unaligned. This is mainly for pointer arithmetic, and because
4930 * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
4931 const __m256i* const xinput = (const __m256i *) input;
4932 /* Unaligned. This is mainly for pointer arithmetic, and because
4933 * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
4934 const __m256i* const xsecret = (const __m256i *) secret;
4935
4936 size_t i;
4937 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
4938 /* data_vec = xinput[i]; */
4939 __m256i const data_vec = _mm256_loadu_si256 (xinput+i);
4940 /* key_vec = xsecret[i]; */
4941 __m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
4942 /* data_key = data_vec ^ key_vec; */
4943 __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
4944 /* data_key_lo = data_key >> 32; */
4945 __m256i const data_key_lo = _mm256_srli_epi64 (data_key, 32);
4946 /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
4947 __m256i const product = _mm256_mul_epu32 (data_key, data_key_lo);
4948 /* xacc[i] += swap(data_vec); */
4949 __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
4950 __m256i const sum = _mm256_add_epi64(xacc[i], data_swap);
4951 /* xacc[i] += product; */
4952 xacc[i] = _mm256_add_epi64(product, sum);
4953 } }
4954}
4955XXH_FORCE_INLINE XXH_TARGET_AVX2 XXH3_ACCUMULATE_TEMPLATE(avx2)
4956
4957XXH_FORCE_INLINE XXH_TARGET_AVX2 void
4958XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4959{
4960 XXH_ASSERT((((size_t)acc) & 31) == 0);
4961 { __m256i* const xacc = (__m256i*) acc;
4962 /* Unaligned. This is mainly for pointer arithmetic, and because
4963 * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
4964 const __m256i* const xsecret = (const __m256i *) secret;
4965 const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);
4966
4967 size_t i;
4968 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
4969 /* xacc[i] ^= (xacc[i] >> 47) */
4970 __m256i const acc_vec = xacc[i];
4971 __m256i const shifted = _mm256_srli_epi64 (acc_vec, 47);
4972 __m256i const data_vec = _mm256_xor_si256 (acc_vec, shifted);
4973 /* xacc[i] ^= xsecret; */
4974 __m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
4975 __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
4976
4977 /* xacc[i] *= XXH_PRIME32_1; */
4978 __m256i const data_key_hi = _mm256_srli_epi64 (data_key, 32);
4979 __m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32);
4980 __m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32);
4981 xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
4982 }
4983 }
4984}
4985
4986XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
4987{
4988 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
4989 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
4990 XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
4991 (void)(&XXH_writeLE64);
4992 XXH_PREFETCH(customSecret);
4993 { __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);
4994
4995 const __m256i* const src = (const __m256i*) ((const void*) XXH3_kSecret);
4996 __m256i* dest = ( __m256i*) customSecret;
4997
4998# if defined(__GNUC__) || defined(__clang__)
4999 /*
5000 * On GCC & Clang, marking 'dest' as modified causes the compiler to:
5001 * - not extract the secret from SSE registers in the internal loop
5002 * - use fewer registers, and avoid pushing these registers onto the stack
5003 */
5004 XXH_COMPILER_GUARD(dest);
5005# endif
5006 XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */
5007 XXH_ASSERT(((size_t)dest & 31) == 0);
5008
5009 /* GCC -O2 needs the loop unrolled manually */
5010 dest[0] = _mm256_add_epi64(_mm256_load_si256(src+0), seed);
5011 dest[1] = _mm256_add_epi64(_mm256_load_si256(src+1), seed);
5012 dest[2] = _mm256_add_epi64(_mm256_load_si256(src+2), seed);
5013 dest[3] = _mm256_add_epi64(_mm256_load_si256(src+3), seed);
5014 dest[4] = _mm256_add_epi64(_mm256_load_si256(src+4), seed);
5015 dest[5] = _mm256_add_epi64(_mm256_load_si256(src+5), seed);
5016 }
5017}
5018
5019#endif
5020
5021/* x86dispatch always generates SSE2 */
5022#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)
5023
5024#ifndef XXH_TARGET_SSE2
5025# define XXH_TARGET_SSE2 /* disable attribute target */
5026#endif
5027
5028XXH_FORCE_INLINE XXH_TARGET_SSE2 void
5029XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc,
5030 const void* XXH_RESTRICT input,
5031 const void* XXH_RESTRICT secret)
5032{
5033 /* SSE2 is just a half-scale version of the AVX2 version. */
5034 XXH_ASSERT((((size_t)acc) & 15) == 0);
5035 { __m128i* const xacc = (__m128i *) acc;
5036 /* Unaligned. This is mainly for pointer arithmetic, and because
5037 * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
5038 const __m128i* const xinput = (const __m128i *) input;
5039 /* Unaligned. This is mainly for pointer arithmetic, and because
5040 * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
5041 const __m128i* const xsecret = (const __m128i *) secret;
5042
5043 size_t i;
5044 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
5045 /* data_vec = xinput[i]; */
5046 __m128i const data_vec = _mm_loadu_si128 (xinput+i);
5047 /* key_vec = xsecret[i]; */
5048 __m128i const key_vec = _mm_loadu_si128 (xsecret+i);
5049 /* data_key = data_vec ^ key_vec; */
5050 __m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
5051 /* data_key_lo = data_key >> 32; */
5052 __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
5053 /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
5054 __m128i const product = _mm_mul_epu32 (data_key, data_key_lo);
5055 /* xacc[i] += swap(data_vec); */
5056 __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
5057 __m128i const sum = _mm_add_epi64(xacc[i], data_swap);
5058 /* xacc[i] += product; */
5059 xacc[i] = _mm_add_epi64(product, sum);
5060 } }
5061}
5062XXH_FORCE_INLINE XXH_TARGET_SSE2 XXH3_ACCUMULATE_TEMPLATE(sse2)
5063
5064XXH_FORCE_INLINE XXH_TARGET_SSE2 void
5065XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
5066{
5067 XXH_ASSERT((((size_t)acc) & 15) == 0);
5068 { __m128i* const xacc = (__m128i*) acc;
5069 /* Unaligned. This is mainly for pointer arithmetic, and because
5070 * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
5071 const __m128i* const xsecret = (const __m128i *) secret;
5072 const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);
5073
5074 size_t i;
5075 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
5076 /* xacc[i] ^= (xacc[i] >> 47) */
5077 __m128i const acc_vec = xacc[i];
5078 __m128i const shifted = _mm_srli_epi64 (acc_vec, 47);
5079 __m128i const data_vec = _mm_xor_si128 (acc_vec, shifted);
5080 /* xacc[i] ^= xsecret[i]; */
5081 __m128i const key_vec = _mm_loadu_si128 (xsecret+i);
5082 __m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
5083
5084 /* xacc[i] *= XXH_PRIME32_1; */
5085 __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
5086 __m128i const prod_lo = _mm_mul_epu32 (data_key, prime32);
5087 __m128i const prod_hi = _mm_mul_epu32 (data_key_hi, prime32);
5088 xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
5089 }
5090 }
5091}
5092
5093XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
5094{
5095 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
5096 (void)(&XXH_writeLE64);
5097 { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);
5098
5099# if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
5100 /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */
5101 XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
5102 __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
5103# else
5104 __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
5105# endif
5106 int i;
5107
5108 const void* const src16 = XXH3_kSecret;
5109 __m128i* dst16 = (__m128i*) customSecret;
5110# if defined(__GNUC__) || defined(__clang__)
5111 /*
5112 * On GCC & Clang, marking 'dst16' as modified causes the compiler to:
5113 * - not extract the secret from SSE registers in the internal loop
5114 * - use fewer registers, and avoid spilling them onto the stack
5115 */
5116 XXH_COMPILER_GUARD(dst16);
5117# endif
5118 XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */
5119 XXH_ASSERT(((size_t)dst16 & 15) == 0);
5120
5121 for (i=0; i < nbRounds; ++i) {
5122 dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed);
5123 } }
5124}
5125
5126#endif
5127
5128#if (XXH_VECTOR == XXH_NEON)
5129
5130/* forward declarations for the scalar routines */
5131XXH_FORCE_INLINE void
5132XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input,
5133 void const* XXH_RESTRICT secret, size_t lane);
5134
5135XXH_FORCE_INLINE void
5136XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
5137 void const* XXH_RESTRICT secret, size_t lane);
5138
5163XXH_FORCE_INLINE void
5164XXH3_accumulate_512_neon( void* XXH_RESTRICT acc,
5165 const void* XXH_RESTRICT input,
5166 const void* XXH_RESTRICT secret)
5167{
5168 XXH_ASSERT((((size_t)acc) & 15) == 0);
5169 XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0);
5170 { /* GCC for darwin arm64 does not like aliasing here */
5171 xxh_aliasing_uint64x2_t* const xacc = (xxh_aliasing_uint64x2_t*) acc;
5172 /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */
5173 uint8_t const* xinput = (const uint8_t *) input;
5174 uint8_t const* xsecret = (const uint8_t *) secret;
5175
5176 size_t i;
5177#ifdef __wasm_simd128__
5178 /*
5179 * On WASM SIMD128, Clang emits direct address loads when XXH3_kSecret
5180 * is constant propagated, which results in the loads being converted to
5181 * this inside the loop:
5182 *
5183 * a = v128.load(XXH3_kSecret + 0 + $secret_offset, offset = 0)
5184 * b = v128.load(XXH3_kSecret + 16 + $secret_offset, offset = 0)
5185 * ...
5186 *
5187 * This requires a full 32-bit address immediate (and therefore a 6 byte
5188 * instruction) as well as an add for each offset.
5189 *
5190 * Putting an asm guard prevents it from folding (at the cost of losing
5191 * the alignment hint), and uses the free offset in `v128.load` instead
5192 * of adding secret_offset each time which overall reduces code size by
5193 * about a kilobyte and improves performance.
5194 */
5195 XXH_COMPILER_GUARD(xsecret);
5196#endif
5197 /* Scalar lanes use the normal scalarRound routine */
5198 for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
5199 XXH3_scalarRound(acc, input, secret, i);
5200 }
5201 i = 0;
5202 /* 4 NEON lanes at a time. */
5203 for (; i+1 < XXH3_NEON_LANES / 2; i+=2) {
5204 /* data_vec = xinput[i]; */
5205 uint64x2_t data_vec_1 = XXH_vld1q_u64(xinput + (i * 16));
5206 uint64x2_t data_vec_2 = XXH_vld1q_u64(xinput + ((i+1) * 16));
5207 /* key_vec = xsecret[i]; */
5208 uint64x2_t key_vec_1 = XXH_vld1q_u64(xsecret + (i * 16));
5209 uint64x2_t key_vec_2 = XXH_vld1q_u64(xsecret + ((i+1) * 16));
5210 /* data_swap = swap(data_vec) */
5211 uint64x2_t data_swap_1 = vextq_u64(data_vec_1, data_vec_1, 1);
5212 uint64x2_t data_swap_2 = vextq_u64(data_vec_2, data_vec_2, 1);
5213 /* data_key = data_vec ^ key_vec; */
5214 uint64x2_t data_key_1 = veorq_u64(data_vec_1, key_vec_1);
5215 uint64x2_t data_key_2 = veorq_u64(data_vec_2, key_vec_2);
5216
5217 /*
5218 * If we reinterpret the 64x2 vectors as 32x4 vectors, we can use a
5219 * de-interleave operation for 4 lanes in 1 step with `vuzpq_u32` to
5220 * get one vector with the low 32 bits of each lane, and one vector
5221 * with the high 32 bits of each lane.
5222 *
5223 * The intrinsic returns a double vector because the original ARMv7-a
5224 * instruction modified both arguments in place. AArch64 and SIMD128 emit
5225 * two instructions from this intrinsic.
5226 *
5227 * [ dk11L | dk11H | dk12L | dk12H ] -> [ dk11L | dk12L | dk21L | dk22L ]
5228 * [ dk21L | dk21H | dk22L | dk22H ] -> [ dk11H | dk12H | dk21H | dk22H ]
5229 */
5230 uint32x4x2_t unzipped = vuzpq_u32(
5231 vreinterpretq_u32_u64(data_key_1),
5232 vreinterpretq_u32_u64(data_key_2)
5233 );
5234 /* data_key_lo = data_key & 0xFFFFFFFF */
5235 uint32x4_t data_key_lo = unzipped.val[0];
5236 /* data_key_hi = data_key >> 32 */
5237 uint32x4_t data_key_hi = unzipped.val[1];
5238 /*
5239 * Then, we can split the vectors horizontally and multiply; as with most
5240 * widening intrinsics, there is a variant that works on the high half vectors
5241 * for free on AArch64. A similar instruction is available on SIMD128.
5242 *
5243 * sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi
5244 */
5245 uint64x2_t sum_1 = XXH_vmlal_low_u32(data_swap_1, data_key_lo, data_key_hi);
5246 uint64x2_t sum_2 = XXH_vmlal_high_u32(data_swap_2, data_key_lo, data_key_hi);
5247 /*
5248 * Clang reorders
5249 * a += b * c; // umlal swap.2d, dkl.2s, dkh.2s
5250 * c += a; // add acc.2d, acc.2d, swap.2d
5251 * to
5252 * c += a; // add acc.2d, acc.2d, swap.2d
5253 * c += b * c; // umlal acc.2d, dkl.2s, dkh.2s
5254 *
5255 * While this reordering would make sense in theory since the addition is
5256 * faster, it is worse in practice, likely because umlal is limited to
5257 * certain NEON pipelines. A compiler guard fixes this.
5258 */
5259 XXH_COMPILER_GUARD_CLANG_NEON(sum_1);
5260 XXH_COMPILER_GUARD_CLANG_NEON(sum_2);
5261 /* xacc[i] = acc_vec + sum; */
5262 xacc[i] = vaddq_u64(xacc[i], sum_1);
5263 xacc[i+1] = vaddq_u64(xacc[i+1], sum_2);
5264 }
5265 /* Operate on the remaining NEON lanes 2 at a time. */
5266 for (; i < XXH3_NEON_LANES / 2; i++) {
5267 /* data_vec = xinput[i]; */
5268 uint64x2_t data_vec = XXH_vld1q_u64(xinput + (i * 16));
5269 /* key_vec = xsecret[i]; */
5270 uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16));
5271 /* acc_vec_2 = swap(data_vec) */
5272 uint64x2_t data_swap = vextq_u64(data_vec, data_vec, 1);
5273 /* data_key = data_vec ^ key_vec; */
5274 uint64x2_t data_key = veorq_u64(data_vec, key_vec);
5275 /* For two lanes, just use VMOVN and VSHRN. */
5276 /* data_key_lo = data_key & 0xFFFFFFFF; */
5277 uint32x2_t data_key_lo = vmovn_u64(data_key);
5278 /* data_key_hi = data_key >> 32; */
5279 uint32x2_t data_key_hi = vshrn_n_u64(data_key, 32);
5280 /* sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi; */
5281 uint64x2_t sum = vmlal_u32(data_swap, data_key_lo, data_key_hi);
5282 /* Same Clang workaround as before */
5283 XXH_COMPILER_GUARD_CLANG_NEON(sum);
5284 /* xacc[i] = acc_vec + sum; */
5285 xacc[i] = vaddq_u64 (xacc[i], sum);
5286 }
5287 }
5288}
5289XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(neon)
5290
5291XXH_FORCE_INLINE void
5292XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
5293{
5294 XXH_ASSERT((((size_t)acc) & 15) == 0);
5295
5296 { xxh_aliasing_uint64x2_t* xacc = (xxh_aliasing_uint64x2_t*) acc;
5297 uint8_t const* xsecret = (uint8_t const*) secret;
5298
5299 size_t i;
5300 /* WASM uses operator overloads and doesn't need these. */
5301#ifndef __wasm_simd128__
5302 /* { prime32_1, prime32_1 } */
5303 uint32x2_t const kPrimeLo = vdup_n_u32(XXH_PRIME32_1);
5304 /* { 0, prime32_1, 0, prime32_1 } */
5305 uint32x4_t const kPrimeHi = vreinterpretq_u32_u64(vdupq_n_u64((xxh_u64)XXH_PRIME32_1 << 32));
5306#endif
5307
5308 /* AArch64 uses both scalar and neon at the same time */
5309 for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
5310 XXH3_scalarScrambleRound(acc, secret, i);
5311 }
5312 for (i=0; i < XXH3_NEON_LANES / 2; i++) {
5313 /* xacc[i] ^= (xacc[i] >> 47); */
5314 uint64x2_t acc_vec = xacc[i];
5315 uint64x2_t shifted = vshrq_n_u64(acc_vec, 47);
5316 uint64x2_t data_vec = veorq_u64(acc_vec, shifted);
5317
5318 /* xacc[i] ^= xsecret[i]; */
5319 uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16));
5320 uint64x2_t data_key = veorq_u64(data_vec, key_vec);
5321 /* xacc[i] *= XXH_PRIME32_1 */
5322#ifdef __wasm_simd128__
5323 /* SIMD128 has multiply by u64x2, use it instead of expanding and scalarizing */
5324 xacc[i] = data_key * XXH_PRIME32_1;
5325#else
5326 /*
5327 * Expanded version with portable NEON intrinsics
5328 *
5329 * lo(x) * lo(y) + (hi(x) * lo(y) << 32)
5330 *
5331 * prod_hi = hi(data_key) * lo(prime) << 32
5332 *
5333 * Since we only need 32 bits of this multiply, a trick can be used: reinterpret the vector
5334 * as a uint32x4_t and multiply by { 0, prime, 0, prime } to cancel out the unwanted bits
5335 * and avoid the shift.
5336 */
5337 uint32x4_t prod_hi = vmulq_u32 (vreinterpretq_u32_u64(data_key), kPrimeHi);
5338 /* Extract low bits for vmlal_u32 */
5339 uint32x2_t data_key_lo = vmovn_u64(data_key);
5340 /* xacc[i] = prod_hi + lo(data_key) * XXH_PRIME32_1; */
5341 xacc[i] = vmlal_u32(vreinterpretq_u64_u32(prod_hi), data_key_lo, kPrimeLo);
5342#endif
5343 }
5344 }
5345}
5346#endif
5347
5348#if (XXH_VECTOR == XXH_VSX)
5349
5350XXH_FORCE_INLINE void
5351XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc,
5352 const void* XXH_RESTRICT input,
5353 const void* XXH_RESTRICT secret)
5354{
5355 /* presumed aligned */
5356 xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc;
5357 xxh_u8 const* const xinput = (xxh_u8 const*) input; /* no alignment restriction */
5358 xxh_u8 const* const xsecret = (xxh_u8 const*) secret; /* no alignment restriction */
5359 xxh_u64x2 const v32 = { 32, 32 };
5360 size_t i;
5361 for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
5362 /* data_vec = xinput[i]; */
5363 xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + 16*i);
5364 /* key_vec = xsecret[i]; */
5365 xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i);
5366 xxh_u64x2 const data_key = data_vec ^ key_vec;
5367 /* shuffled = (data_key << 32) | (data_key >> 32); */
5368 xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
5369 /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */
5370 xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
5371 /* acc_vec = xacc[i]; */
5372 xxh_u64x2 acc_vec = xacc[i];
5373 acc_vec += product;
5374
5375 /* swap high and low halves */
5376#ifdef __s390x__
5377 acc_vec += vec_permi(data_vec, data_vec, 2);
5378#else
5379 acc_vec += vec_xxpermdi(data_vec, data_vec, 2);
5380#endif
5381 xacc[i] = acc_vec;
5382 }
5383}
5384XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(vsx)
5385
5386XXH_FORCE_INLINE void
5387XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
5388{
5389 XXH_ASSERT((((size_t)acc) & 15) == 0);
5390
5391 { xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc;
5392 const xxh_u8* const xsecret = (const xxh_u8*) secret;
5393 /* constants */
5394 xxh_u64x2 const v32 = { 32, 32 };
5395 xxh_u64x2 const v47 = { 47, 47 };
5396 xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
5397 size_t i;
5398 for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
5399 /* xacc[i] ^= (xacc[i] >> 47); */
5400 xxh_u64x2 const acc_vec = xacc[i];
5401 xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);
5402
5403 /* xacc[i] ^= xsecret[i]; */
5404 xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i);
5405 xxh_u64x2 const data_key = data_vec ^ key_vec;
5406
5407 /* xacc[i] *= XXH_PRIME32_1 */
5408 /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */
5409 xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime);
5410 /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */
5411 xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime);
5412 xacc[i] = prod_odd + (prod_even << v32);
5413 } }
5414}
5415
5416#endif
5417
5418#if (XXH_VECTOR == XXH_SVE)
5419
5420XXH_FORCE_INLINE void
5421XXH3_accumulate_512_sve( void* XXH_RESTRICT acc,
5422 const void* XXH_RESTRICT input,
5423 const void* XXH_RESTRICT secret)
5424{
5425 uint64_t *xacc = (uint64_t *)acc;
5426 const uint64_t *xinput = (const uint64_t *)(const void *)input;
5427 const uint64_t *xsecret = (const uint64_t *)(const void *)secret;
5428 svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1);
5429 uint64_t element_count = svcntd();
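    /* svcntd() reports the number of 64-bit lanes per SVE vector:
     * 2 for 128-bit SVE, 4 for 256-bit, and 8 or more for 512-bit and wider
     * (the VL8 predicate below caps the update at 8 lanes, i.e. the full accumulator). */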
5430 if (element_count >= 8) {
5431 svbool_t mask = svptrue_pat_b64(SV_VL8);
5432 svuint64_t vacc = svld1_u64(mask, xacc);
5433 ACCRND(vacc, 0);
5434 svst1_u64(mask, xacc, vacc);
5435 } else if (element_count == 2) { /* sve128 */
5436 svbool_t mask = svptrue_pat_b64(SV_VL2);
5437 svuint64_t acc0 = svld1_u64(mask, xacc + 0);
5438 svuint64_t acc1 = svld1_u64(mask, xacc + 2);
5439 svuint64_t acc2 = svld1_u64(mask, xacc + 4);
5440 svuint64_t acc3 = svld1_u64(mask, xacc + 6);
5441 ACCRND(acc0, 0);
5442 ACCRND(acc1, 2);
5443 ACCRND(acc2, 4);
5444 ACCRND(acc3, 6);
5445 svst1_u64(mask, xacc + 0, acc0);
5446 svst1_u64(mask, xacc + 2, acc1);
5447 svst1_u64(mask, xacc + 4, acc2);
5448 svst1_u64(mask, xacc + 6, acc3);
5449 } else {
5450 svbool_t mask = svptrue_pat_b64(SV_VL4);
5451 svuint64_t acc0 = svld1_u64(mask, xacc + 0);
5452 svuint64_t acc1 = svld1_u64(mask, xacc + 4);
5453 ACCRND(acc0, 0);
5454 ACCRND(acc1, 4);
5455 svst1_u64(mask, xacc + 0, acc0);
5456 svst1_u64(mask, xacc + 4, acc1);
5457 }
5458}
5459
5460XXH_FORCE_INLINE void
5461XXH3_accumulate_sve(xxh_u64* XXH_RESTRICT acc,
5462 const xxh_u8* XXH_RESTRICT input,
5463 const xxh_u8* XXH_RESTRICT secret,
5464 size_t nbStripes)
5465{
5466 if (nbStripes != 0) {
5467 uint64_t *xacc = (uint64_t *)acc;
5468 const uint64_t *xinput = (const uint64_t *)(const void *)input;
5469 const uint64_t *xsecret = (const uint64_t *)(const void *)secret;
5470 svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1);
5471 uint64_t element_count = svcntd();
5472 if (element_count >= 8) {
5473 svbool_t mask = svptrue_pat_b64(SV_VL8);
5474 svuint64_t vacc = svld1_u64(mask, xacc + 0);
5475 do {
5476 /* svprfd(svbool_t, void *, enum svfprop); */
5477 svprfd(mask, xinput + 128, SV_PLDL1STRM);
5478 ACCRND(vacc, 0);
5479 xinput += 8;
5480 xsecret += 1;
5481 nbStripes--;
5482 } while (nbStripes != 0);
5483
5484 svst1_u64(mask, xacc + 0, vacc);
5485 } else if (element_count == 2) { /* sve128 */
5486 svbool_t mask = svptrue_pat_b64(SV_VL2);
5487 svuint64_t acc0 = svld1_u64(mask, xacc + 0);
5488 svuint64_t acc1 = svld1_u64(mask, xacc + 2);
5489 svuint64_t acc2 = svld1_u64(mask, xacc + 4);
5490 svuint64_t acc3 = svld1_u64(mask, xacc + 6);
5491 do {
5492 svprfd(mask, xinput + 128, SV_PLDL1STRM);
5493 ACCRND(acc0, 0);
5494 ACCRND(acc1, 2);
5495 ACCRND(acc2, 4);
5496 ACCRND(acc3, 6);
5497 xinput += 8;
5498 xsecret += 1;
5499 nbStripes--;
5500 } while (nbStripes != 0);
5501
5502 svst1_u64(mask, xacc + 0, acc0);
5503 svst1_u64(mask, xacc + 2, acc1);
5504 svst1_u64(mask, xacc + 4, acc2);
5505 svst1_u64(mask, xacc + 6, acc3);
5506 } else {
5507 svbool_t mask = svptrue_pat_b64(SV_VL4);
5508 svuint64_t acc0 = svld1_u64(mask, xacc + 0);
5509 svuint64_t acc1 = svld1_u64(mask, xacc + 4);
5510 do {
5511 svprfd(mask, xinput + 128, SV_PLDL1STRM);
5512 ACCRND(acc0, 0);
5513 ACCRND(acc1, 4);
5514 xinput += 8;
5515 xsecret += 1;
5516 nbStripes--;
5517 } while (nbStripes != 0);
5518
5519 svst1_u64(mask, xacc + 0, acc0);
5520 svst1_u64(mask, xacc + 4, acc1);
5521 }
5522 }
5523}
5524
5525#endif
5526
5527/* scalar variants - universal */
5528
5529#if defined(__aarch64__) && (defined(__GNUC__) || defined(__clang__))
5530/*
5531 * In XXH3_scalarRound(), GCC and Clang have a similar codegen issue, where they
5532 * emit an excess mask and a full 64-bit multiply-add (MADD X-form).
5533 *
5534 * While this might not seem like much, as AArch64 is a 64-bit architecture, only
5535 * big Cortex designs have a full 64-bit multiplier.
5536 *
5537 * On the little cores, the smaller 32-bit multiplier is used, and full 64-bit
5538 * multiplies expand to 2-3 multiplies in microcode. This has a major penalty
5539 * of up to 4 latency cycles and 2 stall cycles in the multiply pipeline.
5540 *
5541 * Thankfully, AArch64 still provides the 32-bit long multiply-add (UMADDL) which does
5542 * not have this penalty and does the mask automatically.
5543 */
5544XXH_FORCE_INLINE xxh_u64
5545XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc)
5546{
5547 xxh_u64 ret;
5548 /* note: %x = 64-bit register, %w = 32-bit register */
5549 __asm__("umaddl %x0, %w1, %w2, %x3" : "=r" (ret) : "r" (lhs), "r" (rhs), "r" (acc));
5550 return ret;
5551}
5552#else
5553XXH_FORCE_INLINE xxh_u64
5554XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc)
5555{
5556 return XXH_mult32to64((xxh_u32)lhs, (xxh_u32)rhs) + acc;
5557}
5558#endif
5559
5567XXH_FORCE_INLINE void
5568XXH3_scalarRound(void* XXH_RESTRICT acc,
5569 void const* XXH_RESTRICT input,
5570 void const* XXH_RESTRICT secret,
5571 size_t lane)
5572{
5573 xxh_u64* xacc = (xxh_u64*) acc;
5574 xxh_u8 const* xinput = (xxh_u8 const*) input;
5575 xxh_u8 const* xsecret = (xxh_u8 const*) secret;
5576 XXH_ASSERT(lane < XXH_ACC_NB);
5577 XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
5578 {
5579 xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8);
5580 xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8);
5581 xacc[lane ^ 1] += data_val; /* swap adjacent lanes */
5582 xacc[lane] = XXH_mult32to64_add64(data_key /* & 0xFFFFFFFF */, data_key >> 32, xacc[lane]);
5583 }
5584}
5585
5590XXH_FORCE_INLINE void
5591XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc,
5592 const void* XXH_RESTRICT input,
5593 const void* XXH_RESTRICT secret)
5594{
5595 size_t i;
5596 /* ARM GCC refuses to unroll this loop, resulting in a 24% slowdown on ARMv6. */
5597#if defined(__GNUC__) && !defined(__clang__) \
5598 && (defined(__arm__) || defined(__thumb2__)) \
5599 && defined(__ARM_FEATURE_UNALIGNED) /* no unaligned access just wastes bytes */ \
5600 && XXH_SIZE_OPT <= 0
5601# pragma GCC unroll 8
5602#endif
5603 for (i=0; i < XXH_ACC_NB; i++) {
5604 XXH3_scalarRound(acc, input, secret, i);
5605 }
5606}
5607XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(scalar)
5608
5609
5616XXH_FORCE_INLINE void
5617XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
5618 void const* XXH_RESTRICT secret,
5619 size_t lane)
5620{
5621 xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */
5622 const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */
5623 XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
5624 XXH_ASSERT(lane < XXH_ACC_NB);
5625 {
5626 xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8);
5627 xxh_u64 acc64 = xacc[lane];
5628 acc64 = XXH_xorshift64(acc64, 47);
5629 acc64 ^= key64;
5630 acc64 *= XXH_PRIME32_1;
5631 xacc[lane] = acc64;
5632 }
5633}
5634
5639XXH_FORCE_INLINE void
5640XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
5641{
5642 size_t i;
5643 for (i=0; i < XXH_ACC_NB; i++) {
5644 XXH3_scalarScrambleRound(acc, secret, i);
5645 }
5646}
5647
5648XXH_FORCE_INLINE void
5649XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
5650{
5651 /*
5652 * We need a separate pointer for the hack below,
5653 * which requires a non-const pointer.
5654 * Any decent compiler will optimize this out otherwise.
5655 */
5656 const xxh_u8* kSecretPtr = XXH3_kSecret;
5657 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
5658
5659#if defined(__GNUC__) && defined(__aarch64__)
5660 /*
5661 * UGLY HACK:
5662 * GCC and Clang generate a bunch of MOV/MOVK pairs for aarch64, and they are
5663 * placed sequentially, in order, at the top of the unrolled loop.
5664 *
5665 * While MOVK is great for generating constants (2 cycles for a 64-bit
5666 * constant compared to 4 cycles for LDR), it fights for bandwidth with
5667 * the arithmetic instructions.
5668 *
5669 * I L S
5670 * MOVK
5671 * MOVK
5672 * MOVK
5673 * MOVK
5674 * ADD
5675 * SUB STR
5676 * STR
5677 * By forcing loads from memory (as the asm line causes the compiler to assume
5678 * that kSecretPtr has been changed), the pipelines are used more
5679 * efficiently:
5680 * I L S
5681 * LDR
5682 * ADD LDR
5683 * SUB STR
5684 * STR
5685 *
5686 * See XXH3_NEON_LANES for details on the pipeline.
5687 *
5688 * XXH3_64bits_withSeed, len == 256, Snapdragon 835
5689 * without hack: 2654.4 MB/s
5690 * with hack: 3202.9 MB/s
5691 */
5692 XXH_COMPILER_GUARD(kSecretPtr);
5693#endif
5694 { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
5695 int i;
5696 for (i=0; i < nbRounds; i++) {
5697 /*
5698 * The asm hack causes the compiler to assume that kSecretPtr aliases with
5699 * customSecret, and on aarch64, this prevented LDP from merging two
5700 * loads together for free. Putting the loads together before the stores
5701 * properly generates LDP.
5702 */
5703 xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i) + seed64;
5704 xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64;
5705 XXH_writeLE64((xxh_u8*)customSecret + 16*i, lo);
5706 XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi);
5707 } }
5708}
5709
5710
5711typedef void (*XXH3_f_accumulate)(xxh_u64* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, size_t);
5712typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*);
5713typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64);
5714
5715
5716#if (XXH_VECTOR == XXH_AVX512)
5717
5718#define XXH3_accumulate_512 XXH3_accumulate_512_avx512
5719#define XXH3_accumulate XXH3_accumulate_avx512
5720#define XXH3_scrambleAcc XXH3_scrambleAcc_avx512
5721#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512
5722
5723#elif (XXH_VECTOR == XXH_AVX2)
5724
5725#define XXH3_accumulate_512 XXH3_accumulate_512_avx2
5726#define XXH3_accumulate XXH3_accumulate_avx2
5727#define XXH3_scrambleAcc XXH3_scrambleAcc_avx2
5728#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2
5729
5730#elif (XXH_VECTOR == XXH_SSE2)
5731
5732#define XXH3_accumulate_512 XXH3_accumulate_512_sse2
5733#define XXH3_accumulate XXH3_accumulate_sse2
5734#define XXH3_scrambleAcc XXH3_scrambleAcc_sse2
5735#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2
5736
5737#elif (XXH_VECTOR == XXH_NEON)
5738
5739#define XXH3_accumulate_512 XXH3_accumulate_512_neon
5740#define XXH3_accumulate XXH3_accumulate_neon
5741#define XXH3_scrambleAcc XXH3_scrambleAcc_neon
5742#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
5743
5744#elif (XXH_VECTOR == XXH_VSX)
5745
5746#define XXH3_accumulate_512 XXH3_accumulate_512_vsx
5747#define XXH3_accumulate XXH3_accumulate_vsx
5748#define XXH3_scrambleAcc XXH3_scrambleAcc_vsx
5749#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
5750
5751#elif (XXH_VECTOR == XXH_SVE)
5752#define XXH3_accumulate_512 XXH3_accumulate_512_sve
5753#define XXH3_accumulate XXH3_accumulate_sve
5754#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar
5755#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
5756
5757#else /* scalar */
5758
5759#define XXH3_accumulate_512 XXH3_accumulate_512_scalar
5760#define XXH3_accumulate XXH3_accumulate_scalar
5761#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar
5762#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
5763
5764#endif
5765
5766#if XXH_SIZE_OPT >= 1 /* don't do SIMD for initialization */
5767# undef XXH3_initCustomSecret
5768# define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
5769#endif
5770
5771XXH_FORCE_INLINE void
5772XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
5773 const xxh_u8* XXH_RESTRICT input, size_t len,
5774 const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
5775 XXH3_f_accumulate f_acc,
5776 XXH3_f_scrambleAcc f_scramble)
5777{
5778 size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
5779 size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
5780 size_t const nb_blocks = (len - 1) / block_len;
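    /* Illustrative sizing, assuming the usual defaults (192-byte secret,
     * 64-byte stripes, XXH_SECRET_CONSUME_RATE == 8):
     * nbStripesPerBlock = (192 - 64) / 8 = 16, so block_len = 16 * 64 = 1024 bytes. */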
5781
5782 size_t n;
5783
5784 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
5785
5786 for (n = 0; n < nb_blocks; n++) {
5787 f_acc(acc, input + n*block_len, secret, nbStripesPerBlock);
5788 f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
5789 }
5790
5791 /* last partial block */
5792 XXH_ASSERT(len > XXH_STRIPE_LEN);
5793 { size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
5794 XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
5795 f_acc(acc, input + nb_blocks*block_len, secret, nbStripes);
5796
5797 /* last stripe */
5798 { const xxh_u8* const p = input + len - XXH_STRIPE_LEN;
5799#define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */
5800 XXH3_accumulate_512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
5801 } }
5802}
5803
5804XXH_FORCE_INLINE xxh_u64
5805XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
5806{
5807 return XXH3_mul128_fold64(
5808 acc[0] ^ XXH_readLE64(secret),
5809 acc[1] ^ XXH_readLE64(secret+8) );
5810}
5811
5812static XXH64_hash_t
5813XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
5814{
5815 xxh_u64 result64 = start;
5816 size_t i = 0;
5817
5818 for (i = 0; i < 4; i++) {
5819 result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
5820#if defined(__clang__) /* Clang */ \
5821 && (defined(__arm__) || defined(__thumb__)) /* ARMv7 */ \
5822 && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
5823 && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
5824 /*
5825 * UGLY HACK:
5826 * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
5827 * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
5828 * XXH3_64bits, len == 256, Snapdragon 835:
5829 * without hack: 2063.7 MB/s
5830 * with hack: 2560.7 MB/s
5831 */
5832 XXH_COMPILER_GUARD(result64);
5833#endif
5834 }
5835
5836 return XXH3_avalanche(result64);
5837}
5838
5839#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
5840 XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
5841
5842XXH_FORCE_INLINE XXH64_hash_t
5843XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
5844 const void* XXH_RESTRICT secret, size_t secretSize,
5845 XXH3_f_accumulate f_acc,
5846 XXH3_f_scrambleAcc f_scramble)
5847{
5848 XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
5849
5850 XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc, f_scramble);
5851
5852 /* converge into final hash */
5853 XXH_STATIC_ASSERT(sizeof(acc) == 64);
5854 /* do not align on 8, so that the secret is different from the accumulator */
5855#define XXH_SECRET_MERGEACCS_START 11
5856 XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
5857 return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
5858}
5859
5860/*
5861 * It's important for performance to transmit the secret's size (when it's static)
5862 * so that the compiler can properly optimize the vectorized loop.
5863 * This makes a big performance difference for "medium" keys (<1 KB) when using the AVX instruction set.
5864 * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE
5865 * breaks -Og, this is XXH_NO_INLINE.
5866 */
5867XXH3_WITH_SECRET_INLINE XXH64_hash_t
5868XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
5869 XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
5870{
5871 (void)seed64;
5872 return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate, XXH3_scrambleAcc);
5873}
5874
5875/*
5876 * It's preferable for performance that XXH3_hashLong is not inlined,
5877 * as it results in a smaller function for small data, easier on the instruction cache.
5878 * Note that inside this no_inline function, we do inline the internal loop,
5879 * and provide a statically defined secret size to allow optimization of vector loop.
5880 */
5881XXH_NO_INLINE XXH_PUREF XXH64_hash_t
5882XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
5883 XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
5884{
5885 (void)seed64; (void)secret; (void)secretLen;
5886 return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate, XXH3_scrambleAcc);
5887}
5888
5889/*
5890 * XXH3_hashLong_64b_withSeed():
5891 * Generate a custom key based on alteration of default XXH3_kSecret with the seed,
5892 * and then use this key for long mode hashing.
5893 *
5894 * This operation is decently fast but nonetheless costs a little bit of time.
5895 * Try to avoid it whenever possible (typically when seed==0).
5896 *
5897 * It's important for performance that XXH3_hashLong is not inlined. Not sure
5898 * why (uop cache maybe?), but the difference is large and easily measurable.
5899 */
5900XXH_FORCE_INLINE XXH64_hash_t
5901XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
5902 XXH64_hash_t seed,
5903 XXH3_f_accumulate f_acc,
5904 XXH3_f_scrambleAcc f_scramble,
5905 XXH3_f_initCustomSecret f_initSec)
5906{
5907#if XXH_SIZE_OPT <= 0
5908 if (seed == 0)
5909 return XXH3_hashLong_64b_internal(input, len,
5910 XXH3_kSecret, sizeof(XXH3_kSecret),
5911 f_acc, f_scramble);
5912#endif
5913 { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
5914 f_initSec(secret, seed);
5915 return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
5916 f_acc, f_scramble);
5917 }
5918}
5919
5920/*
5921 * It's important for performance that XXH3_hashLong is not inlined.
5922 */
5923XXH_NO_INLINE XXH64_hash_t
5924XXH3_hashLong_64b_withSeed(const void* XXH_RESTRICT input, size_t len,
5925 XXH64_hash_t seed, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
5926{
5927 (void)secret; (void)secretLen;
5928 return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
5929 XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret);
5930}
5931
5932
5933typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
5934 XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);
5935
5936XXH_FORCE_INLINE XXH64_hash_t
5937XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
5938 XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
5939 XXH3_hashLong64_f f_hashLong)
5940{
5941 XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
5942 /*
5943 * If an action is to be taken when the `secretLen` condition is not respected,
5944 * it should be done here.
5945 * For now, it's a contract pre-condition.
5946 * Adding a check and a branch here would cost performance at every hash.
5947 * Also, note that the function signature doesn't offer room to return an error.
5948 */
5949 if (len <= 16)
5950 return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
5951 if (len <= 128)
5952 return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
5953 if (len <= XXH3_MIDSIZE_MAX)
5954 return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
5955 return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen);
5956}
5957
5958
5959/* === Public entry point === */
5960
5962XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length)
5963{
5964 return XXH3_64bits_internal(input, length, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default);
5965}
5966
5969XXH3_64bits_withSecret(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize)
5970{
5971 return XXH3_64bits_internal(input, length, 0, secret, secretSize, XXH3_hashLong_64b_withSecret);
5972}
5973
5976XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed)
5977{
5978 return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
5979}
5980
5982XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
5983{
5984 if (length <= XXH3_MIDSIZE_MAX)
5985 return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
5986 return XXH3_hashLong_64b_withSecret(input, length, seed, (const xxh_u8*)secret, secretSize);
5987}
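
/*
 * Minimal one-shot usage sketch (illustrative only; assumes the header is used
 * standalone, i.e. symbol names are shown without any XXH_NAMESPACE prefix):
 *
 *     const char msg[] = "hello world";
 *     XXH64_hash_t h0 = XXH3_64bits(msg, sizeof(msg) - 1);
 *     XXH64_hash_t h1 = XXH3_64bits_withSeed(msg, sizeof(msg) - 1, (XXH64_hash_t)42);
 *
 * The same input with the same seed always produces the same hash value.
 */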
5988
5989
5990/* === XXH3 streaming === */
5991#ifndef XXH_NO_STREAM
5992/*
5993 * Allocates a pointer that is always aligned to `align`.
5994 *
5995 * This must be freed with `XXH_alignedFree()`.
5996 *
5997 * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte
5998 * alignment on 32-bit. This isn't enough for the 32 byte aligned loads in AVX2,
5999 * or, on 32-bit, for the 16 byte aligned loads in SSE2 and NEON.
6000 *
6001 * This underalignment previously caused a rather obvious crash which went
6002 * completely unnoticed due to XXH3_createState() not actually being tested.
6003 * Credit to RedSpah for noticing this bug.
6004 *
6005 * The alignment is done manually: functions like posix_memalign or _mm_malloc
6006 * are avoided. To maintain portability, we would have to write a fallback
6007 * like this anyway, and besides, testing for the existence of library
6008 * functions without relying on external build tools is impossible.
6009 *
6010 * The method is simple: Overallocate, manually align, and store the offset
6011 * to the original behind the returned pointer.
6012 *
6013 * Align must be a power of 2 and 8 <= align <= 128.
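 *
 * Worked example (illustrative): with align = 64 and a base pointer whose low
 * bits are 0x08, offset = 64 - 8 = 56, so the returned pointer is base + 56
 * (64-byte aligned) and ptr[-1] stores 56 for XXH_alignedFree() to recover base.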
6014 */
6015static XXH_MALLOCF void* XXH_alignedMalloc(size_t s, size_t align)
6016{
6017 XXH_ASSERT(align <= 128 && align >= 8); /* range check */
6018 XXH_ASSERT((align & (align-1)) == 0); /* power of 2 */
6019 XXH_ASSERT(s != 0 && s < (s + align)); /* empty/overflow */
6020 { /* Overallocate to make room for manual realignment and an offset byte */
6021 xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
6022 if (base != NULL) {
6023 /*
6024 * Get the offset needed to align this pointer.
6025 *
6026 * Even if the returned pointer is aligned, there will always be
6027 * at least one byte to store the offset to the original pointer.
6028 */
6029 size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
6030 /* Add the offset for the now-aligned pointer */
6031 xxh_u8* ptr = base + offset;
6032
6033 XXH_ASSERT((size_t)ptr % align == 0);
6034
6035 /* Store the offset immediately before the returned pointer. */
6036 ptr[-1] = (xxh_u8)offset;
6037 return ptr;
6038 }
6039 return NULL;
6040 }
6041}
6042/*
6043 * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
6044 * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout.
6045 */
6046static void XXH_alignedFree(void* p)
6047{
6048 if (p != NULL) {
6049 xxh_u8* ptr = (xxh_u8*)p;
6050 /* Get the offset byte we added in XXH_alignedMalloc(). */
6051 xxh_u8 offset = ptr[-1];
6052 /* Free the original malloc'd pointer */
6053 xxh_u8* base = ptr - offset;
6054 XXH_free(base);
6055 }
6056}
6066XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
6067{
6068 XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
6069 if (state==NULL) return NULL;
6070 XXH3_INITSTATE(state);
6071 return state;
6072}
6073
6084XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
6085{
6086 XXH_alignedFree(statePtr);
6087 return XXH_OK;
6088}
6089
6091XXH_PUBLIC_API void
6092XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state)
6093{
6094 XXH_memcpy(dst_state, src_state, sizeof(*dst_state));
6095}
6096
6097static void
6098XXH3_reset_internal(XXH3_state_t* statePtr,
6099 XXH64_hash_t seed,
6100 const void* secret, size_t secretSize)
6101{
6102 size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
6103 size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
6104 XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
6105 XXH_ASSERT(statePtr != NULL);
6106 /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
6107 memset((char*)statePtr + initStart, 0, initLength);
6108 statePtr->acc[0] = XXH_PRIME32_3;
6109 statePtr->acc[1] = XXH_PRIME64_1;
6110 statePtr->acc[2] = XXH_PRIME64_2;
6111 statePtr->acc[3] = XXH_PRIME64_3;
6112 statePtr->acc[4] = XXH_PRIME64_4;
6113 statePtr->acc[5] = XXH_PRIME32_2;
6114 statePtr->acc[6] = XXH_PRIME64_5;
6115 statePtr->acc[7] = XXH_PRIME32_1;
6116 statePtr->seed = seed;
6117 statePtr->useSeed = (seed != 0);
6118 statePtr->extSecret = (const unsigned char*)secret;
6119 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
6120 statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
6121 statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
6122}
6123
6126XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
6127{
6128 if (statePtr == NULL) return XXH_ERROR;
6129 XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
6130 return XXH_OK;
6131}
6132
6135XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
6136{
6137 if (statePtr == NULL) return XXH_ERROR;
6138 XXH3_reset_internal(statePtr, 0, secret, secretSize);
6139 if (secret == NULL) return XXH_ERROR;
6140 if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
6141 return XXH_OK;
6142}
6143
6146XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
6147{
6148 if (statePtr == NULL) return XXH_ERROR;
6149 if (seed==0) return XXH3_64bits_reset(statePtr);
6150 if ((seed != statePtr->seed) || (statePtr->extSecret != NULL))
6151 XXH3_initCustomSecret(statePtr->customSecret, seed);
6152 XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
6153 return XXH_OK;
6154}
6155
6158XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed64)
6159{
6160 if (statePtr == NULL) return XXH_ERROR;
6161 if (secret == NULL) return XXH_ERROR;
6162 if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
6163 XXH3_reset_internal(statePtr, seed64, secret, secretSize);
6164 statePtr->useSeed = 1; /* always, even if seed64==0 */
6165 return XXH_OK;
6166}
6167
6185XXH_FORCE_INLINE const xxh_u8 *
6186XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
6187 size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
6188 const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
6189 const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
6190 XXH3_f_accumulate f_acc,
6191 XXH3_f_scrambleAcc f_scramble)
6192{
6193 const xxh_u8* initialSecret = secret + *nbStripesSoFarPtr * XXH_SECRET_CONSUME_RATE;
6194 /* Process full blocks */
6195 if (nbStripes >= (nbStripesPerBlock - *nbStripesSoFarPtr)) {
6196 /* Process the initial partial block... */
6197 size_t nbStripesThisIter = nbStripesPerBlock - *nbStripesSoFarPtr;
6198
6199 do {
6200 /* Accumulate and scramble */
6201 f_acc(acc, input, initialSecret, nbStripesThisIter);
6202 f_scramble(acc, secret + secretLimit);
6203 input += nbStripesThisIter * XXH_STRIPE_LEN;
6204 nbStripes -= nbStripesThisIter;
6205 /* Then continue the loop with the full block size */
6206 nbStripesThisIter = nbStripesPerBlock;
6207 initialSecret = secret;
6208 } while (nbStripes >= nbStripesPerBlock);
6209 *nbStripesSoFarPtr = 0;
6210 }
6211 /* Process a partial block */
6212 if (nbStripes > 0) {
6213 f_acc(acc, input, initialSecret, nbStripes);
6214 input += nbStripes * XXH_STRIPE_LEN;
6215 *nbStripesSoFarPtr += nbStripes;
6216 }
6217 /* Return end pointer */
6218 return input;
6219}
6220
6221#ifndef XXH3_STREAM_USE_STACK
6222# if XXH_SIZE_OPT <= 0 && !defined(__clang__) /* clang doesn't need additional stack space */
6223# define XXH3_STREAM_USE_STACK 1
6224# endif
6225#endif
6226/*
6227 * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
6228 */
6229XXH_FORCE_INLINE XXH_errorcode
6230XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
6231 const xxh_u8* XXH_RESTRICT input, size_t len,
6232 XXH3_f_accumulate f_acc,
6233 XXH3_f_scrambleAcc f_scramble)
6234{
6235 if (input==NULL) {
6236 XXH_ASSERT(len == 0);
6237 return XXH_OK;
6238 }
6239
6240 XXH_ASSERT(state != NULL);
6241 { const xxh_u8* const bEnd = input + len;
6242 const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
6243#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
6244 /* For some reason, gcc and MSVC seem to suffer greatly
6245 * when operating on the accumulators directly in state.
6246 * Operating in stack space seems to enable proper optimization.
6247 * clang, on the other hand, doesn't seem to need this trick */
6248 XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8];
6249 XXH_memcpy(acc, state->acc, sizeof(acc));
6250#else
6251 xxh_u64* XXH_RESTRICT const acc = state->acc;
6252#endif
6253 state->totalLen += len;
6254 XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);
6255
6256 /* small input : just fill in tmp buffer */
6257 if (len <= XXH3_INTERNALBUFFER_SIZE - state->bufferedSize) {
6258 XXH_memcpy(state->buffer + state->bufferedSize, input, len);
6259 state->bufferedSize += (XXH32_hash_t)len;
6260 return XXH_OK;
6261 }
6262
6263 /* total input is now > XXH3_INTERNALBUFFER_SIZE */
6264 #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
6265 XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0); /* clean multiple */
6266
6267 /*
6268 * Internal buffer is partially filled (always, except at beginning)
6269 * Complete it, then consume it.
6270 */
6271 if (state->bufferedSize) {
6272 size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
6273 XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
6274 input += loadSize;
6275 XXH3_consumeStripes(acc,
6276 &state->nbStripesSoFar, state->nbStripesPerBlock,
6277 state->buffer, XXH3_INTERNALBUFFER_STRIPES,
6278 secret, state->secretLimit,
6279 f_acc, f_scramble);
6280 state->bufferedSize = 0;
6281 }
6282 XXH_ASSERT(input < bEnd);
6283 if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) {
6284 size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN;
6285 input = XXH3_consumeStripes(acc,
6286 &state->nbStripesSoFar, state->nbStripesPerBlock,
6287 input, nbStripes,
6288 secret, state->secretLimit,
6289 f_acc, f_scramble);
6290 XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
6291
6292 }
6293 /* Some remaining input (always) : buffer it */
6294 XXH_ASSERT(input < bEnd);
6295 XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE);
6296 XXH_ASSERT(state->bufferedSize == 0);
6297 XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
6298 state->bufferedSize = (XXH32_hash_t)(bEnd-input);
6299#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
6300 /* save stack accumulators into state */
6301 XXH_memcpy(state->acc, acc, sizeof(acc));
6302#endif
6303 }
6304
6305 return XXH_OK;
6306}
6307
6310XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len)
6311{
6312 return XXH3_update(state, (const xxh_u8*)input, len,
6313 XXH3_accumulate, XXH3_scrambleAcc);
6314}
6315
6316
6317XXH_FORCE_INLINE void
6318XXH3_digest_long (XXH64_hash_t* acc,
6319 const XXH3_state_t* state,
6320 const unsigned char* secret)
6321{
6322 xxh_u8 lastStripe[XXH_STRIPE_LEN];
6323 const xxh_u8* lastStripePtr;
6324
6325 /*
6326 * Digest on a local copy. This way, the state remains unaltered, and it can
6327 * continue ingesting more input afterwards.
6328 */
6329 XXH_memcpy(acc, state->acc, sizeof(state->acc));
6330 if (state->bufferedSize >= XXH_STRIPE_LEN) {
6331 /* Consume remaining stripes then point to remaining data in buffer */
6332 size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
6333 size_t nbStripesSoFar = state->nbStripesSoFar;
6334 XXH3_consumeStripes(acc,
6335 &nbStripesSoFar, state->nbStripesPerBlock,
6336 state->buffer, nbStripes,
6337 secret, state->secretLimit,
6338 XXH3_accumulate, XXH3_scrambleAcc);
6339 lastStripePtr = state->buffer + state->bufferedSize - XXH_STRIPE_LEN;
6340 } else { /* bufferedSize < XXH_STRIPE_LEN */
6341 /* Copy to temp buffer */
6342 size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
6343 XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */
6344 XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
6345 XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
6346 lastStripePtr = lastStripe;
6347 }
6348 /* Last stripe */
6349 XXH3_accumulate_512(acc,
6350 lastStripePtr,
6351 secret + state->secretLimit - XXH_SECRET_LASTACC_START);
6352}
6353
6355XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* state)
6356{
6357 const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
6358 if (state->totalLen > XXH3_MIDSIZE_MAX) {
6359 XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
6360 XXH3_digest_long(acc, state, secret);
6361 return XXH3_mergeAccs(acc,
6362 secret + XXH_SECRET_MERGEACCS_START,
6363 (xxh_u64)state->totalLen * XXH_PRIME64_1);
6364 }
6365 /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
6366 if (state->useSeed)
6367 return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
6368 return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
6369 secret, state->secretLimit + XXH_STRIPE_LEN);
6370}
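
/*
 * Minimal streaming usage sketch (illustrative only; error checks elided and
 * symbol names shown without any XXH_NAMESPACE prefix):
 *
 *     XXH3_state_t* st = XXH3_createState();
 *     XXH3_64bits_reset(st);              // or _reset_withSeed() / _reset_withSecret()
 *     XXH3_64bits_update(st, chunk1, len1);
 *     XXH3_64bits_update(st, chunk2, len2);
 *     XXH64_hash_t h = XXH3_64bits_digest(st);  // the state can keep ingesting afterwards
 *     XXH3_freeState(st);
 *
 * Feeding the same bytes, in any chunking, produces the same digest as the
 * one-shot XXH3_64bits().
 */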
6371#endif /* !XXH_NO_STREAM */
6372
6373
6374/* ==========================================
6375 * XXH3 128 bits (a.k.a XXH128)
6376 * ==========================================
6377 * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant,
6378 * even without counting the significantly larger output size.
6379 *
6380 * For example, extra steps are taken to avoid the seed-dependent collisions
6381 * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
6382 *
6383 * This strength naturally comes at the cost of some speed, especially on short
6384 * lengths. Note that longer hashes are about as fast as the 64-bit version
6385 * due to it using only a slight modification of the 64-bit loop.
6386 *
6387 * XXH128 is also more oriented towards 64-bit machines. It is still extremely
6388 * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64).
6389 */
6390
6391XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
6392XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
6393{
6394 /* A doubled version of 1to3_64b with different constants. */
6395 XXH_ASSERT(input != NULL);
6396 XXH_ASSERT(1 <= len && len <= 3);
6397 XXH_ASSERT(secret != NULL);
6398 /*
6399 * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
6400 * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
6401 * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
6402 */
6403 { xxh_u8 const c1 = input[0];
6404 xxh_u8 const c2 = input[len >> 1];
6405 xxh_u8 const c3 = input[len - 1];
6406 xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
6407 | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
6408 xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
6409 xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
6410 xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
6411 xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
6412 xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
6413 XXH128_hash_t h128;
6414 h128.low64 = XXH64_avalanche(keyed_lo);
6415 h128.high64 = XXH64_avalanche(keyed_hi);
6416 return h128;
6417 }
6418}
6419
6420XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
6421XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
6422{
6423 XXH_ASSERT(input != NULL);
6424 XXH_ASSERT(secret != NULL);
6425 XXH_ASSERT(4 <= len && len <= 8);
6426 seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
6427 { xxh_u32 const input_lo = XXH_readLE32(input);
6428 xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
6429 xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
6430 xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
6431 xxh_u64 const keyed = input_64 ^ bitflip;
6432
6433 /* Shift len to the left to ensure it is even; this avoids an even multiplier. */
6434 XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));
6435
6436 m128.high64 += (m128.low64 << 1);
6437 m128.low64 ^= (m128.high64 >> 3);
6438
6439 m128.low64 = XXH_xorshift64(m128.low64, 35);
6440 m128.low64 *= PRIME_MX2;
6441 m128.low64 = XXH_xorshift64(m128.low64, 28);
6442 m128.high64 = XXH3_avalanche(m128.high64);
6443 return m128;
6444 }
6445}
6446
6447XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
6448XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
6449{
6450 XXH_ASSERT(input != NULL);
6451 XXH_ASSERT(secret != NULL);
6452 XXH_ASSERT(9 <= len && len <= 16);
6453 { xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
6454 xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
6455 xxh_u64 const input_lo = XXH_readLE64(input);
6456 xxh_u64 input_hi = XXH_readLE64(input + len - 8);
6457 XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
6458 /*
6459 * Put len in the middle of m128 to ensure that the length gets mixed to
6460 * both the low and high bits in the 128x64 multiply below.
6461 */
6462 m128.low64 += (xxh_u64)(len - 1) << 54;
6463 input_hi ^= bitfliph;
6464 /*
6465 * Add the high 32 bits of input_hi to the high 32 bits of m128, then
6466 * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
6467 * the high 64 bits of m128.
6468 *
6469 * The best approach to this operation is different on 32-bit and 64-bit.
6470 */
6471 if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
6472 /*
6473 * 32-bit optimized version, which is more readable.
6474 *
6475 * On 32-bit, it removes an ADC and delays a dependency between the two
6476 * halves of m128.high64, but it generates an extra mask on 64-bit.
6477 */
6478 m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
6479 } else {
6480 /*
6481 * 64-bit optimized (albeit more confusing) version.
6482 *
6483 * Uses some properties of addition and multiplication to remove the mask:
6484 *
6485 * Let:
6486 * a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
6487 * b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
6488 * c = XXH_PRIME32_2
6489 *
6490 * a + (b * c)
6491 * Inverse Property: x + y - x == y
6492 * a + (b * (1 + c - 1))
6493 * Distributive Property: x * (y + z) == (x * y) + (x * z)
6494 * a + (b * 1) + (b * (c - 1))
6495 * Identity Property: x * 1 == x
6496 * a + b + (b * (c - 1))
6497 *
6498 * Substitute a, b, and c:
6499 * input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
6500 *
6501 * Since input_hi.hi + input_hi.lo == input_hi, we get this:
6502 * input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
6503 */
6504 m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
6505 }
6506 /* m128 ^= XXH_swap64(m128 >> 64); */
6507 m128.low64 ^= XXH_swap64(m128.high64);
6508
6509 { /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
6510 XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
6511 h128.high64 += m128.high64 * XXH_PRIME64_2;
6512
6513 h128.low64 = XXH3_avalanche(h128.low64);
6514 h128.high64 = XXH3_avalanche(h128.high64);
6515 return h128;
6516 } }
6517}
6518
6519/*
6520 * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
6521 */
6522XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
6523XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
6524{
6525 XXH_ASSERT(len <= 16);
6526 { if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
6527 if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
6528 if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
6529 { XXH128_hash_t h128;
6530 xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
6531 xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
6532 h128.low64 = XXH64_avalanche(seed ^ bitflipl);
6533 h128.high64 = XXH64_avalanche( seed ^ bitfliph);
6534 return h128;
6535 } }
6536}
6537
6538/*
6539 * A bit slower than XXH3_mix16B, but handles multiply by zero better.
6540 */
6541XXH_FORCE_INLINE XXH128_hash_t
6542XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
6543 const xxh_u8* secret, XXH64_hash_t seed)
6544{
6545 acc.low64 += XXH3_mix16B (input_1, secret+0, seed);
6546 acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
6547 acc.high64 += XXH3_mix16B (input_2, secret+16, seed);
6548 acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
6549 return acc;
6550}
6551
6552
6553XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
6554XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
6555 const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
6556 XXH64_hash_t seed)
6557{
6558 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
6559 XXH_ASSERT(16 < len && len <= 128);
6560
6561 { XXH128_hash_t acc;
6562 acc.low64 = len * XXH_PRIME64_1;
6563 acc.high64 = 0;
6564
6565#if XXH_SIZE_OPT >= 1
6566 {
6567 /* Smaller, but slightly slower. */
6568 unsigned int i = (unsigned int)(len - 1) / 32;
6569 do {
6570 acc = XXH128_mix32B(acc, input+16*i, input+len-16*(i+1), secret+32*i, seed);
6571 } while (i-- != 0);
6572 }
6573#else
6574 if (len > 32) {
6575 if (len > 64) {
6576 if (len > 96) {
6577 acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
6578 }
6579 acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
6580 }
6581 acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
6582 }
6583 acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
6584#endif
6585 { XXH128_hash_t h128;
6586 h128.low64 = acc.low64 + acc.high64;
6587 h128.high64 = (acc.low64 * XXH_PRIME64_1)
6588 + (acc.high64 * XXH_PRIME64_4)
6589 + ((len - seed) * XXH_PRIME64_2);
6590 h128.low64 = XXH3_avalanche(h128.low64);
6591 h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
6592 return h128;
6593 }
6594 }
6595}
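/*
 * Editor's note: a worked example (not part of xxhash.h) of the
 * non-size-optimized branch above, for len == 100:
 *     XXH128_mix32B(acc, input+48, input+36, secret+96, seed)
 *     XXH128_mix32B(acc, input+32, input+52, secret+64, seed)
 *     XXH128_mix32B(acc, input+16, input+68, secret+32, seed)
 *     XXH128_mix32B(acc, input+ 0, input+84, secret+ 0, seed)
 * i.e. each step pairs the i-th 16-byte block from the start of the input
 * with the i-th 16-byte block from the end, so the whole input is covered
 * (with some overlap when len is not a multiple of 32).
 */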
6596
6597XXH_NO_INLINE XXH_PUREF XXH128_hash_t
6598XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
6599 const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
6600 XXH64_hash_t seed)
6601{
6602 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
6603 XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
6604
6605 { XXH128_hash_t acc;
6606 unsigned i;
6607 acc.low64 = len * XXH_PRIME64_1;
6608 acc.high64 = 0;
6609 /*
6610 * We set `i` to offset + 32, so that the unchanged `len` can be
6611 * used as the upper bound. This reaches a sweet spot where both
6612 * x86 and aarch64 get simple address generation and good codegen
6613 * for the loop.
6614 */
6615 for (i = 32; i < 160; i += 32) {
6616 acc = XXH128_mix32B(acc,
6617 input + i - 32,
6618 input + i - 16,
6619 secret + i - 32,
6620 seed);
6621 }
6622 acc.low64 = XXH3_avalanche(acc.low64);
6623 acc.high64 = XXH3_avalanche(acc.high64);
6624 /*
6625 * NB: `i <= len` will duplicate the last 32 bytes if
6626 * len % 32 is zero. This is an unfortunate necessity to keep
6627 * the hash result stable.
6628 */
6629 for (i=160; i <= len; i += 32) {
6630 acc = XXH128_mix32B(acc,
6631 input + i - 32,
6632 input + i - 16,
6633 secret + XXH3_MIDSIZE_STARTOFFSET + i - 160,
6634 seed);
6635 }
6636 /* last bytes */
6637 acc = XXH128_mix32B(acc,
6638 input + len - 16,
6639 input + len - 32,
6640 secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
6641 (XXH64_hash_t)0 - seed);
6642
6643 { XXH128_hash_t h128;
6644 h128.low64 = acc.low64 + acc.high64;
6645 h128.high64 = (acc.low64 * XXH_PRIME64_1)
6646 + (acc.high64 * XXH_PRIME64_4)
6647 + ((len - seed) * XXH_PRIME64_2);
6648 h128.low64 = XXH3_avalanche(h128.low64);
6649 h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
6650 return h128;
6651 }
6652 }
6653}
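/*
 * Editor's note: a worked example (not part of xxhash.h) of the routine
 * above, for len == 200:
 *   - first loop (i = 32, 64, 96, 128): mixes input[0..127] with secret[0..127];
 *   - second loop (i = 160, 192):       mixes input[128..191] with
 *                                       secret[XXH3_MIDSIZE_STARTOFFSET..];
 *   - "last bytes" step:                mixes input[168..199], so the final
 *                                       16-byte blocks are always covered even
 *                                       when len is not a multiple of 32.
 */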
6654
6655XXH_FORCE_INLINE XXH128_hash_t
6656XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
6657 const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
6658 XXH3_f_accumulate f_acc,
6659 XXH3_f_scrambleAcc f_scramble)
6660{
6661 XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
6662
6663 XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc, f_scramble);
6664
6665 /* converge into final hash */
6666 XXH_STATIC_ASSERT(sizeof(acc) == 64);
6667 XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
6668 { XXH128_hash_t h128;
6669 h128.low64 = XXH3_mergeAccs(acc,
6670 secret + XXH_SECRET_MERGEACCS_START,
6671 (xxh_u64)len * XXH_PRIME64_1);
6672 h128.high64 = XXH3_mergeAccs(acc,
6673 secret + secretSize
6674 - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
6675 ~((xxh_u64)len * XXH_PRIME64_2));
6676 return h128;
6677 }
6678}
6679
6680/*
6681 * It's important for performance that XXH3_hashLong() is not inlined.
6682 */
6683XXH_NO_INLINE XXH_PUREF XXH128_hash_t
6684XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
6685 XXH64_hash_t seed64,
6686 const void* XXH_RESTRICT secret, size_t secretLen)
6687{
6688 (void)seed64; (void)secret; (void)secretLen;
6689 return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret),
6690 XXH3_accumulate, XXH3_scrambleAcc);
6691}
6692
6693/*
6694 * It's important for performance to pass @p secretLen (when it's static)
6695 * to the compiler, so that it can properly optimize the vectorized loop.
6696 *
6697 * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE
6698 * breaks -Og, this is XXH_NO_INLINE.
6699 */
6700XXH3_WITH_SECRET_INLINE XXH128_hash_t
6701XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
6702 XXH64_hash_t seed64,
6703 const void* XXH_RESTRICT secret, size_t secretLen)
6704{
6705 (void)seed64;
6706 return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen,
6707 XXH3_accumulate, XXH3_scrambleAcc);
6708}
6709
6710XXH_FORCE_INLINE XXH128_hash_t
6711XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
6712 XXH64_hash_t seed64,
6713 XXH3_f_accumulate f_acc,
6714 XXH3_f_scrambleAcc f_scramble,
6715 XXH3_f_initCustomSecret f_initSec)
6716{
6717 if (seed64 == 0)
6718 return XXH3_hashLong_128b_internal(input, len,
6719 XXH3_kSecret, sizeof(XXH3_kSecret),
6720 f_acc, f_scramble);
6721 { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
6722 f_initSec(secret, seed64);
6723 return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
6724 f_acc, f_scramble);
6725 }
6726}
6727
6728/*
6729 * It's important for performance that XXH3_hashLong is not inlined.
6730 */
6731XXH_NO_INLINE XXH128_hash_t
6732XXH3_hashLong_128b_withSeed(const void* input, size_t len,
6733 XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
6734{
6735 (void)secret; (void)secretLen;
6736 return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
6737 XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret);
6738}
6739
6740typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
6741 XXH64_hash_t, const void* XXH_RESTRICT, size_t);
6742
6743XXH_FORCE_INLINE XXH128_hash_t
6744XXH3_128bits_internal(const void* input, size_t len,
6745 XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
6746 XXH3_hashLong128_f f_hl128)
6747{
6748 XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
6749 /*
6750 * If an action is to be taken if `secret` conditions are not respected,
6751 * it should be done here.
6752 * For now, it's a contract pre-condition.
6753 * Adding a check and a branch here would cost performance at every hash.
6754 */
6755 if (len <= 16)
6756 return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
6757 if (len <= 128)
6758 return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
6759 if (len <= XXH3_MIDSIZE_MAX)
6760 return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
6761 return f_hl128(input, len, seed64, secret, secretLen);
6762}
6763
6764
6765/* === Public XXH128 API === */
6766
6768XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* input, size_t len)
6769{
6770 return XXH3_128bits_internal(input, len, 0,
6771 XXH3_kSecret, sizeof(XXH3_kSecret),
6772 XXH3_hashLong_128b_default);
6773}
6774
6776XXH_PUBLIC_API XXH128_hash_t
6777XXH3_128bits_withSecret(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize)
6778{
6779 return XXH3_128bits_internal(input, len, 0,
6780 (const xxh_u8*)secret, secretSize,
6781 XXH3_hashLong_128b_withSecret);
6782}
6783
6785XXH_PUBLIC_API XXH128_hash_t
6786XXH3_128bits_withSeed(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
6787{
6788 return XXH3_128bits_internal(input, len, seed,
6789 XXH3_kSecret, sizeof(XXH3_kSecret),
6790 XXH3_hashLong_128b_withSeed);
6791}
6792
6794XXH_PUBLIC_API XXH128_hash_t
6795XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
6796{
6797 if (len <= XXH3_MIDSIZE_MAX)
6798 return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
6799 return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize);
6800}
6801
6803XXH_PUBLIC_API XXH128_hash_t
6804XXH128(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
6805{
6806 return XXH3_128bits_withSeed(input, len, seed);
6807}
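/*
 * Editor's note: an illustrative usage sketch (not part of xxhash.h) of the
 * one-shot 128-bit API. The function name is hypothetical; it assumes this
 * header has been included by the calling translation unit.
 */
static int XXH_example_oneShot128(void)
{
    const char msg[] = "hello world";
    /* the unseeded and the seed==0 entry points compute the same hash */
    XXH128_hash_t const h1 = XXH3_128bits(msg, sizeof(msg) - 1);
    XXH128_hash_t const h2 = XXH128(msg, sizeof(msg) - 1, 0);
    return XXH128_isEqual(h1, h2);   /* 1 */
}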
6808
6809
6810/* === XXH3 128-bit streaming === */
6811#ifndef XXH_NO_STREAM
6812/*
6813 * All initialization and update functions are identical to the 64-bit streaming variant.
6814 * The only difference is the finalization routine.
6815 */
6816
6819XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
6820{
6821 return XXH3_64bits_reset(statePtr);
6822}
6823
6826XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
6827{
6828 return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize);
6829}
6830
6833XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
6834{
6835 return XXH3_64bits_reset_withSeed(statePtr, seed);
6836}
6837
6840XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
6841{
6842 return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed);
6843}
6844
6847XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len)
6848{
6849 return XXH3_64bits_update(state, input, len);
6850}
6851
6853XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* state)
6854{
6855 const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
6856 if (state->totalLen > XXH3_MIDSIZE_MAX) {
6857 XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
6858 XXH3_digest_long(acc, state, secret);
6859 XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
6860 { XXH128_hash_t h128;
6861 h128.low64 = XXH3_mergeAccs(acc,
6862 secret + XXH_SECRET_MERGEACCS_START,
6863 (xxh_u64)state->totalLen * XXH_PRIME64_1);
6864 h128.high64 = XXH3_mergeAccs(acc,
6865 secret + state->secretLimit + XXH_STRIPE_LEN
6866 - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
6867 ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
6868 return h128;
6869 }
6870 }
6871 /* len <= XXH3_MIDSIZE_MAX : short code */
6872 if (state->seed)
6873 return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
6874 return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
6875 secret, state->secretLimit + XXH_STRIPE_LEN);
6876}
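/*
 * Editor's note: an illustrative streaming sketch (not part of xxhash.h).
 * It hashes two chunks and produces the same 128-bit result as hashing
 * their concatenation in one shot. The function name is hypothetical.
 */
static int XXH_example_stream128(const void* part1, size_t len1,
                                 const void* part2, size_t len2,
                                 XXH128_hash_t* result)
{
    XXH3_state_t* const state = XXH3_createState();
    if (state == NULL) return 0;
    if ( XXH3_128bits_reset_withSeed(state, 0) == XXH_ERROR
      || XXH3_128bits_update(state, part1, len1) == XXH_ERROR
      || XXH3_128bits_update(state, part2, len2) == XXH_ERROR ) {
        XXH3_freeState(state);
        return 0;
    }
    *result = XXH3_128bits_digest(state);
    XXH3_freeState(state);
    return 1;
}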
6877#endif /* !XXH_NO_STREAM */
6878/* 128-bit utility functions */
6879
6880#include <string.h> /* memcmp, memcpy */
6881
6882/* return : 1 if equal, 0 if different */
6884XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
6885{
6886 /* note : XXH128_hash_t is compact, it has no padding bytes */
6887 return !(memcmp(&h1, &h2, sizeof(h1)));
6888}
6889
6890/* This prototype is compatible with stdlib's qsort().
6891 * @return : >0 if *h128_1 > *h128_2
6892 * <0 if *h128_1 < *h128_2
6893 * =0 if *h128_1 == *h128_2 */
6895XXH_PUBLIC_API int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2)
6896{
6897 XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
6898 XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
6899 int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
6900 /* note : this bets that, in most cases, hash values are different */
6901 if (hcmp) return hcmp;
6902 return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
6903}
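/*
 * Editor's note: an illustrative sketch (not part of xxhash.h) showing the
 * qsort() compatibility mentioned above. The function name is hypothetical.
 */
#include <stdlib.h>   /* qsort */
static void XXH_example_sortHashes(XXH128_hash_t* hashes, size_t count)
{
    /* sorts by high64 first, then low64, via XXH128_cmp */
    qsort(hashes, count, sizeof(XXH128_hash_t), XXH128_cmp);
}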
6904
6905
6906/*====== Canonical representation ======*/
6908XXH_PUBLIC_API void
6909XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash)
6910{
6911 XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
6912 if (XXH_CPU_LITTLE_ENDIAN) {
6913 hash.high64 = XXH_swap64(hash.high64);
6914 hash.low64 = XXH_swap64(hash.low64);
6915 }
6916 XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
6917 XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
6918}
6919
6921XXH_PUBLIC_API XXH128_hash_t
6922XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src)
6923{
6924 XXH128_hash_t h;
6925 h.high64 = XXH_readBE64(src);
6926 h.low64 = XXH_readBE64(src->digest + 8);
6927 return h;
6928}
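/*
 * Editor's note: an illustrative sketch (not part of xxhash.h) showing a
 * round trip through the canonical (big-endian) representation, e.g. for
 * serializing a digest. The function name is hypothetical.
 */
static int XXH_example_canonicalRoundTrip(XXH128_hash_t h)
{
    XXH128_canonical_t canon;
    XXH128_hash_t restored;
    XXH128_canonicalFromHash(&canon, h);          /* fixed, endian-independent byte order */
    restored = XXH128_hashFromCanonical(&canon);
    return XXH128_isEqual(h, restored);           /* always 1 */
}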
6929
6930
6931
6932/* ==========================================
6933 * Secret generators
6934 * ==========================================
6935 */
6936#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
6937
6938XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128)
6939{
6940 XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 );
6941 XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
6942}
6943
6946XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize)
6947{
6948#if (XXH_DEBUGLEVEL >= 1)
6949 XXH_ASSERT(secretBuffer != NULL);
6950 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
6951#else
6952 /* production mode, assert() are disabled */
6953 if (secretBuffer == NULL) return XXH_ERROR;
6954 if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
6955#endif
6956
6957 if (customSeedSize == 0) {
6958 customSeed = XXH3_kSecret;
6959 customSeedSize = XXH_SECRET_DEFAULT_SIZE;
6960 }
6961#if (XXH_DEBUGLEVEL >= 1)
6962 XXH_ASSERT(customSeed != NULL);
6963#else
6964 if (customSeed == NULL) return XXH_ERROR;
6965#endif
6966
6967 /* Fill secretBuffer with a copy of customSeed - repeat as needed */
6968 { size_t pos = 0;
6969 while (pos < secretSize) {
6970 size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
6971 memcpy((char*)secretBuffer + pos, customSeed, toCopy);
6972 pos += toCopy;
6973 } }
6974
6975 { size_t const nbSeg16 = secretSize / 16;
6976 size_t n;
6977 XXH128_canonical_t scrambler;
6978 XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
6979 for (n=0; n<nbSeg16; n++) {
6980 XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
6981 XXH3_combine16((char*)secretBuffer + n*16, h128);
6982 }
6983 /* last segment */
6984 XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
6985 }
6986 return XXH_OK;
6987}
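/*
 * Editor's note: an illustrative usage sketch (not part of xxhash.h):
 * derive a custom secret from arbitrary seed material, then hash with it.
 * The function name is hypothetical; XXH3_SECRET_SIZE_MIN is the smallest
 * accepted secret size.
 */
static int XXH_example_customSecret(const void* data, size_t len,
                                    const void* seedMaterial, size_t seedLen,
                                    XXH128_hash_t* result)
{
    unsigned char secret[XXH3_SECRET_SIZE_MIN];
    if (XXH3_generateSecret(secret, sizeof(secret), seedMaterial, seedLen) != XXH_OK)
        return 0;
    *result = XXH3_128bits_withSecret(data, len, secret, sizeof(secret));
    return 1;
}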
6988
6990XXH_PUBLIC_API void
6991XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed)
6992{
6993 XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
6994 XXH3_initCustomSecret(secret, seed);
6995 XXH_ASSERT(secretBuffer != NULL);
6996 memcpy(secretBuffer, secret, XXH_SECRET_DEFAULT_SIZE);
6997}
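/*
 * Editor's note: an illustrative sketch (not part of xxhash.h): generate the
 * default-sized secret for a given seed once, then reuse it together with the
 * same seed via the _withSecretandSeed() variant. The function name is
 * hypothetical; for a matching seed this is expected to produce the same
 * result as XXH3_128bits_withSeed().
 */
static XXH128_hash_t
XXH_example_secretAndSeed(const void* data, size_t len, XXH64_hash_t seed)
{
    XXH_ALIGN(XXH_SEC_ALIGN) unsigned char secret[XXH_SECRET_DEFAULT_SIZE];
    XXH3_generateSecret_fromSeed(secret, seed);
    return XXH3_128bits_withSecretandSeed(data, len, secret, sizeof(secret), seed);
}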
6998
6999
7000
7001/* Pop our optimization override from above */
7002#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
7003 && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
7004 && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
7005# pragma GCC pop_options
7006#endif
7007
7008#endif /* XXH_NO_LONG_LONG */
7009
7010#endif /* XXH_NO_XXH3 */
7011
7015#endif /* XXH_IMPLEMENTATION */
7016
7017
7018#if defined (__cplusplus)
7019} /* extern "C" */
7020#endif