#  define XXH_NAMESPACE ZSTD_

#if defined (__cplusplus)
extern "C" {
#endif
#ifdef XXH_DOXYGEN
/* Documentation-only definitions of the build macros. */
#  define XXH_STATIC_LINKING_ONLY

#  define XXH_IMPLEMENTATION

#  define XXH_INLINE_ALL
#  undef XXH_INLINE_ALL

#  define XXH_PRIVATE_API
#  undef XXH_PRIVATE_API

#  define XXH_NAMESPACE /* YOUR NAME HERE */
#  undef XXH_NAMESPACE
#endif
#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
   && !defined(XXH_INLINE_ALL_31684351384)
   /* this section should be traversed only once */
#  define XXH_INLINE_ALL_31684351384
   /* give access to the advanced API, required to compile implementations */
#  undef XXH_STATIC_LINKING_ONLY   /* avoid macro redef */
#  define XXH_STATIC_LINKING_ONLY
   /* make all functions private */
#  undef XXH_PUBLIC_API
#  if defined(__GNUC__)
#    define XXH_PUBLIC_API static __inline __attribute__((unused))
#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#    define XXH_PUBLIC_API static inline
#  elif defined(_MSC_VER)
#    define XXH_PUBLIC_API static __inline
#  else
     /* note: this version may generate warnings for unused static functions */
#    define XXH_PUBLIC_API static
#  endif
   /* undef all symbols, in case they were already defined with a previous namespace */
#  undef XXH_versionNumber
   /* XXH32 */
#  undef XXH32
#  undef XXH32_createState
#  undef XXH32_freeState
#  undef XXH32_reset
#  undef XXH32_update
#  undef XXH32_digest
#  undef XXH32_copyState
#  undef XXH32_canonicalFromHash
#  undef XXH32_hashFromCanonical
   /* XXH64 */
#  undef XXH64
#  undef XXH64_createState
#  undef XXH64_freeState
#  undef XXH64_reset
#  undef XXH64_update
#  undef XXH64_digest
#  undef XXH64_copyState
#  undef XXH64_canonicalFromHash
#  undef XXH64_hashFromCanonical
   /* XXH3_64bits */
#  undef XXH3_64bits
#  undef XXH3_64bits_withSecret
#  undef XXH3_64bits_withSeed
#  undef XXH3_64bits_withSecretandSeed
#  undef XXH3_createState
#  undef XXH3_freeState
#  undef XXH3_copyState
#  undef XXH3_64bits_reset
#  undef XXH3_64bits_reset_withSeed
#  undef XXH3_64bits_reset_withSecret
#  undef XXH3_64bits_update
#  undef XXH3_64bits_digest
#  undef XXH3_generateSecret
#  undef XXH3_generateSecret_fromSeed
   /* XXH3_128bits */
#  undef XXH128
#  undef XXH3_128bits
#  undef XXH3_128bits_withSeed
#  undef XXH3_128bits_withSecret
#  undef XXH3_128bits_reset
#  undef XXH3_128bits_reset_withSeed
#  undef XXH3_128bits_reset_withSecret
#  undef XXH3_128bits_reset_withSecretandSeed
#  undef XXH3_128bits_update
#  undef XXH3_128bits_digest
#  undef XXH128_isEqual
#  undef XXH128_cmp
#  undef XXH128_canonicalFromHash
#  undef XXH128_hashFromCanonical
   /* finally, free the namespace itself */
#  undef XXH_NAMESPACE
#  define XXH_NAMESPACE XXH_INLINE_
   /*
    * Some identifiers (enums, type names) are not symbols,
    * but they must nonetheless be renamed to avoid redeclaration.
    */
#  define XXH_IPREF(Id)   XXH_NAMESPACE ## Id
#  define XXH_OK XXH_IPREF(XXH_OK)
#  define XXH_ERROR XXH_IPREF(XXH_ERROR)
#  define XXH_errorcode XXH_IPREF(XXH_errorcode)
#  define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t)
#  define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t)
#  define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
#  define XXH32_state_s XXH_IPREF(XXH32_state_s)
#  define XXH32_state_t XXH_IPREF(XXH32_state_t)
#  define XXH64_state_s XXH_IPREF(XXH64_state_s)
#  define XXH64_state_t XXH_IPREF(XXH64_state_t)
#  define XXH3_state_s XXH_IPREF(XXH3_state_s)
#  define XXH3_state_t XXH_IPREF(XXH3_state_t)
#  define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
   /* Ensure the header is parsed again, even if it was previously included */
#  undef XXHASH_H_5627135585666179
#  undef XXHASH_H_STATIC_13879238742
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
#  if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
#    ifdef XXH_EXPORT
#      define XXH_PUBLIC_API __declspec(dllexport)
#    elif defined(XXH_IMPORT)
#      define XXH_PUBLIC_API __declspec(dllimport)
#    endif
#  else
#    define XXH_PUBLIC_API   /* do nothing */
#  endif
#endif
#ifdef XXH_NAMESPACE
#  define XXH_CAT(A,B) A##B
#  define XXH_NAME2(A,B) XXH_CAT(A,B)
#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
/* XXH32 */
#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
/* XXH64 */
#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
/* XXH3_64bits */
#  define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
#  define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
#  define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
#  define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
#  define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
#  define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
#  define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
#  define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
#  define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
#  define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
#  define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
#  define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
#  define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
#  define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
#  define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
/* XXH3_128bits */
#  define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
#  define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
#  define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
#  define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
#  define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
#  define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
#  define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
#  define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
#  define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
#  define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
#  define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
#  define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
#  define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
#  define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
#  define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
#endif
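/*
 * Illustrative note (not part of the original header): with XXH_NAMESPACE set
 * to ZSTD_ as above, every public symbol is renamed at preprocessing time,
 * which keeps zstd's embedded copy of xxHash from colliding with a separately
 * linked libxxhash. For example:
 *
 * @code{.c}
 *   XXH32_hash_t h = XXH32(buf, len, 0);  // actually calls ZSTD_XXH32(buf, len, 0)
 * @endcode
 */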
/* specific declaration modes for Windows */
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
#  if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
#    ifdef XXH_EXPORT
#      define XXH_PUBLIC_API __declspec(dllexport)
#    elif defined(XXH_IMPORT)
#      define XXH_PUBLIC_API __declspec(dllimport)
#    endif
#  else
#    define XXH_PUBLIC_API   /* do nothing */
#  endif
#endif
#if defined (__GNUC__)
# define XXH_CONSTF  __attribute__((const))
# define XXH_PUREF   __attribute__((pure))
# define XXH_MALLOCF __attribute__((malloc))
#else
# define XXH_CONSTF  /* disable */
# define XXH_PUREF
# define XXH_MALLOCF
#endif
#define XXH_VERSION_MAJOR    0
#define XXH_VERSION_MINOR    8
#define XXH_VERSION_RELEASE  2
/* version number, encoded as two digits each */
#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
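/*
 * Illustrative example (not part of the original header): for version 0.8.2,
 * XXH_VERSION_NUMBER evaluates to 0*10000 + 8*100 + 2 = 802. A runtime
 * consistency check could look like:
 *
 * @code{.c}
 *   if (XXH_versionNumber() != XXH_VERSION_NUMBER) {
 *       // header and compiled library disagree
 *   }
 * @endcode
 */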
#if defined(XXH_DOXYGEN) /* don't show <stdint.h> include */
/*! @brief An unsigned 32-bit integer. */
typedef uint32_t XXH32_hash_t;

#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  ifdef _AIX
#    include <inttypes.h>
#  else
#    include <stdint.h>
#  endif
   typedef uint32_t XXH32_hash_t;

#else
#  include <limits.h>
#  if UINT_MAX == 0xFFFFFFFFUL
     typedef unsigned int XXH32_hash_t;
#  elif ULONG_MAX == 0xFFFFFFFFUL
     typedef unsigned long XXH32_hash_t;
#  else
#    error "unsupported platform: need a 32-bit type"
#  endif
#endif
typedef struct {
    unsigned char digest[4]; /*!< Hash bytes, big endian */
} XXH32_canonical_t;
#ifdef __has_attribute
# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
#else
# define XXH_HAS_ATTRIBUTE(x) 0
#endif
#define XXH_C23_VN 201711L
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) && defined(__has_c_attribute)
# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
#else
# define XXH_HAS_C_ATTRIBUTE(x) 0
#endif
#if defined(__cplusplus) && defined(__has_cpp_attribute)
# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
# define XXH_HAS_CPP_ATTRIBUTE(x) 0
#endif
#if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough)
# define XXH_FALLTHROUGH [[fallthrough]]
#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
# define XXH_FALLTHROUGH __attribute__ ((__fallthrough__))
#else
# define XXH_FALLTHROUGH /* fallthrough */
#endif
#if XXH_HAS_ATTRIBUTE(noescape)
# define XXH_NOESCAPE __attribute__((noescape))
#else
# define XXH_NOESCAPE
#endif
#ifndef XXH_NO_LONG_LONG
#if defined(XXH_DOXYGEN) /* don't show <stdint.h> include */
/*! @brief An unsigned 64-bit integer. */
typedef uint64_t XXH64_hash_t;

#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  ifdef _AIX
#    include <inttypes.h>
#  else
#    include <stdint.h>
#  endif
   typedef uint64_t XXH64_hash_t;

#else
#  include <limits.h>
#  if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
     /* LP64 ABI says uint64_t is unsigned long */
     typedef unsigned long XXH64_hash_t;
#  else
     /* the following type must have a width of 64-bit */
     typedef unsigned long long XXH64_hash_t;
#  endif
#endif
#define XXH3_SECRET_SIZE_MIN 136
#ifndef XXH_NO_STREAM

typedef struct XXH3_state_s XXH3_state_t;
#ifndef XXH_NO_STREAM
typedef struct {
    unsigned char digest[sizeof(XXH128_hash_t)]; /*!< Hash bytes, big endian */
} XXH128_canonical_t;
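/*
 * Illustrative sketch (not part of the original header): the canonical form
 * is the big-endian byte representation, suitable for storage or network
 * transmission, and it round-trips exactly:
 *
 * @code{.c}
 *   XXH128_canonical_t c;
 *   XXH128_hash_t h = XXH3_128bits(buf, len);        // buf/len: user data
 *   XXH128_canonicalFromHash(&c, h);                 // serialize, big endian
 *   XXH128_hash_t back = XXH128_hashFromCanonical(&c);
 *   // XXH128_isEqual(h, back) != 0
 * @endcode
 */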
#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
#define XXHASH_H_STATIC_13879238742
struct XXH32_state_s {
   XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
   XXH32_hash_t large_len;    /*!< Whether the hash is >= 16 (handles total_len_32 overflow) */
   XXH32_hash_t v[4];         /*!< Accumulator lanes */
   XXH32_hash_t mem32[4];     /*!< Internal buffer for partial reads */
   XXH32_hash_t memsize;      /*!< Amount of data in mem32 */
   XXH32_hash_t reserved;     /*!< Reserved field. Do not read nor write to it. */
};   /* typedef'd to XXH32_state_t */
#ifndef XXH_NO_LONG_LONG  /* defined when there is no 64-bit support */
struct XXH64_state_s {
   XXH64_hash_t total_len;    /*!< Total length hashed. This is always 64-bit. */
   XXH64_hash_t v[4];         /*!< Accumulator lanes */
   XXH64_hash_t mem64[4];     /*!< Internal buffer for partial reads */
   XXH32_hash_t memsize;      /*!< Amount of data in mem64 */
   XXH32_hash_t reserved32;   /*!< Reserved field, needed for padding anyways */
   XXH64_hash_t reserved64;   /*!< Reserved field. Do not read or write to it. */
};   /* typedef'd to XXH64_state_t */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)   /* >= C11 */
#  include <stdalign.h>
#  define XXH_ALIGN(n)      alignas(n)
#elif defined(__cplusplus) && (__cplusplus >= 201103L)           /* >= C++11 */
/* In C++ alignas() is a keyword */
#  define XXH_ALIGN(n)      alignas(n)
#elif defined(__GNUC__)
#  define XXH_ALIGN(n)      __attribute__ ((aligned(n)))
#elif defined(_MSC_VER)
#  define XXH_ALIGN(n)      __declspec(align(n))
#else
#  define XXH_ALIGN(n)   /* disabled */
#endif

/* Old GCC versions only accept the attribute after the type in structures. */
#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L))   /* C11+ */ \
    && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \
    && defined(__GNUC__)
#   define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
#else
#   define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
#endif
#define XXH3_INTERNALBUFFER_SIZE 256

#define XXH3_SECRET_DEFAULT_SIZE 192
struct XXH3_state_s {
   XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
       /*!< The 8 accumulators. Similar to v in XXH32_state_s/XXH64_state_s */
   XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
       /*!< Used to store a custom secret generated from a seed. */
   XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
       /*!< The internal buffer. */
   XXH32_hash_t bufferedSize;
       /*!< The amount of memory in buffer. */
   XXH32_hash_t useSeed;
       /*!< Reserved field. Needed for padding on 64-bit. */
   size_t nbStripesSoFar;
       /*!< Number of stripes processed. */
   XXH64_hash_t totalLen;
       /*!< Total length hashed. 64-bit even on 32-bit targets. */
   size_t nbStripesPerBlock;
       /*!< Number of stripes per block. */
   size_t secretLimit;
       /*!< Size of customSecret or extSecret */
   XXH64_hash_t seed;
       /*!< Seed for _withSeed variants. Must be zero otherwise. */
   XXH64_hash_t reserved64;
       /*!< Reserved field. */
   const unsigned char* extSecret;
       /*!< Reference to an external secret for the _withSecret variants, NULL
        *   for other variants. */
   /* note: there may be some padding at the end due to alignment on 64 bytes */
}; /* typedef'd to XXH3_state_t */

#undef XXH_ALIGN_MEMBER
#define XXH3_INITSTATE(XXH3_state_ptr)                       \
    do {                                                     \
        XXH3_state_t* tmp_xxh3_state_ptr = (XXH3_state_ptr); \
        tmp_xxh3_state_ptr->seed = 0;                        \
        tmp_xxh3_state_ptr->extSecret = NULL;                \
    } while(0)
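/*
 * Illustrative usage sketch (not part of the original header): a statically
 * allocated XXH3 state must be initialized with XXH3_INITSTATE() before its
 * first reset, so that seed and extSecret start out valid:
 *
 * @code{.c}
 *   XXH3_state_t state;        // stack allocation (XXH_STATIC_LINKING_ONLY)
 *   XXH3_INITSTATE(&state);
 *   XXH3_64bits_reset(&state);
 * @endcode
 */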
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t
XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* data, size_t len,
                              XXH_NOESCAPE const void* secret, size_t secretSize,
                              XXH64_hash_t seed);

XXH_PUBLIC_API XXH_PUREF XXH128_hash_t
XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t len,
                               XXH_NOESCAPE const void* secret, size_t secretSize,
                               XXH64_hash_t seed64);

#ifndef XXH_NO_STREAM
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
                                    XXH_NOESCAPE const void* secret, size_t secretSize,
                                    XXH64_hash_t seed64);

XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
                                     XXH_NOESCAPE const void* secret, size_t secretSize,
                                     XXH64_hash_t seed64);
#endif /* !XXH_NO_STREAM */

#endif  /* XXH_STATIC_LINKING_ONLY */
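/*
 * Illustrative sketch (not part of the original header): the
 * *_withSecretandSeed() variants combine a caller-provided secret of at least
 * XXH3_SECRET_SIZE_MIN bytes with a seed; `secret`, `secretSize` and `seed`
 * below are the caller's values:
 *
 * @code{.c}
 *   XXH64_hash_t h = XXH3_64bits_withSecretandSeed(buf, len,
 *                                                  secret, secretSize, seed);
 * @endcode
 */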
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
#  define XXH_IMPLEMENTATION
#endif
#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
   || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
#  define XXH_IMPLEM_13a8737387
#  define XXH_NO_LONG_LONG
#  undef XXH_NO_LONG_LONG /* don't actually */

#  define XXH_FORCE_MEMORY_ACCESS 0

#  define XXH_SIZE_OPT 0

#  define XXH_FORCE_ALIGN_CHECK 0

#  define XXH_NO_INLINE_HINTS 0

#  define XXH3_INLINE_SECRET 0

#  define XXH32_ENDJMP 0

#  define XXH_OLD_NAMES
#  undef XXH_OLD_NAMES /* don't actually use */

#  define XXH_NO_STREAM
#  undef XXH_NO_STREAM /* don't actually */
#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
   /* prefer __packed__ structures (method 1) for GCC;
    * < ARMv7 with unaligned access (e.g. Raspbian armhf) still uses byte shifting */
#  if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && defined(__ARM_FEATURE_UNALIGNED))
#    define XXH_FORCE_MEMORY_ACCESS 1
#  endif
#endif

#ifndef XXH_SIZE_OPT
   /* default to 1 for -Os or -Oz */
#  if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__)
#    define XXH_SIZE_OPT 1
#  else
#    define XXH_SIZE_OPT 0
#  endif
#endif
#ifndef XXH_FORCE_ALIGN_CHECK  /* can be defined externally */
   /* don't check on sizeopt, x86, aarch64, or arm when unaligned access is available */
#  if XXH_SIZE_OPT >= 1 || \
      defined(__i386)  || defined(__x86_64__) || defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) \
   || defined(_M_IX86) || defined(_M_X64)     || defined(_M_ARM64)    || defined(_M_ARM) /* visual */
#    define XXH_FORCE_ALIGN_CHECK 0
#  else
#    define XXH_FORCE_ALIGN_CHECK 1
#  endif
#endif

#ifndef XXH_NO_INLINE_HINTS
#  if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__)  /* -O0, -fno-inline */
#    define XXH_NO_INLINE_HINTS 1
#  else
#    define XXH_NO_INLINE_HINTS 0
#  endif
#endif

#ifndef XXH3_INLINE_SECRET
#  if (defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12) \
     || !defined(XXH_INLINE_ALL)
#    define XXH3_INLINE_SECRET 0
#  else
#    define XXH3_INLINE_SECRET 1
#  endif
#endif

#ifndef XXH32_ENDJMP
/* generally preferable for performance */
#  define XXH32_ENDJMP 0
#endif
#if defined(XXH_NO_STREAM)
/* nothing */
#elif defined(XXH_NO_STDLIB)

/* When requesting to disable any mention of stdlib,
 * the library loses the ability to invoke malloc / free.
 * In practice, it means that functions like `XXH*_createState()`
 * will always fail, and return NULL.
 */
static XXH_CONSTF void* XXH_malloc(size_t s) { (void)s; return NULL; }
static void XXH_free(void* p) { (void)p; }

#else

#include <stdlib.h>

/*! @internal Modify this function to use a different routine than malloc(). */
static XXH_MALLOCF void* XXH_malloc(size_t s) { return malloc(s); }

/*! @internal Modify this function to use a different routine than free(). */
static void XXH_free(void* p) { free(p); }

#endif  /* XXH_NO_STDLIB */

#include <string.h>

/*! @internal Modify this function to use a different routine than memcpy(). */
static void* XXH_memcpy(void* dest, const void* src, size_t size)
{
    return memcpy(dest,src,size);
}
#ifdef _MSC_VER /* Visual Studio warning fix */
#  pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#endif
#if XXH_NO_INLINE_HINTS  /* disable inlining hints */
#  if defined(__GNUC__) || defined(__clang__)
#    define XXH_FORCE_INLINE static __attribute__((unused))
#  else
#    define XXH_FORCE_INLINE static
#  endif
#  define XXH_NO_INLINE static
/* enable inlining hints */
#elif defined(__GNUC__) || defined(__clang__)
#  define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline))
#  define XXH_NO_INLINE static __attribute__((noinline))
#elif defined(_MSC_VER)  /* Visual Studio */
#  define XXH_FORCE_INLINE static __forceinline
#  define XXH_NO_INLINE static __declspec(noinline)
#elif defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))   /* C99 */
#  define XXH_FORCE_INLINE static inline
#  define XXH_NO_INLINE static
#else
#  define XXH_FORCE_INLINE static
#  define XXH_NO_INLINE static
#endif

#if XXH3_INLINE_SECRET
#  define XXH3_WITH_SECRET_INLINE XXH_FORCE_INLINE
#else
#  define XXH3_WITH_SECRET_INLINE XXH_NO_INLINE
#endif
#ifndef XXH_DEBUGLEVEL
#  ifdef DEBUGLEVEL /* backwards compat */
#    define XXH_DEBUGLEVEL DEBUGLEVEL
#  else
#    define XXH_DEBUGLEVEL 0
#  endif
#endif

#if (XXH_DEBUGLEVEL>=1)
#  include <assert.h>   /* note: can still be disabled with NDEBUG */
#  define XXH_ASSERT(c)   assert(c)
#else
#  if defined(__INTEL_COMPILER)
#    define XXH_ASSERT(c)   XXH_ASSUME((unsigned char) (c))
#  else
#    define XXH_ASSERT(c)   XXH_ASSUME(c)
#  endif
#endif
/* note: use after variable declarations */
#ifndef XXH_STATIC_ASSERT
#  if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)   /* C11 */
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { _Static_assert((c),m); } while(0)
#  elif defined(__cplusplus) && (__cplusplus >= 201103L)           /* C++11 */
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
#  else
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
#  endif
#  define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
#endif
#if defined(__GNUC__) || defined(__clang__)
#  define XXH_COMPILER_GUARD(var) __asm__("" : "+r" (var))
#else
#  define XXH_COMPILER_GUARD(var) ((void)0)
#endif

/* Specifically for NEON vectors, which use the "w" constraint, on Clang. */
#if defined(__clang__) && defined(__ARM_ARCH) && !defined(__wasm__)
#  define XXH_COMPILER_GUARD_CLANG_NEON(var) __asm__("" : "+w" (var))
#else
#  define XXH_COMPILER_GUARD_CLANG_NEON(var) ((void)0)
#endif
#if !defined (__VMS) \
 && (defined (__cplusplus) \
 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  ifdef _AIX
#    include <inttypes.h>
#  else
#    include <stdint.h>
#  endif
   typedef uint8_t xxh_u8;
#else
   typedef unsigned char xxh_u8;
#endif
typedef XXH32_hash_t xxh_u32;

#ifdef XXH_OLD_NAMES
#  warning "XXH_OLD_NAMES is planned to be removed starting v0.9. If the program depends on it, consider moving away from it by employing newer type names directly"
#endif
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
/*
 * Manual byteshift. Best for old compilers which don't inline memcpy.
 * We actually directly use XXH_readLE32 and XXH_readBE32.
 */
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/*
 * Force direct memory access. Only works on CPUs which support unaligned
 * memory access in hardware.
 */
static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

#ifdef XXH_OLD_NAMES
typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
#endif
static xxh_u32 XXH_read32(const void* ptr)
{
    typedef __attribute__((aligned(1))) xxh_u32 xxh_unalign32;
    return *((const xxh_unalign32*)ptr);
}

#else

/*
 * Portable and safe solution. Generally efficient.
 */
static xxh_u32 XXH_read32(const void* memPtr)
{
    xxh_u32 val;
    XXH_memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif   /* XXH_FORCE_MEMORY_ACCESS */
#ifndef XXH_CPU_LITTLE_ENDIAN
/*
 * Try to detect endianness automatically, to avoid the nonstandard behavior
 * in `XXH_isLittleEndian()`
 */
#  if defined(_WIN32) /* Windows is always little endian */ \
     || defined(__LITTLE_ENDIAN__) \
     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#    define XXH_CPU_LITTLE_ENDIAN 1
#  elif defined(__BIG_ENDIAN__) \
     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#    define XXH_CPU_LITTLE_ENDIAN 0
#  else
/*
 * Runtime test, presumed to simplify to a constant by the compiler.
 */
static int XXH_isLittleEndian(void)
{
    /*
     * Portable and well-defined behavior.
     * Don't use static: it is detrimental to performance.
     */
    const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
    return one.c[0];
}
#    define XXH_CPU_LITTLE_ENDIAN   XXH_isLittleEndian()
#  endif
#endif
#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

#ifdef __has_builtin
#  define XXH_HAS_BUILTIN(x) __has_builtin(x)
#else
#  define XXH_HAS_BUILTIN(x) 0
#endif
#if XXH_HAS_BUILTIN(__builtin_unreachable)
#  define XXH_UNREACHABLE() __builtin_unreachable()

#elif defined(_MSC_VER)
#  define XXH_UNREACHABLE() __assume(0)

#else
#  define XXH_UNREACHABLE()
#endif

#if XXH_HAS_BUILTIN(__builtin_assume)
#  define XXH_ASSUME(c) __builtin_assume(c)
#else
#  define XXH_ASSUME(c) if (!(c)) { XXH_UNREACHABLE(); }
#endif
#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
                               && XXH_HAS_BUILTIN(__builtin_rotateleft64)
#  define XXH_rotl32 __builtin_rotateleft32
#  define XXH_rotl64 __builtin_rotateleft64
/* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */
#elif defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#  define XXH_rotl64(x,r) _rotl64(x,r)
#else
#  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
#  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
#endif
#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap32 _byteswap_ulong
#elif XXH_GCC_VERSION >= 403
#  define XXH_swap32 __builtin_bswap32
#else
static xxh_u32 XXH_swap32 (xxh_u32 x)
{
    return  ((x << 24) & 0xff000000 ) |
            ((x <<  8) & 0x00ff0000 ) |
            ((x >>  8) & 0x0000ff00 ) |
            ((x >> 24) & 0x000000ff );
}
#endif
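/*
 * Worked example (illustrative, not part of the original header):
 * XXH_swap32(0x11223344) == 0x44332211 -- each byte moves to the mirrored
 * position, which is what the readLE/readBE helpers below rely on when the
 * host endianness doesn't match the requested byte order.
 */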
/*!
 * @internal
 * @brief Enum to indicate whether a pointer is aligned.
 */
typedef enum {
    XXH_aligned,  /*!< Aligned */
    XXH_unaligned /*!< Possibly unaligned */
} XXH_alignment;

/*
 * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
 */
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))

XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[0]
         | ((xxh_u32)bytePtr[1] << 8)
         | ((xxh_u32)bytePtr[2] << 16)
         | ((xxh_u32)bytePtr[3] << 24);
}

XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[3]
         | ((xxh_u32)bytePtr[2] << 8)
         | ((xxh_u32)bytePtr[1] << 16)
         | ((xxh_u32)bytePtr[0] << 24);
}

#else
XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
}

static xxh_u32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}
#endif

XXH_FORCE_INLINE xxh_u32
XXH_readLE32_align(const void* ptr, XXH_alignment align)
{
    if (align==XXH_unaligned) {
        return XXH_readLE32(ptr);
    } else {
        return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
    }
}
#define XXH_PRIME32_1  0x9E3779B1U
#define XXH_PRIME32_2  0x85EBCA77U
#define XXH_PRIME32_3  0xC2B2AE3DU
#define XXH_PRIME32_4  0x27D4EB2FU
#define XXH_PRIME32_5  0x165667B1U

#ifdef XXH_OLD_NAMES
#  define PRIME32_1 XXH_PRIME32_1
#  define PRIME32_2 XXH_PRIME32_2
#  define PRIME32_3 XXH_PRIME32_3
#  define PRIME32_4 XXH_PRIME32_4
#  define PRIME32_5 XXH_PRIME32_5
#endif
/*!
 * @internal
 * @brief Normal stripe processing routine.
 */
static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
{
    acc += input * XXH_PRIME32_2;
    acc  = XXH_rotl32(acc, 13);
    acc *= XXH_PRIME32_1;
#if (defined(__SSE4_1__) || defined(__aarch64__) || defined(__wasm_simd128__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
    /* UGLY HACK: prevent autovectorization, which hurts performance here. */
    XXH_COMPILER_GUARD(acc);
#endif
    return acc;
}
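/*
 * Illustrative note (not part of the original header): one round is
 *   acc = XXH_rotl32(acc + input * XXH_PRIME32_2, 13) * XXH_PRIME32_1;
 * i.e. multiply-accumulate, rotate, multiply -- the core mixing step applied
 * independently to each of the four lanes v1..v4.
 */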
/*!
 * @internal
 * @brief Mixes all bits to finalize the hash.
 */
static xxh_u32 XXH32_avalanche(xxh_u32 hash)
{
    hash ^= hash >> 15;
    hash *= XXH_PRIME32_2;
    hash ^= hash >> 13;
    hash *= XXH_PRIME32_3;
    hash ^= hash >> 16;
    return hash;
}

#define XXH_get32bits(p) XXH_readLE32_align(p, align)
/*!
 * @internal
 * @brief Processes the last 0-15 bytes of @p ptr.
 */
static XXH_PUREF xxh_u32
XXH32_finalize(xxh_u32 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
{
#define XXH_PROCESS1 do {                             \
    hash += (*ptr++) * XXH_PRIME32_5;                 \
    hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1;      \
} while (0)

#define XXH_PROCESS4 do {                             \
    hash += XXH_get32bits(ptr) * XXH_PRIME32_3;       \
    ptr += 4;                                         \
    hash  = XXH_rotl32(hash, 17) * XXH_PRIME32_4;     \
} while (0)

    if (ptr==NULL) XXH_ASSERT(len == 0);

    /* Compact rerolled version; generally faster */
    if (!XXH32_ENDJMP) {
        len &= 15;
        while (len >= 4) {
            XXH_PROCESS4;
            len -= 4;
        }
        while (len > 0) {
            XXH_PROCESS1;
            --len;
        }
        return XXH32_avalanche(hash);
    } else {
         switch(len&15) /* or switch(bEnd - p) */ {
           case 12:      XXH_PROCESS4; XXH_FALLTHROUGH;
           case 8:       XXH_PROCESS4; XXH_FALLTHROUGH;
           case 4:       XXH_PROCESS4;
                         return XXH32_avalanche(hash);

           case 13:      XXH_PROCESS4; XXH_FALLTHROUGH;
           case 9:       XXH_PROCESS4; XXH_FALLTHROUGH;
           case 5:       XXH_PROCESS4;
                         XXH_PROCESS1;
                         return XXH32_avalanche(hash);

           case 14:      XXH_PROCESS4; XXH_FALLTHROUGH;
           case 10:      XXH_PROCESS4; XXH_FALLTHROUGH;
           case 6:       XXH_PROCESS4;
                         XXH_PROCESS1;
                         XXH_PROCESS1;
                         return XXH32_avalanche(hash);

           case 15:      XXH_PROCESS4; XXH_FALLTHROUGH;
           case 11:      XXH_PROCESS4; XXH_FALLTHROUGH;
           case 7:       XXH_PROCESS4; XXH_FALLTHROUGH;
           case 3:       XXH_PROCESS1; XXH_FALLTHROUGH;
           case 2:       XXH_PROCESS1; XXH_FALLTHROUGH;
           case 1:       XXH_PROCESS1; XXH_FALLTHROUGH;
           case 0:       return XXH32_avalanche(hash);
        }
        XXH_ASSERT(0);
        return hash;   /* reaching this point is deemed impossible */
    }
}

#ifdef XXH_OLD_NAMES
#  define PROCESS1 XXH_PROCESS1
#  define PROCESS4 XXH_PROCESS4
#else
#  undef XXH_PROCESS1
#  undef XXH_PROCESS4
#endif
/*!
 * @internal
 * @brief The implementation for @ref XXH32().
 */
XXH_FORCE_INLINE XXH_PUREF xxh_u32
XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
{
    xxh_u32 h32;

    if (input==NULL) XXH_ASSERT(len == 0);

    if (len>=16) {
        const xxh_u8* const bEnd = input + len;
        const xxh_u8* const limit = bEnd - 15;
        xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
        xxh_u32 v2 = seed + XXH_PRIME32_2;
        xxh_u32 v3 = seed + 0;
        xxh_u32 v4 = seed - XXH_PRIME32_1;

        do {
            v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
            v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
            v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
            v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
        } while (input < limit);

        h32 = XXH_rotl32(v1, 1)  + XXH_rotl32(v2, 7)
            + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    } else {
        h32  = seed + XXH_PRIME32_5;
    }

    h32 += (xxh_u32)len;

    return XXH32_finalize(h32, input, len&15, align);
}

/*! @ingroup XXH32_family */
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
{
#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH32_state_t state;
    XXH32_reset(&state, seed);
    XXH32_update(&state, (const xxh_u8*)input, len);
    return XXH32_digest(&state);
#else
    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
            return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
    }   }

    return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
#endif
}
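/*
 * Illustrative usage sketch (not part of the original header):
 *
 * @code{.c}
 *   const char msg[] = "hello";
 *   XXH32_hash_t h = XXH32(msg, sizeof(msg)-1, 0);   // one-shot, seed 0
 * @endcode
 */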
#ifndef XXH_NO_STREAM

/*! @ingroup XXH32_family */
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
{
    XXH_memcpy(dstState, srcState, sizeof(*dstState));
}

/*! @ingroup XXH32_family */
XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
{
    XXH_ASSERT(statePtr != NULL);
    memset(statePtr, 0, sizeof(*statePtr));
    statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
    statePtr->v[1] = seed + XXH_PRIME32_2;
    statePtr->v[2] = seed + 0;
    statePtr->v[3] = seed - XXH_PRIME32_1;
    return XXH_OK;
}
/*! @ingroup XXH32_family */
XXH_PUBLIC_API XXH_errorcode
XXH32_update(XXH32_state_t* state, const void* input, size_t len)
{
    if (input==NULL) {
        XXH_ASSERT(len == 0);
        return XXH_OK;
    }

    {   const xxh_u8* p = (const xxh_u8*)input;
        const xxh_u8* const bEnd = p + len;

        state->total_len_32 += (XXH32_hash_t)len;
        state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));

        if (state->memsize + len < 16)  {   /* fill in tmp buffer */
            XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
            state->memsize += (XXH32_hash_t)len;
            return XXH_OK;
        }

        if (state->memsize) {   /* some data left from previous update */
            XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
            {   const xxh_u32* p32 = state->mem32;
                state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++;
                state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++;
                state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++;
                state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32));
            }
            p += 16-state->memsize;
            state->memsize = 0;
        }

        if (p <= bEnd-16) {
            const xxh_u8* const limit = bEnd - 16;

            do {
                state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4;
                state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4;
                state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4;
                state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4;
            } while (p<=limit);
        }

        if (p < bEnd) {
            XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
            state->memsize = (unsigned)(bEnd-p);
        }
    }
    return XXH_OK;
}
/*! @ingroup XXH32_family */
XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state)
{
    xxh_u32 h32;

    if (state->large_len) {
        h32 = XXH_rotl32(state->v[0], 1)
            + XXH_rotl32(state->v[1], 7)
            + XXH_rotl32(state->v[2], 12)
            + XXH_rotl32(state->v[3], 18);
    } else {
        h32 = state->v[2] /* == seed */ + XXH_PRIME32_5;
    }

    h32 += state->total_len_32;

    return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
}
#endif /* !XXH_NO_STREAM */

/*******   Canonical representation   *******/

/*! @ingroup XXH32_family */
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
    XXH_memcpy(dst, &hash, sizeof(*dst));
}

/*! @ingroup XXH32_family */
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
{
    return XXH_readBE32(src);
}
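/*
 * Illustrative streaming sketch (not part of the original header), hashing
 * two chunks with the same result as a single XXH32() call over the
 * concatenated data:
 *
 * @code{.c}
 *   XXH32_state_t* s = XXH32_createState();
 *   XXH32_reset(s, 0);
 *   XXH32_update(s, chunk1, len1);
 *   XXH32_update(s, chunk2, len2);
 *   XXH32_hash_t h = XXH32_digest(s);
 *   XXH32_freeState(s);
 * @endcode
 */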
#ifndef XXH_NO_LONG_LONG
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
/*
 * Manual byteshift. Best for old compilers which don't inline memcpy.
 * We actually directly use XXH_readLE64 and XXH_readBE64.
 */
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
static xxh_u64 XXH_read64(const void* memPtr)
{
    return *(const xxh_u64*) memPtr;
}

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

#ifdef XXH_OLD_NAMES
typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
#endif
static xxh_u64 XXH_read64(const void* ptr)
{
    typedef __attribute__((aligned(1))) xxh_u64 xxh_unalign64;
    return *((const xxh_unalign64*)ptr);
}

#else

/*
 * Portable and safe solution. Generally efficient.
 */
static xxh_u64 XXH_read64(const void* memPtr)
{
    xxh_u64 val;
    XXH_memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif   /* XXH_FORCE_MEMORY_ACCESS */
#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap64 _byteswap_uint64
#elif XXH_GCC_VERSION >= 403
#  define XXH_swap64 __builtin_bswap64
#else
static xxh_u64 XXH_swap64(xxh_u64 x)
{
    return  ((x << 56) & 0xff00000000000000ULL) |
            ((x << 40) & 0x00ff000000000000ULL) |
            ((x << 24) & 0x0000ff0000000000ULL) |
            ((x << 8)  & 0x000000ff00000000ULL) |
            ((x >> 8)  & 0x00000000ff000000ULL) |
            ((x >> 24) & 0x0000000000ff0000ULL) |
            ((x >> 40) & 0x000000000000ff00ULL) |
            ((x >> 56) & 0x00000000000000ffULL);
}
#endif
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))

XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[0]
         | ((xxh_u64)bytePtr[1] << 8)
         | ((xxh_u64)bytePtr[2] << 16)
         | ((xxh_u64)bytePtr[3] << 24)
         | ((xxh_u64)bytePtr[4] << 32)
         | ((xxh_u64)bytePtr[5] << 40)
         | ((xxh_u64)bytePtr[6] << 48)
         | ((xxh_u64)bytePtr[7] << 56);
}

XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[7]
         | ((xxh_u64)bytePtr[6] << 8)
         | ((xxh_u64)bytePtr[5] << 16)
         | ((xxh_u64)bytePtr[4] << 24)
         | ((xxh_u64)bytePtr[3] << 32)
         | ((xxh_u64)bytePtr[2] << 40)
         | ((xxh_u64)bytePtr[1] << 48)
         | ((xxh_u64)bytePtr[0] << 56);
}

#else
XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
}

static xxh_u64 XXH_readBE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}
#endif

XXH_FORCE_INLINE xxh_u64
XXH_readLE64_align(const void* ptr, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return XXH_readLE64(ptr);
    else
        return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
}
#define XXH_PRIME64_1  0x9E3779B185EBCA87ULL
#define XXH_PRIME64_2  0xC2B2AE3D27D4EB4FULL
#define XXH_PRIME64_3  0x165667B19E3779F9ULL
#define XXH_PRIME64_4  0x85EBCA77C2B2AE63ULL
#define XXH_PRIME64_5  0x27D4EB2F165667C5ULL

#ifdef XXH_OLD_NAMES
#  define PRIME64_1 XXH_PRIME64_1
#  define PRIME64_2 XXH_PRIME64_2
#  define PRIME64_3 XXH_PRIME64_3
#  define PRIME64_4 XXH_PRIME64_4
#  define PRIME64_5 XXH_PRIME64_5
#endif
/*! @copydoc XXH32_round */
static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
{
    acc += input * XXH_PRIME64_2;
    acc  = XXH_rotl64(acc, 31);
    acc *= XXH_PRIME64_1;
#if (defined(__AVX512F__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
    /* UGLY HACK: prevent autovectorization, which hurts performance here. */
    XXH_COMPILER_GUARD(acc);
#endif
    return acc;
}

static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
{
    val  = XXH64_round(0, val);
    acc ^= val;
    acc  = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
    return acc;
}

/*! @copydoc XXH32_avalanche */
static xxh_u64 XXH64_avalanche(xxh_u64 hash)
{
    hash ^= hash >> 33;
    hash *= XXH_PRIME64_2;
    hash ^= hash >> 29;
    hash *= XXH_PRIME64_3;
    hash ^= hash >> 32;
    return hash;
}
#define XXH_get64bits(p) XXH_readLE64_align(p, align)

/*! @copydoc XXH32_finalize */
static XXH_PUREF xxh_u64
XXH64_finalize(xxh_u64 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
{
    if (ptr==NULL) XXH_ASSERT(len == 0);
    len &= 31;
    while (len >= 8) {
        xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
        ptr += 8;
        hash ^= k1;
        hash  = XXH_rotl64(hash,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
        len -= 8;
    }
    if (len >= 4) {
        hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
        ptr += 4;
        hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
        len -= 4;
    }
    while (len > 0) {
        hash ^= (*ptr++) * XXH_PRIME64_5;
        hash = XXH_rotl64(hash, 11) * XXH_PRIME64_1;
        --len;
    }
    return  XXH64_avalanche(hash);
}

#ifdef XXH_OLD_NAMES
#  define PROCESS1_64 XXH_PROCESS1_64
#  define PROCESS4_64 XXH_PROCESS4_64
#  define PROCESS8_64 XXH_PROCESS8_64
#else
#  undef XXH_PROCESS1_64
#  undef XXH_PROCESS4_64
#  undef XXH_PROCESS8_64
#endif
/*! @copydoc XXH32_endian_align */
XXH_FORCE_INLINE XXH_PUREF xxh_u64
XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
{
    xxh_u64 h64;

    if (input==NULL) XXH_ASSERT(len == 0);

    if (len>=32) {
        const xxh_u8* const bEnd = input + len;
        const xxh_u8* const limit = bEnd - 31;
        xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
        xxh_u64 v2 = seed + XXH_PRIME64_2;
        xxh_u64 v3 = seed + 0;
        xxh_u64 v4 = seed - XXH_PRIME64_1;

        do {
            v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
            v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
            v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
            v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
        } while (input<limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);

    } else {
        h64  = seed + XXH_PRIME64_5;
    }

    h64 += (xxh_u64) len;

    return XXH64_finalize(h64, input, len, align);
}

/*! @ingroup XXH64_family */
XXH_PUBLIC_API XXH64_hash_t XXH64 (XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
{
#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH64_state_t state;
    XXH64_reset(&state, seed);
    XXH64_update(&state, (const xxh_u8*)input, len);
    return XXH64_digest(&state);
#else
    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 7)==0) {  /* Input is aligned, leverage the speed benefit */
            return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
    }   }

    return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
#endif
}
#ifndef XXH_NO_STREAM

/*! @ingroup XXH64_family */
XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dstState, const XXH64_state_t* srcState)
{
    XXH_memcpy(dstState, srcState, sizeof(*dstState));
}

/*! @ingroup XXH64_family */
XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed)
{
    XXH_ASSERT(statePtr != NULL);
    memset(statePtr, 0, sizeof(*statePtr));
    statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
    statePtr->v[1] = seed + XXH_PRIME64_2;
    statePtr->v[2] = seed + 0;
    statePtr->v[3] = seed - XXH_PRIME64_1;
    return XXH_OK;
}
/*! @ingroup XXH64_family */
XXH_PUBLIC_API XXH_errorcode
XXH64_update (XXH_NOESCAPE XXH64_state_t* state, XXH_NOESCAPE const void* input, size_t len)
{
    if (input==NULL) {
        XXH_ASSERT(len == 0);
        return XXH_OK;
    }

    {   const xxh_u8* p = (const xxh_u8*)input;
        const xxh_u8* const bEnd = p + len;

        state->total_len += len;

        if (state->memsize + len < 32) {  /* fill in tmp buffer */
            XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
            state->memsize += (xxh_u32)len;
            return XXH_OK;
        }

        if (state->memsize) {   /* tmp buffer is full */
            XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
            state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0));
            state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1));
            state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2));
            state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3));
            p += 32 - state->memsize;
            state->memsize = 0;
        }

        if (p+32 <= bEnd) {
            const xxh_u8* const limit = bEnd - 32;

            do {
                state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8;
                state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8;
                state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8;
                state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8;
            } while (p<=limit);
        }

        if (p < bEnd) {
            XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
            state->memsize = (unsigned)(bEnd-p);
        }
    }
    return XXH_OK;
}
/*! @ingroup XXH64_family */
XXH_PUBLIC_API XXH64_hash_t XXH64_digest(XXH_NOESCAPE const XXH64_state_t* state)
{
    xxh_u64 h64;

    if (state->total_len >= 32) {
        h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7)
            + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18);
        h64 = XXH64_mergeRound(h64, state->v[0]);
        h64 = XXH64_mergeRound(h64, state->v[1]);
        h64 = XXH64_mergeRound(h64, state->v[2]);
        h64 = XXH64_mergeRound(h64, state->v[3]);
    } else {
        h64  = state->v[2] /*seed*/ + XXH_PRIME64_5;
    }

    h64 += (xxh_u64) state->total_len;

    return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
}
#endif /* !XXH_NO_STREAM */

/******* Canonical representation *******/

/*! @ingroup XXH64_family */
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
    XXH_memcpy(dst, &hash, sizeof(*dst));
}

/*! @ingroup XXH64_family */
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src)
{
    return XXH_readBE64(src);
}
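/*
 * Illustrative sketch (not part of the original header): XXH64 mirrors the
 * XXH32 API with 64-bit primes and an 8-byte canonical form:
 *
 * @code{.c}
 *   XXH64_hash_t h = XXH64(buf, len, 0);
 *   XXH64_canonical_t c;
 *   XXH64_canonicalFromHash(&c, h);   // 8 big-endian bytes, stable on disk
 * @endcode
 */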
#if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
#  define XXH_RESTRICT   /* disable */
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* >= C99 */
#  define XXH_RESTRICT   restrict
#elif (defined (__GNUC__) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))) \
   || (defined (__clang__)) \
   || (defined (_MSC_VER) && (_MSC_VER >= 1400)) \
   || (defined (__INTEL_COMPILER) && (__INTEL_COMPILER >= 1300))
/*
 * There are a LOT more compilers that recognize __restrict but this
 * covers the major ones.
 */
#  define XXH_RESTRICT   __restrict
#else
#  define XXH_RESTRICT   /* disable */
#endif

#if (defined(__GNUC__) && (__GNUC__ >= 3))  \
  || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
  || defined(__clang__)
#    define XXH_likely(x) __builtin_expect(x, 1)
#    define XXH_unlikely(x) __builtin_expect(x, 0)
#else
#    define XXH_likely(x) (x)
#    define XXH_unlikely(x) (x)
#endif

#ifndef XXH_HAS_INCLUDE
#  ifdef __has_include
/*
 * Not defined as XXH_HAS_INCLUDE(x) (function-like) because some compilers
 * will error on an unexpanded `__has_include`.
 */
#    define XXH_HAS_INCLUDE __has_include
#  else
#    define XXH_HAS_INCLUDE(x) 0
#  endif
#endif
#if defined(__GNUC__) || defined(__clang__)
#  if defined(__ARM_FEATURE_SVE)
#    include <arm_sve.h>
#  endif
#  if defined(__ARM_NEON__) || defined(__ARM_NEON) \
   || (defined(_M_ARM) && _M_ARM >= 7) \
   || defined(_M_ARM64) || defined(_M_ARM64EC) \
   || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* WASM SIMD128 via SIMDe */
#    define inline __inline__  /* circumvent a clang bug */
#    include <arm_neon.h>
#    undef inline
#  elif defined(__AVX2__)
#    include <immintrin.h>
#  elif defined(__SSE2__)
#    include <emmintrin.h>
#  endif
#endif

#if defined(_MSC_VER)
#  include <intrin.h>
#endif

#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
#  warning "XXH3 is highly inefficient without ARM or Thumb-2."
#endif
#ifdef XXH_DOXYGEN
/*! @brief Overrides the vectorization implementation chosen for XXH3. */
#  define XXH_VECTOR XXH_SCALAR

/*! @brief Possible values for @ref XXH_VECTOR. */
enum XXH_VECTOR_TYPE /* fake enum */ {
    XXH_SCALAR = 0,  /*!< Portable scalar version */
    XXH_SSE2   = 1,  /*!< SSE2 for Pentium 4, Opteron, all x86_64 */
    XXH_AVX2   = 2,  /*!< AVX2 for Haswell and Bulldozer */
    XXH_AVX512 = 3,  /*!< AVX512 for Skylake and Icelake */
    XXH_NEON   = 4,  /*!< NEON for most ARMv7-A, all AArch64, and WASM SIMD128 */
    XXH_VSX    = 5,  /*!< VSX and ZVector for POWER8/z13 (64-bit) */
    XXH_SVE    = 6   /*!< SVE for some ARMv8-A and ARMv9-A */
};

/*! @brief Selects the minimum alignment for XXH3's accumulators. */
#  define XXH_ACC_ALIGN 8
#endif

/* Actual definition */
#ifndef XXH_DOXYGEN
#  define XXH_SCALAR 0
#  define XXH_SSE2   1
#  define XXH_AVX2   2
#  define XXH_AVX512 3
#  define XXH_NEON   4
#  define XXH_VSX    5
#  define XXH_SVE    6
#endif
#ifndef XXH_VECTOR    /* can be defined on command line */
#  if defined(__ARM_FEATURE_SVE)
#    define XXH_VECTOR XXH_SVE
#  elif ( \
        defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
     || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \
     || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* wasm simd128 via SIMDe */ \
   ) && ( \
        defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
    || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
   )
#    define XXH_VECTOR XXH_NEON
#  elif defined(__AVX512F__)
#    define XXH_VECTOR XXH_AVX512
#  elif defined(__AVX2__)
#    define XXH_VECTOR XXH_AVX2
#  elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
#    define XXH_VECTOR XXH_SSE2
#  elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
     || (defined(__s390x__) && defined(__VEC__)) \
     && defined(__GNUC__) /* TODO: IBM XL */
#    define XXH_VECTOR XXH_VSX
#  else
#    define XXH_VECTOR XXH_SCALAR
#  endif
#endif

/* __ARM_FEATURE_SVE is only supported by GCC & Clang. */
#if (XXH_VECTOR == XXH_SVE) && !defined(__ARM_FEATURE_SVE)
#  ifdef _MSC_VER
#    pragma warning(once : 4606)
#  else
#    warning "__ARM_FEATURE_SVE isn't supported. Use SCALAR instead."
#  endif
#  undef XXH_VECTOR
#  define XXH_VECTOR XXH_SCALAR
#endif
/*
 * Controls the alignment of the accumulator,
 * for compatibility with aligned vector loads, which are usually faster.
 */
#ifndef XXH_ACC_ALIGN
#  if defined(XXH_X86DISPATCH)
#     define XXH_ACC_ALIGN 64  /* for compatibility with avx512 */
#  elif XXH_VECTOR == XXH_SCALAR  /* scalar */
#     define XXH_ACC_ALIGN 8
#  elif XXH_VECTOR == XXH_SSE2  /* sse2 */
#     define XXH_ACC_ALIGN 16
#  elif XXH_VECTOR == XXH_AVX2  /* avx2 */
#     define XXH_ACC_ALIGN 32
#  elif XXH_VECTOR == XXH_NEON  /* neon */
#     define XXH_ACC_ALIGN 16
#  elif XXH_VECTOR == XXH_VSX   /* vsx */
#     define XXH_ACC_ALIGN 16
#  elif XXH_VECTOR == XXH_AVX512  /* avx512 */
#     define XXH_ACC_ALIGN 64
#  elif XXH_VECTOR == XXH_SVE   /* sve */
#     define XXH_ACC_ALIGN 64
#  endif
#endif

#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
    || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
#  define XXH_SEC_ALIGN XXH_ACC_ALIGN
#elif XXH_VECTOR == XXH_SVE
#  define XXH_SEC_ALIGN XXH_ACC_ALIGN
#else
#  define XXH_SEC_ALIGN 8
#endif

#if defined(__GNUC__) || defined(__clang__)
#  define XXH_ALIASING __attribute__((may_alias))
#else
#  define XXH_ALIASING /* nothing */
#endif
/*
 * GCC usually generates the best code with -O3 for xxHash, except for AVX2
 * where it is overzealous in its unrolling, so -O2 is used instead.
 */
#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
  && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
#  pragma GCC push_options
#  pragma GCC optimize("-O2")
#endif
#if XXH_VECTOR == XXH_NEON

/*
 * The load is marked may_alias: without it, GCC -O3 on macOS AArch64 can
 * optimize out the hashLong loop because of an aliasing violation.
 */
typedef uint64x2_t xxh_aliasing_uint64x2_t XXH_ALIASING;

/*!
 * @internal
 * @brief `vld1q_u64`, but faster and alignment-safe.
 */
#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__)
XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) /* silence -Wcast-align */
{
    return *(xxh_aliasing_uint64x2_t const *)ptr;
}
#else
XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr)
{
    return vreinterpretq_u64_u8(vld1q_u8((uint8_t const*)ptr));
}
#endif
/*!
 * @internal
 * @brief `vmlal_u32` on the low and high halves of a vector.
 *
 * On GCC < 11 for AArch64, this uses inline assembly to work around a
 * codegen bug; elsewhere the portable intrinsics are used.
 */
#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 11
XXH_FORCE_INLINE uint64x2_t
XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
{
    /* Inline assembly is the only way */
    __asm__("umlal   %0.2d, %1.2s, %2.2s" : "+w" (acc) : "w" (lhs), "w" (rhs));
    return acc;
}
XXH_FORCE_INLINE uint64x2_t
XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
{
    /* This intrinsic works as expected */
    return vmlal_high_u32(acc, lhs, rhs);
}
#else
/* Portable intrinsic versions */
XXH_FORCE_INLINE uint64x2_t
XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
{
    return vmlal_u32(acc, vget_low_u32(lhs), vget_low_u32(rhs));
}
/*! @copydoc XXH_vmlal_low_u32
 * Assume the compiler converts this to vmlal_high_u32 on aarch64 */
XXH_FORCE_INLINE uint64x2_t
XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
{
    return vmlal_u32(acc, vget_high_u32(lhs), vget_high_u32(rhs));
}
#endif
# ifndef XXH3_NEON_LANES
#  if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \
   && !defined(__APPLE__) && XXH_SIZE_OPT <= 0
#    define XXH3_NEON_LANES 6
#  else
#    define XXH3_NEON_LANES XXH_ACC_NB
#  endif
# endif
#endif  /* XXH_VECTOR == XXH_NEON */
#if XXH_VECTOR == XXH_VSX
/*
 * Annoyingly, these headers _may_ define three macros: `bool`, `vector`,
 * and `pixel`. Save and restore them around the include.
 */
#  pragma push_macro("bool")
#  pragma push_macro("vector")
#  pragma push_macro("pixel")
/* silence potential macro redefined warnings */
#  undef bool
#  undef vector
#  undef pixel

#  if defined(__s390x__)
#    include <s390intrin.h>
#  else
#    include <altivec.h>
#  endif

/* Restore the original macro values, if applicable. */
#  pragma pop_macro("pixel")
#  pragma pop_macro("vector")
#  pragma pop_macro("bool")

typedef __vector unsigned long long xxh_u64x2;
typedef __vector unsigned char xxh_u8x16;
typedef __vector unsigned xxh_u32x4;

/*
 * A load/store vector type with may_alias, for the same GCC aliasing
 * issue as NEON.
 */
typedef xxh_u64x2 xxh_aliasing_u64x2 XXH_ALIASING;

#  ifndef XXH_VSX_BE
#    if defined(__BIG_ENDIAN__) \
  || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#      define XXH_VSX_BE 1
#    elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
#      warning "-maltivec=be is not recommended. Please use native endianness."
#      define XXH_VSX_BE 1
#    else
#      define XXH_VSX_BE 0
#    endif
#  endif /* !defined(XXH_VSX_BE) */

#  if XXH_VSX_BE
#    if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
#      define XXH_vec_revb vec_revb
#    else
/*!
 * A polyfill for POWER9's vec_revb().
 */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
{
    xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
                                  0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
    return vec_perm(val, val, vByteSwap);
}
#    endif
#  endif /* XXH_VSX_BE */

/*!
 * Performs an unaligned vector load and byte swaps it on big endian.
 */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
{
    xxh_u64x2 ret;
    XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
#  if XXH_VSX_BE
    ret = XXH_vec_revb(ret);
#  endif
    return ret;
}

/*
 * vec_mulo and vec_mule are very problematic intrinsics on PowerPC:
 * they weren't added until GCC 8, and they are endian dependent.
 */
#  if defined(__s390x__)
 /* s390x is always big endian, no issue on this platform */
#    define XXH_vec_mulo vec_mulo
#    define XXH_vec_mule vec_mule
#  elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) && !defined(__ibmxl__)
/* Clang has a better way to control this, we can just use the builtin which doesn't swap. */
#    define XXH_vec_mulo __builtin_altivec_vmulouw
#    define XXH_vec_mule __builtin_altivec_vmuleuw
#  else
/* gcc needs inline assembly */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
{
    xxh_u64x2 result;
    __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
    return result;
}
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
{
    xxh_u64x2 result;
    __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
    return result;
}
#  endif /* XXH_vec_mulo, XXH_vec_mule */
#endif /* XXH_VECTOR == XXH_VSX */
#if XXH_VECTOR == XXH_SVE
#define ACCRND(acc, offset) \
do { \
    svuint64_t input_vec = svld1_u64(mask, xinput + offset);         \
    svuint64_t secret_vec = svld1_u64(mask, xsecret + offset);       \
    svuint64_t mixed = sveor_u64_x(mask, secret_vec, input_vec);     \
    svuint64_t swapped = svtbl_u64(input_vec, kSwap);                \
    svuint64_t mixed_lo = svextw_u64_x(mask, mixed);                 \
    svuint64_t mixed_hi = svlsr_n_u64_x(mask, mixed, 32);            \
    svuint64_t mul = svmad_u64_x(mask, mixed_lo, mixed_hi, swapped); \
    acc = svadd_u64_x(mask, acc, mul);                               \
} while (0)
#endif /* XXH_VECTOR == XXH_SVE */
/* prefetch
 * can be disabled, by declaring XXH_NO_PREFETCH build macro */
#if defined(XXH_NO_PREFETCH)
#  define XXH_PREFETCH(ptr)  (void)(ptr)  /* disabled */
#else
#  if XXH_SIZE_OPT >= 1
#    define XXH_PREFETCH(ptr) (void)(ptr)
#  elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))  /* _mm_prefetch() not defined outside of x86/x64 */
#    include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
#    define XXH_PREFETCH(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
#  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
#    define XXH_PREFETCH(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
#  else
#    define XXH_PREFETCH(ptr) (void)(ptr)  /* disabled */
#  endif
#endif  /* XXH_NO_PREFETCH */
/* ==========================================
 * XXH3 default settings
 * ========================================== */

#define XXH_SECRET_DEFAULT_SIZE 192   /* minimum XXH3_SECRET_SIZE_MIN */

#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
#  error "default keyset is not large enough"
#endif

/*! Pseudorandom secret taken directly from FARSH. */
XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
    0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
    0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
    0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
    0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
    0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
    0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
    0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
    0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
    0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
    0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
    0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
    0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
};

static const xxh_u64 PRIME_MX1 = 0x165667919E3779F9ULL;
static const xxh_u64 PRIME_MX2 = 0x9FB21C651E98DF25ULL;

#ifdef XXH_OLD_NAMES
#  define kSecret XXH3_kSecret
#endif
#ifdef XXH_DOXYGEN
/*!
 * @brief Calculates a 32-bit to 64-bit long multiply.
 */
XXH_FORCE_INLINE xxh_u64
XXH_mult32to64(xxh_u64 x, xxh_u64 y)
{
    return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
}
#elif defined(_MSC_VER) && defined(_M_IX86)
#  define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
#else
/*
 * Downcast + upcast is usually better than masking on older compilers like
 * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers.
 */
#  define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
#endif
/*!
 * @brief Calculates a 64->128-bit long multiply.
 */
static XXH128_hash_t
XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
{
    /*
     * GCC/Clang __uint128_t method.
     */
#if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \
    && defined(__SIZEOF_INT128__) \
    || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)

    __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
    XXH128_hash_t r128;
    r128.low64  = (xxh_u64)(product);
    r128.high64 = (xxh_u64)(product >> 64);
    return r128;

    /*
     * MSVC for x64's _umul128 method.
     */
#elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC)

#ifndef _MSC_VER
#   pragma intrinsic(_umul128)
#endif
    xxh_u64 product_high;
    xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
    XXH128_hash_t r128;
    r128.low64  = product_low;
    r128.high64 = product_high;
    return r128;

    /*
     * MSVC for ARM64's __umulh method.
     */
#elif defined(_M_ARM64) || defined(_M_ARM64EC)

#ifndef _MSC_VER
#   pragma intrinsic(__umulh)
#endif
    XXH128_hash_t r128;
    r128.low64  = lhs * rhs;
    r128.high64 = __umulh(lhs, rhs);
    return r128;

#else
    /*
     * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
     */
    xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
    xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32,        rhs & 0xFFFFFFFF);
    xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
    xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32,        rhs >> 32);

    /* Now add the products together. These will never overflow. */
    xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
    xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32)        + hi_hi;
    xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);

    XXH128_hash_t r128;
    r128.low64  = lower;
    r128.high64 = upper;
    return r128;
#endif
}
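/*
 * Worked decomposition (illustrative, not part of the original header) for
 * the portable path above: writing lhs = a*2^32 + b and rhs = c*2^32 + d,
 * the full product is
 *   a*c*2^64 + (a*d + b*c)*2^32 + b*d,
 * which is exactly what the hi_hi / hi_lo / lo_hi / lo_lo partial products
 * and the carry-propagating `cross` term compute.
 */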
/*! @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it. */
static xxh_u64
XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
{
    XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
    return product.low64 ^ product.high64;
}

/*! Seems to produce slightly better code on GCC for some reason. */
XXH_FORCE_INLINE XXH_CONSTF xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift)
{
    XXH_ASSERT(0 <= shift && shift < 64);
    return v64 ^ (v64 >> shift);
}
/*
 * This is a fast avalanche stage,
 * suitable when input bits are already partially mixed.
 */
static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
{
    h64 = XXH_xorshift64(h64, 37);
    h64 *= PRIME_MX1;
    h64 = XXH_xorshift64(h64, 32);
    return h64;
}

/*
 * This is a stronger avalanche, inspired by Pelle Evensen's rrmxmx,
 * preferable when input has not been previously mixed.
 */
static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len)
{
    h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24);
    h64 *= PRIME_MX2;
    h64 ^= (h64 >> 35) + len;
    h64 *= PRIME_MX2;
    return XXH_xorshift64(h64, 28);
}
XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(1 <= len && len <= 3);
    XXH_ASSERT(secret != NULL);
    /*
     * len = 1: combined = { input[0], 0x01, input[0], input[0] }
     * len = 2: combined = { input[1], 0x02, input[0], input[1] }
     * len = 3: combined = { input[2], 0x03, input[0], input[1] }
     */
    {   xxh_u8  const c1 = input[0];
        xxh_u8  const c2 = input[len >> 1];
        xxh_u8  const c3 = input[len - 1];
        xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2  << 24)
                               | ((xxh_u32)c3 <<  0) | ((xxh_u32)len << 8);
        xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
        xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
        return XXH64_avalanche(keyed);
    }
}

XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(4 <= len && len <= 8);
    seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
    {   xxh_u32 const input1 = XXH_readLE32(input);
        xxh_u32 const input2 = XXH_readLE32(input + len - 4);
        xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
        xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
        xxh_u64 const keyed = input64 ^ bitflip;
        return XXH3_rrmxmx(keyed, len);
    }
}

XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(9 <= len && len <= 16);
    {   xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
        xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
        xxh_u64 const input_lo = XXH_readLE64(input)           ^ bitflip1;
        xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
        xxh_u64 const acc = len
                          + XXH_swap64(input_lo) + input_hi
                          + XXH3_mul128_fold64(input_lo, input_hi);
        return XXH3_avalanche(acc);
    }
}

XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(len <= 16);
    {   if (XXH_likely(len >  8)) return XXH3_len_9to16_64b(input, len, secret, seed);
        if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed);
        if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
        return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64)));
    }
}
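/*
 * Illustrative note (not part of the original header): the dispatch above
 * means e.g. len==5 takes XXH3_len_4to8_64b(), len==12 takes
 * XXH3_len_9to16_64b(), and len==0 hashes only the seed XORed with two
 * secret words.
 */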
/*
 * DISCLAIMER: There are known *seed-dependent* multicollisions here due to
 * multiplication by zero, affecting hashes of lengths 17 to 240.
 */
XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
                                     const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
{
#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  && defined(__i386__) && defined(__SSE2__)  /* x86 + SSE2 */ \
  && !defined(XXH_ENABLE_AUTOVECTORIZE)      /* Define to disable like XXH32 hack */
    /*
     * UGLY HACK: GCC for x86 tends to autovectorize the 128-bit multiply,
     * resulting in more instructions and slower code.
     */
    XXH_COMPILER_GUARD(seed64);
#endif
    {   xxh_u64 const input_lo = XXH_readLE64(input);
        xxh_u64 const input_hi = XXH_readLE64(input+8);
        return XXH3_mul128_fold64(
            input_lo ^ (XXH_readLE64(secret)   + seed64),
            input_hi ^ (XXH_readLE64(secret+8) - seed64)
        );
    }
}
XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
                     const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                     XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(16 < len && len <= 128);

    {   xxh_u64 acc = len * XXH_PRIME64_1;
#if XXH_SIZE_OPT >= 1
        /* Smaller and cleaner, but slightly slower. */
        unsigned int i = (unsigned int)(len - 1) / 32;
        do {
            acc += XXH3_mix16B(input+16 * i, secret+32*i, seed);
            acc += XXH3_mix16B(input+len-16*(i+1), secret+32*i+16, seed);
        } while (i-- != 0);
#else
        if (len > 32) {
            if (len > 64) {
                if (len > 96) {
                    acc += XXH3_mix16B(input+48, secret+96, seed);
                    acc += XXH3_mix16B(input+len-64, secret+112, seed);
                }
                acc += XXH3_mix16B(input+32, secret+64, seed);
                acc += XXH3_mix16B(input+len-48, secret+80, seed);
            }
            acc += XXH3_mix16B(input+16, secret+32, seed);
            acc += XXH3_mix16B(input+len-32, secret+48, seed);
        }
        acc += XXH3_mix16B(input+0, secret+0, seed);
        acc += XXH3_mix16B(input+len-16, secret+16, seed);
#endif
        return XXH3_avalanche(acc);
    }
}
#define XXH3_MIDSIZE_MAX 240

XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                      XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);

    #define XXH3_MIDSIZE_STARTOFFSET 3
    #define XXH3_MIDSIZE_LASTOFFSET  17

    {   xxh_u64 acc = len * XXH_PRIME64_1;
        xxh_u64 acc_end;
        unsigned int const nbRounds = (unsigned int)len / 16;
        unsigned int i;
        XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
        for (i=0; i<8; i++) {
            acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
        }
        /* last bytes */
        acc_end = XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
        XXH_ASSERT(nbRounds >= 8);
        acc = XXH3_avalanche(acc);
#if defined(__clang__)                                /* Clang */ \
    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
    && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
        /*
         * UGLY HACK: Clang for ARMv7-A tries to vectorize this loop,
         * but NEON lacks 64-bit scalar multiplies, so scalar is fastest here.
         */
        #pragma clang loop vectorize(disable)
#endif
        for (i=8 ; i < nbRounds; i++) {
            /*
             * Prevents clang from unrolling the acc loop and interleaving
             * with this one.
             */
            XXH_COMPILER_GUARD(acc);
            acc_end += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
        }
        return XXH3_avalanche(acc + acc_end);
    }
}
/* =======     Long Keys     ======= */

#define XXH_STRIPE_LEN 64
#define XXH_SECRET_CONSUME_RATE 8   /* nb of secret bytes consumed at each accumulation */
#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))

#ifdef XXH_OLD_NAMES
#  define STRIPE_LEN XXH_STRIPE_LEN
#  define ACC_NB XXH_ACC_NB
#endif

#ifndef XXH_PREFETCH_DIST
#  ifdef __clang__
#    define XXH_PREFETCH_DIST 320
#  else
#    if (XXH_VECTOR == XXH_AVX512)
#      define XXH_PREFETCH_DIST 512
#    else
#      define XXH_PREFETCH_DIST 384
#    endif
#  endif  /* __clang__ */
#endif  /* XXH_PREFETCH_DIST */
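/*
 * Illustrative note (not part of the original header): the long-input loop
 * consumes 64-byte stripes into 8 lanes of 64-bit accumulators
 * (XXH_ACC_NB == 8), sliding the secret window forward by
 * XXH_SECRET_CONSUME_RATE (8) bytes per stripe.
 */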
/*
 * @internal
 * @brief Defines the accumulate loop XXH3_accumulate_<name>() on top of a
 * given XXH3_accumulate_512_<name>() stripe routine.
 */
#define XXH3_ACCUMULATE_TEMPLATE(name)                      \
void                                                        \
XXH3_accumulate_##name(xxh_u64* XXH_RESTRICT acc,           \
                       const xxh_u8* XXH_RESTRICT input,    \
                       const xxh_u8* XXH_RESTRICT secret,   \
                       size_t nbStripes)                    \
{                                                           \
    size_t n;                                               \
    for (n = 0; n < nbStripes; n++ ) {                      \
        const xxh_u8* const in = input + n*XXH_STRIPE_LEN;  \
        XXH_PREFETCH(in + XXH_PREFETCH_DIST);               \
        XXH3_accumulate_512_##name(                         \
                 acc,                                       \
                 in,                                        \
                 secret + n*XXH_SECRET_CONSUME_RATE);       \
    }                                                       \
}
XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
{
    if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
    XXH_memcpy(dst, &v64, sizeof(v64));
}

/* Several intrinsic functions below are supposed to accept __int64 as
 * argument, as documented in the Intel Intrinsics Guide. However, several
 * environments do not define __int64, requiring a workaround.
 */
#if !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
    typedef int64_t xxh_i64;
#else
    /* the following type must have a width of 64-bit */
    typedef long long xxh_i64;
#endif
#if (XXH_VECTOR == XXH_AVX512) \
     || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)

#ifndef XXH_TARGET_AVX512
# define XXH_TARGET_AVX512  /* disable attribute target */
#endif

XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc,
                     const void* XXH_RESTRICT input,
                     const void* XXH_RESTRICT secret)
{
    __m512i* const xacc = (__m512i *) acc;
    XXH_ASSERT((((size_t)acc) & 63) == 0);
    XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));

    {
        /* data_vec    = input[0]; */
        __m512i const data_vec    = _mm512_loadu_si512   (input);
        /* key_vec     = secret[0]; */
        __m512i const key_vec     = _mm512_loadu_si512   (secret);
        /* data_key    = data_vec ^ key_vec; */
        __m512i const data_key    = _mm512_xor_si512     (data_vec, key_vec);
        /* data_key_lo = data_key >> 32; */
        __m512i const data_key_lo = _mm512_srli_epi64    (data_key, 32);
        /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
        __m512i const product     = _mm512_mul_epu32     (data_key, data_key_lo);
        /* xacc[0] += swap(data_vec); */
        __m512i const data_swap   = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
        __m512i const sum         = _mm512_add_epi64(*xacc, data_swap);
        /* xacc[0] += product; */
        *xacc = _mm512_add_epi64(product, sum);
    }
}
XXH_FORCE_INLINE XXH_TARGET_AVX512 XXH3_ACCUMULATE_TEMPLATE(avx512)
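/*
 * Scalar-equivalent sketch (illustrative, not part of the original header)
 * of one accumulate_512 step per 64-bit lane i, matching what each SIMD
 * backend computes in parallel:
 *
 * @code{.c}
 *   xxh_u64 const data_val = XXH_readLE64((const xxh_u8*)input  + 8*i);
 *   xxh_u64 const data_key = data_val ^ XXH_readLE64((const xxh_u8*)secret + 8*i);
 *   acc[i ^ 1] += data_val;                                        // swap(data)
 *   acc[i]     += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
 * @endcode
 */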
XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 63) == 0);
    XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
    {   __m512i* const xacc = (__m512i*) acc;
        const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);

        /* xacc[0] ^= (xacc[0] >> 47) */
        __m512i const acc_vec     = *xacc;
        __m512i const shifted     = _mm512_srli_epi64    (acc_vec, 47);
        /* xacc[0] ^= secret; */
        __m512i const key_vec     = _mm512_loadu_si512   (secret);
        __m512i const data_key    = _mm512_ternarylogic_epi32(key_vec, acc_vec, shifted, 0x96 /* key_vec ^ acc_vec ^ shifted */);

        /* xacc[0] *= XXH_PRIME32_1; */
        __m512i const data_key_hi = _mm512_srli_epi64    (data_key, 32);
        __m512i const prod_lo     = _mm512_mul_epu32     (data_key, prime32);
        __m512i const prod_hi     = _mm512_mul_epu32     (data_key_hi, prime32);
        *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
    }
}
XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
    XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
    XXH_ASSERT(((size_t)customSecret & 63) == 0);
    (void)(&XXH_writeLE64);
    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
        __m512i const seed_pos = _mm512_set1_epi64((xxh_i64)seed64);
        __m512i const seed     = _mm512_mask_sub_epi64(seed_pos, 0xAA, _mm512_set1_epi8(0), seed_pos);

        const __m512i* const src  = (const __m512i*) ((const void*) XXH3_kSecret);
              __m512i* const dest = (      __m512i*) customSecret;
        int i;
        XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */
        XXH_ASSERT(((size_t)dest & 63) == 0);
        for (i=0; i < nbRounds; ++i) {
            dest[i] = _mm512_add_epi64(_mm512_load_si512(src + i), seed);
    }   }
}

#endif
#if (XXH_VECTOR == XXH_AVX2) \
    || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)

#ifndef XXH_TARGET_AVX2
# define XXH_TARGET_AVX2  /* disable attribute target */
#endif

XXH_FORCE_INLINE XXH_TARGET_AVX2 void
XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 31) == 0);
    {   __m256i* const xacc = (__m256i *) acc;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
        const __m256i* const xinput  = (const __m256i *) input;
        const __m256i* const xsecret = (const __m256i *) secret;

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
            /* data_vec    = xinput[i]; */
            __m256i const data_vec    = _mm256_loadu_si256  (xinput+i);
            /* key_vec     = xsecret[i]; */
            __m256i const key_vec     = _mm256_loadu_si256  (xsecret+i);
            /* data_key    = data_vec ^ key_vec; */
            __m256i const data_key    = _mm256_xor_si256    (data_vec, key_vec);
            /* data_key_lo = data_key >> 32; */
            __m256i const data_key_lo = _mm256_srli_epi64   (data_key, 32);
            /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
            __m256i const product     = _mm256_mul_epu32    (data_key, data_key_lo);
            /* xacc[i] += swap(data_vec); */
            __m256i const data_swap   = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
            __m256i const sum         = _mm256_add_epi64(xacc[i], data_swap);
            /* xacc[i] += product; */
            xacc[i] = _mm256_add_epi64(product, sum);
    }   }
}
XXH_FORCE_INLINE XXH_TARGET_AVX2 XXH3_ACCUMULATE_TEMPLATE(avx2)

XXH_FORCE_INLINE XXH_TARGET_AVX2 void
XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 31) == 0);
    {   __m256i* const xacc = (__m256i*) acc;
        const __m256i* const xsecret = (const __m256i *) secret;
        const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
            /* xacc[i] ^= (xacc[i] >> 47) */
            __m256i const acc_vec     = xacc[i];
            __m256i const shifted     = _mm256_srli_epi64   (acc_vec, 47);
            __m256i const data_vec    = _mm256_xor_si256    (acc_vec, shifted);
            /* xacc[i] ^= xsecret[i]; */
            __m256i const key_vec     = _mm256_loadu_si256  (xsecret+i);
            __m256i const data_key    = _mm256_xor_si256    (data_vec, key_vec);

            /* xacc[i] *= XXH_PRIME32_1; */
            __m256i const data_key_hi = _mm256_srli_epi64   (data_key, 32);
            __m256i const prod_lo     = _mm256_mul_epu32    (data_key, prime32);
            __m256i const prod_hi     = _mm256_mul_epu32    (data_key_hi, prime32);
            xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
        }
    }
}

XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
    XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
    (void)(&XXH_writeLE64);
    XXH_PREFETCH(customSecret);
    {   __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);

        const __m256i* const src  = (const __m256i*) ((const void*) XXH3_kSecret);
              __m256i*       dest = (      __m256i*) customSecret;

#       if defined(__GNUC__) || defined(__clang__)
        /*
         * On GCC & Clang, marking 'dest' as modified keeps the secret in
         * registers in the internal loop and avoids pushing to the stack.
         */
        XXH_COMPILER_GUARD(dest);
#       endif
        XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */
        XXH_ASSERT(((size_t)dest & 31) == 0);

        /* GCC -O2 needs the loop unrolled manually */
        dest[0] = _mm256_add_epi64(_mm256_load_si256(src+0), seed);
        dest[1] = _mm256_add_epi64(_mm256_load_si256(src+1), seed);
        dest[2] = _mm256_add_epi64(_mm256_load_si256(src+2), seed);
        dest[3] = _mm256_add_epi64(_mm256_load_si256(src+3), seed);
        dest[4] = _mm256_add_epi64(_mm256_load_si256(src+4), seed);
        dest[5] = _mm256_add_epi64(_mm256_load_si256(src+5), seed);
    }
}

#endif
5022#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)
5024#ifndef XXH_TARGET_SSE2
5025# define XXH_TARGET_SSE2
XXH_FORCE_INLINE XXH_TARGET_SSE2 void
XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    /* SSE2 is just a half-scale version of the AVX2 version. */
    XXH_ASSERT((((size_t)acc) & 15) == 0);
    {   __m128i* const xacc = (__m128i *) acc;
        /* Unaligned: only used for pointer arithmetic and _mm_loadu_si128 */
        const __m128i* const xinput  = (const __m128i *) input;
        const __m128i* const xsecret = (const __m128i *) secret;

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
            /* data_vec    = xinput[i]; */
            __m128i const data_vec    = _mm_loadu_si128   (xinput+i);
            /* key_vec     = xsecret[i]; */
            __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
            /* data_key    = data_vec ^ key_vec; */
            __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);
            /* data_key_lo = data_key >> 32; */
            __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
            /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
            __m128i const product     = _mm_mul_epu32     (data_key, data_key_lo);
            /* xacc[i] += swap(data_vec); */
            __m128i const data_swap   = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
            __m128i const sum         = _mm_add_epi64(xacc[i], data_swap);
            /* xacc[i] += product; */
            xacc[i] = _mm_add_epi64(product, sum);
    }   }
}
XXH_FORCE_INLINE XXH_TARGET_SSE2 XXH3_ACCUMULATE_TEMPLATE(sse2)
XXH_FORCE_INLINE XXH_TARGET_SSE2 void
XXH3_scrambleAcc_sse2( void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 15) == 0);
    {   __m128i* const xacc = (__m128i*) acc;
        const __m128i* const xsecret = (const __m128i *) secret;
        const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
            /* xacc[i] ^= (xacc[i] >> 47) */
            __m128i const acc_vec     = xacc[i];
            __m128i const shifted     = _mm_srli_epi64    (acc_vec, 47);
            __m128i const data_vec    = _mm_xor_si128     (acc_vec, shifted);
            /* xacc[i] ^= xsecret[i]; */
            __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
            __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);
            /* xacc[i] *= XXH_PRIME32_1; */
            __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
            __m128i const prod_lo     = _mm_mul_epu32     (data_key, prime32);
            __m128i const prod_hi     = _mm_mul_epu32     (data_key_hi, prime32);
            xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
    }   }
}
XXH_FORCE_INLINE XXH_TARGET_SSE2 void
XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
    (void)(&XXH_writeLE64);
    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);

#       if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
        /* MSVC 32-bit mode does not support _mm_set_epi64x before 2015 */
        XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
        __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
#       else
        __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
#       endif
        int i;

        const void* const src16 = XXH3_kSecret;
        __m128i* dst16 = (__m128i*) customSecret;
#       if defined(__GNUC__) || defined(__clang__)
        XXH_COMPILER_GUARD(dst16);
#       endif
        XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */
        XXH_ASSERT(((size_t)dst16 & 15) == 0);

        for (i=0; i < nbRounds; ++i) {
            dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed);
    }   }
}

#endif
#if (XXH_VECTOR == XXH_NEON)
/* forward declarations for the scalar routines, used as fallback lanes */
XXH_FORCE_INLINE void
XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input,
                 void const* XXH_RESTRICT secret, size_t lane);

XXH_FORCE_INLINE void
XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
                         void const* XXH_RESTRICT secret, size_t lane);
XXH_FORCE_INLINE void
XXH3_accumulate_512_neon( void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 15) == 0);
    XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0);
    {   /* GCC for darwin arm64 does not like aliasing here */
        xxh_aliasing_uint64x2_t* const xacc = (xxh_aliasing_uint64x2_t*) acc;
        /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */
        uint8_t const* xinput  = (const uint8_t *) input;
        uint8_t const* xsecret = (const uint8_t *) secret;

        size_t i;
#ifdef __wasm_simd128__
        /* On WASM SIMD128, Clang benefits from this guard on the secret pointer. */
        XXH_COMPILER_GUARD(xsecret);
#endif
        /* Scalar lanes use the normal scalarRound routine */
        for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
            XXH3_scalarRound(acc, input, secret, i);
        }
        i = 0;
        /* 4 NEON lanes at a time. */
        for (; i+1 < XXH3_NEON_LANES / 2; i+=2) {
            /* data_vec = xinput[i]; */
            uint64x2_t data_vec_1 = XXH_vld1q_u64(xinput  + (i * 16));
            uint64x2_t data_vec_2 = XXH_vld1q_u64(xinput  + ((i+1) * 16));
            /* key_vec  = xsecret[i]; */
            uint64x2_t key_vec_1  = XXH_vld1q_u64(xsecret + (i * 16));
            uint64x2_t key_vec_2  = XXH_vld1q_u64(xsecret + ((i+1) * 16));
            /* data_swap = swap(data_vec) */
            uint64x2_t data_swap_1 = vextq_u64(data_vec_1, data_vec_1, 1);
            uint64x2_t data_swap_2 = vextq_u64(data_vec_2, data_vec_2, 1);
            /* data_key = data_vec ^ key_vec */
            uint64x2_t data_key_1 = veorq_u64(data_vec_1, key_vec_1);
            uint64x2_t data_key_2 = veorq_u64(data_vec_2, key_vec_2);

            /* Deinterleave 32-bit halves of both data_keys in one pass. */
            uint32x4x2_t unzipped = vuzpq_u32(
                vreinterpretq_u32_u64(data_key_1),
                vreinterpretq_u32_u64(data_key_2)
            );
            /* data_key_lo = data_key & 0xFFFFFFFF */
            uint32x4_t data_key_lo = unzipped.val[0];
            /* data_key_hi = data_key >> 32 */
            uint32x4_t data_key_hi = unzipped.val[1];
            /* sum = data_swap + (u64x2)(data_key_lo * data_key_hi) */
            uint64x2_t sum_1 = XXH_vmlal_low_u32(data_swap_1, data_key_lo, data_key_hi);
            uint64x2_t sum_2 = XXH_vmlal_high_u32(data_swap_2, data_key_lo, data_key_hi);

            XXH_COMPILER_GUARD_CLANG_NEON(sum_1);
            XXH_COMPILER_GUARD_CLANG_NEON(sum_2);
            /* xacc[i] = acc_vec + sum; */
            xacc[i]   = vaddq_u64(xacc[i], sum_1);
            xacc[i+1] = vaddq_u64(xacc[i+1], sum_2);
        }
        /* Operate on the remaining NEON lanes 2 at a time. */
        for (; i < XXH3_NEON_LANES / 2; i++) {
            /* data_vec = xinput[i]; */
            uint64x2_t data_vec = XXH_vld1q_u64(xinput + (i * 16));
            /* key_vec  = xsecret[i]; */
            uint64x2_t key_vec  = XXH_vld1q_u64(xsecret + (i * 16));
            /* acc_vec_2 = swap(data_vec) */
            uint64x2_t data_swap = vextq_u64(data_vec, data_vec, 1);
            /* data_key = data_vec ^ key_vec */
            uint64x2_t data_key = veorq_u64(data_vec, key_vec);
            /* For two lanes, just use vmovn and vshrn. */
            uint32x2_t data_key_lo = vmovn_u64(data_key);
            uint32x2_t data_key_hi = vshrn_n_u64(data_key, 32);
            /* sum = data_swap + (u64x2)(data_key_lo * data_key_hi) */
            uint64x2_t sum = vmlal_u32(data_swap, data_key_lo, data_key_hi);

            XXH_COMPILER_GUARD_CLANG_NEON(sum);
            /* xacc[i] = acc_vec + sum; */
            xacc[i] = vaddq_u64 (xacc[i], sum);
    }   }
}
XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(neon)
XXH_FORCE_INLINE void
XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 15) == 0);

    {   xxh_aliasing_uint64x2_t* xacc = (xxh_aliasing_uint64x2_t*) acc;
        uint8_t const* xsecret = (uint8_t const*) secret;

        size_t i;
        /* WASM uses operator overloads and skips the vector prime constants. */
#ifndef __wasm_simd128__
        /* { prime32_1, prime32_1 } */
        uint32x2_t const kPrimeLo = vdup_n_u32(XXH_PRIME32_1);
        /* { 0, prime32_1, 0, prime32_1 } */
        uint32x4_t const kPrimeHi = vreinterpretq_u32_u64(vdupq_n_u64((xxh_u64)XXH_PRIME32_1 << 32));
#endif
        /* Scalar lanes handle the remainder */
        for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
            XXH3_scalarScrambleRound(acc, secret, i);
        }
        for (i=0; i < XXH3_NEON_LANES / 2; i++) {
            /* xacc[i] ^= (xacc[i] >> 47); */
            uint64x2_t acc_vec  = xacc[i];
            uint64x2_t shifted  = vshrq_n_u64(acc_vec, 47);
            uint64x2_t data_vec = veorq_u64(acc_vec, shifted);

            /* xacc[i] ^= xsecret[i]; */
            uint64x2_t key_vec  = XXH_vld1q_u64(xsecret + (i * 16));
            uint64x2_t data_key = veorq_u64(data_vec, key_vec);

            /* xacc[i] *= XXH_PRIME32_1 */
#ifdef __wasm_simd128__
            /* SIMD128 has a native u64x2 multiply: no need to expand/scalarize. */
            xacc[i] = data_key * XXH_PRIME32_1;
#else
            /* prod_hi = (data_key >> 32) * XXH_PRIME32_1, already in the high half */
            uint32x4_t prod_hi = vmulq_u32 (vreinterpretq_u32_u64(data_key), kPrimeHi);
            /* Extract low bits for vmlal_u32 */
            uint32x2_t data_key_lo = vmovn_u64(data_key);
            /* xacc[i] = prod_hi + lo(data_key) * XXH_PRIME32_1; */
            xacc[i] = vmlal_u32(vreinterpretq_u64_u32(prod_hi), data_key_lo, kPrimeLo);
#endif
    }   }
}

#endif
#if (XXH_VECTOR == XXH_VSX)
XXH_FORCE_INLINE void
XXH3_accumulate_512_vsx(  void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    /* presumed aligned */
    xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc;
    xxh_u8 const* const xinput  = (xxh_u8 const*) input;   /* no alignment restriction */
    xxh_u8 const* const xsecret = (xxh_u8 const*) secret;  /* no alignment restriction */
    xxh_u64x2 const v32 = { 32, 32 };
    size_t i;
    for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
        /* data_vec = xinput[i]; */
        xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + 16*i);
        /* key_vec = xsecret[i]; */
        xxh_u64x2 const key_vec  = XXH_vec_loadu(xsecret + 16*i);
        xxh_u64x2 const data_key = data_vec ^ key_vec;
        /* shuffled = (data_key << 32) | (data_key >> 32); */
        xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
        /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */
        xxh_u64x2 const product  = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
        /* acc_vec = xacc[i]; */
        xxh_u64x2 acc_vec = xacc[i];
        acc_vec += product;

        /* swap high and low halves */
#ifdef __s390x__
        acc_vec += vec_permi(data_vec, data_vec, 2);
#else
        acc_vec += vec_xxpermdi(data_vec, data_vec, 2);
#endif
        xacc[i] = acc_vec;
    }
}
XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(vsx)
XXH_FORCE_INLINE void
XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 15) == 0);

    {   xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc;
        const xxh_u8* const xsecret = (const xxh_u8*) secret;
        /* constants */
        xxh_u64x2 const v32  = { 32, 32 };
        xxh_u64x2 const v47  = { 47, 47 };
        xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
        size_t i;
        for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
            /* xacc[i] ^= (xacc[i] >> 47); */
            xxh_u64x2 const acc_vec  = xacc[i];
            xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);

            /* xacc[i] ^= xsecret[i]; */
            xxh_u64x2 const key_vec  = XXH_vec_loadu(xsecret + 16*i);
            xxh_u64x2 const data_key = data_vec ^ key_vec;

            /* xacc[i] *= XXH_PRIME32_1 : even and odd 32x32->64 products */
            xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime);
            xxh_u64x2 const prod_odd  = XXH_vec_mulo((xxh_u32x4)data_key, prime);
            xacc[i] = prod_odd + (prod_even << v32);
    }   }
}

#endif
#if (XXH_VECTOR == XXH_SVE)
XXH_FORCE_INLINE void
XXH3_accumulate_512_sve( void* XXH_RESTRICT acc,
                   const void* XXH_RESTRICT input,
                   const void* XXH_RESTRICT secret)
{
    uint64_t *xacc = (uint64_t *)acc;
    const uint64_t *xinput = (const uint64_t *)(const void *)input;
    const uint64_t *xsecret = (const uint64_t *)(const void *)secret;
    svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1);
    uint64_t element_count = svcntd();
    if (element_count >= 8) {
        svbool_t mask = svptrue_pat_b64(SV_VL8);
        svuint64_t vacc = svld1_u64(mask, xacc);
        ACCRND(vacc, 0);
        svst1_u64(mask, xacc, vacc);
    } else if (element_count == 2) {   /* sve128 */
        svbool_t mask = svptrue_pat_b64(SV_VL2);
        svuint64_t acc0 = svld1_u64(mask, xacc + 0);
        svuint64_t acc1 = svld1_u64(mask, xacc + 2);
        svuint64_t acc2 = svld1_u64(mask, xacc + 4);
        svuint64_t acc3 = svld1_u64(mask, xacc + 6);
        ACCRND(acc0, 0);
        ACCRND(acc1, 2);
        ACCRND(acc2, 4);
        ACCRND(acc3, 6);
        svst1_u64(mask, xacc + 0, acc0);
        svst1_u64(mask, xacc + 2, acc1);
        svst1_u64(mask, xacc + 4, acc2);
        svst1_u64(mask, xacc + 6, acc3);
    } else {
        svbool_t mask = svptrue_pat_b64(SV_VL4);
        svuint64_t acc0 = svld1_u64(mask, xacc + 0);
        svuint64_t acc1 = svld1_u64(mask, xacc + 4);
        ACCRND(acc0, 0);
        ACCRND(acc1, 4);
        svst1_u64(mask, xacc + 0, acc0);
        svst1_u64(mask, xacc + 4, acc1);
    }
}
XXH_FORCE_INLINE void
XXH3_accumulate_sve(xxh_u64* XXH_RESTRICT acc,
               const xxh_u8* XXH_RESTRICT input,
               const xxh_u8* XXH_RESTRICT secret,
               size_t nbStripes)
{
    if (nbStripes != 0) {
        uint64_t *xacc = (uint64_t *)acc;
        const uint64_t *xinput = (const uint64_t *)(const void *)input;
        const uint64_t *xsecret = (const uint64_t *)(const void *)secret;
        svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1);
        uint64_t element_count = svcntd();
        if (element_count >= 8) {
            svbool_t mask = svptrue_pat_b64(SV_VL8);
            svuint64_t vacc = svld1_u64(mask, xacc + 0);
            do {
                /* svprfd(svbool_t, void *, enum svfprop); */
                svprfd(mask, xinput + 128, SV_PLDL1STRM);
                ACCRND(vacc, 0);
                xinput += 8;
                xsecret += 1;
                nbStripes--;
            } while (nbStripes != 0);

            svst1_u64(mask, xacc + 0, vacc);
        } else if (element_count == 2) {   /* sve128 */
            svbool_t mask = svptrue_pat_b64(SV_VL2);
            svuint64_t acc0 = svld1_u64(mask, xacc + 0);
            svuint64_t acc1 = svld1_u64(mask, xacc + 2);
            svuint64_t acc2 = svld1_u64(mask, xacc + 4);
            svuint64_t acc3 = svld1_u64(mask, xacc + 6);
            do {
                svprfd(mask, xinput + 128, SV_PLDL1STRM);
                ACCRND(acc0, 0);
                ACCRND(acc1, 2);
                ACCRND(acc2, 4);
                ACCRND(acc3, 6);
                xinput += 8;
                xsecret += 1;
                nbStripes--;
            } while (nbStripes != 0);

            svst1_u64(mask, xacc + 0, acc0);
            svst1_u64(mask, xacc + 2, acc1);
            svst1_u64(mask, xacc + 4, acc2);
            svst1_u64(mask, xacc + 6, acc3);
        } else {
            svbool_t mask = svptrue_pat_b64(SV_VL4);
            svuint64_t acc0 = svld1_u64(mask, xacc + 0);
            svuint64_t acc1 = svld1_u64(mask, xacc + 4);
            do {
                svprfd(mask, xinput + 128, SV_PLDL1STRM);
                ACCRND(acc0, 0);
                ACCRND(acc1, 4);
                xinput += 8;
                xsecret += 1;
                nbStripes--;
            } while (nbStripes != 0);

            svst1_u64(mask, xacc + 0, acc0);
            svst1_u64(mask, xacc + 4, acc1);
        }
    }
}

#endif
/* scalar variants - universal */

#if defined(__aarch64__) && (defined(__GNUC__) || defined(__clang__))
/*
 * aarch64 has a dedicated 32x32->64 multiply-accumulate instruction (umaddl),
 * which compilers don't always emit from the portable expression below.
 */
XXH_FORCE_INLINE xxh_u64
XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc)
{
    xxh_u64 ret;
    /* note : %x = 64-bit register, %w = 32-bit register */
    __asm__("umaddl %x0, %w1, %w2, %x3" : "=r" (ret) : "r" (lhs), "r" (rhs), "r" (acc));
    return ret;
}
#else
XXH_FORCE_INLINE xxh_u64
XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc)
{
    return XXH_mult32to64((xxh_u32)lhs, (xxh_u32)rhs) + acc;
}
#endif
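/*
 * A hedged sanity sketch (illustration only, not part of the library): both
 * branches above compute (lhs & 0xFFFFFFFF) * (rhs & 0xFFFFFFFF) + acc, so the
 * high bits of lhs/rhs are ignored. The function name below is hypothetical.
 */
#if 0  /* illustration only, not compiled */
static void XXH_example_mult32to64_add64(void)
{
    /* low 32 bits of 0x100000003 are 3, so the result is 3*5 + 7 == 22 */
    XXH_ASSERT(XXH_mult32to64_add64(0x100000003ULL, 5, 7) == 22);
}
#endif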
/*!
 * @internal
 * @brief Scalar round for @ref XXH3_accumulate_512_scalar().
 */
XXH_FORCE_INLINE void
XXH3_scalarRound(void* XXH_RESTRICT acc,
                 void const* XXH_RESTRICT input,
                 void const* XXH_RESTRICT secret,
                 size_t lane)
{
    xxh_u64* xacc = (xxh_u64*) acc;
    xxh_u8 const* xinput  = (xxh_u8 const*) input;
    xxh_u8 const* xsecret = (xxh_u8 const*) secret;
    XXH_ASSERT(lane < XXH_ACC_NB);
    XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
    {   xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8);
        xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8);
        xacc[lane ^ 1] += data_val; /* swap adjacent lanes */
        xacc[lane] = XXH_mult32to64_add64(data_key /* & 0xFFFFFFFF */, data_key >> 32, xacc[lane]);
    }
}
/*!
 * @internal
 * @brief Processes a 64 byte block of data using the scalar path.
 */
XXH_FORCE_INLINE void
XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc,
                     const void* XXH_RESTRICT input,
                     const void* XXH_RESTRICT secret)
{
    size_t i;
    /* ARM GCC refuses to unroll this loop, resulting in a sizable slowdown. */
#if defined(__GNUC__) && !defined(__clang__) \
  && (defined(__arm__) || defined(__thumb2__)) \
  && defined(__ARM_FEATURE_UNALIGNED) /* no unaligned access just wastes bytes */ \
  && XXH_SIZE_OPT <= 0
#  pragma GCC unroll 8
#endif
    for (i=0; i < XXH_ACC_NB; i++) {
        XXH3_scalarRound(acc, input, secret, i);
    }
}
XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(scalar)
/*!
 * @internal
 * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar().
 */
XXH_FORCE_INLINE void
XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
                         void const* XXH_RESTRICT secret,
                         size_t lane)
{
    xxh_u64* const xacc = (xxh_u64*) acc;   /* presumed aligned */
    const xxh_u8* const xsecret = (const xxh_u8*) secret;   /* no alignment restriction */
    XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
    XXH_ASSERT(lane < XXH_ACC_NB);
    {   xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8);
        xxh_u64 acc64 = xacc[lane];
        acc64 = XXH_xorshift64(acc64, 47);
        acc64 ^= key64;
        acc64 *= XXH_PRIME32_1;
        xacc[lane] = acc64;
    }
}
/*!
 * @internal
 * @brief Scrambles the accumulators after a large chunk has been read.
 */
XXH_FORCE_INLINE void
XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    size_t i;
    for (i=0; i < XXH_ACC_NB; i++) {
        XXH3_scalarScrambleRound(acc, secret, i);
    }
}
XXH_FORCE_INLINE void
XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    /*
     * A separate pointer is needed for the compiler-guard hack below,
     * which requires a non-const pointer.
     */
    const xxh_u8* kSecretPtr = XXH3_kSecret;
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);

#if defined(__GNUC__) && defined(__aarch64__)
    /* Prevents GCC from reassociating the secret loads (see upstream xxHash
     * for the full rationale). */
    XXH_COMPILER_GUARD(kSecretPtr);
#endif
    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
        int i;
        for (i=0; i < nbRounds; i++) {
            /* Load both halves before storing, so aarch64 can merge them (LDP). */
            xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i)     + seed64;
            xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64;
            XXH_writeLE64((xxh_u8*)customSecret + 16*i,     lo);
            XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi);
    }   }
}
typedef void (*XXH3_f_accumulate)(xxh_u64* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT,
                                  const xxh_u8* XXH_RESTRICT, size_t);
typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*);
typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64);
#if (XXH_VECTOR == XXH_AVX512)
#define XXH3_accumulate_512 XXH3_accumulate_512_avx512
#define XXH3_accumulate     XXH3_accumulate_avx512
#define XXH3_scrambleAcc    XXH3_scrambleAcc_avx512
#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512

#elif (XXH_VECTOR == XXH_AVX2)
#define XXH3_accumulate_512 XXH3_accumulate_512_avx2
#define XXH3_accumulate     XXH3_accumulate_avx2
#define XXH3_scrambleAcc    XXH3_scrambleAcc_avx2
#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2

#elif (XXH_VECTOR == XXH_SSE2)
#define XXH3_accumulate_512 XXH3_accumulate_512_sse2
#define XXH3_accumulate     XXH3_accumulate_sse2
#define XXH3_scrambleAcc    XXH3_scrambleAcc_sse2
#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2

#elif (XXH_VECTOR == XXH_NEON)
#define XXH3_accumulate_512 XXH3_accumulate_512_neon
#define XXH3_accumulate     XXH3_accumulate_neon
#define XXH3_scrambleAcc    XXH3_scrambleAcc_neon
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar

#elif (XXH_VECTOR == XXH_VSX)
#define XXH3_accumulate_512 XXH3_accumulate_512_vsx
#define XXH3_accumulate     XXH3_accumulate_vsx
#define XXH3_scrambleAcc    XXH3_scrambleAcc_vsx
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar

#elif (XXH_VECTOR == XXH_SVE)
#define XXH3_accumulate_512 XXH3_accumulate_512_sve
#define XXH3_accumulate     XXH3_accumulate_sve
#define XXH3_scrambleAcc    XXH3_scrambleAcc_scalar
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar

#else /* scalar */
#define XXH3_accumulate_512 XXH3_accumulate_512_scalar
#define XXH3_accumulate     XXH3_accumulate_scalar
#define XXH3_scrambleAcc    XXH3_scrambleAcc_scalar
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar

#endif
#if XXH_SIZE_OPT >= 1 /* don't do SIMD for initialization */
#  undef XXH3_initCustomSecret
#  define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
#endif
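/*
 * A minimal sketch of how the names selected above are used (illustration
 * only, not part of the library; the function name is hypothetical). One
 * 64-byte stripe is accumulated, then the accumulators are scrambled, with
 * both calls resolving at compile time to the variant chosen by XXH_VECTOR.
 */
#if 0  /* illustration only, not compiled */
static void XXH3_example_one_stripe(const unsigned char stripe[64],
                                    const unsigned char secret[64])
{
    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = {0};
    XXH3_accumulate_512(acc, stripe, secret);  /* e.g. XXH3_accumulate_512_sse2 */
    XXH3_scrambleAcc(acc, secret);             /* e.g. XXH3_scrambleAcc_sse2 */
}
#endif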
XXH_FORCE_INLINE void
XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
                      const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                            XXH3_f_accumulate f_acc,
                            XXH3_f_scrambleAcc f_scramble)
{
    size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
    size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
    size_t const nb_blocks = (len - 1) / block_len;

    size_t n;

    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);

    for (n = 0; n < nb_blocks; n++) {
        f_acc(acc, input + n*block_len, secret, nbStripesPerBlock);
        f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
    }

    /* last partial block */
    XXH_ASSERT(len > XXH_STRIPE_LEN);
    {   size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
        XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
        f_acc(acc, input + nb_blocks*block_len, secret, nbStripes);

        /* last stripe */
        {   const xxh_u8* const p = input + len - XXH_STRIPE_LEN;
#define XXH_SECRET_LASTACC_START 7  /* not aligned on 8, last secret is different from acc & scrambler */
            XXH3_accumulate_512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
    }   }
}
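/*
 * Worked example of the block geometry above, assuming the default secret
 * (XXH_SECRET_DEFAULT_SIZE = 192, XXH_STRIPE_LEN = 64, XXH_SECRET_CONSUME_RATE = 8):
 * nbStripesPerBlock = (192 - 64) / 8 = 16, so block_len = 64 * 16 = 1024 bytes,
 * i.e. the accumulators are scrambled once per full KB of input.
 */
#if 0  /* illustration only, not compiled */
static void XXH3_example_block_geometry(void)
{
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE == 16);
}
#endif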
XXH_FORCE_INLINE xxh_u64
XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
{
    return XXH3_mul128_fold64(
               acc[0] ^ XXH_readLE64(secret),
               acc[1] ^ XXH_readLE64(secret+8) );
}
static XXH64_hash_t
XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
{
    xxh_u64 result64 = start;
    size_t i;

    for (i = 0; i < 4; i++) {
        result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
#if defined(__clang__)                                /* Clang */ \
    && (defined(__arm__) || defined(__thumb__))       /* ARMv7 */ \
    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */  \
    && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
        /* Prevent Clang's counterproductive autovectorization of this loop. */
        XXH_COMPILER_GUARD(result64);
#endif
    }

    return XXH3_avalanche(result64);
}
#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
                        XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
XXH_FORCE_INLINE XXH64_hash_t
XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
                           const void* XXH_RESTRICT secret, size_t secretSize,
                           XXH3_f_accumulate f_acc,
                           XXH3_f_scrambleAcc f_scramble)
{
    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;

    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc, f_scramble);

    /* converge into final hash */
    XXH_STATIC_ASSERT(sizeof(acc) == 64);
    /* do not align on 8, so that the secret is different from the accumulator */
#define XXH_SECRET_MERGEACCS_START 11
    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
    return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
}
/*
 * It's important for performance to transmit secret's size (when it's static)
 * so that the compiler can properly optimize the vectorized loop.
 */
XXH3_WITH_SECRET_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
                             XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64;
    return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate, XXH3_scrambleAcc);
}
/*
 * It's preferable for performance that XXH3_hashLong is not inlined,
 * as it results in a smaller function for small data, easier to the instruction cache.
 */
XXH_NO_INLINE XXH64_hash_t
XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
                          XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64; (void)secret; (void)secretLen;
    return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate, XXH3_scrambleAcc);
}
XXH_FORCE_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
                                    XXH64_hash_t seed,
                                    XXH3_f_accumulate f_acc,
                                    XXH3_f_scrambleAcc f_scramble,
                                    XXH3_f_initCustomSecret f_initSec)
{
#if XXH_SIZE_OPT <= 0
    if (seed == 0)
        return XXH3_hashLong_64b_internal(input, len,
                                          XXH3_kSecret, sizeof(XXH3_kSecret),
                                          f_acc, f_scramble);
#endif
    {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
        f_initSec(secret, seed);
        return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
                                          f_acc, f_scramble);
    }
}
/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSeed(const void* XXH_RESTRICT input, size_t len,
                           XXH64_hash_t seed, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
{
    (void)secret; (void)secretLen;
    return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
                XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret);
}
typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
                                          XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);
XXH_FORCE_INLINE XXH64_hash_t
XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
                     XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
                     XXH3_hashLong64_f f_hashLong)
{
    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
    /*
     * The `secretLen >= XXH3_SECRET_SIZE_MIN` condition is a contract
     * pre-condition: checking it at runtime would cost a branch on every hash,
     * and the signature leaves no room to report an error.
     */
    if (len <= 16)
        return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
    if (len <= 128)
        return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen);
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length)
{
    return XXH3_64bits_internal(input, length, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default);
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecret(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize)
{
    return XXH3_64bits_internal(input, length, 0, secret, secretSize, XXH3_hashLong_64b_withSecret);
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed)
{
    return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
}
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    if (length <= XXH3_MIDSIZE_MAX)
        return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
    return XXH3_hashLong_64b_withSecret(input, length, seed, (const xxh_u8*)secret, secretSize);
}
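/*
 * A minimal usage sketch of the one-shot 64-bit API above (illustration only;
 * `buf` and `bufSize` are hypothetical caller data, and the function name is
 * not part of the library).
 */
#if 0  /* illustration only, not compiled */
static void XXH3_example_oneshot64(const void* buf, size_t bufSize)
{
    XXH64_hash_t const h0 = XXH3_64bits(buf, bufSize);              /* default secret */
    XXH64_hash_t const h1 = XXH3_64bits_withSeed(buf, bufSize, 42); /* seed-derived secret */
    (void)h0; (void)h1;
}
#endif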
/* ===   XXH3 streaming   === */
#ifndef XXH_NO_STREAM
/*
 * Malloc's a pointer that is always aligned to @align.
 * Technique: overallocate, shift the pointer up to the next aligned address,
 * and record the shift in the byte just before the returned pointer.
 */
static XXH_MALLOCF void* XXH_alignedMalloc(size_t s, size_t align)
{
    XXH_ASSERT(align <= 128 && align >= 8); /* range check */
    XXH_ASSERT((align & (align-1)) == 0);   /* power of 2 */
    XXH_ASSERT(s != 0 && s < (s + align));  /* empty/overflow */
    {   /* Overallocate to make room for manual realignment and an offset byte */
        xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
        if (base != NULL) {
            /*
             * Get the offset needed to align this pointer.
             * Even if the returned pointer is aligned, there will always be
             * at least one byte to store the offset to the original pointer.
             */
            size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
            /* Add the offset for the now-aligned pointer */
            xxh_u8* ptr = base + offset;

            XXH_ASSERT((size_t)ptr % align == 0);

            /* Store the offset immediately before the returned pointer. */
            ptr[-1] = (xxh_u8)offset;
            return ptr;
        }
        return NULL;
    }
}
/*
 * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
 * normal malloc'd pointers: XXH_alignedFree only accepts aligned pointers.
 */
static void XXH_alignedFree(void* p)
{
    if (p != NULL) {
        xxh_u8* ptr = (xxh_u8*)p;
        /* Get the offset byte we added in XXH_alignedMalloc. */
        xxh_u8 offset = ptr[-1];
        /* Free the original malloc'd pointer. */
        xxh_u8* base = ptr - offset;
        XXH_free(base);
    }
}
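/*
 * Worked example of the offset trick above (illustration only, hypothetical
 * addresses): if malloc returns base = 0x1003 and align = 64, then
 * offset = 64 - (0x1003 & 63) = 61, so ptr = 0x1040 (64-byte aligned), and
 * ptr[-1] stores 61, letting XXH_alignedFree() recover base = ptr - 61.
 */
#if 0  /* illustration only, not compiled */
static void XXH_example_alignedAlloc(void)
{
    void* const p = XXH_alignedMalloc(128, 64);
    /* p is 64-byte aligned; ((xxh_u8*)p)[-1] holds the realignment offset */
    XXH_alignedFree(p);
}
#endif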
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
{
    XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
    if (state==NULL) return NULL;
    XXH3_INITSTATE(state);
    return state;
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
{
    XXH_alignedFree(statePtr);
    return XXH_OK;
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API void
XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state)
{
    XXH_memcpy(dst_state, src_state, sizeof(*dst_state));
}
static void
XXH3_reset_internal(XXH3_state_t* statePtr,
                    XXH64_hash_t seed,
                    const void* secret, size_t secretSize)
{
    size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
    size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
    XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
    XXH_ASSERT(statePtr != NULL);
    /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
    memset((char*)statePtr + initStart, 0, initLength);
    statePtr->acc[0] = XXH_PRIME32_3;
    statePtr->acc[1] = XXH_PRIME64_1;
    statePtr->acc[2] = XXH_PRIME64_2;
    statePtr->acc[3] = XXH_PRIME64_3;
    statePtr->acc[4] = XXH_PRIME64_4;
    statePtr->acc[5] = XXH_PRIME32_2;
    statePtr->acc[6] = XXH_PRIME64_5;
    statePtr->acc[7] = XXH_PRIME32_1;
    statePtr->seed = seed;
    statePtr->useSeed = (seed != 0);
    statePtr->extSecret = (const unsigned char*)secret;
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
    statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
    statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
{
    if (statePtr == NULL) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
{
    if (statePtr == NULL) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, secret, secretSize);
    if (secret == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
    return XXH_OK;
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
{
    if (statePtr == NULL) return XXH_ERROR;
    if (seed==0) return XXH3_64bits_reset(statePtr);
    if ((seed != statePtr->seed) || (statePtr->extSecret != NULL))
        XXH3_initCustomSecret(statePtr->customSecret, seed);
    XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed64)
{
    if (statePtr == NULL) return XXH_ERROR;
    if (secret == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
    XXH3_reset_internal(statePtr, seed64, secret, secretSize);
    statePtr->useSeed = 1; /* always, even if seed64==0 */
    return XXH_OK;
}
/*!
 * @internal
 * @brief Consumes @p nbStripes stripes, scrambling whenever a full block of
 * secret has been used, and returns the advanced input pointer.
 */
XXH_FORCE_INLINE const xxh_u8 *
XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
                    size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
                    const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
                    const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
                    XXH3_f_accumulate f_acc,
                    XXH3_f_scrambleAcc f_scramble)
{
    const xxh_u8* initialSecret = secret + *nbStripesSoFarPtr * XXH_SECRET_CONSUME_RATE;
    /* Process full blocks */
    if (nbStripes >= (nbStripesPerBlock - *nbStripesSoFarPtr)) {
        /* Process the initial partial block... */
        size_t nbStripesThisIter = nbStripesPerBlock - *nbStripesSoFarPtr;

        do {
            /* Accumulate and scramble */
            f_acc(acc, input, initialSecret, nbStripesThisIter);
            f_scramble(acc, secret + secretLimit);
            input += nbStripesThisIter * XXH_STRIPE_LEN;
            nbStripes -= nbStripesThisIter;
            /* Then continue the loop with the full block size */
            nbStripesThisIter = nbStripesPerBlock;
            initialSecret = secret;
        } while (nbStripes >= nbStripesPerBlock);
        *nbStripesSoFarPtr = 0;
    }
    /* Process a partial block */
    if (nbStripes > 0) {
        f_acc(acc, input, initialSecret, nbStripes);
        input += nbStripes * XXH_STRIPE_LEN;
        *nbStripesSoFarPtr += nbStripes;
    }
    /* Return end pointer */
    return input;
}
#ifndef XXH3_STREAM_USE_STACK
# if XXH_SIZE_OPT <= 0 && !defined(__clang__) /* clang doesn't need additional stack space */
#   define XXH3_STREAM_USE_STACK 1
# endif
#endif
/*
 * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
 */
XXH_FORCE_INLINE XXH_errorcode
XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
            const xxh_u8* XXH_RESTRICT input, size_t len,
            XXH3_f_accumulate f_acc,
            XXH3_f_scrambleAcc f_scramble)
{
    if (input==NULL) {
        XXH_ASSERT(len == 0);
        return XXH_OK;
    }

    XXH_ASSERT(state != NULL);
    {   const xxh_u8* const bEnd = input + len;
        const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
        /* GCC and MSVC optimize poorly when accumulating directly into state;
         * a stack copy enables proper optimization. Clang doesn't need this. */
        XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8];
        XXH_memcpy(acc, state->acc, sizeof(acc));
#else
        xxh_u64* XXH_RESTRICT const acc = state->acc;
#endif
        state->totalLen += len;
        XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);

        /* small input : just fill in tmp buffer */
        if (len <= XXH3_INTERNALBUFFER_SIZE - state->bufferedSize) {
            XXH_memcpy(state->buffer + state->bufferedSize, input, len);
            state->bufferedSize += (XXH32_hash_t)len;
            return XXH_OK;
        }

        /* total input is now > XXH3_INTERNALBUFFER_SIZE */
        #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
        XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0);   /* clean multiple */

        /*
         * Internal buffer is partially filled (always, except at beginning).
         * Complete it, then consume it.
         */
        if (state->bufferedSize) {
            size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
            XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
            input += loadSize;
            XXH3_consumeStripes(acc,
                               &state->nbStripesSoFar, state->nbStripesPerBlock,
                                state->buffer, XXH3_INTERNALBUFFER_STRIPES,
                                secret, state->secretLimit,
                                f_acc, f_scramble);
            state->bufferedSize = 0;
        }
        XXH_ASSERT(input < bEnd);
        if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) {
            size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN;
            input = XXH3_consumeStripes(acc,
                                       &state->nbStripesSoFar, state->nbStripesPerBlock,
                                        input, nbStripes,
                                        secret, state->secretLimit,
                                        f_acc, f_scramble);
            XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
        }
        /* Some remaining input (always) : buffer it */
        XXH_ASSERT(input < bEnd);
        XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE);
        XXH_ASSERT(state->bufferedSize == 0);
        XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
        state->bufferedSize = (XXH32_hash_t)(bEnd-input);
#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
        /* save stack accumulators into state */
        XXH_memcpy(state->acc, acc, sizeof(acc));
#endif
    }

    return XXH_OK;
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len)
{
    return XXH3_update(state, (const xxh_u8*)input, len,
                       XXH3_accumulate, XXH3_scrambleAcc);
}
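/*
 * A minimal streaming sketch using the public API above (illustration only;
 * error handling is reduced to nothing, `chunk1`/`chunk2` are hypothetical
 * caller buffers, and the function name is not part of the library).
 */
#if 0  /* illustration only, not compiled */
static XXH64_hash_t XXH3_example_streaming(const void* chunk1, size_t len1,
                                           const void* chunk2, size_t len2)
{
    XXH3_state_t* const state = XXH3_createState();
    XXH64_hash_t h = 0;
    if (state != NULL) {
        XXH3_64bits_reset(state);
        XXH3_64bits_update(state, chunk1, len1);
        XXH3_64bits_update(state, chunk2, len2);
        h = XXH3_64bits_digest(state);   /* defined further below */
        XXH3_freeState(state);
    }
    return h;
}
#endif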
XXH_FORCE_INLINE void
XXH3_digest_long (XXH64_hash_t* acc,
                  const XXH3_state_t* state,
                  const unsigned char* secret)
{
    xxh_u8 lastStripe[XXH_STRIPE_LEN];
    const xxh_u8* lastStripePtr;

    /*
     * Digest on a local copy. This way, the state remains unaltered, and it can
     * continue ingesting more input afterwards.
     */
    XXH_memcpy(acc, state->acc, sizeof(state->acc));
    if (state->bufferedSize >= XXH_STRIPE_LEN) {
        /* Consume remaining stripes then point to remaining data in buffer */
        size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
        size_t nbStripesSoFar = state->nbStripesSoFar;
        XXH3_consumeStripes(acc,
                           &nbStripesSoFar, state->nbStripesPerBlock,
                            state->buffer, nbStripes,
                            secret, state->secretLimit,
                            XXH3_accumulate, XXH3_scrambleAcc);
        lastStripePtr = state->buffer + state->bufferedSize - XXH_STRIPE_LEN;
    } else {  /* bufferedSize < XXH_STRIPE_LEN */
        /* Copy to temp buffer */
        size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
        XXH_ASSERT(state->bufferedSize > 0);  /* there is always some input buffered */
        XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
        XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
        lastStripePtr = lastStripe;
    }
    /* Last stripe */
    XXH3_accumulate_512(acc,
                        lastStripePtr,
                        secret + state->secretLimit - XXH_SECRET_LASTACC_START);
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* state)
{
    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
        XXH3_digest_long(acc, state, secret);
        return XXH3_mergeAccs(acc,
                              secret + XXH_SECRET_MERGEACCS_START,
                              (xxh_u64)state->totalLen * XXH_PRIME64_1);
    }
    /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
    if (state->useSeed)
        return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
    return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
                                  secret, state->secretLimit + XXH_STRIPE_LEN);
}
#endif /* !XXH_NO_STREAM */
/* ==========================================
 * XXH3 128 bits (a.k.a XXH128)
 * ========================================== */

XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    /* A doubled version of 1to3_64b with different constants. */
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(1 <= len && len <= 3);
    XXH_ASSERT(secret != NULL);
    {   xxh_u8  const c1 = input[0];
        xxh_u8  const c2 = input[len >> 1];
        xxh_u8  const c3 = input[len - 1];
        xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
                                | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
        xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
        xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
        xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
        xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
        xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
        XXH128_hash_t h128;
        h128.low64  = XXH64_avalanche(keyed_lo);
        h128.high64 = XXH64_avalanche(keyed_hi);
        return h128;
    }
}
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(4 <= len && len <= 8);
    seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
    {   xxh_u32 const input_lo = XXH_readLE32(input);
        xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
        xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
        xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
        xxh_u64 const keyed = input_64 ^ bitflip;

        /* Shift len to the left to ensure it is even, this avoids even multiplies. */
        XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));

        m128.high64 += (m128.low64 << 1);
        m128.low64  ^= (m128.high64 >> 3);

        m128.low64   = XXH_xorshift64(m128.low64, 35);
        m128.low64  *= PRIME_MX2;
        m128.low64   = XXH_xorshift64(m128.low64, 28);
        m128.high64  = XXH3_avalanche(m128.high64);
        return m128;
    }
}
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(9 <= len && len <= 16);
    {   xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
        xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
        xxh_u64 const input_lo = XXH_readLE64(input);
        xxh_u64       input_hi = XXH_readLE64(input + len - 8);
        XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
        /*
         * Put len in the middle of m128 to ensure that the length gets mixed to
         * both the low and high bits in the 128x64 multiply below.
         */
        m128.low64 += (xxh_u64)(len - 1) << 54;
        input_hi   ^= bitfliph;
        /*
         * Add the high 32 bits of input_hi to the high 32 bits of m128, then
         * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2
         * to the high 64 bits of m128. The best approach differs by pointer size.
         */
        if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit: two 32-bit ops */
            m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
        } else { /* 64-bit: same result folded into one multiply-add */
            m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
        }
        /* m128 ^= XXH_swap64(m128 >> 64); */
        m128.low64  ^= XXH_swap64(m128.high64);

        {   /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
            XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
            h128.high64 += m128.high64 * XXH_PRIME64_2;

            h128.low64   = XXH3_avalanche(h128.low64);
            h128.high64  = XXH3_avalanche(h128.high64);
            return h128;
    }   }
}
/*
 * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(len <= 16);
    {   if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
        if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
        if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
        {   XXH128_hash_t h128;
            xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
            xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
            h128.low64  = XXH64_avalanche(seed ^ bitflipl);
            h128.high64 = XXH64_avalanche(seed ^ bitfliph);
            return h128;
    }   }
}
/*
 * A bit slower than XXH3_mix16B, but handles multiply by zero better.
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
              const xxh_u8* secret, XXH64_hash_t seed)
{
    acc.low64  += XXH3_mix16B (input_1, secret+0, seed);
    acc.low64  ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
    acc.high64 += XXH3_mix16B (input_2, secret+16, seed);
    acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
    return acc;
}
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                      XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(16 < len && len <= 128);

    {   XXH128_hash_t acc;
        acc.low64 = len * XXH_PRIME64_1;
        acc.high64 = 0;

#if XXH_SIZE_OPT >= 1
        {   /* Smaller, but slightly slower. */
            unsigned int i = (unsigned int)(len - 1) / 32;
            do {
                acc = XXH128_mix32B(acc, input+16*i, input+len-16*(i+1), secret+32*i, seed);
            } while (i-- != 0);
        }
#else
        if (len > 32) {
            if (len > 64) {
                if (len > 96) {
                    acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
                }
                acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
            }
            acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
        }
        acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
#endif
        {   XXH128_hash_t h128;
            h128.low64  = acc.low64 + acc.high64;
            h128.high64 = (acc.low64    * XXH_PRIME64_1)
                        + (acc.high64   * XXH_PRIME64_4)
                        + ((len - seed) * XXH_PRIME64_2);
            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
            return h128;
    }   }
}
XXH_NO_INLINE XXH128_hash_t
XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                       XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);

    {   XXH128_hash_t acc;
        unsigned i;
        acc.low64 = len * XXH_PRIME64_1;
        acc.high64 = 0;
        /* `i` is offset + 32, so unchanged `len` can serve as upper bound. */
        for (i = 32; i < 160; i += 32) {
            acc = XXH128_mix32B(acc,
                                input  + i - 32,
                                input  + i - 16,
                                secret + i - 32,
                                seed);
        }
        acc.low64 = XXH3_avalanche(acc.low64);
        acc.high64 = XXH3_avalanche(acc.high64);
        /* NB: `i <= len` duplicates the last 32 bytes when len % 32 == 0,
         * which is required to keep the hash result stable. */
        for (i=160; i <= len; i += 32) {
            acc = XXH128_mix32B(acc,
                                input + i - 32,
                                input + i - 16,
                                secret + XXH3_MIDSIZE_STARTOFFSET + i - 160,
                                seed);
        }
        /* last bytes */
        acc = XXH128_mix32B(acc,
                            input + len - 16,
                            input + len - 32,
                            secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
                            (XXH64_hash_t)0 - seed);

        {   XXH128_hash_t h128;
            h128.low64  = acc.low64 + acc.high64;
            h128.high64 = (acc.low64    * XXH_PRIME64_1)
                        + (acc.high64   * XXH_PRIME64_4)
                        + ((len - seed) * XXH_PRIME64_2);
            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
            return h128;
    }   }
}
XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
                            const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                            XXH3_f_accumulate f_acc,
                            XXH3_f_scrambleAcc f_scramble)
{
    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;

    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc, f_scramble);

    /* converge into final hash */
    XXH_STATIC_ASSERT(sizeof(acc) == 64);
    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
    {   XXH128_hash_t h128;
        h128.low64  = XXH3_mergeAccs(acc,
                                     secret + XXH_SECRET_MERGEACCS_START,
                                     (xxh_u64)len * XXH_PRIME64_1);
        h128.high64 = XXH3_mergeAccs(acc,
                                     secret + secretSize
                                            - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
                                     ~((xxh_u64)len * XXH_PRIME64_2));
        return h128;
    }
}
/*
 * It's important for performance that XXH3_hashLong() is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
                           XXH64_hash_t seed64,
                           const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64; (void)secret; (void)secretLen;
    return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret),
                                       XXH3_accumulate, XXH3_scrambleAcc);
}
/*
 * It's important for performance to pass @p secretLen (when it's static)
 * to the compiler, so that it can properly optimize the vectorized loop.
 */
XXH3_WITH_SECRET_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
                              XXH64_hash_t seed64,
                              const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64;
    return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen,
                                       XXH3_accumulate, XXH3_scrambleAcc);
}
XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
                                     XXH64_hash_t seed64,
                                     XXH3_f_accumulate f_acc,
                                     XXH3_f_scrambleAcc f_scramble,
                                     XXH3_f_initCustomSecret f_initSec)
{
    if (seed64 == 0)
        return XXH3_hashLong_128b_internal(input, len,
                                           XXH3_kSecret, sizeof(XXH3_kSecret),
                                           f_acc, f_scramble);
    {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
        f_initSec(secret, seed64);
        return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
                                           f_acc, f_scramble);
    }
}
/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed(const void* input, size_t len,
                            XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)secret; (void)secretLen;
    return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
                XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret);
}
typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
                                            XXH64_hash_t, const void* XXH_RESTRICT, size_t);
XXH_FORCE_INLINE XXH128_hash_t
XXH3_128bits_internal(const void* input, size_t len,
                      XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
                      XXH3_hashLong128_f f_hl128)
{
    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
    /*
     * As with the 64-bit variant, the `secret` conditions are a contract
     * pre-condition; checking them here would cost a branch at every hash.
     */
    if (len <= 16)
        return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
    if (len <= 128)
        return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    return f_hl128(input, len, seed64, secret, secretLen);
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* input, size_t len)
{
    return XXH3_128bits_internal(input, len, 0,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_default);
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecret(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize)
{
    return XXH3_128bits_internal(input, len, 0,
                                 (const xxh_u8*)secret, secretSize,
                                 XXH3_hashLong_128b_withSecret);
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSeed(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_internal(input, len, seed,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_withSeed);
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
    return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize);
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_withSeed(input, len, seed);
}
/* ===   XXH3 128-bit streaming   === */
/* The 128-bit reset functions mirror the 64-bit ones: the streaming state is shared. */
#ifndef XXH_NO_STREAM
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len)
{
    return XXH3_update(state, (const xxh_u8*)input, len,
                       XXH3_accumulate, XXH3_scrambleAcc);
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* state)
{
    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
        XXH3_digest_long(acc, state, secret);
        XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
        {   XXH128_hash_t h128;
            h128.low64  = XXH3_mergeAccs(acc,
                                         secret + XXH_SECRET_MERGEACCS_START,
                                         (xxh_u64)state->totalLen * XXH_PRIME64_1);
            h128.high64 = XXH3_mergeAccs(acc,
                                         secret + state->secretLimit + XXH_STRIPE_LEN
                                                - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
                                         ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
            return h128;
        }
    }
    /* len <= XXH3_MIDSIZE_MAX : short code path */
    if (state->useSeed)
        return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
    return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
                                   secret, state->secretLimit + XXH_STRIPE_LEN);
}
#endif /* !XXH_NO_STREAM */
/*! @ingroup XXH3_family */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
{
    /* note : XXH128_hash_t is compact, it has no padding byte */
    return !(memcmp(&h1, &h2, sizeof(h1)));
}
/* This prototype is compatible with stdlib's qsort().
 * @return : >0 if *h128_1  > *h128_2
 *           <0 if *h128_1  < *h128_2
 *           =0 if *h128_1 == *h128_2  */
/*! @ingroup XXH3_family */
XXH_PUBLIC_API int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2)
{
    XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
    XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
    int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
    /* note : bets that, in most cases, hash values are different */
    if (hcmp) return hcmp;
    return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
}
/*======   Canonical representation   ======*/
/*! @ingroup XXH3_family */
XXH_PUBLIC_API void
XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) {
        hash.high64 = XXH_swap64(hash.high64);
        hash.low64  = XXH_swap64(hash.low64);
    }
    XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
    XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src)
{
    XXH128_hash_t h;
    h.high64 = XXH_readBE64(src);
    h.low64  = XXH_readBE64(src->digest + 8);
    return h;
}
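/*
 * Round-trip sketch for the canonical (big endian) form above (illustration
 * only; the function name is hypothetical): serializing a hash and re-reading
 * it is lossless regardless of host endianness.
 */
#if 0  /* illustration only, not compiled */
static void XXH128_example_canonical(XXH128_hash_t h)
{
    XXH128_canonical_t canon;
    XXH128_canonicalFromHash(&canon, h);  /* stable on-disk byte order */
    {   XXH128_hash_t const back = XXH128_hashFromCanonical(&canon);
        XXH_ASSERT(XXH128_isEqual(h, back));
    }
}
#endif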
/* ==========================================
 * Secret generators
 * ========================================== */

#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128)
{
    XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 );
    XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize)
{
#if (XXH_DEBUGLEVEL >= 1)
    XXH_ASSERT(secretBuffer != NULL);
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
#else
    /* production mode, assert() are disabled */
    if (secretBuffer == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
#endif

    if (customSeedSize == 0) {
        customSeed = XXH3_kSecret;
        customSeedSize = XXH_SECRET_DEFAULT_SIZE;
    }
#if (XXH_DEBUGLEVEL >= 1)
    XXH_ASSERT(customSeed != NULL);
#else
    if (customSeed == NULL) return XXH_ERROR;
#endif

    /* Fill secretBuffer with a copy of customSeed - repeat as needed */
    {   size_t pos = 0;
        while (pos < secretSize) {
            size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
            memcpy((char*)secretBuffer + pos, customSeed, toCopy);
            pos += toCopy;
    }   }

    {   size_t const nbSeg16 = secretSize / 16;
        size_t n;
        XXH128_canonical_t scrambler;
        XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
        for (n=0; n<nbSeg16; n++) {
            XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
            XXH3_combine16((char*)secretBuffer + n*16, h128);
        }
        /* last segment */
        XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
    }
    return XXH_OK;
}
/*! @ingroup XXH3_family */
XXH_PUBLIC_API void
XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed)
{
    XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
    XXH3_initCustomSecret(secret, seed);
    XXH_ASSERT(secretBuffer != NULL);
    memcpy(secretBuffer, secret, XXH_SECRET_DEFAULT_SIZE);
}
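/*
 * A usage sketch for the secret generators above (illustration only;
 * `seedMaterial`/`seedSize` are hypothetical caller entropy, and the
 * function name is not part of the library).
 */
#if 0  /* illustration only, not compiled */
static void XXH3_example_secrets(const void* seedMaterial, size_t seedSize)
{
    unsigned char secret[XXH_SECRET_DEFAULT_SIZE];
    XXH_errorcode const err = XXH3_generateSecret(secret, sizeof(secret), seedMaterial, seedSize);
    (void)err;
    /* Alternative: derive the full default-size secret from a 64-bit seed. */
    XXH3_generateSecret_fromSeed(secret, 42);
}
#endif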
/* Pop our optimization override from above */
#if XXH_VECTOR == XXH_AVX2                      /* AVX2 */           \
  && defined(__GNUC__) && !defined(__clang__)   /* GCC, not Clang */ \
  && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
#  pragma GCC pop_options
#endif
#if defined (__cplusplus)
} /* extern "C" */
#endif