/*
xxHash - Fast Hash algorithm
Copyright (C) 2012-2014, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

You can contact the author at :
- xxHash source repository : http://code.google.com/p/xxhash/
- public discussion board : https://groups.google.com/forum/#!forum/lz4c
*/
35 //**************************************
37 //**************************************
38 // Unaligned memory access is automatically enabled for "common" CPU, such as x86.
39 // For others CPU, the compiler will be more cautious, and insert extra code to ensure aligned access is respected.
40 // If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance.
41 // You can also enable this parameter if you know your input data will always be aligned (boundaries of 4, for U32).
// Enable direct unaligned loads on CPUs known to support them (x86/x64, and
// ARM cores advertising __ARM_FEATURE_UNALIGNED). The closing #endif was
// missing in the mangled source and is restored here.
#if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
#  define XXH_USE_UNALIGNED_ACCESS 1
#endif
46 // XXH_ACCEPT_NULL_INPUT_POINTER :
47 // If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer.
48 // When this option is enabled, xxHash output for null input pointers will be the same as a null-length input.
49 // This option has a very small performance cost (only measurable on small inputs).
50 // By default, this option is disabled. To enable it, uncomment below define :
51 // #define XXH_ACCEPT_NULL_INPUT_POINTER 1
53 // XXH_FORCE_NATIVE_FORMAT :
54 // By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
55 // Results are therefore identical for little-endian and big-endian CPU.
56 // This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.
57 // Should endian-independence be of no importance for your application, you may set the #define below to 1.
58 // It will improve speed for Big-endian CPU.
59 // This option has no impact on Little_Endian CPU.
60 #define XXH_FORCE_NATIVE_FORMAT 0
62 //**************************************
63 // Compiler Specific Options
64 //**************************************
65 // Disable some Visual warning messages
// Compiler-specific setup. The #else/#endif scaffolding lost in the mangled
// source is restored here.
#ifdef _MSC_VER    // Visual Studio
#  pragma warning(disable : 4127)    // disable: C4127: conditional expression is constant
#endif

// FORCE_INLINE : ask the compiler as strongly as possible to inline the
// endian/alignment-templated helpers below, so dead branches get stripped.
#ifdef _MSC_VER    // Visual Studio
#  define FORCE_INLINE static __forceinline
#else
#  ifdef __GNUC__
#    define FORCE_INLINE static inline __attribute__((always_inline))
#  else
#    define FORCE_INLINE static inline
#  endif
#endif
80 //**************************************
81 // Includes & Memory related functions
82 //**************************************
84 // Modify the local functions below should you wish to use some other memory routines
85 // for malloc(), free()
// Thin wrapper around malloc() so the allocator can be swapped in one place.
// (Fused line-number residue removed; definition restored to compilable form.)
static void* XXH_malloc(size_t s)
{
    return malloc(s);
}
// Thin wrapper around free(); counterpart of XXH_malloc().
static void XXH_free(void* p)
{
    free(p);
}
// Thin wrapper around memcpy(); returns dest, like memcpy().
// Braces lost in the mangled source are restored.
static void* XXH_memcpy(void* dest, const void* src, size_t size)
{
    return memcpy(dest, src, size);
}
97 //**************************************
99 //**************************************
// Basic scalar types: exact-width <stdint.h> types under C99+, portable
// built-in fallbacks otherwise. Note: the fallback branch previously used
// uint64_t, which does not exist pre-C99 without <stdint.h>; restored to
// the portable `unsigned long long`.
#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   // C99
# include <stdint.h>
  typedef uint8_t  BYTE;
  typedef uint16_t U16;
  typedef uint32_t U32;
  typedef  int32_t S32;
  typedef uint64_t U64;
#else
  typedef unsigned char      BYTE;
  typedef unsigned short     U16;
  typedef unsigned int       U32;
  typedef   signed int       S32;
  typedef unsigned long long U64;
#endif
115 #if defined(__GNUC__) && !defined(XXH_USE_UNALIGNED_ACCESS)
116 # define _PACKED __attribute__ ((packed))
121 #if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
125 # pragma pack(push, 1)
129 typedef struct _U32_S
133 typedef struct _U64_S
138 #if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
142 #define A32(x) (((U32_S *)(x))->v)
143 #define A64(x) (((U64_S *)(x))->v)
146 //***************************************
147 // Compiler-specific Functions and Macros
148 //***************************************
// GCC_VERSION folds major/minor into one comparable number (e.g. 4.3 -> 403).
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

// Rotate-left helpers. MSVC provides intrinsics; elsewhere the classic
// shift/or idiom is used (compilers recognize it and emit a single rotate).
// Note : although _rotl exists for minGW (GCC under windows), performance seems poor
// Fix: macro arguments are now parenthesized so expansions like
// XXH_rotl32(a|b, r) evaluate correctly (the originals expanded raw x and r).
#if defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#  define XXH_rotl64(x,r) _rotl64(x,r)
#else
#  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
#  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
#endif
160 #if defined(_MSC_VER) // Visual Studio
161 # define XXH_swap32 _byteswap_ulong
162 # define XXH_swap64 _byteswap_uint64
163 #elif GCC_VERSION >= 403 || defined(__clang__)
164 # define XXH_swap32 __builtin_bswap32
165 # define XXH_swap64 __builtin_bswap64
167 static inline U32 XXH_swap32 (U32 x)
169 return ((x << 24) & 0xff000000 ) |
170 ((x << 8) & 0x00ff0000 ) |
171 ((x >> 8) & 0x0000ff00 ) |
172 ((x >> 24) & 0x000000ff );
174 static inline U64 XXH_swap64 (U64 x)
176 return ((x << 56) & 0xff00000000000000ULL) |
177 ((x << 40) & 0x00ff000000000000ULL) |
178 ((x << 24) & 0x0000ff0000000000ULL) |
179 ((x << 8) & 0x000000ff00000000ULL) |
180 ((x >> 8) & 0x00000000ff000000ULL) |
181 ((x >> 24) & 0x0000000000ff0000ULL) |
182 ((x >> 40) & 0x000000000000ff00ULL) |
183 ((x >> 56) & 0x00000000000000ffULL);
188 //**************************************
190 //**************************************
// Mixing constants for XXH32: large odd primes chosen by the algorithm's
// author. Changing any of them changes every hash value.
191 #define PRIME32_1 2654435761U
192 #define PRIME32_2 2246822519U
193 #define PRIME32_3 3266489917U
194 #define PRIME32_4 668265263U
195 #define PRIME32_5 374761393U
// Mixing constants for XXH64 (same role, 64-bit).
197 #define PRIME64_1 11400714785074694791ULL
198 #define PRIME64_2 14029467366897019727ULL
199 #define PRIME64_3 1609587929392839161ULL
200 #define PRIME64_4 9650029242287828579ULL
201 #define PRIME64_5 2870177450012600261ULL
203 //**************************************
204 // Architecture Macros
205 //**************************************
// Endianess detection. XXH_CPU_LITTLE_ENDIAN may be defined externally (e.g.
// via a compiler switch); otherwise it is determined at run time by reading
// the first byte of an int (1 on little-endian, 0 on big-endian).
// The closing #endif lost in the mangled source is restored.
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
#ifndef XXH_CPU_LITTLE_ENDIAN   // It is possible to define XXH_CPU_LITTLE_ENDIAN externally, for example using a compiler switch
static const int one = 1;
#  define XXH_CPU_LITTLE_ENDIAN   (*(char*)(&one))
#endif
213 //**************************************
215 //**************************************
// Compile-time assertion: if `c` is false, 1/0 in the enum initializer forces
// a compilation error. Pre-C11 idiom (no _Static_assert available).
216 #define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(!!(c)) }; } // use only *after* variable declarations
219 //****************************
221 //****************************
// Tells the read helpers whether the caller guarantees natural alignment
// (XXH_aligned -> plain load) or not (XXH_unaligned -> packed-struct load).
222 typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
224 FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
226 if (align==XXH_unaligned)
227 return endian==XXH_littleEndian ? A32(ptr) : XXH_swap32(A32(ptr));
229 return endian==XXH_littleEndian ? *(U32*)ptr : XXH_swap32(*(U32*)ptr);
232 FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
234 return XXH_readLE32_align(ptr, endian, XXH_unaligned);
237 FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
239 if (align==XXH_unaligned)
240 return endian==XXH_littleEndian ? A64(ptr) : XXH_swap64(A64(ptr));
242 return endian==XXH_littleEndian ? *(U64*)ptr : XXH_swap64(*(U64*)ptr);
245 FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
247 return XXH_readLE64_align(ptr, endian, XXH_unaligned);
251 //****************************
252 // Simple Hash Functions
253 //****************************
254 FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
256 const BYTE* p = (const BYTE*)input;
257 const BYTE* bEnd = p + len;
259 #define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
261 #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
265 bEnd=p=(const BYTE*)(size_t)16;
271 const BYTE* const limit = bEnd - 16;
272 U32 v1 = seed + PRIME32_1 + PRIME32_2;
273 U32 v2 = seed + PRIME32_2;
275 U32 v4 = seed - PRIME32_1;
279 v1 += XXH_get32bits(p) * PRIME32_2;
280 v1 = XXH_rotl32(v1, 13);
283 v2 += XXH_get32bits(p) * PRIME32_2;
284 v2 = XXH_rotl32(v2, 13);
287 v3 += XXH_get32bits(p) * PRIME32_2;
288 v3 = XXH_rotl32(v3, 13);
291 v4 += XXH_get32bits(p) * PRIME32_2;
292 v4 = XXH_rotl32(v4, 13);
298 h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
302 h32 = seed + PRIME32_5;
309 h32 += XXH_get32bits(p) * PRIME32_3;
310 h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;
316 h32 += (*p) * PRIME32_5;
317 h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
331 unsigned int XXH32 (const void* input, size_t len, unsigned seed)
334 // Simple version, good for code maintenance, but unfortunately slow for small inputs
336 XXH32_reset(&state, seed);
337 XXH32_update(&state, input, len);
338 return XXH32_digest(&state);
340 XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
342 # if !defined(XXH_USE_UNALIGNED_ACCESS)
343 if ((((size_t)input) & 3) == 0) // Input is aligned, let's leverage the speed advantage
345 if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
346 return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
348 return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
352 if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
353 return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
355 return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
359 FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
361 const BYTE* p = (const BYTE*)input;
362 const BYTE* bEnd = p + len;
364 #define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
366 #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
370 bEnd=p=(const BYTE*)(size_t)32;
376 const BYTE* const limit = bEnd - 32;
377 U64 v1 = seed + PRIME64_1 + PRIME64_2;
378 U64 v2 = seed + PRIME64_2;
380 U64 v4 = seed - PRIME64_1;
384 v1 += XXH_get64bits(p) * PRIME64_2;
386 v1 = XXH_rotl64(v1, 31);
388 v2 += XXH_get64bits(p) * PRIME64_2;
390 v2 = XXH_rotl64(v2, 31);
392 v3 += XXH_get64bits(p) * PRIME64_2;
394 v3 = XXH_rotl64(v3, 31);
396 v4 += XXH_get64bits(p) * PRIME64_2;
398 v4 = XXH_rotl64(v4, 31);
403 h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
406 v1 = XXH_rotl64(v1, 31);
409 h64 = h64 * PRIME64_1 + PRIME64_4;
412 v2 = XXH_rotl64(v2, 31);
415 h64 = h64 * PRIME64_1 + PRIME64_4;
418 v3 = XXH_rotl64(v3, 31);
421 h64 = h64 * PRIME64_1 + PRIME64_4;
424 v4 = XXH_rotl64(v4, 31);
427 h64 = h64 * PRIME64_1 + PRIME64_4;
431 h64 = seed + PRIME64_5;
438 U64 k1 = XXH_get64bits(p);
440 k1 = XXH_rotl64(k1,31);
443 h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
449 h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
450 h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
456 h64 ^= (*p) * PRIME64_5;
457 h64 = XXH_rotl64(h64, 11) * PRIME64_1;
471 uint64_t XXH64 (const void* input, size_t len, uint64_t seed)
474 // Simple version, good for code maintenance, but unfortunately slow for small inputs
476 XXH64_reset(&state, seed);
477 XXH64_update(&state, input, len);
478 return XXH64_digest(&state);
480 XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
482 # if !defined(XXH_USE_UNALIGNED_ACCESS)
483 if ((((size_t)input) & 7)==0) // Input is aligned, let's leverage the speed advantage
485 if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
486 return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
488 return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
492 if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
493 return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
495 return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
499 /****************************************************
500 * Advanced Hash Functions
501 ****************************************************/
512 U32 mem32[4]; /* defined as U32 for alignment */
524 U64 mem64[4]; /* defined as U64 for alignment */
529 XXH32_state_t* XXH32_createState(void)
531 XXH_STATIC_ASSERT(sizeof(XXH32_state_t) >= sizeof(XXH_istate32_t)); // A compilation error here means XXH32_state_t is not large enough
532 return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
535 void* XXH32_init (unsigned seed)
537 XXH32_state_t *st = XXH32_createState();
538 XXH32_reset(st, seed);
543 XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
549 XXH64_state_t* XXH64_createState(void)
551 XXH_STATIC_ASSERT(sizeof(XXH64_state_t) >= sizeof(XXH_istate64_t)); // A compilation error here means XXH64_state_t is not large enough
552 return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
554 XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
563 XXH_errorcode XXH32_reset(XXH32_state_t* state_in, U32 seed)
565 XXH_istate32_t* state = (XXH_istate32_t*) state_in;
567 state->v1 = seed + PRIME32_1 + PRIME32_2;
568 state->v2 = seed + PRIME32_2;
569 state->v3 = seed + 0;
570 state->v4 = seed - PRIME32_1;
571 state->total_len = 0;
576 XXH_errorcode XXH64_reset(XXH64_state_t* state_in, uint64_t seed)
578 XXH_istate64_t* state = (XXH_istate64_t*) state_in;
580 state->v1 = seed + PRIME64_1 + PRIME64_2;
581 state->v2 = seed + PRIME64_2;
582 state->v3 = seed + 0;
583 state->v4 = seed - PRIME64_1;
584 state->total_len = 0;
// Streaming XXH32 core: folds `len` bytes of `input` into the running state.
// Partial blocks (< 16 bytes) are buffered in state->mem32 until enough data
// arrives to process a full stripe.
// NOTE(review): this excerpt is missing several original lines (braces, p32
// advances, v1..v4 locals, return statements) — compare against upstream
// xxHash before relying on this text verbatim.
590 FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state_in, const void* input, size_t len, XXH_endianess endian)
592 XXH_istate32_t* state = (XXH_istate32_t *) state_in;
593 const BYTE* p = (const BYTE*)input;
594 const BYTE* const bEnd = p + len;
596 #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
597 if (input==NULL) return XXH_ERROR;
600 state->total_len += len;
// Not enough data for a full 16-byte stripe: buffer it and return.
602 if (state->memsize + len < 16) // fill in tmp buffer
604 XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
605 state->memsize += (U32)len;
// Complete the previously-buffered block with fresh input, then consume it.
609 if (state->memsize) // some data left from previous update
611 XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
613 const U32* p32 = state->mem32;
614 state->v1 += XXH_readLE32(p32, endian) * PRIME32_2;
615 state->v1 = XXH_rotl32(state->v1, 13);
616 state->v1 *= PRIME32_1;
618 state->v2 += XXH_readLE32(p32, endian) * PRIME32_2;
619 state->v2 = XXH_rotl32(state->v2, 13);
620 state->v2 *= PRIME32_1;
622 state->v3 += XXH_readLE32(p32, endian) * PRIME32_2;
623 state->v3 = XXH_rotl32(state->v3, 13);
624 state->v3 *= PRIME32_1;
626 state->v4 += XXH_readLE32(p32, endian) * PRIME32_2;
627 state->v4 = XXH_rotl32(state->v4, 13);
628 state->v4 *= PRIME32_1;
631 p += 16-state->memsize;
// Main loop: process full 16-byte stripes directly from the caller's buffer.
637 const BYTE* const limit = bEnd - 16;
645 v1 += XXH_readLE32(p, endian) * PRIME32_2;
646 v1 = XXH_rotl32(v1, 13);
649 v2 += XXH_readLE32(p, endian) * PRIME32_2;
650 v2 = XXH_rotl32(v2, 13);
653 v3 += XXH_readLE32(p, endian) * PRIME32_2;
654 v3 = XXH_rotl32(v3, 13);
657 v4 += XXH_readLE32(p, endian) * PRIME32_2;
658 v4 = XXH_rotl32(v4, 13);
// Stash the tail (< 16 bytes) for the next update/digest call.
672 XXH_memcpy(state->mem32, p, bEnd-p);
673 state->memsize = (int)(bEnd-p);
679 XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
681 XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
683 if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
684 return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
686 return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
// Streaming XXH32 finalization: folds the buffered tail bytes, applies the
// avalanche, and returns the hash.
// NOTE(review): this fork FREES the state at the end of digest (line 729
// below) even though the parameter is const — the caller must not reuse or
// free the state again. Several original lines (h32 declaration, braces,
// avalanche, return) are missing from this excerpt.
691 FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state_in, XXH_endianess endian)
693 XXH_istate32_t* state = (XXH_istate32_t*) state_in;
694 const BYTE * p = (const BYTE*)state->mem32;
695 BYTE* bEnd = (BYTE*)(state->mem32) + state->memsize;
// >= 16 bytes seen: combine the four running accumulators.
698 if (state->total_len >= 16)
700 h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
// Short-input path: the accumulators were never used.
704 h32 = state->seed + PRIME32_5;
707 h32 += (U32) state->total_len;
// Fold remaining buffered 4-byte words, then remaining bytes.
711 h32 += XXH_readLE32(p, endian) * PRIME32_3;
712 h32 = XXH_rotl32(h32, 17) * PRIME32_4;
718 h32 += (*p) * PRIME32_5;
719 h32 = XXH_rotl32(h32, 11) * PRIME32_1;
// Fork-specific: release the state before returning (casts away const).
729 XXH32_freeState((XXH32_state_t *)state_in);
735 U32 XXH32_digest (const XXH32_state_t* state_in)
737 XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
739 if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
740 return XXH32_digest_endian(state_in, XXH_littleEndian);
742 return XXH32_digest_endian(state_in, XXH_bigEndian);
// Streaming XXH64 core: folds `len` bytes of `input` into the running state.
// Partial blocks (< 32 bytes) are buffered in state->mem64.
// NOTE(review): this excerpt is missing several original lines (braces, p64
// advances, v1..v4 locals, return statements) — compare against upstream
// xxHash before relying on this text verbatim.
746 FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state_in, const void* input, size_t len, XXH_endianess endian)
748 XXH_istate64_t * state = (XXH_istate64_t *) state_in;
749 const BYTE* p = (const BYTE*)input;
750 const BYTE* const bEnd = p + len;
752 #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
753 if (input==NULL) return XXH_ERROR;
756 state->total_len += len;
// Not enough data for a full 32-byte stripe: buffer it and return.
758 if (state->memsize + len < 32) // fill in tmp buffer
760 XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
761 state->memsize += (U32)len;
// Complete the previously-buffered block with fresh input, then consume it.
765 if (state->memsize) // some data left from previous update
767 XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
769 const U64* p64 = state->mem64;
770 state->v1 += XXH_readLE64(p64, endian) * PRIME64_2;
771 state->v1 = XXH_rotl64(state->v1, 31);
772 state->v1 *= PRIME64_1;
774 state->v2 += XXH_readLE64(p64, endian) * PRIME64_2;
775 state->v2 = XXH_rotl64(state->v2, 31);
776 state->v2 *= PRIME64_1;
778 state->v3 += XXH_readLE64(p64, endian) * PRIME64_2;
779 state->v3 = XXH_rotl64(state->v3, 31);
780 state->v3 *= PRIME64_1;
782 state->v4 += XXH_readLE64(p64, endian) * PRIME64_2;
783 state->v4 = XXH_rotl64(state->v4, 31);
784 state->v4 *= PRIME64_1;
787 p += 32-state->memsize;
// Main loop: process full 32-byte stripes directly from the caller's buffer.
793 const BYTE* const limit = bEnd - 32;
801 v1 += XXH_readLE64(p, endian) * PRIME64_2;
802 v1 = XXH_rotl64(v1, 31);
805 v2 += XXH_readLE64(p, endian) * PRIME64_2;
806 v2 = XXH_rotl64(v2, 31);
809 v3 += XXH_readLE64(p, endian) * PRIME64_2;
810 v3 = XXH_rotl64(v3, 31);
813 v4 += XXH_readLE64(p, endian) * PRIME64_2;
814 v4 = XXH_rotl64(v4, 31);
// Stash the tail (< 32 bytes) for the next update/digest call.
828 XXH_memcpy(state->mem64, p, bEnd-p);
829 state->memsize = (int)(bEnd-p);
835 XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
837 XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
839 if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
840 return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
842 return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
// Streaming XXH64 finalization: merges the four accumulators (or takes the
// seed-based short path), folds the buffered tail, applies the avalanche,
// and returns the hash.
// NOTE(review): this fork FREES the state at the end of digest (line 925
// below) even though the parameter is const — the caller must not reuse or
// free the state again. Several original lines (h64/v1..v4 locals, braces,
// v*= PRIME64_2 steps, avalanche, return) are missing from this excerpt.
847 FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state_in, XXH_endianess endian)
849 XXH_istate64_t * state = (XXH_istate64_t *) state_in;
850 const BYTE * p = (const BYTE*)state->mem64;
851 BYTE* bEnd = (BYTE*)state->mem64 + state->memsize;
// >= 32 bytes seen: combine and merge the four running accumulators.
854 if (state->total_len >= 32)
861 h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
864 v1 = XXH_rotl64(v1, 31);
867 h64 = h64*PRIME64_1 + PRIME64_4;
870 v2 = XXH_rotl64(v2, 31);
873 h64 = h64*PRIME64_1 + PRIME64_4;
876 v3 = XXH_rotl64(v3, 31);
879 h64 = h64*PRIME64_1 + PRIME64_4;
882 v4 = XXH_rotl64(v4, 31);
885 h64 = h64*PRIME64_1 + PRIME64_4;
// Short-input path: the accumulators were never used.
889 h64 = state->seed + PRIME64_5;
892 h64 += (U64) state->total_len;
// Fold remaining buffered 8-byte words, one 4-byte word, then bytes.
896 U64 k1 = XXH_readLE64(p, endian);
898 k1 = XXH_rotl64(k1,31);
901 h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
907 h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
908 h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
914 h64 ^= (*p) * PRIME64_5;
915 h64 = XXH_rotl64(h64, 11) * PRIME64_1;
// Fork-specific: release the state before returning (casts away const).
925 XXH64_freeState((XXH64_state_t *)state_in);
931 uint64_t XXH64_digest (const XXH64_state_t* state_in)
933 XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
935 if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
936 return XXH64_digest_endian(state_in, XXH_littleEndian);
938 return XXH64_digest_endian(state_in, XXH_bigEndian);