2 * Copyright 2012-2015 Samy Al Bahra
3 * Copyright 2011-2014 AppNexus, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * This is the Murmur hash written by Austin Appleby.
34 #include <ck_stdint.h>
35 #include <ck_string.h>
37 //-----------------------------------------------------------------------------
38 // MurmurHash3 was written by Austin Appleby, and is placed in the public
39 // domain. The author hereby disclaims copyright to this source code.
41 // Note - The x86 and x64 versions do _not_ produce the same results, as the
42 // algorithms are optimized for their respective platforms. You can still
43 // compile and run any of them on any platform, but your performance with the
44 // non-native version will be less than optimal.
46 //-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Platform-specific functions and macros

// Microsoft Visual Studio

#if defined(_MSC_VER)

#define FORCE_INLINE __forceinline

#include <stdlib.h>

#define ROTL32(x,y) _rotl(x,y)
#define ROTL64(x,y) _rotl64(x,y)

#define BIG_CONSTANT(x) (x)

// Other compilers

#else // defined(_MSC_VER)

#define FORCE_INLINE inline __attribute__((always_inline))

/*
 * Rotate a 32-bit value left by r bits.
 * NOTE(review): r == 0 (or r >= 32) would make the (32 - r) shift
 * undefined behavior; the hash routines below only call this with
 * the constants 13 and 15, so the case never arises here.
 */
static inline uint32_t rotl32 ( uint32_t x, int8_t r )
{

  return (x << r) | (x >> (32 - r));
}

/* Rotate a 64-bit value left by r bits (same 0 < r < 64 caveat). */
static inline uint64_t rotl64 ( uint64_t x, int8_t r )
{

  return (x << r) | (x >> (64 - r));
}

#define ROTL32(x,y) rotl32(x,y)
#define ROTL64(x,y) rotl64(x,y)

#define BIG_CONSTANT(x) (x##LLU)

#endif // !defined(_MSC_VER)
85 //-----------------------------------------------------------------------------
86 // Block read - if your platform needs to do endian-swapping or can only
87 // handle aligned reads, do the conversion here
89 FORCE_INLINE static uint32_t getblock ( const uint32_t * p, int i )
94 __asm__ (" lrv %0,%1\n"
95 : "=r" (res) : "Q" (p[i]) : "cc", "mem");
99 #endif /* !__s390x__ */
102 //-----------------------------------------------------------------------------
103 // Finalization mix - force all bits of a hash block to avalanche
105 FORCE_INLINE static uint32_t fmix ( uint32_t h )
//-----------------------------------------------------------------------------

/*
 * 32-bit MurmurHash3 (x86 variant) of key[0..len) with the given seed;
 * the result is written through out. Whole 32-bit blocks are mixed via
 * getblock() (which byte-swaps on big-endian s390x), then the 1-3
 * leftover tail bytes, then the length is folded in and fmix()
 * finalizes.
 */
static inline void MurmurHash3_x86_32 ( const void * key, int len,
                                        uint32_t seed, uint32_t * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 4;
  int i;

  uint32_t h1 = seed;

  uint32_t c1 = 0xcc9e2d51;
  uint32_t c2 = 0x1b873593;

  //----------
  // body

  /* Point at one-past-the-last full block and walk back with negative
   * indices, so the loop condition is a simple test against zero. */
  const uint32_t * blocks = (const uint32_t *)(const void *)(data + nblocks*4);

  for(i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock(blocks,i);

    k1 *= c1;
    k1 = ROTL32(k1,15);
    k1 *= c2;

    h1 ^= k1;
    h1 = ROTL32(h1,13);
    h1 = h1*5+0xe6546b64;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*4);

  uint32_t k1 = 0;

  switch(len & 3)
  {
  case 3: k1 ^= tail[2] << 16;
  /* fall-through */
  case 2: k1 ^= tail[1] << 8;
  /* fall-through */
  case 1: k1 ^= tail[0];
          k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  };

  //----------
  // finalization

  h1 ^= len;

  h1 = fmix(h1);

  *(uint32_t *)out = h1;
}
/*
 * MurmurHash2, 64-bit version for 64-bit platforms (Austin Appleby).
 * Hashes key[0..len) with the given seed and returns the 64-bit hash.
 * Full 8-byte words are mixed in the main loop; up to 7 tail bytes are
 * folded in by the switch; three final xor-shift/multiply rounds mix.
 */
static inline uint64_t MurmurHash64A ( const void * key, int len, uint64_t seed )
{
  /* UINT64_C (from <stdint.h>) yields a correctly-typed 64-bit constant
   * on every conforming compiler — exactly what BIG_CONSTANT exists to
   * paper over. */
  const uint64_t m = UINT64_C(0xc6a4a7935bd1e995);
  const int r = 47;

  uint64_t h = seed ^ (len * m);

  const uint64_t * data = (const uint64_t *)key;
  const uint64_t * end = data + (len/8);

  while(data != end)
  {
    uint64_t k;

    /* Dereference directly only when the pointer is 8-byte aligned;
     * otherwise fetch the word via memcpy to avoid a faulting or
     * undefined unaligned load. */
    if (!((uintptr_t)data & 0x7))
      k = *data++;
    else {
      memcpy(&k, data, sizeof(k));
      data++;
    }

    k *= m;
    k ^= k >> r;
    k *= m;

    h ^= k;
    h *= m;
  }

  const unsigned char * data2 = (const unsigned char*)data;

  switch(len & 7)
  {
  case 7: h ^= (uint64_t)(data2[6]) << 48;
  /* fall-through */
  case 6: h ^= (uint64_t)(data2[5]) << 40;
  /* fall-through */
  case 5: h ^= (uint64_t)(data2[4]) << 32;
  /* fall-through */
  case 4: h ^= (uint64_t)(data2[3]) << 24;
  /* fall-through */
  case 3: h ^= (uint64_t)(data2[2]) << 16;
  /* fall-through */
  case 2: h ^= (uint64_t)(data2[1]) << 8;
  /* fall-through */
  case 1: h ^= (uint64_t)(data2[0]);
          h *= m;
  };

  h ^= h >> r;
  h *= m;
  h ^= h >> r;

  return h;
}
//-----------------------------------------------------------------------------
// 64-bit hash for 32-bit platforms

/*
 * MurmurHash2, 64-bit result built from two 32-bit lanes (h1/h2), for
 * 32-bit platforms. Hashes key[0..len) with the given seed; the seed's
 * low word initializes h1 (xored with len) and its high word h2.
 * NOTE(review): words are read with plain 32-bit dereferences, so key
 * is presumably expected to be 4-byte aligned on strict-alignment
 * targets — confirm against callers.
 */
static inline uint64_t MurmurHash64B ( const void * key, int len, uint64_t seed )
{
  const uint32_t m = 0x5bd1e995;
  const int r = 24;

  uint32_t h1 = (uint32_t)(seed) ^ len;
  uint32_t h2 = (uint32_t)(seed >> 32);

  const uint32_t * data = (const uint32_t *)key;

  while(len >= 8)
  {
    uint32_t k1 = *data++;
    k1 *= m; k1 ^= k1 >> r; k1 *= m;
    h1 *= m; h1 ^= k1;
    len -= 4;

    uint32_t k2 = *data++;
    k2 *= m; k2 ^= k2 >> r; k2 *= m;
    h2 *= m; h2 ^= k2;
    len -= 4;
  }

  if(len >= 4)
  {
    uint32_t k1 = *data++;
    k1 *= m; k1 ^= k1 >> r; k1 *= m;
    h1 *= m; h1 ^= k1;
    len -= 4;
  }

  switch(len)
  {
  case 3: h2 ^= ((const unsigned char*)data)[2] << 16;
  /* fall-through */
  case 2: h2 ^= ((const unsigned char*)data)[1] << 8;
  /* fall-through */
  case 1: h2 ^= ((const unsigned char*)data)[0];
      h2 *= m;
  };

  /* Cross-mix the two lanes so each half depends on the other. */
  h1 ^= h2 >> 18; h1 *= m;
  h2 ^= h1 >> 22; h2 *= m;
  h1 ^= h2 >> 17; h1 *= m;
  h2 ^= h1 >> 19; h2 *= m;

  uint64_t h = h1;

  h = (h << 32) | h2;

  return h;
}
287 #endif /* CK_HT_HASH_H */