/*
 * Copyright (C) - Bob Jenkins, May 2006, Public Domain.
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 * Copyright (C) 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * These are functions for producing 32-bit hashes for hash table lookup.
 * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() are
 * externally useful functions. Routines to test the hash are included if
 * SELF_TEST is defined. You can use this free for any purpose. It's in the
 * public domain. It has no warranty.
 *
 * You probably want to use hashlittle(). hashlittle() and hashbig() hash byte
 * arrays. hashlittle() is faster than hashbig() on little-endian machines.
 * Intel and AMD are little-endian machines. On second thought, you probably
 * want hashlittle2(), which is identical to hashlittle() except it returns two
 * 32-bit hashes for the price of one. You could implement hashbig2() if you
 * wanted, but I haven't bothered here.
 *
 * If you want to find a hash of, say, exactly 7 integers, do
 *   a = i1; b = i2; c = i3;
 *   mix(a, b, c);
 *   a += i4; b += i5; c += i6;
 *   mix(a, b, c);
 *   a += i7;
 *   final(a, b, c);
 * then use c as the hash value. If you have a variable length array of
 * 4-byte integers to hash, use hashword(). If you have a byte array (like
 * a character string), use hashlittle(). If you have several byte arrays, or
 * a mix of things, see the comments above hashlittle().
 *
 * Why is this so big? I read 12 bytes at a time into 3 4-byte integers, then
 * mix those integers. This is fast (you can do a lot more thorough mixing
 * with 12*3 instructions on 3 integers than you can with 3 instructions on 1
 * byte), but shoehorning those bytes into integers efficiently is messy.
 */
#include <stdio.h>	/* defines printf for tests */
#include <time.h>	/* defines time_t for timings in the test */
#include <stdint.h>	/* defines uint32_t etc */
#include <sys/param.h>	/* attempt to define endianness */
#include <endian.h>	/* attempt to define endianness */
#include <assert.h>	/* defines assert(), used by hash_key() below */
#include <string.h>	/* defines strncmp(), used by the compare helpers */

#include <urcu/compiler.h>
/*
 * My best guess at if you are big-endian or little-endian. This may
 * need #defines.
 */
#if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \
	__BYTE_ORDER == __LITTLE_ENDIAN) || \
	(defined(i386) || defined(__i386__) || defined(__i486__) || \
	defined(__i586__) || defined(__i686__) || defined(vax) || defined(MIPSEL))
# define HASH_LITTLE_ENDIAN 1
# define HASH_BIG_ENDIAN 0
#elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \
	__BYTE_ORDER == __BIG_ENDIAN) || \
	(defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel))
# define HASH_LITTLE_ENDIAN 0
# define HASH_BIG_ENDIAN 1
#else
# define HASH_LITTLE_ENDIAN 0
# define HASH_BIG_ENDIAN 0
#endif
#define hashsize(n) ((uint32_t)1<<(n))
#define hashmask(n) (hashsize(n)-1)
#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
/*
 * mix -- mix 3 32-bit values reversibly.
 *
 * This is reversible, so any information in (a,b,c) before mix() is
 * still in (a,b,c) after mix().
 *
 * If four pairs of (a,b,c) inputs are run through mix(), or through
 * mix() in reverse, there are at least 32 bits of the output that
 * are sometimes the same for one pair and different for another pair.
 * This was tested for:
 * * pairs that differed by one bit, by two bits, in any combination
 *   of top bits of (a,b,c), or in any combination of bottom bits of
 *   (a,b,c).
 * * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
 *   the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
 *   is commonly produced by subtraction) looks like a single 1-bit
 *   difference.
 * * the base values were pseudorandom, all zero but one bit set, or
 *   all zero plus a counter that starts at zero.
 *
 * Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
 * satisfy this are
 *     4  6  8 16 19  4
 *     9 15  3 18 27 15
 *    14  9  3  7 17  3
 * Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
 * for "differ" defined as + with a one-bit base and a two-bit delta. I
 * used http://burtleburtle.net/bob/hash/avalanche.html to choose
 * the operations, constants, and arrangements of the variables.
 *
 * This does not achieve avalanche. There are input bits of (a,b,c)
 * that fail to affect some output bits of (a,b,c), especially of a. The
 * most thoroughly mixed value is c, but it doesn't really even achieve
 * avalanche in c.
 *
 * This allows some parallelism. Read-after-writes are good at doubling
 * the number of bits affected, so the goal of mixing pulls in the opposite
 * direction as the goal of parallelism. I did what I could. Rotates
 * seem to cost as much as shifts on every machine I could lay my hands
 * on, and rotates are much kinder to the top and bottom bits, so I used
 * rotates.
 */
#define mix(a, b, c) \
{ \
	a -= c; a ^= rot(c, 4); c += b; \
	b -= a; b ^= rot(a, 6); a += c; \
	c -= b; c ^= rot(b, 8); b += a; \
	a -= c; a ^= rot(c, 16); c += b; \
	b -= a; b ^= rot(a, 19); a += c; \
	c -= b; c ^= rot(b, 4); b += a; \
}
/*
 * final -- final mixing of 3 32-bit values (a,b,c) into c
 *
 * Pairs of (a,b,c) values differing in only a few bits will usually
 * produce values of c that look totally different. This was tested for
 * * pairs that differed by one bit, by two bits, in any combination
 *   of top bits of (a,b,c), or in any combination of bottom bits of
 *   (a,b,c).
 * * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
 *   the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
 *   is commonly produced by subtraction) looks like a single 1-bit
 *   difference.
 * * the base values were pseudorandom, all zero but one bit set, or
 *   all zero plus a counter that starts at zero.
 *
 * These constants passed:
 *   14 11 25 16 4 14 24
 *   12 14 25 16 4 14 24
 * and these came close:
 *    4  8 15 26 3 22 24
 *   10  8 15 26 3 22 24
 *   11  8 15 26 3 22 24
 */
#define final(a, b, c) \
{ \
	c ^= b; c -= rot(b, 14); \
	a ^= c; a -= rot(c, 11); \
	b ^= a; b -= rot(a, 25); \
	c ^= b; c -= rot(b, 16); \
	a ^= c; a -= rot(c, 4); \
	b ^= a; b -= rot(a, 14); \
	c ^= b; c -= rot(b, 24); \
}
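
/*
 * Illustrative sketch (not part of the original file): the "exactly 7
 * integers" recipe from the header comment, spelled out with mix() and
 * final(). The function name is an assumption added for this example.
 */
static __attribute__((unused))
uint32_t example_hash_seven_ints(uint32_t i1, uint32_t i2, uint32_t i3,
		uint32_t i4, uint32_t i5, uint32_t i6, uint32_t i7)
{
	uint32_t a = i1, b = i2, c = i3;

	mix(a, b, c);
	a += i4; b += i5; c += i6;
	mix(a, b, c);
	a += i7;
	final(a, b, c);
	return c;	/* use c as the hash value */
}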
static __attribute__((unused))
uint32_t hashword(
	const uint32_t *k,	/* the key, an array of uint32_t values */
	size_t length,		/* the length of the key, in uint32_ts */
	uint32_t initval)	/* the previous hash, or an arbitrary value */
{
	uint32_t a, b, c;
	/* Set up the internal state */
	a = b = c = 0xdeadbeef + (((uint32_t) length) << 2) + initval;
	/*----------------------------------------- handle most of the key */
	while (length > 3) {
		a += k[0];
		b += k[1];
		c += k[2];
		mix(a, b, c);
		length -= 3;
		k += 3;
	}
	/*----------------------------------- handle the last 3 uint32_t's */
	switch (length) {	/* all the case statements fall through */
	case 3: c += k[2];	/* fall through */
	case 2: b += k[1];	/* fall through */
	case 1: a += k[0];
		final(a, b, c);
		/* fall through */
	case 0:	/* case 0: nothing left to add */
		break;
	}
	/*---------------------------------------------- report the result */
	return c;
}
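
/*
 * Minimal usage sketch, assuming nothing beyond hashword() above: the
 * key array, its length, and the 0 seed are arbitrary illustrations.
 */
static __attribute__((unused))
uint32_t example_hashword_usage(void)
{
	uint32_t key[3] = { 1, 2, 3 };

	return hashword(key, 3, 0);	/* 0: arbitrary initial seed */
}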
/*
 * hashword2() -- same as hashword(), but take two seeds and return two 32-bit
 * values. pc and pb must both be nonnull, and *pc and *pb must both be
 * initialized with seeds. If you pass in (*pb)==0, the output (*pc) will be
 * the same as the return value from hashword().
 */
static __attribute__((unused))
void hashword2(const uint32_t *k, size_t length,
		uint32_t *pc, uint32_t *pb)
{
	uint32_t a, b, c;
	/* Set up the internal state */
	a = b = c = 0xdeadbeef + ((uint32_t) (length << 2)) + *pc;
	c += *pb;

	/*----------------------------------------- handle most of the key */
	while (length > 3) {
		a += k[0];
		b += k[1];
		c += k[2];
		mix(a, b, c);
		length -= 3;
		k += 3;
	}

	/*----------------------------------- handle the last 3 uint32_t's */
	switch (length) {	/* all the case statements fall through */
	case 3: c += k[2];	/* fall through */
	case 2: b += k[1];	/* fall through */
	case 1: a += k[0];
		final(a, b, c);
		/* fall through */
	case 0:	/* case 0: nothing left to add */
		break;
	}
	/*---------------------------------------------- report the result */
	*pc = c;
	*pb = b;
}
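
/*
 * Sketch of getting two 32-bit hashes for the price of one, per the
 * comment above hashword2(). The function name, key words, and seed
 * values here are illustrative assumptions.
 */
static __attribute__((unused))
void example_hashword2_usage(uint32_t *h1, uint32_t *h2)
{
	uint32_t key[2] = { 0xcafe, 0xbabe };	/* arbitrary key words */

	*h1 = 0;	/* first seed */
	*h2 = 0;	/* second seed; 0 makes *h1 equal hashword()'s result */
	hashword2(key, 2, h1, h2);
}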
/*
 * hashlittle() -- hash a variable-length key into a 32-bit value
 *   k       : the key (the unaligned variable-length array of bytes)
 *   length  : the length of the key, counting by bytes
 *   initval : can be any 4-byte value
 * Returns a 32-bit value. Every bit of the key affects every bit of
 * the return value. Two keys differing by one or two bits will have
 * totally different hash values.
 *
 * The best hash table sizes are powers of 2. There is no need to do
 * mod a prime (mod is sooo slow!). If you need less than 32 bits,
 * use a bitmask. For example, if you need only 10 bits, do
 *   h = (h & hashmask(10));
 * In which case, the hash table should have hashsize(10) elements.
 *
 * If you are hashing n strings (uint8_t **)k, do it like this:
 *   for (i = 0, h = 0; i < n; ++i) h = hashlittle(k[i], len[i], h);
 *
 * By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
 * code any way you wish, private, educational, or commercial. It's free.
 *
 * Use for hash table lookup, or anything where one collision in 2^^32 is
 * acceptable. Do NOT use for cryptographic purposes.
 */
static uint32_t hashlittle(const void *key, size_t length, uint32_t initval)
{
	uint32_t a, b, c;
	union {
		const void *ptr;
		size_t i;
	} u;	/* needed for Mac Powerbook G4 */
	/* Set up the internal state */
	a = b = c = 0xdeadbeef + ((uint32_t) length) + initval;

	u.ptr = key;
	if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
		const uint32_t *k = (const uint32_t *) key;	/* read 32-bit chunks */
		/*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
		while (length > 12) {
			a += k[0];
			b += k[1];
			c += k[2];
			mix(a, b, c);
			length -= 12;
			k += 3;
		}
		/*---------------------------- handle the last (probably partial) block */
		/*
		 * "k[2]&0xffffff" actually reads beyond the end of the string, but
		 * then masks off the part it's not allowed to read. Because the
		 * string is aligned, the masked-off tail is in the same word as the
		 * rest of the string. Every machine with memory protection I've seen
		 * does it on word boundaries, so is OK with this. But VALGRIND will
		 * still catch it and complain. The masking trick does make the hash
		 * noticeably faster for short strings (like English words).
		 */
#ifndef VALGRIND

		switch (length) {
		case 12: c += k[2]; b += k[1]; a += k[0]; break;
		case 11: c += k[2] & 0xffffff; b += k[1]; a += k[0]; break;
		case 10: c += k[2] & 0xffff; b += k[1]; a += k[0]; break;
		case 9: c += k[2] & 0xff; b += k[1]; a += k[0]; break;
		case 8: b += k[1]; a += k[0]; break;
		case 7: b += k[1] & 0xffffff; a += k[0]; break;
		case 6: b += k[1] & 0xffff; a += k[0]; break;
		case 5: b += k[1] & 0xff; a += k[0]; break;
		case 4: a += k[0]; break;
		case 3: a += k[0] & 0xffffff; break;
		case 2: a += k[0] & 0xffff; break;
		case 1: a += k[0] & 0xff; break;
		case 0: return c;	/* zero length strings require no mixing */
		}
#else /* make valgrind happy */

		const uint8_t *k8;

		k8 = (const uint8_t *) k;
		switch (length) {
		case 12: c += k[2]; b += k[1]; a += k[0]; break;
		case 11: c += ((uint32_t) k8[10]) << 16;	/* fall through */
		case 10: c += ((uint32_t) k8[9]) << 8;	/* fall through */
		case 9: c += k8[8];	/* fall through */
		case 8: b += k[1]; a += k[0]; break;
		case 7: b += ((uint32_t) k8[6]) << 16;	/* fall through */
		case 6: b += ((uint32_t) k8[5]) << 8;	/* fall through */
		case 5: b += k8[4];	/* fall through */
		case 4: a += k[0]; break;
		case 3: a += ((uint32_t) k8[2]) << 16;	/* fall through */
		case 2: a += ((uint32_t) k8[1]) << 8;	/* fall through */
		case 1: a += k8[0]; break;
		case 0: return c;	/* zero length strings require no mixing */
		}

#endif /* !valgrind */
	} else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
		const uint16_t *k = (const uint16_t *) key;	/* read 16-bit chunks */
		const uint8_t *k8;
		/*--------------- all but last block: aligned reads and different mixing */
		while (length > 12) {
			a += k[0] + (((uint32_t) k[1]) << 16);
			b += k[2] + (((uint32_t) k[3]) << 16);
			c += k[4] + (((uint32_t) k[5]) << 16);
			mix(a, b, c);
			length -= 12;
			k += 6;
		}
		/*----------------------------- handle the last (probably partial) block */
		k8 = (const uint8_t *) k;
		switch (length) {
		case 12:
			c += k[4] + (((uint32_t) k[5]) << 16);
			b += k[2] + (((uint32_t) k[3]) << 16);
			a += k[0] + (((uint32_t) k[1]) << 16);
			break;
		case 11:
			c += ((uint32_t) k8[10]) << 16;	/* fall through */
		case 10:
			c += k[4];
			b += k[2] + (((uint32_t) k[3]) << 16);
			a += k[0] + (((uint32_t) k[1]) << 16);
			break;
		case 9:
			c += k8[8];	/* fall through */
		case 8:
			b += k[2] + (((uint32_t) k[3]) << 16);
			a += k[0] + (((uint32_t) k[1]) << 16);
			break;
		case 7:
			b += ((uint32_t) k8[6]) << 16;	/* fall through */
		case 6:
			b += k[2];
			a += k[0] + (((uint32_t) k[1]) << 16);
			break;
		case 5:
			b += k8[4];	/* fall through */
		case 4:
			a += k[0] + (((uint32_t) k[1]) << 16);
			break;
		case 3:
			a += ((uint32_t) k8[2]) << 16;	/* fall through */
		case 2:
			a += k[0];
			break;
		case 1:
			a += k8[0];
			break;
		case 0:
			return c;	/* zero length requires no mixing */
		}
	} else {	/* need to read the key one byte at a time */
		const uint8_t *k = (const uint8_t *) key;
		/*--------------- all but the last block: affect some 32 bits of (a,b,c) */
		while (length > 12) {
			a += k[0];
			a += ((uint32_t) k[1]) << 8;
			a += ((uint32_t) k[2]) << 16;
			a += ((uint32_t) k[3]) << 24;
			b += k[4];
			b += ((uint32_t) k[5]) << 8;
			b += ((uint32_t) k[6]) << 16;
			b += ((uint32_t) k[7]) << 24;
			c += k[8];
			c += ((uint32_t) k[9]) << 8;
			c += ((uint32_t) k[10]) << 16;
			c += ((uint32_t) k[11]) << 24;
			mix(a, b, c);
			length -= 12;
			k += 12;
		}
		/*-------------------------------- last block: affect all 32 bits of (c) */
		switch (length) {	/* all the case statements fall through */
		case 12: c += ((uint32_t) k[11]) << 24;	/* fall through */
		case 11: c += ((uint32_t) k[10]) << 16;	/* fall through */
		case 10: c += ((uint32_t) k[9]) << 8;	/* fall through */
		case 9: c += k[8];	/* fall through */
		case 8: b += ((uint32_t) k[7]) << 24;	/* fall through */
		case 7: b += ((uint32_t) k[6]) << 16;	/* fall through */
		case 6: b += ((uint32_t) k[5]) << 8;	/* fall through */
		case 5: b += k[4];	/* fall through */
		case 4: a += ((uint32_t) k[3]) << 24;	/* fall through */
		case 3: a += ((uint32_t) k[2]) << 16;	/* fall through */
		case 2: a += ((uint32_t) k[1]) << 8;	/* fall through */
		case 1: a += k[0];
			break;
		case 0: return c;
		}
	}

	final(a, b, c);
	return c;
}
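
/*
 * Usage sketch, following the hashmask() advice in the comment above
 * hashlittle(): bucket a string key into a hashsize(10)-entry table.
 * The function name and the 10-bit size are illustrative assumptions.
 */
static __attribute__((unused))
uint32_t example_hashlittle_bucket(const char *s, size_t len)
{
	uint32_t h = hashlittle(s, len, 0);	/* 0: arbitrary seed */

	return h & hashmask(10);	/* index into a hashsize(10) table */
}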
#if (CAA_BITS_PER_LONG == 64)
/*
 * Hash function for number value.
 */
unsigned long hash_key(void *_key, size_t length, unsigned long seed)
{
	union {
		uint64_t v64;
		uint32_t v32[2];
	} v;
	union {
		uint64_t v64;
		uint32_t v32[2];
	} key;
	assert(length == sizeof(unsigned long));
	v.v64 = (uint64_t) seed;
	key.v64 = (uint64_t) _key;
	hashword2(key.v32, 2, &v.v32[0], &v.v32[1]);
	return v.v64;
}
#else
/*
 * Hash function for number value.
 */
unsigned long hash_key(void *_key, size_t length, unsigned long seed)
{
	uint32_t key = (uint32_t) _key;

	assert(length == sizeof(uint32_t));
	return hashword(&key, 1, seed);
}
#endif /* CAA_BITS_PER_LONG == 64 */
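
/*
 * Sketch of calling hash_key() with a scalar stored directly in the
 * void * argument, as the casts above expect. The function name and
 * parameter values are illustrative assumptions.
 */
static __attribute__((unused))
unsigned long example_hash_key_usage(unsigned long value, unsigned long seed)
{
	return hash_key((void *) value, sizeof(unsigned long), seed);
}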
/*
 * Hash function for string.
 */
unsigned long hash_key_str(void *key, size_t length, unsigned long seed)
{
	return hashlittle(key, length, seed);
}
/*
 * Hash function compare for number value.
 */
unsigned long hash_compare_key(void *key1, size_t key1_len,
		void *key2, size_t key2_len)
{
	if (key1_len != key2_len) {
		return 0;
	}

	if (key1 == key2) {
		return 1;
	}

	return 0;
}
/*
 * Hash compare function for string.
 */
unsigned long hash_compare_key_str(void *key1, size_t key1_len,
		void *key2, size_t key2_len)
{
	if (key1_len != key2_len) {
		return 0;
	}

	if (strncmp(key1, key2, key1_len) == 0) {
		return 1;
	}

	return 0;
}
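
/*
 * Sketch (illustrative, not part of the original file): pairing the hash
 * and compare helpers for string keys, using the hash first as a cheap
 * filter before the full comparison. The example_* name and the caller's
 * seed are assumptions.
 */
static __attribute__((unused))
int example_str_keys_match(char *k1, char *k2, unsigned long seed)
{
	size_t l1 = strlen(k1), l2 = strlen(k2);

	if (hash_key_str(k1, l1, seed) != hash_key_str(k2, l2, seed)) {
		return 0;	/* different hashes: keys cannot match */
	}
	return hash_compare_key_str(k1, l1, k2, l2) != 0;
}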