8d0e515aecafbb94a347c418f23bf341f719f71c
[lttng-tools.git] / src / common / hashtable / utils.c
1 /*
2 * Copyright (C) - Bob Jenkins, May 2006
3 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
4 * Copyright (C) 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20 /*
21 * These are functions for producing 32-bit hashes for hash table lookup.
22 * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() are
23 * externally useful functions. Routines to test the hash are included if
24 * SELF_TEST is defined. You can use this free for any purpose. It's in the
25 * public domain. It has no warranty.
26 *
27 * You probably want to use hashlittle(). hashlittle() and hashbig() hash byte
 * arrays. hashlittle() is faster than hashbig() on little-endian machines.
29 * Intel and AMD are little-endian machines. On second thought, you probably
30 * want hashlittle2(), which is identical to hashlittle() except it returns two
31 * 32-bit hashes for the price of one. You could implement hashbig2() if you
32 * wanted but I haven't bothered here.
33 *
34 * If you want to find a hash of, say, exactly 7 integers, do
35 * a = i1; b = i2; c = i3;
36 * mix(a,b,c);
37 * a += i4; b += i5; c += i6;
38 * mix(a,b,c);
39 * a += i7;
40 * final(a,b,c);
41 * then use c as the hash value. If you have a variable length array of
42 * 4-byte integers to hash, use hashword(). If you have a byte array (like
43 * a character string), use hashlittle(). If you have several byte arrays, or
44 * a mix of things, see the comments above hashlittle().
45 *
46 * Why is this so big? I read 12 bytes at a time into 3 4-byte integers, then
47 * mix those integers. This is fast (you can do a lot more thorough mixing
48 * with 12*3 instructions on 3 integers than you can with 3 instructions on 1
49 * byte), but shoehorning those bytes into integers efficiently is messy.
50 */
51 #define _GNU_SOURCE
52 #include <assert.h>
53 #include <stdint.h> /* defines uint32_t etc */
54 #include <stdio.h> /* defines printf for tests */
55 #include <string.h>
56 #include <sys/param.h> /* attempt to define endianness */
57 #include <time.h> /* defines time_t for timings in the test */
58 #include <urcu/compiler.h>
59
60 #include "utils.h"
61 #include <common/compat/endian.h> /* attempt to define endianness */
62 #include <common/common.h>
63
64 /*
65 * My best guess at if you are big-endian or little-endian. This may
66 * need adjustment.
67 */
68 #if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \
69 __BYTE_ORDER == __LITTLE_ENDIAN) || \
70 (defined(i386) || defined(__i386__) || defined(__i486__) || \
71 defined(__i586__) || defined(__i686__) || defined(vax) || defined(MIPSEL))
72 # define HASH_LITTLE_ENDIAN 1
73 # define HASH_BIG_ENDIAN 0
74 #elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \
75 __BYTE_ORDER == __BIG_ENDIAN) || \
76 (defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel))
77 # define HASH_LITTLE_ENDIAN 0
78 # define HASH_BIG_ENDIAN 1
79 #else
80 # define HASH_LITTLE_ENDIAN 0
81 # define HASH_BIG_ENDIAN 0
82 #endif
83
/* Number of buckets in a hash table of n bits, and the matching bit mask. */
#define hashsize(n) ((uint32_t)1<<(n))
#define hashmask(n) (hashsize(n)-1)
/* Rotate the 32-bit value x left by k bits; k must be in 1..31. */
#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
87
88 /*
89 * mix -- mix 3 32-bit values reversibly.
90 *
91 * This is reversible, so any information in (a,b,c) before mix() is
92 * still in (a,b,c) after mix().
93 *
94 * If four pairs of (a,b,c) inputs are run through mix(), or through
95 * mix() in reverse, there are at least 32 bits of the output that
96 * are sometimes the same for one pair and different for another pair.
97 * This was tested for:
98 * * pairs that differed by one bit, by two bits, in any combination
99 * of top bits of (a,b,c), or in any combination of bottom bits of
100 * (a,b,c).
101 * * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
102 * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
103 * is commonly produced by subtraction) look like a single 1-bit
104 * difference.
105 * * the base values were pseudorandom, all zero but one bit set, or
106 * all zero plus a counter that starts at zero.
107 *
108 * Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
109 * satisfy this are
110 * 4 6 8 16 19 4
111 * 9 15 3 18 27 15
112 * 14 9 3 7 17 3
113 * Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
114 * for "differ" defined as + with a one-bit base and a two-bit delta. I
115 * used http://burtleburtle.net/bob/hash/avalanche.html to choose
116 * the operations, constants, and arrangements of the variables.
117 *
118 * This does not achieve avalanche. There are input bits of (a,b,c)
119 * that fail to affect some output bits of (a,b,c), especially of a. The
120 * most thoroughly mixed value is c, but it doesn't really even achieve
121 * avalanche in c.
122 *
123 * This allows some parallelism. Read-after-writes are good at doubling
124 * the number of bits affected, so the goal of mixing pulls in the opposite
125 * direction as the goal of parallelism. I did what I could. Rotates
126 * seem to cost as much as shifts on every machine I could lay my hands
127 * on, and rotates are much kinder to the top and bottom bits, so I used
128 * rotates.
129 */
/* Reversibly mix the three 32-bit state words; see the analysis above. */
#define mix(a,b,c) \
{ \
	a -= c; a ^= rot(c, 4); c += b; \
	b -= a; b ^= rot(a, 6); a += c; \
	c -= b; c ^= rot(b, 8); b += a; \
	a -= c; a ^= rot(c,16); c += b; \
	b -= a; b ^= rot(a,19); a += c; \
	c -= b; c ^= rot(b, 4); b += a; \
}
139
140 /*
141 * final -- final mixing of 3 32-bit values (a,b,c) into c
142 *
143 * Pairs of (a,b,c) values differing in only a few bits will usually
144 * produce values of c that look totally different. This was tested for
145 * * pairs that differed by one bit, by two bits, in any combination
146 * of top bits of (a,b,c), or in any combination of bottom bits of
147 * (a,b,c).
148 * * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
149 * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
150 * is commonly produced by subtraction) look like a single 1-bit
151 * difference.
152 * * the base values were pseudorandom, all zero but one bit set, or
153 * all zero plus a counter that starts at zero.
154 *
155 * These constants passed:
156 * 14 11 25 16 4 14 24
157 * 12 14 25 16 4 14 24
158 * and these came close:
159 * 4 8 15 26 3 22 24
160 * 10 8 15 26 3 22 24
161 * 11 8 15 26 3 22 24
162 */
/* Final avalanche: collapse (a,b,c) so that c carries the hash result. */
#define final(a,b,c) \
{ \
	c ^= b; c -= rot(b,14); \
	a ^= c; a -= rot(c,11); \
	b ^= a; b -= rot(a,25); \
	c ^= b; c -= rot(b,16); \
	a ^= c; a -= rot(c,4); \
	b ^= a; b -= rot(a,14); \
	c ^= b; c -= rot(b,24); \
}
173
/*
 * hashword() -- hash an array of 32-bit words into a 32-bit value.
 * k       - the key, an array of uint32_t values
 * length  - the length of the key, in uint32_ts
 * initval - the previous hash, or an arbitrary value
 */
static uint32_t __attribute__((unused)) hashword(const uint32_t *k,
		size_t length, uint32_t initval)
{
	uint32_t x, y, z;

	/* Internal state: golden constant salted with length and seed. */
	x = y = z = 0xdeadbeef + (((uint32_t) length) << 2) + initval;

	/* Consume the key three words per mixing round. */
	for (; length > 3; length -= 3, k += 3) {
		x += k[0];
		y += k[1];
		z += k[2];
		mix(x, y, z);
	}

	/* Fold in the trailing 1-3 words, then run the final avalanche. */
	if (length > 0) {
		if (length > 2) {
			z += k[2];
		}
		if (length > 1) {
			y += k[1];
		}
		x += k[0];
		final(x, y, z);
	}

	/* An empty tail leaves the salted state as the result. */
	return z;
}
209
210
/*
 * hashword2() -- same as hashword(), but take two seeds and return two 32-bit
 * values. pc and pb must both be nonnull, and *pc and *pb must both be
 * initialized with seeds. If you pass in (*pb)==0, the output (*pc) will be
 * the same as the return value from hashword().
 */
static void __attribute__((unused)) hashword2(const uint32_t *k, size_t length,
		uint32_t *pc, uint32_t *pb)
{
	uint32_t x, y, z;

	/* Both seeds participate in the initial state. */
	x = y = z = 0xdeadbeef + ((uint32_t) (length << 2)) + *pc;
	z += *pb;

	/* Consume the key three words per mixing round. */
	for (; length > 3; length -= 3, k += 3) {
		x += k[0];
		y += k[1];
		z += k[2];
		mix(x, y, z);
	}

	/* Fold in the trailing 1-3 words, then run the final avalanche. */
	if (length > 0) {
		if (length > 2) {
			z += k[2];
		}
		if (length > 1) {
			y += k[1];
		}
		x += k[0];
		final(x, y, z);
	}

	/* Report both 32-bit results through the seed pointers. */
	*pc = z;
	*pb = y;
}
250
/*
 * hashlittle() -- hash a variable-length key into a 32-bit value
 * k : the key (the unaligned variable-length array of bytes)
 * length : the length of the key, counting by bytes
 * initval : can be any 4-byte value
 * Returns a 32-bit value. Every bit of the key affects every bit of
 * the return value. Two keys differing by one or two bits will have
 * totally different hash values.
 *
 * The best hash table sizes are powers of 2. There is no need to do
 * mod a prime (mod is sooo slow!). If you need less than 32 bits,
 * use a bitmask. For example, if you need only 10 bits, do
 * h = (h & hashmask(10));
 * In which case, the hash table should have hashsize(10) elements.
 *
 * If you are hashing n strings (uint8_t **)k, do it like this:
 * for (i=0, h=0; i<n; ++i) h = hashlittle( k[i], len[i], h);
 *
 * By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
 * code any way you wish, private, educational, or commercial. It's free.
 *
 * Use for hash table lookup, or anything where one collision in 2^^32 is
 * acceptable. Do NOT use for cryptographic purposes.
 */
static uint32_t __attribute__((unused)) hashlittle(const void *key,
		size_t length, uint32_t initval)
{
	uint32_t a,b,c;
	union {
		const void *ptr;
		size_t i;
	} u; /* needed for Mac Powerbook G4 */

	/* Set up the internal state */
	a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;

	u.ptr = key;
	/* Pick the widest read granularity the key's alignment allows. */
	if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
		const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */

		/*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
		while (length > 12) {
			a += k[0];
			b += k[1];
			c += k[2];
			mix(a,b,c);
			length -= 12;
			k += 3;
		}

		/*
		 * "k[2]&0xffffff" actually reads beyond the end of the string, but
		 * then masks off the part it's not allowed to read. Because the
		 * string is aligned, the masked-off tail is in the same word as the
		 * rest of the string. Every machine with memory protection I've seen
		 * does it on word boundaries, so is OK with this. But VALGRIND will
		 * still catch it and complain. The masking trick does make the hash
		 * noticeably faster for short strings (like English words).
		 */
#ifndef VALGRIND

		switch (length) {
		case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
		case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
		case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
		case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
		case 8 : b+=k[1]; a+=k[0]; break;
		case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
		case 6 : b+=k[1]&0xffff; a+=k[0]; break;
		case 5 : b+=k[1]&0xff; a+=k[0]; break;
		case 4 : a+=k[0]; break;
		case 3 : a+=k[0]&0xffffff; break;
		case 2 : a+=k[0]&0xffff; break;
		case 1 : a+=k[0]&0xff; break;
		case 0 : return c; /* zero length strings require no mixing */
		}
#else /* make valgrind happy */
		const uint8_t *k8;

		/*
		 * Same result as above, but the tail is read one byte at a
		 * time so no out-of-bounds bytes are ever touched.
		 */
		k8 = (const uint8_t *)k;
		switch (length) {
		case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
		case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
		case 10: c+=((uint32_t)k8[9])<<8; /* fall through */
		case 9 : c+=k8[8]; /* fall through */
		case 8 : b+=k[1]; a+=k[0]; break;
		case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
		case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */
		case 5 : b+=k8[4]; /* fall through */
		case 4 : a+=k[0]; break;
		case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
		case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */
		case 1 : a+=k8[0]; break;
		case 0 : return c;
		}
#endif /* !valgrind */
	} else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
		const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */
		const uint8_t *k8;

		/*--------------- all but last block: aligned reads and different mixing */
		while (length > 12) {
			a += k[0] + (((uint32_t)k[1])<<16);
			b += k[2] + (((uint32_t)k[3])<<16);
			c += k[4] + (((uint32_t)k[5])<<16);
			mix(a,b,c);
			length -= 12;
			k += 6;
		}

		/* Tail: 16-bit reads where possible, single bytes for odd counts. */
		k8 = (const uint8_t *)k;
		switch (length) {
		case 12:
			c+=k[4]+(((uint32_t)k[5])<<16);
			b+=k[2]+(((uint32_t)k[3])<<16);
			a+=k[0]+(((uint32_t)k[1])<<16);
			break;
		case 11:
			c+=((uint32_t)k8[10])<<16; /* fall through */
		case 10:
			c+=k[4];
			b+=k[2]+(((uint32_t)k[3])<<16);
			a+=k[0]+(((uint32_t)k[1])<<16);
			break;
		case 9:
			c+=k8[8]; /* fall through */
		case 8:
			b+=k[2]+(((uint32_t)k[3])<<16);
			a+=k[0]+(((uint32_t)k[1])<<16);
			break;
		case 7:
			b+=((uint32_t)k8[6])<<16; /* fall through */
		case 6:
			b+=k[2];
			a+=k[0]+(((uint32_t)k[1])<<16);
			break;
		case 5:
			b+=k8[4]; /* fall through */
		case 4:
			a+=k[0]+(((uint32_t)k[1])<<16);
			break;
		case 3:
			a+=((uint32_t)k8[2])<<16; /* fall through */
		case 2:
			a+=k[0];
			break;
		case 1:
			a+=k8[0];
			break;
		case 0:
			return c; /* zero length requires no mixing */
		}

	} else { /* need to read the key one byte at a time */
		const uint8_t *k = (const uint8_t *)key;

		/* Portable fallback: build each 32-bit word from four bytes. */
		while (length > 12) {
			a += k[0];
			a += ((uint32_t)k[1])<<8;
			a += ((uint32_t)k[2])<<16;
			a += ((uint32_t)k[3])<<24;
			b += k[4];
			b += ((uint32_t)k[5])<<8;
			b += ((uint32_t)k[6])<<16;
			b += ((uint32_t)k[7])<<24;
			c += k[8];
			c += ((uint32_t)k[9])<<8;
			c += ((uint32_t)k[10])<<16;
			c += ((uint32_t)k[11])<<24;
			mix(a,b,c);
			length -= 12;
			k += 12;
		}

		switch(length) { /* all the case statements fall through */
		case 12: c+=((uint32_t)k[11])<<24; /* fall through */
		case 11: c+=((uint32_t)k[10])<<16; /* fall through */
		case 10: c+=((uint32_t)k[9])<<8; /* fall through */
		case 9: c+=k[8]; /* fall through */
		case 8: b+=((uint32_t)k[7])<<24; /* fall through */
		case 7: b+=((uint32_t)k[6])<<16; /* fall through */
		case 6: b+=((uint32_t)k[5])<<8; /* fall through */
		case 5: b+=k[4]; /* fall through */
		case 4: a+=((uint32_t)k[3])<<24; /* fall through */
		case 3: a+=((uint32_t)k[2])<<16; /* fall through */
		case 2: a+=((uint32_t)k[1])<<8; /* fall through */
		case 1:
			a+=k[0];
			break;
		case 0:
			return c;
		}
	}

	final(a,b,c);
	return c;
}
448
449 LTTNG_HIDDEN
450 unsigned long hash_key_u64(void *_key, unsigned long seed)
451 {
452 union {
453 uint64_t v64;
454 uint32_t v32[2];
455 } v;
456 union {
457 uint64_t v64;
458 uint32_t v32[2];
459 } key;
460
461 v.v64 = (uint64_t) seed;
462 key.v64 = *(uint64_t *) _key;
463 hashword2(key.v32, 2, &v.v32[0], &v.v32[1]);
464 return v.v64;
465 }
466
467 #if (CAA_BITS_PER_LONG == 64)
468 /*
469 * Hash function for number value.
470 */
471 LTTNG_HIDDEN
472 unsigned long hash_key_ulong(void *_key, unsigned long seed)
473 {
474 uint64_t __key = (uint64_t) _key;
475 return (unsigned long) hash_key_u64(&__key, seed);
476 }
477 #else
478 /*
479 * Hash function for number value.
480 */
481 LTTNG_HIDDEN
482 unsigned long hash_key_ulong(void *_key, unsigned long seed)
483 {
484 uint32_t key = (uint32_t) _key;
485
486 return hashword(&key, 1, seed);
487 }
488 #endif /* CAA_BITS_PER_LONG */
489
490 /*
491 * Hash function for string.
492 */
493 LTTNG_HIDDEN
494 unsigned long hash_key_str(void *key, unsigned long seed)
495 {
496 return hashlittle(key, strlen((char *) key), seed);
497 }
498
499 /*
500 * Hash function compare for number value.
501 */
502 LTTNG_HIDDEN
503 int hash_match_key_ulong(void *key1, void *key2)
504 {
505 if (key1 == key2) {
506 return 1;
507 }
508
509 return 0;
510 }
511
512 /*
513 * Hash function compare for number value.
514 */
515 LTTNG_HIDDEN
516 int hash_match_key_u64(void *key1, void *key2)
517 {
518 if (*(uint64_t *) key1 == *(uint64_t *) key2) {
519 return 1;
520 }
521
522 return 0;
523 }
524
525 /*
526 * Hash compare function for string.
527 */
528 LTTNG_HIDDEN
529 int hash_match_key_str(void *key1, void *key2)
530 {
531 if (strcmp(key1, key2) == 0) {
532 return 1;
533 }
534
535 return 0;
536 }
This page took 0.064818 seconds and 3 git commands to generate.