/*
 * Copyright (C) - Bob Jenkins, May 2006
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 * Copyright (C) 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/*
 * These are functions for producing 32-bit hashes for hash table lookup.
 * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() are
 * externally useful functions. Routines to test the hash are included if
 * SELF_TEST is defined. You can use this free for any purpose. It's in the
 * public domain. It has no warranty.
 *
 * You probably want to use hashlittle(). hashlittle() and hashbig() hash byte
 * arrays. hashlittle() is faster than hashbig() on little-endian machines.
 * Intel and AMD are little-endian machines. On second thought, you probably
 * want hashlittle2(), which is identical to hashlittle() except it returns two
 * 32-bit hashes for the price of one. You could implement hashbig2() if you
 * wanted but I haven't bothered here.
 *
 * If you want to find a hash of, say, exactly 7 integers, do
 *   a = i1; b = i2; c = i3;
 *   mix(a, b, c);
 *   a += i4; b += i5; c += i6;
 *   mix(a, b, c);
 *   a += i7;
 *   final(a, b, c);
 * then use c as the hash value. If you have a variable length array of
 * 4-byte integers to hash, use hashword(). If you have a byte array (like
 * a character string), use hashlittle(). If you have several byte arrays, or
 * a mix of things, see the comments above hashlittle().
 *
 * Why is this so big? I read 12 bytes at a time into 3 4-byte integers, then
 * mix those integers. This is fast (you can do a lot more thorough mixing
 * with 12*3 instructions on 3 integers than you can with 3 instructions on 1
 * byte), but shoehorning those bytes into integers efficiently is messy.
 */
#define _GNU_SOURCE
#include <assert.h>
#include <stdint.h>	/* defines uint32_t etc. */
#include <stdio.h>	/* defines printf for tests */
#include <string.h>
#include <sys/param.h>	/* attempt to define endianness */
#include <time.h>	/* defines time_t for timings in the test */
#include <urcu/compiler.h>

#include "utils.h"
#include <common/compat/endian.h>	/* attempt to define endianness */
#include <common/common.h>
#include <common/hashtable/hashtable.h>

/*
 * My best guess at if you are big-endian or little-endian. This may
 * need adjustment.
 */
#if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \
	__BYTE_ORDER == __LITTLE_ENDIAN) || \
	(defined(i386) || defined(__i386__) || defined(__i486__) || \
	defined(__i586__) || defined(__i686__) || defined(vax) || defined(MIPSEL))
# define HASH_LITTLE_ENDIAN 1
# define HASH_BIG_ENDIAN 0
#elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \
	__BYTE_ORDER == __BIG_ENDIAN) || \
	(defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel))
# define HASH_LITTLE_ENDIAN 0
# define HASH_BIG_ENDIAN 1
#else
# define HASH_LITTLE_ENDIAN 0
# define HASH_BIG_ENDIAN 0
#endif

#define hashsize(n) ((uint32_t)1<<(n))
#define hashmask(n) (hashsize(n)-1)
#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))

/*
 * mix -- mix 3 32-bit values reversibly.
 *
 * This is reversible, so any information in (a,b,c) before mix() is
 * still in (a,b,c) after mix().
 *
 * If four pairs of (a,b,c) inputs are run through mix(), or through
 * mix() in reverse, there are at least 32 bits of the output that
 * are sometimes the same for one pair and different for another pair.
 * This was tested for:
 * * pairs that differed by one bit, by two bits, in any combination
 *   of top bits of (a,b,c), or in any combination of bottom bits of
 *   (a,b,c).
 * * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
 *   the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
 *   is commonly produced by subtraction) looks like a single 1-bit
 *   difference.
 * * the base values were pseudorandom, all zero but one bit set, or
 *   all zero plus a counter that starts at zero.
 *
 * Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
 * satisfy this are
 *   4 6 8 16 19 4
 *   9 15 3 18 27 15
 *   14 9 3 7 17 3
 * Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
 * for "differ" defined as + with a one-bit base and a two-bit delta. I
 * used http://burtleburtle.net/bob/hash/avalanche.html to choose
 * the operations, constants, and arrangements of the variables.
 *
 * This does not achieve avalanche. There are input bits of (a,b,c)
 * that fail to affect some output bits of (a,b,c), especially of a. The
 * most thoroughly mixed value is c, but it doesn't really even achieve
 * avalanche in c.
 *
 * This allows some parallelism. Read-after-writes are good at doubling
 * the number of bits affected, so the goal of mixing pulls in the opposite
 * direction as the goal of parallelism. I did what I could. Rotates
 * seem to cost as much as shifts on every machine I could lay my hands
 * on, and rotates are much kinder to the top and bottom bits, so I used
 * rotates.
 */
#define mix(a,b,c) \
{ \
	a -= c; a ^= rot(c, 4); c += b; \
	b -= a; b ^= rot(a, 6); a += c; \
	c -= b; c ^= rot(b, 8); b += a; \
	a -= c; a ^= rot(c,16); c += b; \
	b -= a; b ^= rot(a,19); a += c; \
	c -= b; c ^= rot(b, 4); b += a; \
}

/*
 * final -- final mixing of 3 32-bit values (a,b,c) into c
 *
 * Pairs of (a,b,c) values differing in only a few bits will usually
 * produce values of c that look totally different. This was tested for
 * * pairs that differed by one bit, by two bits, in any combination
 *   of top bits of (a,b,c), or in any combination of bottom bits of
 *   (a,b,c).
 * * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
 *   the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
 *   is commonly produced by subtraction) looks like a single 1-bit
 *   difference.
 * * the base values were pseudorandom, all zero but one bit set, or
 *   all zero plus a counter that starts at zero.
 *
 * These constants passed:
 *   14 11 25 16 4 14 24
 *   12 14 25 16 4 14 24
 * and these came close:
 *   4 8 15 26 3 22 24
 *   10 8 15 26 3 22 24
 *   11 8 15 26 3 22 24
 */
#define final(a,b,c) \
{ \
	c ^= b; c -= rot(b,14); \
	a ^= c; a -= rot(c,11); \
	b ^= a; b -= rot(a,25); \
	c ^= b; c -= rot(b,16); \
	a ^= c; a -= rot(c,4); \
	b ^= a; b -= rot(a,14); \
	c ^= b; c -= rot(b,24); \
}

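/*
 * Illustrative sketch (not part of the upstream code): the "exactly 7
 * integers" recipe from the header comment, written out with mix() and
 * final(). The helper name example_hash_seven_u32 is hypothetical.
 */
static uint32_t __attribute__((unused)) example_hash_seven_u32(const uint32_t i[7])
{
	uint32_t a = i[0], b = i[1], c = i[2];

	mix(a, b, c);
	a += i[3]; b += i[4]; c += i[5];
	mix(a, b, c);
	a += i[6];
	final(a, b, c);
	/* c now holds the hash value, as described in the header comment. */
	return c;
}
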
/*
 * k       - the key, an array of uint32_t values
 * length  - the length of the key, in uint32_ts
 * initval - the previous hash, or an arbitrary value
 */
static uint32_t __attribute__((unused)) hashword(const uint32_t *k,
		size_t length, uint32_t initval)
{
	uint32_t a, b, c;

	/* Set up the internal state */
	a = b = c = 0xdeadbeef + (((uint32_t) length) << 2) + initval;

	/*----------------------------------------- handle most of the key */
	while (length > 3) {
		a += k[0];
		b += k[1];
		c += k[2];
		mix(a, b, c);
		length -= 3;
		k += 3;
	}

	/*----------------------------------- handle the last 3 uint32_t's */
	switch (length) {	/* all the case statements fall through */
	case 3: c += k[2];
	case 2: b += k[1];
	case 1: a += k[0];
		final(a, b, c);
	case 0:			/* case 0: nothing left to add */
		break;
	}
	/*---------------------------------------------- report the result */
	return c;
}

/*
 * hashword2() -- same as hashword(), but take two seeds and return two 32-bit
 * values. pc and pb must both be nonnull, and *pc and *pb must both be
 * initialized with seeds. If you pass in (*pb)==0, the output (*pc) will be
 * the same as the return value from hashword().
 */
static void __attribute__((unused)) hashword2(const uint32_t *k, size_t length,
		uint32_t *pc, uint32_t *pb)
{
	uint32_t a, b, c;

	/* Set up the internal state */
	a = b = c = 0xdeadbeef + ((uint32_t) (length << 2)) + *pc;
	c += *pb;

	while (length > 3) {
		a += k[0];
		b += k[1];
		c += k[2];
		mix(a, b, c);
		length -= 3;
		k += 3;
	}

	switch (length) {	/* all the case statements fall through */
	case 3:
		c += k[2];
	case 2:
		b += k[1];
	case 1:
		a += k[0];
		final(a, b, c);
	case 0:			/* case 0: nothing left to add */
		break;
	}

	*pc = c;
	*pb = b;
}
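
/*
 * Illustrative sketch (not part of the upstream code): checking the property
 * documented above -- when the second seed (*pb) is 0, the first output of
 * hashword2() matches the return value of hashword(). The helper name and
 * the sample key/seed values are hypothetical.
 */
static void __attribute__((unused)) example_check_hashword2(void)
{
	uint32_t k[4] = { 1, 2, 3, 4 };
	uint32_t pc = 0x42, pb = 0;

	hashword2(k, 4, &pc, &pb);
	assert(pc == hashword(k, 4, 0x42));
}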

/*
 * hashlittle() -- hash a variable-length key into a 32-bit value
 *   k       : the key (the unaligned variable-length array of bytes)
 *   length  : the length of the key, counting by bytes
 *   initval : can be any 4-byte value
 * Returns a 32-bit value. Every bit of the key affects every bit of
 * the return value. Two keys differing by one or two bits will have
 * totally different hash values.
 *
 * The best hash table sizes are powers of 2. There is no need to do
 * mod a prime (mod is sooo slow!). If you need less than 32 bits,
 * use a bitmask. For example, if you need only 10 bits, do
 *   h = (h & hashmask(10));
 * In which case, the hash table should have hashsize(10) elements.
 *
 * If you are hashing n strings (uint8_t **)k, do it like this:
 *   for (i = 0, h = 0; i < n; ++i) h = hashlittle(k[i], len[i], h);
 *
 * By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
 * code any way you wish, private, educational, or commercial. It's free.
 *
 * Use for hash table lookup, or anything where one collision in 2^^32 is
 * acceptable. Do NOT use for cryptographic purposes.
 */
static uint32_t __attribute__((unused)) hashlittle(const void *key,
		size_t length, uint32_t initval)
{
	uint32_t a, b, c;
	union {
		const void *ptr;
		size_t i;
	} u;	/* needed for Mac Powerbook G4 */

	/* Set up the internal state */
	a = b = c = 0xdeadbeef + ((uint32_t) length) + initval;

	u.ptr = key;
	if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
		const uint32_t *k = (const uint32_t *) key;	/* read 32-bit chunks */

		/*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
		while (length > 12) {
			a += k[0];
			b += k[1];
			c += k[2];
			mix(a, b, c);
			length -= 12;
			k += 3;
		}

		/*
		 * "k[2]&0xffffff" actually reads beyond the end of the string, but
		 * then masks off the part it's not allowed to read. Because the
		 * string is aligned, the masked-off tail is in the same word as the
		 * rest of the string. Every machine with memory protection I've seen
		 * does it on word boundaries, so is OK with this. But VALGRIND will
		 * still catch it and complain. The masking trick does make the hash
		 * noticeably faster for short strings (like English words).
		 */
#ifndef VALGRIND

		switch (length) {
		case 12: c += k[2]; b += k[1]; a += k[0]; break;
		case 11: c += k[2] & 0xffffff; b += k[1]; a += k[0]; break;
		case 10: c += k[2] & 0xffff; b += k[1]; a += k[0]; break;
		case 9 : c += k[2] & 0xff; b += k[1]; a += k[0]; break;
		case 8 : b += k[1]; a += k[0]; break;
		case 7 : b += k[1] & 0xffffff; a += k[0]; break;
		case 6 : b += k[1] & 0xffff; a += k[0]; break;
		case 5 : b += k[1] & 0xff; a += k[0]; break;
		case 4 : a += k[0]; break;
		case 3 : a += k[0] & 0xffffff; break;
		case 2 : a += k[0] & 0xffff; break;
		case 1 : a += k[0] & 0xff; break;
		case 0 : return c;	/* zero length strings require no mixing */
		}
#else /* make valgrind happy */
		const uint8_t *k8;

		k8 = (const uint8_t *) k;
		switch (length) {
		case 12: c += k[2]; b += k[1]; a += k[0]; break;
		case 11: c += ((uint32_t) k8[10]) << 16;	/* fall through */
		case 10: c += ((uint32_t) k8[9]) << 8;		/* fall through */
		case 9 : c += k8[8];				/* fall through */
		case 8 : b += k[1]; a += k[0]; break;
		case 7 : b += ((uint32_t) k8[6]) << 16;		/* fall through */
		case 6 : b += ((uint32_t) k8[5]) << 8;		/* fall through */
		case 5 : b += k8[4];				/* fall through */
		case 4 : a += k[0]; break;
		case 3 : a += ((uint32_t) k8[2]) << 16;		/* fall through */
		case 2 : a += ((uint32_t) k8[1]) << 8;		/* fall through */
		case 1 : a += k8[0]; break;
		case 0 : return c;
		}
#endif /* !valgrind */
	} else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
		const uint16_t *k = (const uint16_t *) key;	/* read 16-bit chunks */
		const uint8_t *k8;

		/*--------------- all but last block: aligned reads and different mixing */
		while (length > 12) {
			a += k[0] + (((uint32_t) k[1]) << 16);
			b += k[2] + (((uint32_t) k[3]) << 16);
			c += k[4] + (((uint32_t) k[5]) << 16);
			mix(a, b, c);
			length -= 12;
			k += 6;
		}

		k8 = (const uint8_t *) k;
		switch (length) {
		case 12:
			c += k[4] + (((uint32_t) k[5]) << 16);
			b += k[2] + (((uint32_t) k[3]) << 16);
			a += k[0] + (((uint32_t) k[1]) << 16);
			break;
		case 11:
			c += ((uint32_t) k8[10]) << 16;	/* fall through */
		case 10:
			c += k[4];
			b += k[2] + (((uint32_t) k[3]) << 16);
			a += k[0] + (((uint32_t) k[1]) << 16);
			break;
		case 9:
			c += k8[8];	/* fall through */
		case 8:
			b += k[2] + (((uint32_t) k[3]) << 16);
			a += k[0] + (((uint32_t) k[1]) << 16);
			break;
		case 7:
			b += ((uint32_t) k8[6]) << 16;	/* fall through */
		case 6:
			b += k[2];
			a += k[0] + (((uint32_t) k[1]) << 16);
			break;
		case 5:
			b += k8[4];	/* fall through */
		case 4:
			a += k[0] + (((uint32_t) k[1]) << 16);
			break;
		case 3:
			a += ((uint32_t) k8[2]) << 16;	/* fall through */
		case 2:
			a += k[0];
			break;
		case 1:
			a += k8[0];
			break;
		case 0:
			return c;	/* zero length requires no mixing */
		}

	} else {	/* need to read the key one byte at a time */
		const uint8_t *k = (const uint8_t *) key;

		while (length > 12) {
			a += k[0];
			a += ((uint32_t) k[1]) << 8;
			a += ((uint32_t) k[2]) << 16;
			a += ((uint32_t) k[3]) << 24;
			b += k[4];
			b += ((uint32_t) k[5]) << 8;
			b += ((uint32_t) k[6]) << 16;
			b += ((uint32_t) k[7]) << 24;
			c += k[8];
			c += ((uint32_t) k[9]) << 8;
			c += ((uint32_t) k[10]) << 16;
			c += ((uint32_t) k[11]) << 24;
			mix(a, b, c);
			length -= 12;
			k += 12;
		}

		switch (length) {	/* all the case statements fall through */
		case 12: c += ((uint32_t) k[11]) << 24;
		case 11: c += ((uint32_t) k[10]) << 16;
		case 10: c += ((uint32_t) k[9]) << 8;
		case 9: c += k[8];
		case 8: b += ((uint32_t) k[7]) << 24;
		case 7: b += ((uint32_t) k[6]) << 16;
		case 6: b += ((uint32_t) k[5]) << 8;
		case 5: b += k[4];
		case 4: a += ((uint32_t) k[3]) << 24;
		case 3: a += ((uint32_t) k[2]) << 16;
		case 2: a += ((uint32_t) k[1]) << 8;
		case 1:
			a += k[0];
			break;
		case 0:
			return c;
		}
	}

	final(a, b, c);
	return c;
}
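
/*
 * Illustrative sketch (not part of the upstream code): chaining hashlittle()
 * over several byte arrays and reducing the result to a 10-bit bucket index
 * with hashmask(), as suggested in the comment above hashlittle(). The helper
 * name example_bucket_for_strings is hypothetical.
 */
static uint32_t __attribute__((unused)) example_bucket_for_strings(
		const char **strings, size_t n)
{
	uint32_t h = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		/* Feed the previous hash back in as the seed for the next string. */
		h = hashlittle(strings[i], strlen(strings[i]), h);
	}
	/* A table of hashsize(10) buckets only needs the low 10 bits. */
	return h & hashmask(10);
}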

/*
 * Hash function for a 64-bit value.
 */
LTTNG_HIDDEN
unsigned long hash_key_u64(void *_key, unsigned long seed)
{
	union {
		uint64_t v64;
		uint32_t v32[2];
	} v;
	union {
		uint64_t v64;
		uint32_t v32[2];
	} key;

	v.v64 = (uint64_t) seed;
	key.v64 = *(uint64_t *) _key;
	hashword2(key.v32, 2, &v.v32[0], &v.v32[1]);
	return v.v64;
}

#if (CAA_BITS_PER_LONG == 64)
/*
 * Hash function for number value.
 */
LTTNG_HIDDEN
unsigned long hash_key_ulong(void *_key, unsigned long seed)
{
	uint64_t __key = (uint64_t) _key;

	return (unsigned long) hash_key_u64(&__key, seed);
}
#else
/*
 * Hash function for number value.
 */
LTTNG_HIDDEN
unsigned long hash_key_ulong(void *_key, unsigned long seed)
{
	uint32_t key = (uint32_t) _key;

	return hashword(&key, 1, seed);
}
#endif /* CAA_BITS_PER_LONG */

/*
 * Hash function for string.
 */
LTTNG_HIDDEN
unsigned long hash_key_str(void *key, unsigned long seed)
{
	return hashlittle(key, strlen((char *) key), seed);
}

/*
 * Hash function for two uint64_t.
 */
LTTNG_HIDDEN
unsigned long hash_key_two_u64(void *key, unsigned long seed)
{
	struct lttng_ht_two_u64 *k = (struct lttng_ht_two_u64 *) key;

	return hash_key_u64(&k->key1, seed) ^ hash_key_u64(&k->key2, seed);
}

/*
 * Hash table match function for an unsigned long (number) key.
 */
LTTNG_HIDDEN
int hash_match_key_ulong(void *key1, void *key2)
{
	if (key1 == key2) {
		return 1;
	}

	return 0;
}

/*
 * Hash table match function for a uint64_t key.
 */
LTTNG_HIDDEN
int hash_match_key_u64(void *key1, void *key2)
{
	if (*(uint64_t *) key1 == *(uint64_t *) key2) {
		return 1;
	}

	return 0;
}

/*
 * Hash table match function for a string key.
 */
LTTNG_HIDDEN
int hash_match_key_str(void *key1, void *key2)
{
	if (strcmp(key1, key2) == 0) {
		return 1;
	}

	return 0;
}

/*
 * Hash table match function for a two-uint64_t key.
 */
LTTNG_HIDDEN
int hash_match_key_two_u64(void *key1, void *key2)
{
	struct lttng_ht_two_u64 *k1 = (struct lttng_ht_two_u64 *) key1;
	struct lttng_ht_two_u64 *k2 = (struct lttng_ht_two_u64 *) key2;

	if (hash_match_key_u64(&k1->key1, &k2->key1) &&
			hash_match_key_u64(&k1->key2, &k2->key2)) {
		return 1;
	}

	return 0;
}
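
/*
 * Illustrative sketch (not part of the upstream code): how a hash function
 * and its match function pair up during a lookup -- the hash narrows the
 * search to a bucket, the match function confirms true key equality in case
 * of a collision. The helper name and the seed parameter are hypothetical.
 */
static int __attribute__((unused)) example_two_u64_is_same_key(
		struct lttng_ht_two_u64 *a, struct lttng_ht_two_u64 *b,
		unsigned long seed)
{
	if (hash_key_two_u64(a, seed) != hash_key_two_u64(b, seed)) {
		/* Different hashes: the keys cannot be equal. */
		return 0;
	}
	/* Equal hashes may still be a collision; confirm with the match function. */
	return hash_match_key_two_u64(a, b);
}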