lttng-tools.git: src/common/hashtable/utils.cpp
/*
 * Copyright (C) 2006 Bob Jenkins
 * Copyright (C) 2011 EfficiOS Inc.
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 */

/*
 * These are functions for producing 32-bit hashes for hash table lookup.
 * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() are
 * externally useful functions. Routines to test the hash are included if
 * SELF_TEST is defined. You can use this free for any purpose. It's in the
 * public domain. It has no warranty.
 *
 * You probably want to use hashlittle(). hashlittle() and hashbig() hash byte
 * arrays. hashlittle() is faster than hashbig() on little-endian machines.
 * Intel and AMD are little-endian machines. On second thought, you probably
 * want hashlittle2(), which is identical to hashlittle() except it returns two
 * 32-bit hashes for the price of one. You could implement hashbig2() if you
 * wanted but I haven't bothered here.
 *
 * If you want to find a hash of, say, exactly 7 integers, do
 * a = i1; b = i2; c = i3;
 * mix(a,b,c);
 * a += i4; b += i5; c += i6;
 * mix(a,b,c);
 * a += i7;
 * final(a,b,c);
 * then use c as the hash value (an illustrative sketch of this recipe follows
 * the final() macro below). If you have a variable length array of 4-byte
 * integers to hash, use hashword(). If you have a byte array (like
 * a character string), use hashlittle(). If you have several byte arrays, or
 * a mix of things, see the comments above hashlittle().
 *
 * Why is this so big? I read 12 bytes at a time into 3 4-byte integers, then
 * mix those integers. This is fast (you can do a lot more thorough mixing
 * with 12*3 instructions on 3 integers than you can with 3 instructions on 1
 * byte), but shoehorning those bytes into integers efficiently is messy.
 */

#define _LGPL_SOURCE
#include <stdint.h>	/* defines uint32_t etc */
#include <stdio.h>	/* defines printf for tests */
#include <string.h>
#include <sys/param.h>	/* attempt to define endianness */
#include <time.h>	/* defines time_t for timings in the test */
#include <urcu/compiler.h>

#include "utils.h"
#include <common/compat/endian.h>	/* attempt to define endianness */
#include <common/common.h>
#include <common/hashtable/hashtable.h>

/*
 * My best guess at if you are big-endian or little-endian. This may
 * need adjustment.
 */
#if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \
	__BYTE_ORDER == __LITTLE_ENDIAN) || \
	(defined(i386) || defined(__i386__) || defined(__i486__) || \
	defined(__i586__) || defined(__i686__) || defined(vax) || defined(MIPSEL))
# define HASH_LITTLE_ENDIAN 1
# define HASH_BIG_ENDIAN 0
#elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \
	__BYTE_ORDER == __BIG_ENDIAN) || \
	(defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel))
# define HASH_LITTLE_ENDIAN 0
# define HASH_BIG_ENDIAN 1
#else
# define HASH_LITTLE_ENDIAN 0
# define HASH_BIG_ENDIAN 0
#endif

#define hashsize(n) ((uint32_t)1<<(n))
#define hashmask(n) (hashsize(n)-1)
#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))

/*
 * mix -- mix 3 32-bit values reversibly.
 *
 * This is reversible, so any information in (a,b,c) before mix() is
 * still in (a,b,c) after mix().
 *
 * If four pairs of (a,b,c) inputs are run through mix(), or through
 * mix() in reverse, there are at least 32 bits of the output that
 * are sometimes the same for one pair and different for another pair.
 * This was tested for:
 * * pairs that differed by one bit, by two bits, in any combination
 * of top bits of (a,b,c), or in any combination of bottom bits of
 * (a,b,c).
 * * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
 * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
 * is commonly produced by subtraction) looks like a single 1-bit
 * difference.
 * * the base values were pseudorandom, all zero but one bit set, or
 * all zero plus a counter that starts at zero.
 *
 * Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
 * satisfy this are
 * 4 6 8 16 19 4
 * 9 15 3 18 27 15
 * 14 9 3 7 17 3
 * Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
 * for "differ" defined as + with a one-bit base and a two-bit delta. I
 * used http://burtleburtle.net/bob/hash/avalanche.html to choose
 * the operations, constants, and arrangements of the variables.
 *
 * This does not achieve avalanche. There are input bits of (a,b,c)
 * that fail to affect some output bits of (a,b,c), especially of a. The
 * most thoroughly mixed value is c, but it doesn't really even achieve
 * avalanche in c.
 *
 * This allows some parallelism. Read-after-writes are good at doubling
 * the number of bits affected, so the goal of mixing pulls in the opposite
 * direction as the goal of parallelism. I did what I could. Rotates
 * seem to cost as much as shifts on every machine I could lay my hands
 * on, and rotates are much kinder to the top and bottom bits, so I used
 * rotates.
 */
#define mix(a,b,c) \
{ \
	a -= c; a ^= rot(c, 4); c += b; \
	b -= a; b ^= rot(a, 6); a += c; \
	c -= b; c ^= rot(b, 8); b += a; \
	a -= c; a ^= rot(c,16); c += b; \
	b -= a; b ^= rot(a,19); a += c; \
	c -= b; c ^= rot(b, 4); b += a; \
}

/*
 * final -- final mixing of 3 32-bit values (a,b,c) into c
 *
 * Pairs of (a,b,c) values differing in only a few bits will usually
 * produce values of c that look totally different. This was tested for
 * * pairs that differed by one bit, by two bits, in any combination
 * of top bits of (a,b,c), or in any combination of bottom bits of
 * (a,b,c).
 * * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
 * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
 * is commonly produced by subtraction) looks like a single 1-bit
 * difference.
 * * the base values were pseudorandom, all zero but one bit set, or
 * all zero plus a counter that starts at zero.
 *
 * These constants passed:
 * 14 11 25 16 4 14 24
 * 12 14 25 16 4 14 24
 * and these came close:
 * 4 8 15 26 3 22 24
 * 10 8 15 26 3 22 24
 * 11 8 15 26 3 22 24
 */
#define final(a,b,c) \
{ \
	c ^= b; c -= rot(b,14); \
	a ^= c; a -= rot(c,11); \
	b ^= a; b -= rot(a,25); \
	c ^= b; c -= rot(b,16); \
	a ^= c; a -= rot(c,4); \
	b ^= a; b -= rot(a,14); \
	c ^= b; c -= rot(b,24); \
}
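
/*
 * Illustrative sketch (hypothetical helper, not used elsewhere in this file;
 * the function name is arbitrary): hashing exactly seven 32-bit integers with
 * mix() and final(), following the recipe from the header comment above.
 */
static uint32_t __attribute__((unused)) example_hash_seven_u32(const uint32_t i[7])
{
	uint32_t a = i[0], b = i[1], c = i[2];

	mix(a, b, c);
	a += i[3]; b += i[4]; c += i[5];
	mix(a, b, c);
	a += i[6];
	final(a, b, c);
	return c;	/* c holds the resulting hash value */
}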

/*
 * k - the key, an array of uint32_t values
 * length - the length of the key, in uint32_ts
 * initval - the previous hash, or an arbitrary value
 */
static uint32_t __attribute__((unused)) hashword(const uint32_t *k,
		size_t length, uint32_t initval)
{
	uint32_t a, b, c;

	/* Set up the internal state */
	a = b = c = 0xdeadbeef + (((uint32_t) length) << 2) + initval;

	/*----------------------------------------- handle most of the key */
	while (length > 3) {
		a += k[0];
		b += k[1];
		c += k[2];
		mix(a, b, c);
		length -= 3;
		k += 3;
	}

	/*----------------------------------- handle the last 3 uint32_t's */
	switch (length) {	/* all the case statements fall through */
	case 3: c += k[2];
	case 2: b += k[1];
	case 1: a += k[0];
		final(a, b, c);
	case 0:	/* case 0: nothing left to add */
		break;
	}
	/*---------------------------------------------- report the result */
	return c;
}
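
/*
 * Illustrative sketch (hypothetical helper, not used elsewhere): hashing a
 * variable-length uint32_t array with hashword(), and chaining a second
 * array into the same hash by passing the previous result as initval.
 */
static uint32_t __attribute__((unused)) example_hashword_chain(
		const uint32_t *first, size_t first_len,
		const uint32_t *second, size_t second_len)
{
	uint32_t h;

	h = hashword(first, first_len, 0);	/* arbitrary initial seed */
	h = hashword(second, second_len, h);	/* chain via initval */
	return h;
}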

/*
 * hashword2() -- same as hashword(), but take two seeds and return two 32-bit
 * values. pc and pb must both be nonnull, and *pc and *pb must both be
 * initialized with seeds. If you pass in (*pb)==0, the output (*pc) will be
 * the same as the return value from hashword().
 */
static void __attribute__((unused)) hashword2(const uint32_t *k, size_t length,
		uint32_t *pc, uint32_t *pb)
{
	uint32_t a, b, c;

	/* Set up the internal state */
	a = b = c = 0xdeadbeef + ((uint32_t) (length << 2)) + *pc;
	c += *pb;

	while (length > 3) {
		a += k[0];
		b += k[1];
		c += k[2];
		mix(a, b, c);
		length -= 3;
		k += 3;
	}

	switch (length) {
	case 3:
		c += k[2];
	case 2:
		b += k[1];
	case 1:
		a += k[0];
		final(a, b, c);
	case 0:	/* case 0: nothing left to add */
		break;
	}

	*pc = c;
	*pb = b;
}
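
/*
 * Illustrative sketch of the property documented above (hypothetical helper,
 * not used elsewhere): when *pb is 0 on input, hashword2() leaves in *pc the
 * same value that hashword() returns for the same key and seed.
 */
static int __attribute__((unused)) example_hashword2_matches_hashword(
		const uint32_t *k, size_t length, uint32_t seed)
{
	uint32_t pc = seed, pb = 0;

	hashword2(k, length, &pc, &pb);
	return pc == hashword(k, length, seed);	/* expected to be 1 */
}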

/*
 * hashlittle() -- hash a variable-length key into a 32-bit value
 * k : the key (the unaligned variable-length array of bytes)
 * length : the length of the key, counting by bytes
 * initval : can be any 4-byte value
 * Returns a 32-bit value. Every bit of the key affects every bit of
 * the return value. Two keys differing by one or two bits will have
 * totally different hash values.
 *
 * The best hash table sizes are powers of 2. There is no need to do
 * mod a prime (mod is sooo slow!). If you need less than 32 bits,
 * use a bitmask. For example, if you need only 10 bits, do
 * h = (h & hashmask(10));
 * In which case, the hash table should have hashsize(10) elements.
 *
 * If you are hashing n strings (uint8_t **)k, do it like this:
 * for (i=0, h=0; i<n; ++i) h = hashlittle( k[i], len[i], h);
 *
 * By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
 * code any way you wish, private, educational, or commercial. It's free.
 *
 * Use for hash table lookup, or anything where one collision in 2^^32 is
 * acceptable. Do NOT use for cryptographic purposes.
 */
LTTNG_NO_SANITIZE_ADDRESS
__attribute__((unused))
static uint32_t hashlittle(const void *key,
		size_t length, uint32_t initval)
{
	uint32_t a,b,c;
	union {
		const void *ptr;
		size_t i;
	} u;	/* needed for Mac Powerbook G4 */

	/* Set up the internal state */
	a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;

	u.ptr = key;
	if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
		const uint32_t *k = (const uint32_t *)key;	/* read 32-bit chunks */

		/*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
		while (length > 12) {
			a += k[0];
			b += k[1];
			c += k[2];
			mix(a,b,c);
			length -= 12;
			k += 3;
		}

		/*
		 * "k[2]&0xffffff" actually reads beyond the end of the string, but
		 * then masks off the part it's not allowed to read. Because the
		 * string is aligned, the masked-off tail is in the same word as the
		 * rest of the string. Every machine with memory protection I've seen
		 * does it on word boundaries, so is OK with this. But VALGRIND will
		 * still catch it and complain. The masking trick does make the hash
		 * noticeably faster for short strings (like English words).
		 */
#ifndef VALGRIND

		switch (length) {
		case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
		case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
		case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
		case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
		case 8 : b+=k[1]; a+=k[0]; break;
		case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
		case 6 : b+=k[1]&0xffff; a+=k[0]; break;
		case 5 : b+=k[1]&0xff; a+=k[0]; break;
		case 4 : a+=k[0]; break;
		case 3 : a+=k[0]&0xffffff; break;
		case 2 : a+=k[0]&0xffff; break;
		case 1 : a+=k[0]&0xff; break;
		case 0 : return c;	/* zero length strings require no mixing */
		}
#else /* make valgrind happy */
		const uint8_t *k8;

		k8 = (const uint8_t *)k;
		switch (length) {
		case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
		case 11: c+=((uint32_t)k8[10])<<16;	/* fall through */
		case 10: c+=((uint32_t)k8[9])<<8;	/* fall through */
		case 9 : c+=k8[8];	/* fall through */
		case 8 : b+=k[1]; a+=k[0]; break;
		case 7 : b+=((uint32_t)k8[6])<<16;	/* fall through */
		case 6 : b+=((uint32_t)k8[5])<<8;	/* fall through */
		case 5 : b+=k8[4];	/* fall through */
		case 4 : a+=k[0]; break;
		case 3 : a+=((uint32_t)k8[2])<<16;	/* fall through */
		case 2 : a+=((uint32_t)k8[1])<<8;	/* fall through */
		case 1 : a+=k8[0]; break;
		case 0 : return c;
		}
#endif /* !valgrind */
	} else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
		const uint16_t *k = (const uint16_t *)key;	/* read 16-bit chunks */
		const uint8_t *k8;

		/*--------------- all but last block: aligned reads and different mixing */
		while (length > 12) {
			a += k[0] + (((uint32_t)k[1])<<16);
			b += k[2] + (((uint32_t)k[3])<<16);
			c += k[4] + (((uint32_t)k[5])<<16);
			mix(a,b,c);
			length -= 12;
			k += 6;
		}

		k8 = (const uint8_t *)k;
		switch (length) {
		case 12:
			c+=k[4]+(((uint32_t)k[5])<<16);
			b+=k[2]+(((uint32_t)k[3])<<16);
			a+=k[0]+(((uint32_t)k[1])<<16);
			break;
		case 11:
			c+=((uint32_t)k8[10])<<16;	/* fall through */
		case 10:
			c+=k[4];
			b+=k[2]+(((uint32_t)k[3])<<16);
			a+=k[0]+(((uint32_t)k[1])<<16);
			break;
		case 9:
			c+=k8[8];	/* fall through */
		case 8:
			b+=k[2]+(((uint32_t)k[3])<<16);
			a+=k[0]+(((uint32_t)k[1])<<16);
			break;
		case 7:
			b+=((uint32_t)k8[6])<<16;	/* fall through */
		case 6:
			b+=k[2];
			a+=k[0]+(((uint32_t)k[1])<<16);
			break;
		case 5:
			b+=k8[4];	/* fall through */
		case 4:
			a+=k[0]+(((uint32_t)k[1])<<16);
			break;
		case 3:
			a+=((uint32_t)k8[2])<<16;	/* fall through */
		case 2:
			a+=k[0];
			break;
		case 1:
			a+=k8[0];
			break;
		case 0:
			return c;	/* zero length requires no mixing */
		}

	} else {	/* need to read the key one byte at a time */
		const uint8_t *k = (const uint8_t *)key;

		while (length > 12) {
			a += k[0];
			a += ((uint32_t)k[1])<<8;
			a += ((uint32_t)k[2])<<16;
			a += ((uint32_t)k[3])<<24;
			b += k[4];
			b += ((uint32_t)k[5])<<8;
			b += ((uint32_t)k[6])<<16;
			b += ((uint32_t)k[7])<<24;
			c += k[8];
			c += ((uint32_t)k[9])<<8;
			c += ((uint32_t)k[10])<<16;
			c += ((uint32_t)k[11])<<24;
			mix(a,b,c);
			length -= 12;
			k += 12;
		}

		switch(length) {	/* all the case statements fall through */
		case 12: c+=((uint32_t)k[11])<<24;
		case 11: c+=((uint32_t)k[10])<<16;
		case 10: c+=((uint32_t)k[9])<<8;
		case 9: c+=k[8];
		case 8: b+=((uint32_t)k[7])<<24;
		case 7: b+=((uint32_t)k[6])<<16;
		case 6: b+=((uint32_t)k[5])<<8;
		case 5: b+=k[4];
		case 4: a+=((uint32_t)k[3])<<24;
		case 3: a+=((uint32_t)k[2])<<16;
		case 2: a+=((uint32_t)k[1])<<8;
		case 1:
			a+=k[0];
			break;
		case 0:
			return c;
		}
	}

	final(a,b,c);
	return c;
}
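
/*
 * Illustrative sketch of the usage patterns described above hashlittle()
 * (hypothetical helper, not used elsewhere): chaining several byte arrays
 * through the initval parameter, then keeping only 10 bits of the result
 * with hashmask() for a table of hashsize(10) buckets.
 */
static uint32_t __attribute__((unused)) example_hashlittle_multi(
		const char **keys, const size_t *lengths, size_t n)
{
	uint32_t h = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		h = hashlittle(keys[i], lengths[i], h);
	}

	return h & hashmask(10);	/* index into a hashsize(10) table */
}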

unsigned long hash_key_u64(const void *_key, unsigned long seed)
{
	union {
		uint64_t v64;
		uint32_t v32[2];
	} v;
	union {
		uint64_t v64;
		uint32_t v32[2];
	} key;

	v.v64 = (uint64_t) seed;
	key.v64 = *(const uint64_t *) _key;
	hashword2(key.v32, 2, &v.v32[0], &v.v32[1]);
	return v.v64;
}

#if (CAA_BITS_PER_LONG == 64)
/*
 * Hash function for number value.
 * Pass the value itself as the key, not its address.
 */
unsigned long hash_key_ulong(const void *_key, unsigned long seed)
{
	uint64_t __key = (uint64_t) _key;
	return (unsigned long) hash_key_u64(&__key, seed);
}
#else
/*
 * Hash function for number value.
 * Pass the value itself as the key, not its address.
 */
unsigned long hash_key_ulong(const void *_key, unsigned long seed)
{
	uint32_t key = (uint32_t) _key;

	return hashword(&key, 1, seed);
}
#endif /* CAA_BITS_PER_LONG */
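
/*
 * Illustrative sketch: as the comments above note, hash_key_ulong() takes the
 * numeric value itself, cast to a pointer, rather than the address of a
 * variable. The helper name and values are arbitrary and not used elsewhere.
 */
static unsigned long __attribute__((unused)) example_hash_ulong_value(
		unsigned long value, unsigned long seed)
{
	/* The value travels through the pointer argument, not through memory. */
	return hash_key_ulong((void *) value, seed);
}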

/*
 * Hash function for string.
 */
unsigned long hash_key_str(const void *key, unsigned long seed)
{
	return hashlittle(key, strlen((const char *) key), seed);
}

/*
 * Hash function for two uint64_t.
 */
unsigned long hash_key_two_u64(const void *key, unsigned long seed)
{
	const struct lttng_ht_two_u64 *k =
			(const struct lttng_ht_two_u64 *) key;

	return hash_key_u64(&k->key1, seed) ^ hash_key_u64(&k->key2, seed);
}

/*
 * Match function for a number value passed as the key pointer itself.
 */
int hash_match_key_ulong(const void *key1, const void *key2)
{
	if (key1 == key2) {
		return 1;
	}

	return 0;
}

/*
 * Match function for a uint64_t value.
 */
int hash_match_key_u64(const void *key1, const void *key2)
{
	if (*(const uint64_t *) key1 == *(const uint64_t *) key2) {
		return 1;
	}

	return 0;
}

/*
 * Match function for a string key.
 */
int hash_match_key_str(const void *key1, const void *key2)
{
	if (strcmp((const char *) key1, (const char *) key2) == 0) {
		return 1;
	}

	return 0;
}

/*
 * Match function for two uint64_t keys.
 */
int hash_match_key_two_u64(const void *key1, const void *key2)
{
	const struct lttng_ht_two_u64 *k1 =
			(const struct lttng_ht_two_u64 *) key1;
	const struct lttng_ht_two_u64 *k2 =
			(const struct lttng_ht_two_u64 *) key2;

	if (hash_match_key_u64(&k1->key1, &k2->key1) &&
			hash_match_key_u64(&k1->key2, &k2->key2)) {
		return 1;
	}

	return 0;
}
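
/*
 * Illustrative sketch (hypothetical helper, not used elsewhere): how a caller
 * might pair the two-u64 hash and match functions, hashing a composite key to
 * pick a bucket and then confirming a candidate with the exact-match helper.
 */
static int __attribute__((unused)) example_two_u64_lookup(
		const struct lttng_ht_two_u64 *needle,
		const struct lttng_ht_two_u64 *candidate,
		unsigned long seed)
{
	unsigned long hash = hash_key_two_u64(needle, seed);

	(void) hash;	/* bucket selection would use this value */
	return hash_match_key_two_u64(needle, candidate);
}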