/* lttng-tools: src/common/hashtable/utils.c */
1 /*
2 * Copyright (C) 2006 Bob Jenkins
3 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
4 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * SPDX-License-Identifier: GPL-2.0-only
7 *
8 */
9
10 /*
11 * These are functions for producing 32-bit hashes for hash table lookup.
12 * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() are
13 * externally useful functions. Routines to test the hash are included if
14 * SELF_TEST is defined. You can use this free for any purpose. It's in the
15 * public domain. It has no warranty.
16 *
17 * You probably want to use hashlittle(). hashlittle() and hashbig() hash byte
18 * arrays. hashlittle() is is faster than hashbig() on little-endian machines.
19 * Intel and AMD are little-endian machines. On second thought, you probably
20 * want hashlittle2(), which is identical to hashlittle() except it returns two
21 * 32-bit hashes for the price of one. You could implement hashbig2() if you
22 * wanted but I haven't bothered here.
23 *
24 * If you want to find a hash of, say, exactly 7 integers, do
25 * a = i1; b = i2; c = i3;
26 * mix(a,b,c);
27 * a += i4; b += i5; c += i6;
28 * mix(a,b,c);
29 * a += i7;
30 * final(a,b,c);
31 * then use c as the hash value. If you have a variable length array of
32 * 4-byte integers to hash, use hashword(). If you have a byte array (like
33 * a character string), use hashlittle(). If you have several byte arrays, or
34 * a mix of things, see the comments above hashlittle().
35 *
36 * Why is this so big? I read 12 bytes at a time into 3 4-byte integers, then
37 * mix those integers. This is fast (you can do a lot more thorough mixing
38 * with 12*3 instructions on 3 integers than you can with 3 instructions on 1
39 * byte), but shoehorning those bytes into integers efficiently is messy.
40 */
41
42 #define _LGPL_SOURCE
43 #include <assert.h>
44 #include <stdint.h> /* defines uint32_t etc */
45 #include <stdio.h> /* defines printf for tests */
46 #include <string.h>
47 #include <sys/param.h> /* attempt to define endianness */
48 #include <time.h> /* defines time_t for timings in the test */
49 #include <urcu/compiler.h>
50
51 #include "utils.h"
52 #include <common/compat/endian.h> /* attempt to define endianness */
53 #include <common/common.h>
54 #include <common/hashtable/hashtable.h>
55
56 /*
57 * My best guess at if you are big-endian or little-endian. This may
58 * need adjustment.
59 */
60 #if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \
61 __BYTE_ORDER == __LITTLE_ENDIAN) || \
62 (defined(i386) || defined(__i386__) || defined(__i486__) || \
63 defined(__i586__) || defined(__i686__) || defined(vax) || defined(MIPSEL))
64 # define HASH_LITTLE_ENDIAN 1
65 # define HASH_BIG_ENDIAN 0
66 #elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \
67 __BYTE_ORDER == __BIG_ENDIAN) || \
68 (defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel))
69 # define HASH_LITTLE_ENDIAN 0
70 # define HASH_BIG_ENDIAN 1
71 #else
72 # define HASH_LITTLE_ENDIAN 0
73 # define HASH_BIG_ENDIAN 0
74 #endif
75
/* hashsize(n): number of buckets for an n-bit hash table. */
#define hashsize(n) ((uint32_t)1<<(n))
/* hashmask(n): bitmask selecting the low n bits of a hash value. */
#define hashmask(n) (hashsize(n)-1)
/* rot(x,k): rotate the 32-bit value x left by k bits (0 < k < 32). */
#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
79
80 /*
81 * mix -- mix 3 32-bit values reversibly.
82 *
83 * This is reversible, so any information in (a,b,c) before mix() is
84 * still in (a,b,c) after mix().
85 *
86 * If four pairs of (a,b,c) inputs are run through mix(), or through
87 * mix() in reverse, there are at least 32 bits of the output that
88 * are sometimes the same for one pair and different for another pair.
89 * This was tested for:
90 * * pairs that differed by one bit, by two bits, in any combination
91 * of top bits of (a,b,c), or in any combination of bottom bits of
92 * (a,b,c).
93 * * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
94 * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
95 * is commonly produced by subtraction) look like a single 1-bit
96 * difference.
97 * * the base values were pseudorandom, all zero but one bit set, or
98 * all zero plus a counter that starts at zero.
99 *
100 * Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
101 * satisfy this are
102 * 4 6 8 16 19 4
103 * 9 15 3 18 27 15
104 * 14 9 3 7 17 3
105 * Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
106 * for "differ" defined as + with a one-bit base and a two-bit delta. I
107 * used http://burtleburtle.net/bob/hash/avalanche.html to choose
108 * the operations, constants, and arrangements of the variables.
109 *
110 * This does not achieve avalanche. There are input bits of (a,b,c)
111 * that fail to affect some output bits of (a,b,c), especially of a. The
112 * most thoroughly mixed value is c, but it doesn't really even achieve
113 * avalanche in c.
114 *
115 * This allows some parallelism. Read-after-writes are good at doubling
116 * the number of bits affected, so the goal of mixing pulls in the opposite
117 * direction as the goal of parallelism. I did what I could. Rotates
118 * seem to cost as much as shifts on every machine I could lay my hands
119 * on, and rotates are much kinder to the top and bottom bits, so I used
120 * rotates.
121 */
/* Reversibly mix the three 32-bit state words a, b and c (lookup3). */
#define mix(a,b,c) \
{ \
	a -= c; a ^= rot(c, 4); c += b; \
	b -= a; b ^= rot(a, 6); a += c; \
	c -= b; c ^= rot(b, 8); b += a; \
	a -= c; a ^= rot(c,16); c += b; \
	b -= a; b ^= rot(a,19); a += c; \
	c -= b; c ^= rot(b, 4); b += a; \
}
131
132 /*
133 * final -- final mixing of 3 32-bit values (a,b,c) into c
134 *
135 * Pairs of (a,b,c) values differing in only a few bits will usually
136 * produce values of c that look totally different. This was tested for
137 * * pairs that differed by one bit, by two bits, in any combination
138 * of top bits of (a,b,c), or in any combination of bottom bits of
139 * (a,b,c).
140 * * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
141 * the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
142 * is commonly produced by subtraction) look like a single 1-bit
143 * difference.
144 * * the base values were pseudorandom, all zero but one bit set, or
145 * all zero plus a counter that starts at zero.
146 *
147 * These constants passed:
148 * 14 11 25 16 4 14 24
149 * 12 14 25 16 4 14 24
150 * and these came close:
151 * 4 8 15 26 3 22 24
152 * 10 8 15 26 3 22 24
153 * 11 8 15 26 3 22 24
154 */
/* Final avalanche: fold the state (a,b,c) down into c (lookup3). */
#define final(a,b,c) \
{ \
	c ^= b; c -= rot(b,14); \
	a ^= c; a -= rot(c,11); \
	b ^= a; b -= rot(a,25); \
	c ^= b; c -= rot(b,16); \
	a ^= c; a -= rot(c,4);  \
	b ^= a; b -= rot(a,14); \
	c ^= b; c -= rot(b,24); \
}
165
/*
 * hashword() -- hash an array of aligned 32-bit words into a single
 * 32-bit value using Bob Jenkins' lookup3 mixing.
 *
 * k - the key, an array of uint32_t values
 * length - the length of the key, in uint32_ts
 * initval - the previous hash, or an arbitrary value
 */
static uint32_t __attribute__((unused)) hashword(const uint32_t *k,
		size_t length, uint32_t initval)
{
	uint32_t a, b, c;

	/* Set up the internal state */
	a = b = c = 0xdeadbeef + (((uint32_t) length) << 2) + initval;

	/*----------------------------------------- handle most of the key */
	while (length > 3) {
		a += k[0];
		b += k[1];
		c += k[2];
		mix(a, b, c);
		length -= 3;
		k += 3;
	}

	/*----------------------------------- handle the last 3 uint32_t's */
	switch (length) {	/* all the case statements fall through */
	case 3: c += k[2];	/* fall through */
	case 2: b += k[1];	/* fall through */
	case 1: a += k[0];
		final(a, b, c);
		/* fall through */
	case 0:			/* case 0: nothing left to add */
		break;
	}
	/*---------------------------------------------- report the result */
	return c;
}
201
202
/*
 * hashword2() -- same as hashword(), but take two seeds and return two 32-bit
 * values. pc and pb must both be nonnull, and *pc and *pb must both be
 * initialized with seeds. If you pass in (*pb)==0, the output (*pc) will be
 * the same as the return value from hashword().
 */
static void __attribute__((unused)) hashword2(const uint32_t *k, size_t length,
		uint32_t *pc, uint32_t *pb)
{
	uint32_t a, b, c;

	/* Set up the internal state; both seeds contribute. */
	a = b = c = 0xdeadbeef + ((uint32_t) (length << 2)) + *pc;
	c += *pb;

	/* Handle most of the key, three 32-bit words per round. */
	while (length > 3) {
		a += k[0];
		b += k[1];
		c += k[2];
		mix(a, b, c);
		length -= 3;
		k += 3;
	}

	/* Handle the last (up to) 3 words; cases fall through. */
	switch (length) {
	case 3 :
		c += k[2];
		/* fall through */
	case 2 :
		b += k[1];
		/* fall through */
	case 1 :
		a += k[0];
		final(a, b, c);
		/* fall through */
	case 0: /* case 0: nothing left to add */
		break;
	}

	/* Report both halves of the result. */
	*pc = c;
	*pb = b;
}
242
/*
 * hashlittle() -- hash a variable-length key into a 32-bit value
 * k : the key (the unaligned variable-length array of bytes)
 * length : the length of the key, counting by bytes
 * initval : can be any 4-byte value
 * Returns a 32-bit value. Every bit of the key affects every bit of
 * the return value. Two keys differing by one or two bits will have
 * totally different hash values.
 *
 * The best hash table sizes are powers of 2. There is no need to do
 * mod a prime (mod is sooo slow!). If you need less than 32 bits,
 * use a bitmask. For example, if you need only 10 bits, do
 * h = (h & hashmask(10));
 * In which case, the hash table should have hashsize(10) elements.
 *
 * If you are hashing n strings (uint8_t **)k, do it like this:
 * for (i=0, h=0; i<n; ++i) h = hashlittle( k[i], len[i], h);
 *
 * By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
 * code any way you wish, private, educational, or commercial. It's free.
 *
 * Use for hash table lookup, or anything where one collision in 2^^32 is
 * acceptable. Do NOT use for cryptographic purposes.
 *
 * NOTE: the aligned fast path deliberately reads up to 3 bytes past the
 * end of the key and masks them off; this is why the function is marked
 * LTTNG_NO_SANITIZE_ADDRESS.
 */
LTTNG_NO_SANITIZE_ADDRESS
__attribute__((unused))
static uint32_t hashlittle(const void *key,
		size_t length, uint32_t initval)
{
	uint32_t a,b,c;
	union {
		const void *ptr;
		size_t i;
	} u;	/* needed for Mac Powerbook G4 */

	/* Set up the internal state */
	a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;

	u.ptr = key;
	/* Fast path: 4-byte-aligned key on a little-endian machine. */
	if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
		const uint32_t *k = (const uint32_t *)key;	/* read 32-bit chunks */

		/*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
		while (length > 12) {
			a += k[0];
			b += k[1];
			c += k[2];
			mix(a,b,c);
			length -= 12;
			k += 3;
		}

		/*
		 * "k[2]&0xffffff" actually reads beyond the end of the string, but
		 * then masks off the part it's not allowed to read. Because the
		 * string is aligned, the masked-off tail is in the same word as the
		 * rest of the string. Every machine with memory protection I've seen
		 * does it on word boundaries, so is OK with this. But VALGRIND will
		 * still catch it and complain. The masking trick does make the hash
		 * noticably faster for short strings (like English words).
		 */
#ifndef VALGRIND

		switch (length) {
		case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
		case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
		case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
		case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
		case 8 : b+=k[1]; a+=k[0]; break;
		case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
		case 6 : b+=k[1]&0xffff; a+=k[0]; break;
		case 5 : b+=k[1]&0xff; a+=k[0]; break;
		case 4 : a+=k[0]; break;
		case 3 : a+=k[0]&0xffffff; break;
		case 2 : a+=k[0]&0xffff; break;
		case 1 : a+=k[0]&0xff; break;
		case 0 : return c;	/* zero length strings require no mixing */
		}
#else /* make valgrind happy */
		const uint8_t *k8;

		k8 = (const uint8_t *)k;
		switch (length) {
		case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
		case 11: c+=((uint32_t)k8[10])<<16;	/* fall through */
		case 10: c+=((uint32_t)k8[9])<<8;	/* fall through */
		case 9 : c+=k8[8];			/* fall through */
		case 8 : b+=k[1]; a+=k[0]; break;
		case 7 : b+=((uint32_t)k8[6])<<16;	/* fall through */
		case 6 : b+=((uint32_t)k8[5])<<8;	/* fall through */
		case 5 : b+=k8[4];			/* fall through */
		case 4 : a+=k[0]; break;
		case 3 : a+=((uint32_t)k8[2])<<16;	/* fall through */
		case 2 : a+=((uint32_t)k8[1])<<8;	/* fall through */
		case 1 : a+=k8[0]; break;
		case 0 : return c;
		}
#endif /* !valgrind */
	/* Middle path: 2-byte-aligned key on a little-endian machine. */
	} else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
		const uint16_t *k = (const uint16_t *)key;	/* read 16-bit chunks */
		const uint8_t *k8;

		/*--------------- all but last block: aligned reads and different mixing */
		while (length > 12) {
			a += k[0] + (((uint32_t)k[1])<<16);
			b += k[2] + (((uint32_t)k[3])<<16);
			c += k[4] + (((uint32_t)k[5])<<16);
			mix(a,b,c);
			length -= 12;
			k += 6;
		}

		k8 = (const uint8_t *)k;
		switch (length) {
		case 12:
			c+=k[4]+(((uint32_t)k[5])<<16);
			b+=k[2]+(((uint32_t)k[3])<<16);
			a+=k[0]+(((uint32_t)k[1])<<16);
			break;
		case 11:
			c+=((uint32_t)k8[10])<<16;	/* fall through */
		case 10:
			c+=k[4];
			b+=k[2]+(((uint32_t)k[3])<<16);
			a+=k[0]+(((uint32_t)k[1])<<16);
			break;
		case 9:
			c+=k8[8];			/* fall through */
		case 8:
			b+=k[2]+(((uint32_t)k[3])<<16);
			a+=k[0]+(((uint32_t)k[1])<<16);
			break;
		case 7:
			b+=((uint32_t)k8[6])<<16;	/* fall through */
		case 6:
			b+=k[2];
			a+=k[0]+(((uint32_t)k[1])<<16);
			break;
		case 5:
			b+=k8[4];			/* fall through */
		case 4:
			a+=k[0]+(((uint32_t)k[1])<<16);
			break;
		case 3:
			a+=((uint32_t)k8[2])<<16;	/* fall through */
		case 2:
			a+=k[0];
			break;
		case 1:
			a+=k8[0];
			break;
		case 0:
			return c;	/* zero length requires no mixing */
		}

	} else {	/* need to read the key one byte at a time */
		const uint8_t *k = (const uint8_t *)key;

		while (length > 12) {
			a += k[0];
			a += ((uint32_t)k[1])<<8;
			a += ((uint32_t)k[2])<<16;
			a += ((uint32_t)k[3])<<24;
			b += k[4];
			b += ((uint32_t)k[5])<<8;
			b += ((uint32_t)k[6])<<16;
			b += ((uint32_t)k[7])<<24;
			c += k[8];
			c += ((uint32_t)k[9])<<8;
			c += ((uint32_t)k[10])<<16;
			c += ((uint32_t)k[11])<<24;
			mix(a,b,c);
			length -= 12;
			k += 12;
		}

		switch(length) {	/* all the case statements fall through */
		case 12: c+=((uint32_t)k[11])<<24;	/* fall through */
		case 11: c+=((uint32_t)k[10])<<16;	/* fall through */
		case 10: c+=((uint32_t)k[9])<<8;	/* fall through */
		case 9: c+=k[8];			/* fall through */
		case 8: b+=((uint32_t)k[7])<<24;	/* fall through */
		case 7: b+=((uint32_t)k[6])<<16;	/* fall through */
		case 6: b+=((uint32_t)k[5])<<8;		/* fall through */
		case 5: b+=k[4];			/* fall through */
		case 4: a+=((uint32_t)k[3])<<24;	/* fall through */
		case 3: a+=((uint32_t)k[2])<<16;	/* fall through */
		case 2: a+=((uint32_t)k[1])<<8;		/* fall through */
		case 1:
			a+=k[0];
			break;
		case 0:
			return c;
		}
	}

	final(a,b,c);
	return c;
}
442
443 LTTNG_HIDDEN
/*
 * Hash function for a single uint64_t key passed by address.
 * The 64-bit seed is split into two 32-bit seeds for hashword2(),
 * and the two 32-bit outputs are recombined into the result.
 */
unsigned long hash_key_u64(const void *_key, unsigned long seed)
{
	union split64 {
		uint64_t v64;
		uint32_t v32[2];
	};
	union split64 hash_state;
	union split64 key_words;

	hash_state.v64 = (uint64_t) seed;
	key_words.v64 = *(const uint64_t *) _key;
	hashword2(key_words.v32, 2, &hash_state.v32[0], &hash_state.v32[1]);
	return hash_state.v64;
}
460
461 #if (CAA_BITS_PER_LONG == 64)
462 /*
463 * Hash function for number value.
464 * Pass the value itself as the key, not its address.
465 */
466 LTTNG_HIDDEN
467 unsigned long hash_key_ulong(const void *_key, unsigned long seed)
468 {
469 uint64_t __key = (uint64_t) _key;
470 return (unsigned long) hash_key_u64(&__key, seed);
471 }
472 #else
473 /*
474 * Hash function for number value.
475 * Pass the value itself as the key, not its address.
476 */
477 LTTNG_HIDDEN
478 unsigned long hash_key_ulong(const void *_key, unsigned long seed)
479 {
480 uint32_t key = (uint32_t) _key;
481
482 return hashword(&key, 1, seed);
483 }
484 #endif /* CAA_BITS_PER_LONG */
485
486 /*
487 * Hash function for string.
488 */
489 LTTNG_HIDDEN
unsigned long hash_key_str(const void *key, unsigned long seed)
{
	/* Hash the NUL-terminated string byte-wise with hashlittle(). */
	const char *str = (const char *) key;

	return hashlittle(str, strlen(str), seed);
}
494
495 /*
496 * Hash function for two uint64_t.
497 */
498 LTTNG_HIDDEN
499 unsigned long hash_key_two_u64(const void *key, unsigned long seed)
500 {
501 const struct lttng_ht_two_u64 *k =
502 (const struct lttng_ht_two_u64 *) key;
503
504 return hash_key_u64(&k->key1, seed) ^ hash_key_u64(&k->key2, seed);
505 }
506
507 /*
508 * Hash function compare for number value.
509 */
510 LTTNG_HIDDEN
int hash_match_key_ulong(const void *key1, const void *key2)
{
	/* Keys are compared directly as pointer-encoded number values. */
	return (key1 == key2) ? 1 : 0;
}
519
520 /*
521 * Hash function compare for number value.
522 */
523 LTTNG_HIDDEN
int hash_match_key_u64(const void *key1, const void *key2)
{
	/* Compare the pointed-to 64-bit values for equality. */
	const uint64_t *lhs = (const uint64_t *) key1;
	const uint64_t *rhs = (const uint64_t *) key2;

	return (*lhs == *rhs) ? 1 : 0;
}
532
533 /*
534 * Hash compare function for string.
535 */
536 LTTNG_HIDDEN
int hash_match_key_str(const void *key1, const void *key2)
{
	/* NUL-terminated strings match when strcmp() reports equality. */
	return (strcmp((const char *) key1, (const char *) key2) == 0) ? 1 : 0;
}
545
546 /*
547 * Hash function compare two uint64_t.
548 */
549 LTTNG_HIDDEN
550 int hash_match_key_two_u64(const void *key1, const void *key2)
551 {
552 const struct lttng_ht_two_u64 *k1 =
553 (const struct lttng_ht_two_u64 *) key1;
554 const struct lttng_ht_two_u64 *k2 =
555 (const struct lttng_ht_two_u64 *) key2;
556
557 if (hash_match_key_u64(&k1->key1, &k2->key1) &&
558 hash_match_key_u64(&k1->key2, &k2->key2)) {
559 return 1;
560 }
561
562 return 0;
563 }