/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <stddef.h>
#include <stdint.h>
#include <urcu/compiler.h>
#include <lttng/ust-endian.h>

/*
 * Hash function
 * Source: http://burtleburtle.net/bob/c/lookup3.c
 * Originally Public Domain
 */

#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))

#define mix(a, b, c) \
do { \
	a -= c; a ^= rot(c, 4); c += b; \
	b -= a; b ^= rot(a, 6); a += c; \
	c -= b; c ^= rot(b, 8); b += a; \
	a -= c; a ^= rot(c, 16); c += b; \
	b -= a; b ^= rot(a, 19); a += c; \
	c -= b; c ^= rot(b, 4); b += a; \
} while (0)

#define final(a, b, c) \
{ \
	c ^= b; c -= rot(b, 14); \
	a ^= c; a -= rot(c, 11); \
	b ^= a; b -= rot(a, 25); \
	c ^= b; c -= rot(b, 16); \
	a ^= c; a -= rot(c, 4); \
	b ^= a; b -= rot(a, 14); \
	c ^= b; c -= rot(b, 24); \
}

#if (BYTE_ORDER == LITTLE_ENDIAN)
#define HASH_LITTLE_ENDIAN 1
#else
#define HASH_LITTLE_ENDIAN 0
#endif

/*
 *
 * hashlittle() -- hash a variable-length key into a 32-bit value
 * k       : the key (the unaligned variable-length array of bytes)
 * length  : the length of the key, counting by bytes
 * initval : can be any 4-byte value
 * Returns a 32-bit value. Every bit of the key affects every bit of
 * the return value. Two keys differing by one or two bits will have
 * totally different hash values.
 *
 * The best hash table sizes are powers of 2. There is no need to do
 * mod a prime (mod is sooo slow!). If you need less than 32 bits,
 * use a bitmask. For example, if you need only 10 bits, do
 *   h = (h & hashmask(10));
 * In which case, the hash table should have hashsize(10) elements.
 *
 * If you are hashing n strings (uint8_t **)k, do it like this:
 *   for (i = 0, h = 0; i < n; ++i) h = hashlittle(k[i], len[i], h);
 *
 * By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
 * code any way you wish, private, educational, or commercial. It's free.
 *
 * Use for hash table lookup, or anything where one collision in 2^^32 is
 * acceptable. Do NOT use for cryptographic purposes.
 */
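
/*
 * Note: hashsize() and hashmask(), referenced in the comment above, are not
 * defined in this header. In Bob Jenkins' lookup3.c they are, roughly:
 *
 *	#define hashsize(n) ((uint32_t) 1 << (n))
 *	#define hashmask(n) (hashsize(n) - 1)
 *
 * Callers that want an n-bit table index are expected to mask the returned
 * hash themselves (see the usage sketch at the end of this file).
 */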
static
uint32_t hashlittle(const void *key, size_t length, uint32_t initval)
{
	uint32_t a, b, c;	/* internal state */
	union {
		const void *ptr;
		size_t i;
	} u;

	/* Set up the internal state */
	a = b = c = 0xdeadbeef + ((uint32_t) length) + initval;

	u.ptr = key;
	if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
		const uint32_t *k = (const uint32_t *) key;	/* read 32-bit chunks */

		/*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
		while (length > 12) {
			a += k[0];
			b += k[1];
			c += k[2];
			mix(a, b, c);
			length -= 12;
			k += 3;
		}

		/*----------------------------- handle the last (probably partial) block */
		/*
		 * The original jhash.h reads beyond the end of the string, and
		 * implements a special code path for VALGRIND. It seems to make
		 * ASan unhappy too though, so considering that hashing event
		 * names is not a fast-path in lttng-ust, remove the "fast" code
		 * entirely and use the slower but verifiable VALGRIND version of
		 * the code which does not issue out-of-bound reads.
		 */
		{
			const uint8_t *k8;

			k8 = (const uint8_t *) k;
			switch (length) {
			case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
			case 11: c+=((uint32_t) k8[10])<<16;	/* fall through */
			case 10: c+=((uint32_t) k8[9])<<8;	/* fall through */
			case 9 : c+=k8[8];			/* fall through */
			case 8 : b+=k[1]; a+=k[0]; break;
			case 7 : b+=((uint32_t) k8[6])<<16;	/* fall through */
			case 6 : b+=((uint32_t) k8[5])<<8;	/* fall through */
			case 5 : b+=k8[4];			/* fall through */
			case 4 : a+=k[0]; break;
			case 3 : a+=((uint32_t) k8[2])<<16;	/* fall through */
			case 2 : a+=((uint32_t) k8[1])<<8;	/* fall through */
			case 1 : a+=k8[0]; break;
			case 0 : return c;
			}
		}

	} else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
		const uint16_t *k = (const uint16_t *) key;	/* read 16-bit chunks */
		const uint8_t *k8;

		/*--------------- all but last block: aligned reads and different mixing */
		while (length > 12)
		{
			a += k[0] + (((uint32_t) k[1])<<16);
			b += k[2] + (((uint32_t) k[3])<<16);
			c += k[4] + (((uint32_t) k[5])<<16);
			mix(a, b, c);
			length -= 12;
			k += 6;
		}

		/*----------------------------- handle the last (probably partial) block */
		k8 = (const uint8_t *) k;
		switch(length)
		{
		case 12: c+=k[4]+(((uint32_t) k[5])<<16);
			 b+=k[2]+(((uint32_t) k[3])<<16);
			 a+=k[0]+(((uint32_t) k[1])<<16);
			 break;
		case 11: c+=((uint32_t) k8[10])<<16;	/* fall through */
		case 10: c+=k[4];
			 b+=k[2]+(((uint32_t) k[3])<<16);
			 a+=k[0]+(((uint32_t) k[1])<<16);
			 break;
		case 9 : c+=k8[8];			/* fall through */
		case 8 : b+=k[2]+(((uint32_t) k[3])<<16);
			 a+=k[0]+(((uint32_t) k[1])<<16);
			 break;
		case 7 : b+=((uint32_t) k8[6])<<16;	/* fall through */
		case 6 : b+=k[2];
			 a+=k[0]+(((uint32_t) k[1])<<16);
			 break;
		case 5 : b+=k8[4];			/* fall through */
		case 4 : a+=k[0]+(((uint32_t) k[1])<<16);
			 break;
		case 3 : a+=((uint32_t) k8[2])<<16;	/* fall through */
		case 2 : a+=k[0];
			 break;
		case 1 : a+=k8[0];
			 break;
		case 0 : return c;	/* zero length requires no mixing */
		}

	} else {	/* need to read the key one byte at a time */
		const uint8_t *k = (const uint8_t *)key;

		/*--------------- all but the last block: affect some 32 bits of (a, b, c) */
		while (length > 12) {
			a += k[0];
			a += ((uint32_t) k[1])<<8;
			a += ((uint32_t) k[2])<<16;
			a += ((uint32_t) k[3])<<24;
			b += k[4];
			b += ((uint32_t) k[5])<<8;
			b += ((uint32_t) k[6])<<16;
			b += ((uint32_t) k[7])<<24;
			c += k[8];
			c += ((uint32_t) k[9])<<8;
			c += ((uint32_t) k[10])<<16;
			c += ((uint32_t) k[11])<<24;
			mix(a, b, c);
			length -= 12;
			k += 12;
		}

		/*-------------------------------- last block: affect all 32 bits of (c) */
		switch (length) {	/* all the case statements fall through */
		case 12: c+=((uint32_t) k[11])<<24;
		case 11: c+=((uint32_t) k[10])<<16;
		case 10: c+=((uint32_t) k[9])<<8;
		case 9 : c+=k[8];
		case 8 : b+=((uint32_t) k[7])<<24;
		case 7 : b+=((uint32_t) k[6])<<16;
		case 6 : b+=((uint32_t) k[5])<<8;
		case 5 : b+=k[4];
		case 4 : a+=((uint32_t) k[3])<<24;
		case 3 : a+=((uint32_t) k[2])<<16;
		case 2 : a+=((uint32_t) k[1])<<8;
		case 1 : a+=k[0];
			 break;
		case 0 : return c;
		}
	}

	final(a, b, c);
	return c;
}

static inline
uint32_t jhash(const void *key, size_t length, uint32_t seed)
{
	return hashlittle(key, length, seed);
}
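
/*
 * Illustrative usage sketch (not part of the original header): hash a
 * NUL-terminated name into a bucket index of a power-of-two sized table.
 * The helper name, table size and seed below are arbitrary examples, not
 * lttng-ust API.
 *
 *	#include <string.h>
 *
 *	#define EXAMPLE_HASH_BITS	10
 *	#define EXAMPLE_TABLE_SIZE	(1U << EXAMPLE_HASH_BITS)
 *
 *	static inline
 *	uint32_t example_name_to_bucket(const char *name, uint32_t seed)
 *	{
 *		uint32_t h = jhash(name, strlen(name), seed);
 *
 *		return h & (EXAMPLE_TABLE_SIZE - 1);	// power-of-two table: mask, not modulo
 *	}
 */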