X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=liblttng-ust%2Fjhash.h;h=fef55a1f9599c5938348a09ab2f43f8b2d4d7ccf;hb=9560f5ebf55ec70baefe6da3e3d644fc7d497b63;hp=da1e7dd02c7b994bbdda9e5f8f7ce6eae27d4f5b;hpb=e3feda7db3de3530e7c9c511536c36cccf317013;p=lttng-ust.git

diff --git a/liblttng-ust/jhash.h b/liblttng-ust/jhash.h
index da1e7dd0..fef55a1f 100644
--- a/liblttng-ust/jhash.h
+++ b/liblttng-ust/jhash.h
@@ -16,6 +16,8 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
+#include
+#include
 #include
 #include
 
@@ -63,19 +65,19 @@ do { \
  * Returns a 32-bit value. Every bit of the key affects every bit of
  * the return value. Two keys differing by one or two bits will have
  * totally different hash values.
- * 
+ *
  * The best hash table sizes are powers of 2. There is no need to do
  * mod a prime (mod is sooo slow!). If you need less than 32 bits,
  * use a bitmask. For example, if you need only 10 bits, do
  * h = (h & hashmask(10));
  * In which case, the hash table should have hashsize(10) elements.
- * 
+ *
  * If you are hashing n strings (uint8_t **)k, do it like this:
  * for (i = 0, h = 0; i < n; ++i) h = hashlittle(k[i], len[i], h);
- * 
+ *
  * By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
  * code any way you wish, private, educational, or commercial. It's free.
- * 
+ *
  * Use for hash table lookup, or anything where one collision in 2^^32 is
  * acceptable. Do NOT use for cryptographic purposes.
  */
@@ -106,34 +108,14 @@ uint32_t hashlittle(const void *key, size_t length, uint32_t initval)
 	}
 
 	/*----------------------------- handle the last (probably partial) block */
-	/*
-	 * "k[2]&0xffffff" actually reads beyond the end of the string, but
-	 * then masks off the part it's not allowed to read. Because the
-	 * string is aligned, the masked-off tail is in the same word as the
-	 * rest of the string. Every machine with memory protection I've seen
-	 * does it on word boundaries, so is OK with this. But VALGRIND will
-	 * still catch it and complain. The masking trick does make the hash
-	 * noticably faster for short strings (like English words).
+	/*
+	 * The original jhash.h reads beyond the end of string, and implements
+	 * a special code path for VALGRIND. It seems to make ASan unhappy too
+	 * though, so considering that hashing event names is not a fast-path
+	 * in lttng-ust, remove the "fast" code entirely and use the slower
+	 * but verifiable VALGRIND version of the code which does not issue
+	 * out-of-bound reads.
 	 */
-#ifndef VALGRIND
-
-	switch (length) {
-	case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
-	case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
-	case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
-	case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
-	case 8 : b+=k[1]; a+=k[0]; break;
-	case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
-	case 6 : b+=k[1]&0xffff; a+=k[0]; break;
-	case 5 : b+=k[1]&0xff; a+=k[0]; break;
-	case 4 : a+=k[0]; break;
-	case 3 : a+=k[0]&0xffffff; break;
-	case 2 : a+=k[0]&0xffff; break;
-	case 1 : a+=k[0]&0xff; break;
-	case 0 : return c; /* zero length strings require no mixing */
-	}
-
-#else /* make valgrind happy */
 	{
 		const uint8_t *k8;
 
@@ -154,7 +136,6 @@ uint32_t hashlittle(const void *key, size_t length, uint32_t initval)
 		case 0 : return c;
 		}
 	}
-#endif /* !valgrind */
 
 	} else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
 		const uint16_t *k = (const uint16_t *) key;	/* read 16-bit chunks */
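
Usage note (illustration, not part of the patch): the Bob Jenkins comment kept in the
second hunk explains how callers are meant to use hashlittle(): mask the result for
power-of-two tables instead of taking a modulo, and chain the previous hash as the next
initval when hashing several strings. Below is a minimal sketch of both patterns,
assuming this jhash.h is included so that hashlittle(), hashsize() and hashmask() are
in scope; the 10-bit table size and the helper names are made up for illustration.

	#include <stdint.h>
	#include <string.h>
	#include "jhash.h"

	/* One string into a power-of-two table: keep 10 bits, giving hashsize(10) buckets. */
	static uint32_t name_bucket(const char *name)
	{
		uint32_t h = hashlittle(name, strlen(name), 0);

		return h & hashmask(10);
	}

	/* Several strings: feed each hash back in as the next initval. */
	static uint32_t pair_bucket(const char *provider, const char *event)
	{
		uint32_t h;

		h = hashlittle(provider, strlen(provider), 0);
		h = hashlittle(event, strlen(event), h);
		return h & hashmask(10);
	}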
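
Why the removed switch trips ASan (illustration, not code from the file): in the aligned
32-bit path the key is read through (const uint32_t *) k, so a 7-byte tail was handled by
"b += k[1] & 0xffffff". That single load touches bytes 4..7, and byte 7 lies past the end
of the buffer; the mask discards it, but ASan and Valgrind still report the out-of-bound
read. The path the patch keeps assembles the same value from in-bounds byte reads, roughly
as follows (adapted from the retained VALGRIND-style tail handling, little-endian case,
with k8 being (const uint8_t *) k):

	/* length == 7: bytes 4..6 go into b, bytes 0..3 into a; byte 7 is never read. */
	b += ((uint32_t) k8[6]) << 16;
	b += ((uint32_t) k8[5]) << 8;
	b += k8[4];
	a += k[0];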