X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=liblttng-ust%2Fjhash.h;h=91993e8bf64ea6d5c6a45ae210a4bd1f4145b3ad;hb=c0c0989ab70574e09b2f7e8b48c2da6af664a849;hp=fe785bd5a9560572c0442eb0bee2ae6b8100b580;hpb=a60af3a5ee5d990c867ef190acfa81c180301ea2;p=lttng-ust.git

diff --git a/liblttng-ust/jhash.h b/liblttng-ust/jhash.h
index fe785bd5..91993e8b 100644
--- a/liblttng-ust/jhash.h
+++ b/liblttng-ust/jhash.h
@@ -1,19 +1,7 @@
 /*
- * Copyright (C) 2011 Mathieu Desnoyers
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
+ * SPDX-License-Identifier: LGPL-2.1-only
  *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ * Copyright (C) 2011 Mathieu Desnoyers
  */
 
 #include
@@ -109,33 +97,13 @@ uint32_t hashlittle(const void *key, size_t length, uint32_t initval)
 
 	/*----------------------------- handle the last (probably partial) block */
 	/*
-	 * "k[2]&0xffffff" actually reads beyond the end of the string, but
-	 * then masks off the part it's not allowed to read. Because the
-	 * string is aligned, the masked-off tail is in the same word as the
-	 * rest of the string. Every machine with memory protection I've seen
-	 * does it on word boundaries, so is OK with this. But VALGRIND will
-	 * still catch it and complain. The masking trick does make the hash
-	 * noticably faster for short strings (like English words).
+	 * The original jhash.h reads beyond the end of string, and implements
+	 * a special code path for VALGRIND. It seems to make ASan unhappy too
+	 * though, so considering that hashing event names is not a fast-path
+	 * in lttng-ust, remove the "fast" code entirely and use the slower
+	 * but verifiable VALGRIND version of the code which does not issue
+	 * out-of-bound reads.
 	 */
-#ifndef VALGRIND
-
-	switch (length) {
-	case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
-	case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
-	case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
-	case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
-	case 8 : b+=k[1]; a+=k[0]; break;
-	case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
-	case 6 : b+=k[1]&0xffff; a+=k[0]; break;
-	case 5 : b+=k[1]&0xff; a+=k[0]; break;
-	case 4 : a+=k[0]; break;
-	case 3 : a+=k[0]&0xffffff; break;
-	case 2 : a+=k[0]&0xffff; break;
-	case 1 : a+=k[0]&0xff; break;
-	case 0 : return c;	/* zero length strings require no mixing */
-	}
-
-#else /* make valgrind happy */
 	{
 		const uint8_t *k8;
 
@@ -156,7 +124,6 @@ uint32_t hashlittle(const void *key, size_t length, uint32_t initval)
 		case 0 : return c;
 		}
 	}
-#endif /* !valgrind */
 
 	} else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
 		const uint16_t *k = (const uint16_t *) key;	/* read 16-bit chunks */
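
For reference, here is a minimal, self-contained sketch contrasting the two tail-handling strategies this diff trades between. It is not lttng-ust code: tail_masked() and tail_bytewise() are invented names for this illustration, and the union stands in for a string whose last word straddles the end of the allocation.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* "Fast" strategy removed by this change: read a full 32-bit word even
 * when fewer than 4 tail bytes remain, then mask off the excess.  When
 * the word crosses the end of the allocation, Valgrind and ASan report
 * the load as an out-of-bounds read, even though the masked result is
 * correct. */
static uint32_t tail_masked(const uint32_t *k, size_t tail)
{
	switch (tail) {
	case 3: return k[0] & 0xffffff;
	case 2: return k[0] & 0xffff;
	case 1: return k[0] & 0xff;
	default: return 0;
	}
}

/* Strategy kept: assemble the same little-endian value one byte at a
 * time, never touching memory past the end of the buffer. */
static uint32_t tail_bytewise(const uint8_t *k8, size_t tail)
{
	uint32_t v = 0;

	switch (tail) {
	case 3: v += ((uint32_t) k8[2]) << 16;	/* fall through */
	case 2: v += ((uint32_t) k8[1]) << 8;	/* fall through */
	case 1: v += k8[0];
	}
	return v;
}

int main(void)
{
	/* The fourth byte plays the role of whatever happens to live
	 * just past the end of a 3-byte string. */
	union {
		uint32_t word;
		uint8_t bytes[4];
	} u = { .bytes = { 0x11, 0x22, 0x33, 0xee } };

	/* On a little-endian machine both print 0x332211: the masked
	 * variant only matches because the 0xee byte is read and then
	 * discarded by the mask. */
	printf("masked:   0x%" PRIx32 "\n", tail_masked(&u.word, 3));
	printf("bytewise: 0x%" PRIx32 "\n", tail_bytewise(u.bytes, 3));
	return 0;
}

Since hashlittle() only takes the word-at-a-time path on little-endian, 4-byte-aligned input, the byte-wise variant computes the identical hash; the diff's rationale is that event-name hashing is not a fast path, so the small speed loss is a fair price for code that sanitizers can verify.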