 * To discuss these guarantees, we first define a "read" operation as any
 * of the basic cds_lfht_lookup, cds_lfht_next_duplicate,
 * cds_lfht_first, cds_lfht_next operations, as well as
- * cds_lfht_add_unique (failure).
+ * cds_lfht_add_unique (failure).
*
 * We define a "read traversal" operation as any of the following
 * groups of operations
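 * (e.g. a lookup followed by iteration over its duplicates). As a
 * minimal sketch (assuming ht, hash, match and key are set up by the
 * caller), one such traversal looks like:
 *
 *   struct cds_lfht_iter iter;
 *   struct cds_lfht_node *node;
 *
 *   rcu_read_lock();
 *   cds_lfht_lookup(ht, hash, match, key, &iter);
 *   while ((node = cds_lfht_iter_get_node(&iter)) != NULL) {
 *           (use node here)
 *           cds_lfht_next_duplicate(ht, match, key, &iter);
 *   }
 *   rcu_read_unlock();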
* shrink hash table from order 6 to 5: fini the index=6 bucket node table
*
* A bit of ascii art explanation:
- *
+ *
 * The order index is off by one compared to the actual power of 2,
 * because we use index 0 to deal with the 0 special-case.
- *
+ *
* This shows the nodes for a small table ordered by reversed bits:
- *
+ *
 *    bits   reverse
 * 0  000        000
 * 4  100        001
 * 5  101        101
 * 3  011        110
 * 7  111        111
- *
- * This shows the nodes in order of non-reversed bits, linked by
+ *
+ * This shows the nodes in order of non-reversed bits, linked by
* reversed-bit order.
- *
+ *
 * order              bits   reverse
 * 0               0  000        000
 * 1               |  1   001        100    <-
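 *
 * The reverse column in these tables is simply the 3-bit bit-reversal
 * of the node value. A minimal helper recomputing it (hypothetical,
 * for illustration only; the real code uses the byte-wide lookup
 * table below):
 *
 *   static unsigned int reverse3(unsigned int v)
 *   {
 *           return ((v & 1) << 2) | (v & 2) | ((v >> 2) & 1);
 *   }
 *
 * e.g. reverse3(4) == 1 (100 -> 001) and reverse3(3) == 6 (011 -> 110),
 * matching the rows above.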
#include <stdint.h>
#include <string.h>
#include <sched.h>
+#include <unistd.h>
#include "config.h"
-#include <urcu.h>
+#include "compat-getcpu.h"
+#include <urcu-pointer.h>
#include <urcu-call-rcu.h>
#include <urcu-flavor.h>
#include <urcu/arch.h>
* Originally from Public Domain.
*/
-static const uint8_t BitReverseTable256[256] =
+static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n), (n) + 2*64, (n) + 1*64, (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
static
uint32_t bit_reverse_u32(uint32_t v)
{
- return ((uint32_t) bit_reverse_u8(v) << 24) |
- ((uint32_t) bit_reverse_u8(v >> 8) << 16) |
- ((uint32_t) bit_reverse_u8(v >> 16) << 8) |
+ return ((uint32_t) bit_reverse_u8(v) << 24) |
+ ((uint32_t) bit_reverse_u8(v >> 8) << 16) |
+ ((uint32_t) bit_reverse_u8(v >> 16) << 8) |
((uint32_t) bit_reverse_u8(v >> 24));
}
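
/*
 * Illustration (assumed test harness, not part of this file): the
 * byte-wise composition above is an involution, so reversing twice
 * must return the input, and the lowest bit must land in the top
 * position:
 *
 *   assert(bit_reverse_u32(1U) == 0x80000000U);
 *   assert(bit_reverse_u32(bit_reverse_u32(0xdeadbeefU)) == 0xdeadbeefU);
 */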
#else
static
uint64_t bit_reverse_u64(uint64_t v)
{
- return ((uint64_t) bit_reverse_u8(v) << 56) |
- ((uint64_t) bit_reverse_u8(v >> 8) << 48) |
+ return ((uint64_t) bit_reverse_u8(v) << 56) |
+ ((uint64_t) bit_reverse_u8(v >> 8) << 48) |
((uint64_t) bit_reverse_u8(v >> 16) << 40) |
((uint64_t) bit_reverse_u8(v >> 24) << 32) |
- ((uint64_t) bit_reverse_u8(v >> 32) << 24) |
- ((uint64_t) bit_reverse_u8(v >> 40) << 16) |
+ ((uint64_t) bit_reverse_u8(v >> 32) << 24) |
+ ((uint64_t) bit_reverse_u8(v >> 40) << 16) |
((uint64_t) bit_reverse_u8(v >> 48) << 8) |
((uint64_t) bit_reverse_u8(v >> 56));
}
{
int r;
- asm("bsrl %1,%0\n\t"
+ __asm__ ("bsrl %1,%0\n\t"
"jnz 1f\n\t"
"movl $-1,%0\n\t"
"1:\n\t"
{
long r;
- asm("bsrq %1,%0\n\t"
+ __asm__ ("bsrq %1,%0\n\t"
"jnz 1f\n\t"
"movq $-1,%0\n\t"
"1:\n\t"
poison_free(ht->split_count);
}
-#if defined(HAVE_SCHED_GETCPU)
static
int ht_get_split_count_index(unsigned long hash)
{
int cpu;
assert(split_count_mask >= 0);
- cpu = sched_getcpu();
+ cpu = urcu_sched_getcpu();
if (caa_unlikely(cpu < 0))
return hash & split_count_mask;
else
return cpu & split_count_mask;
}
-#else /* #if defined(HAVE_SCHED_GETCPU) */
-static
-int ht_get_split_count_index(unsigned long hash)
-{
- return hash & split_count_mask;
-}
-#endif /* #else #if defined(HAVE_SCHED_GETCPU) */
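
/*
 * The HAVE_SCHED_GETCPU fallback removed above moves into the new
 * "compat-getcpu.h" header, which is assumed to wrap the platform
 * call along these lines (a sketch, not the verbatim header):
 *
 *   #if defined(HAVE_SCHED_GETCPU)
 *   static inline int urcu_sched_getcpu(void)
 *   {
 *           return sched_getcpu();
 *   }
 *   #else
 *   static inline int urcu_sched_getcpu(void)
 *   {
 *           return -1;      (unknown CPU: callers hash instead)
 *   }
 *   #endif
 */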
static
void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
void (*fct)(struct cds_lfht *ht, unsigned long i,
unsigned long start, unsigned long len))
{
- unsigned long partition_len;
+ unsigned long partition_len, start = 0;
struct partition_resize_work *work;
int thread, ret;
unsigned long nr_threads;
+ assert(nr_cpus_mask != -1);
+ if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD)
+ goto fallback;
+
/*
* Note: nr_cpus_mask + 1 is always power of 2.
* We spawn just the number of threads we need to satisfy the minimum
work[thread].fct = fct;
ret = pthread_create(&(work[thread].thread_id), ht->resize_attr,
partition_resize_thread, &work[thread]);
+ if (ret == EAGAIN) {
+ /*
+ * Out of resources: wait and join the threads
+ * we've created, then handle leftovers.
+ */
+ dbg_printf("error spawning for resize, single-threading\n");
+ start = work[thread].start;
+ len -= start;
+ nr_threads = thread;
+ break;
+ }
assert(!ret);
}
for (thread = 0; thread < nr_threads; thread++) {
assert(!ret);
}
free(work);
- return;
+
+ /*
+	 * A pthread_create failure above either leaves us with no
+	 * threads to join or with a non-zero start offset; fall back
+	 * to single-threaded processing of the leftovers.
+ */
+ if (start == 0 && nr_threads > 0)
+ return;
fallback:
ht->flavor->thread_online();
- fct(ht, i, 0, len);
+ fct(ht, i, start, len);
ht->flavor->thread_offline();
}
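
/*
 * Design note: EAGAIN is the only pthread_create() failure tolerated
 * above, since it signals a transient lack of thread resources rather
 * than an invalid request. A minimal standalone sketch of the same
 * "spawn what you can, single-thread the rest" pattern (hypothetical
 * tid/arg/worker names):
 *
 *   nr_spawned = 0;
 *   for (t = 0; t < nr_threads; t++) {
 *           ret = pthread_create(&tid[t], NULL, worker, &arg[t]);
 *           if (ret == EAGAIN)
 *                   break;                  (keep what we have)
 *           assert(!ret);
 *           nr_spawned++;
 *   }
 *   for (t = 0; t < nr_spawned; t++)
 *           (void) pthread_join(tid[t], NULL);
 *   for (t = nr_spawned; t < nr_threads; t++)
 *           worker(&arg[t]);                (run leftovers inline)
 */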
void init_table_populate(struct cds_lfht *ht, unsigned long i,
unsigned long len)
{
- assert(nr_cpus_mask != -1);
- if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
- ht->flavor->thread_online();
- init_table_populate_partition(ht, i, 0, len);
- ht->flavor->thread_offline();
- return;
- }
partition_resize_helper(ht, i, len, init_table_populate_partition);
}
static
void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
{
-
- assert(nr_cpus_mask != -1);
- if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
- ht->flavor->thread_online();
- remove_table_partition(ht, i, 0, len);
- ht->flavor->thread_offline();
- return;
- }
partition_resize_helper(ht, i, len, remove_table_partition);
}
unsigned long len;
len = 1UL << (i - 1);
- dbg_printf("fini order %lu len: %lu\n", i, len);
+ dbg_printf("fini order %ld len: %lu\n", i, len);
/* Stop shrink if the resize target changes under us */
if (CMM_LOAD_SHARED(ht->resize_target) > (1UL << (i - 1)))
free_split_items_count(ht);
if (attr)
*attr = ht->resize_attr;
+ ret = pthread_mutex_destroy(&ht->resize_mutex);
+ if (ret)
+ ret = -EBUSY;
poison_free(ht);
end:
return ret;
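
/*
 * Caller-side sketch (hypothetical call site): with the change above,
 * cds_lfht_destroy() reports -EBUSY when the resize mutex cannot be
 * destroyed, e.g. while a resize is still in flight, so callers
 * should check its return value:
 *
 *   ret = cds_lfht_destroy(ht, NULL);
 *   if (ret)
 *           (handle -EBUSY: table still in use)
 */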