/*
 * poison_free(ptr): overwrite the memory about to be released with a
 * recognizable pattern (0x42) before calling free(), so that
 * use-after-free bugs surface as obviously bogus data rather than
 * silently reading stale-but-plausible values.  Enabled only when
 * POISON_FREE is defined; otherwise it is a plain free().
 *
 * NOTE(review): sizeof(*(ptr)) poisons a single element only — when
 * ptr is the base of an allocated array, elements past the first are
 * freed unpoisoned.  Confirm callers only pass single-object
 * allocations, or poison the full allocation size.
 */
#ifdef POISON_FREE
#define poison_free(ptr)				\
	do {						\
		memset((ptr), 0x42, sizeof(*(ptr)));	\
		free(ptr);				\
	} while (0)
#else
#define poison_free(ptr)	free(ptr)
#endif
+
/* Forward declaration: schedule a deferred (lazy) resize of the table. */
static
void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth);
+
+/*
+ * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
+ * available, then we support hash table item accounting.
+ * In the unfortunate event the number of CPUs reported would be
+ * inaccurate, we use modulo arithmetic on the number of CPUs we got.
+ */
+#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+
/* Forward declaration: schedule a deferred resize toward target size count. */
static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
				unsigned long count);

/*
 * nr_cpus_mask state (see alloc_per_cpu_items_count()):
 *   -1: not initialized yet,
 *   -2: sysconf(_SC_NPROCESSORS_CONF) failed, accounting disabled,
 *  >=0: (number of CPUs rounded up to a power of two) - 1, usable
 *       as an AND-mask for cheap modulo.
 */
static long nr_cpus_mask = -1;
+
+static
+struct ht_items_count *alloc_per_cpu_items_count(void)
+{
+ struct ht_items_count *count;
+
+ switch (nr_cpus_mask) {
+ case -2:
+ return NULL;
+ case -1:
+ {
+ long maxcpus;
+
+ maxcpus = sysconf(_SC_NPROCESSORS_CONF);
+ if (maxcpus <= 0) {
+ nr_cpus_mask = -2;
+ return NULL;
+ }
+ /*
+ * round up number of CPUs to next power of two, so we
+ * can use & for modulo.
+ */
+ maxcpus = 1UL << get_count_order_ulong(maxcpus);
+ nr_cpus_mask = maxcpus - 1;
+ }
+ /* Fall-through */
+ default:
+ return calloc(nr_cpus_mask + 1, sizeof(*count));
+ }
+}
+
/*
 * Release a per-CPU counter array obtained from
 * alloc_per_cpu_items_count(), poisoning it first when POISON_FREE
 * is enabled.
 */
static
void free_per_cpu_items_count(struct ht_items_count *count)
{
	poison_free(count);
}
+
+static
+int ht_get_cpu(void)
+{
+ int cpu;
+
+ assert(nr_cpus_mask >= 0);
+ cpu = sched_getcpu();
+ if (unlikely(cpu < 0))
+ return cpu;
+ else
+ return cpu & nr_cpus_mask;
+}
+
+static
+void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
+{
+ unsigned long percpu_count;
+ int cpu;
+
+ if (unlikely(!ht->percpu_count))
+ return;
+ cpu = ht_get_cpu();
+ if (unlikely(cpu < 0))
+ return;
+ percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1);
+ if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
+ unsigned long count;
+
+ dbg_printf("add percpu %lu\n", percpu_count);
+ count = uatomic_add_return(&ht->count,
+ 1UL << COUNT_COMMIT_ORDER);
+ /* If power of 2 */
+ if (!(count & (count - 1))) {
+ if ((count >> CHAIN_LEN_RESIZE_THRESHOLD)
+ < t->size)
+ return;
+ dbg_printf("add set global %lu\n", count);
+ cds_lfht_resize_lazy_count(ht, t,
+ count >> (CHAIN_LEN_TARGET - 1));
+ }
+ }
+}
+
+static
+void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
+{
+ unsigned long percpu_count;
+ int cpu;
+
+ if (unlikely(!ht->percpu_count))
+ return;
+ cpu = ht_get_cpu();
+ if (unlikely(cpu < 0))
+ return;
+ percpu_count = uatomic_add_return(&ht->percpu_count[cpu].remove, -1);
+ if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
+ unsigned long count;
+
+ dbg_printf("remove percpu %lu\n", percpu_count);
+ count = uatomic_add_return(&ht->count,
+ -(1UL << COUNT_COMMIT_ORDER));
+ /* If power of 2 */
+ if (!(count & (count - 1))) {
+ if ((count >> CHAIN_LEN_RESIZE_THRESHOLD)
+ >= t->size)
+ return;
+ dbg_printf("remove set global %lu\n", count);
+ cds_lfht_resize_lazy_count(ht, t,
+ count >> (CHAIN_LEN_TARGET - 1));
+ }
+ }
+}
+
#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */

/*
 * Per-CPU item accounting is compiled out on this platform; keep a
 * constant "uninitialized" mask so shared code paths still compile.
 */
static const long nr_cpus_mask = -1;
+
/*
 * Stub for builds without sched_getcpu()/sysconf(): per-CPU item
 * accounting is unavailable, so no counter array is ever allocated.
 */
static
struct ht_items_count *alloc_per_cpu_items_count(void)
{
	return NULL;
}
+
/*
 * Stub counterpart of the alloc stub above: nothing was allocated,
 * so there is nothing to release.
 */
static
void free_per_cpu_items_count(struct ht_items_count *count)
{
	(void) count;	/* deliberately unused */
}
+