Cleanup: compiler warning on 32-bit architectures
diff --git a/src/rculfhash.c b/src/rculfhash.c
index b63a0a6c420652a6ce557b31e35602d10ed80793..e0c5860c7b6b91e448b68b250c5c0a58afd49825 100644
--- a/src/rculfhash.c
+++ b/src/rculfhash.c
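The warning this cleanup removes presumably comes from the 64-bit bucket
count check in _cds_lfht_new() being compiled unconditionally: on a 32-bit
target, "unsigned long" is only 32 bits wide, so "max_nr_buckets <=
(1ULL << 32)" can never be false, and gcc flags it (with -Wextra /
-Wtype-limits, the classic "comparison is always true due to limited range
of data type"). A minimal standalone sketch of the pattern; the variable
name and value are illustrative:

    /* cc -m32 -Wextra repro.c */
    #include <stdio.h>

    int main(void)
    {
            unsigned long max_nr_buckets = 1024;

            /* Always true when unsigned long is 32-bit: the left-hand
             * side is promoted to unsigned long long but can never
             * exceed ULONG_MAX (2^32 - 1). */
            if (max_nr_buckets <= (1ULL << 32))
                    printf("bounded\n");
            return 0;
    }

Guarding the check with "#if (CAA_BITS_PER_LONG > 32)" (below) keeps the
comparison out of 32-bit builds entirely.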
@@ -380,6 +380,27 @@ static int cds_lfht_workqueue_atfork_nesting;
 static void cds_lfht_init_worker(const struct rcu_flavor_struct *flavor);
 static void cds_lfht_fini_worker(const struct rcu_flavor_struct *flavor);
 
+#ifdef CONFIG_CDS_LFHT_ITER_DEBUG
+
+static
+void cds_lfht_iter_debug_set_ht(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+{
+       iter->lfht = ht;
+}
+
+#define cds_lfht_iter_debug_assert(...)                assert(__VA_ARGS__)
+
+#else
+
+static
+void cds_lfht_iter_debug_set_ht(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+{
+}
+
+#define cds_lfht_iter_debug_assert(...)
+
+#endif
+
 /*
  * Algorithm to reverse bits in a word by lookup table, extended to
  * 64-bit words.
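The CONFIG_CDS_LFHT_ITER_DEBUG hunks above and below make each iterator
remember which hash table primed it: cds_lfht_lookup()/cds_lfht_first()
stamp iter->lfht, and cds_lfht_next()/cds_lfht_next_duplicate() assert
that they are handed the same table. (The internally built d_iter in
_cds_lfht_add(), patched below, needs the same stamp because it is fed
to cds_lfht_next_duplicate(), which now performs that assertion.) A
hedged sketch of the misuse this catches; the table names, the no-op
match callback, and the hash value are illustrative, and the library
must be built with iterator debugging enabled for the assertion to fire:

    #include <urcu.h>             /* default urcu flavor */
    #include <urcu/rculfhash.h>

    static int never_match(struct cds_lfht_node *node, const void *key)
    {
            (void) node;
            (void) key;
            return 0;             /* placeholder: match nothing */
    }

    int main(void)
    {
            struct cds_lfht *ht_a, *ht_b;
            struct cds_lfht_iter iter;

            rcu_register_thread();
            ht_a = cds_lfht_new(1, 1, 0, CDS_LFHT_AUTO_RESIZE, NULL);
            ht_b = cds_lfht_new(1, 1, 0, CDS_LFHT_AUTO_RESIZE, NULL);

            rcu_read_lock();
            /* Prime the iterator on ht_a: iter->lfht = ht_a. */
            cds_lfht_lookup(ht_a, 0, never_match, NULL, &iter);
            /* Debug build: cds_lfht_iter_debug_assert(ht == iter->lfht)
             * fires here, aborting instead of silently walking ht_a. */
            cds_lfht_next(ht_b, &iter);
            rcu_read_unlock();

            rcu_unregister_thread();
            return 0;
    }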
@@ -1068,7 +1089,13 @@ void _cds_lfht_add(struct cds_lfht *ht,
                        if (unique_ret
                            && !is_bucket(next)
                            && clear_flag(iter)->reverse_hash == node->reverse_hash) {
-                               struct cds_lfht_iter d_iter = { .node = node, .next = iter, };
+                               struct cds_lfht_iter d_iter = {
+                                       .node = node,
+                                       .next = iter,
+#ifdef CONFIG_CDS_LFHT_ITER_DEBUG
+                                       .lfht = ht,
+#endif
+                               };
 
                                /*
                                 * uniquely adding inserts the node as the first
@@ -1516,6 +1543,32 @@ void cds_lfht_create_bucket(struct cds_lfht *ht, unsigned long size)
        }
 }
 
+#if (CAA_BITS_PER_LONG > 32)
+/*
+ * For 64-bit architectures, with max number of buckets small enough not to
+ * use the entire 64-bit memory mapping space (and allowing a fair number of
+ * hash table instances), use the mmap allocator, which is faster. Otherwise,
+ * fall back to the order allocator.
+ */
+static
+const struct cds_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets)
+{
+       if (max_nr_buckets && max_nr_buckets <= (1ULL << 32))
+               return &cds_lfht_mm_mmap;
+       else
+               return &cds_lfht_mm_order;
+}
+#else
+/*
+ * For 32-bit architectures, use the order allocator.
+ */
+static
+const struct cds_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets)
+{
+       return &cds_lfht_mm_order;
+}
+#endif
+
 struct cds_lfht *_cds_lfht_new(unsigned long init_size,
                        unsigned long min_nr_alloc_buckets,
                        unsigned long max_nr_buckets,
@@ -1538,26 +1591,8 @@ struct cds_lfht *_cds_lfht_new(unsigned long init_size,
        /*
         * Memory management plugin default.
         */
-       if (!mm) {
-               if (CAA_BITS_PER_LONG > 32
-                               && max_nr_buckets
-                               && max_nr_buckets <= (1ULL << 32)) {
-                       /*
-                        * For 64-bit architectures, with max number of
-                        * buckets small enough not to use the entire
-                        * 64-bit memory mapping space (and allowing a
-                        * fair number of hash table instances), use the
-                        * mmap allocator, which is faster than the
-                        * order allocator.
-                        */
-                       mm = &cds_lfht_mm_mmap;
-               } else {
-                       /*
-                        * The fallback is to use the order allocator.
-                        */
-                       mm = &cds_lfht_mm_order;
-               }
-       }
+       if (!mm)
+               mm = get_mm_type(max_nr_buckets);
 
        /* max_nr_buckets == 0 for order based mm means infinite */
        if (mm == &cds_lfht_mm_order && !max_nr_buckets)
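Callers reach this default through the public constructor, which passes a
NULL mm plugin down to _cds_lfht_new(), so get_mm_type() decides. A short
usage sketch (sizes are illustrative): on a 64-bit build the bounded
max_nr_buckets below selects cds_lfht_mm_mmap, while passing 0 (or
building 32-bit) keeps cds_lfht_mm_order:

    struct cds_lfht *ht;

    ht = cds_lfht_new(1024,        /* init_size */
                      1024,        /* min_nr_alloc_buckets */
                      1UL << 20,   /* max_nr_buckets: bounded => mmap mm on 64-bit */
                      CDS_LFHT_AUTO_RESIZE,
                      NULL);       /* default pthread attrs for worker threads */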
@@ -1600,6 +1635,8 @@ void cds_lfht_lookup(struct cds_lfht *ht, unsigned long hash,
        struct cds_lfht_node *node, *next, *bucket;
        unsigned long reverse_hash, size;
 
+       cds_lfht_iter_debug_set_ht(ht, iter);
+
        reverse_hash = bit_reverse_ulong(hash);
 
        size = rcu_dereference(ht->size);
@@ -1637,6 +1674,7 @@ void cds_lfht_next_duplicate(struct cds_lfht *ht, cds_lfht_match_fct match,
        struct cds_lfht_node *node, *next;
        unsigned long reverse_hash;
 
+       cds_lfht_iter_debug_assert(ht == iter->lfht);
        node = iter->node;
        reverse_hash = node->reverse_hash;
        next = iter->next;
@@ -1668,6 +1706,7 @@ void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 {
        struct cds_lfht_node *node, *next;
 
+       cds_lfht_iter_debug_assert(ht == iter->lfht);
        node = clear_flag(iter->next);
        for (;;) {
                if (caa_unlikely(is_end(node))) {
@@ -1688,6 +1727,7 @@ void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 
 void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 {
+       cds_lfht_iter_debug_set_ht(ht, iter);
        /*
         * Get next after first bucket node. The first bucket node is the
         * first node of the linked list.
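With these hooks in place, the conventional traversal pattern satisfies the
debug checks: cds_lfht_first() stamps the iterator and each cds_lfht_next()
on the same table passes the assertion. A minimal sketch, assuming an
existing table ht and a registered reader thread:

    struct cds_lfht_iter iter;
    struct cds_lfht_node *node;

    rcu_read_lock();
    for (cds_lfht_first(ht, &iter);
         (node = cds_lfht_iter_get_node(&iter)) != NULL;
         cds_lfht_next(ht, &iter)) {
            /* process node, e.g. via caa_container_of() */
    }
    rcu_read_unlock();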