Restrict supported arch to P6+ on 32-bit Intel x86.
diff --git a/urcu-qsbr.c b/urcu-qsbr.c
index c603c1df3e2643939533984d7aee79be7e0b768f..56d86f9e561d6ea8fc372eed83ffc36f35dc358b 100644
--- a/urcu-qsbr.c
+++ b/urcu-qsbr.c
@@ -37,6 +37,8 @@
 /* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
 #include "urcu-qsbr.h"
 
+void __attribute__((destructor)) rcu_exit(void);
+
 static pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 int gp_futex;
@@ -95,39 +97,49 @@ static void internal_urcu_unlock(void)
 /*
  * synchronize_rcu() waiting. Single thread.
  */
-static void wait_gp(struct urcu_reader *index)
+static void wait_gp(void)
 {
-       uatomic_dec(&gp_futex);
-       smp_mb(); /* Write futex before read reader_gp */
-       if (!rcu_gp_ongoing(&index->ctr)) {
-               /* Read reader_gp before write futex */
-               smp_mb();
-               /* Callbacks are queued, don't wait. */
-               uatomic_set(&gp_futex, 0);
-       } else {
-               /* Read reader_gp before read futex */
-               smp_rmb();
-               if (uatomic_read(&gp_futex) == -1)
-                       futex(&gp_futex, FUTEX_WAIT, -1,
-                             NULL, NULL, 0);
-       }
+       /* Read reader_gp before read futex */
+       smp_rmb();
+       if (uatomic_read(&gp_futex) == -1)
+               futex(&gp_futex, FUTEX_WAIT, -1,
+                     NULL, NULL, 0);
 }
 
 static void wait_for_quiescent_state(void)
 {
-       struct urcu_reader *index;
+       LIST_HEAD(qsreaders);
+       int wait_loops = 0;
+       struct urcu_reader *index, *tmp;
 
        if (list_empty(&registry))
                return;
        /*
         * Wait for each thread rcu_reader_qs_gp count to become 0.
         */
-       list_for_each_entry(index, &registry, head) {
-               int wait_loops = 0;
+       for (;;) {
+               wait_loops++;
+               if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+                       uatomic_dec(&gp_futex);
+                       /* Write futex before read reader_gp */
+                       smp_mb();
+               }
 
-               while (rcu_gp_ongoing(&index->ctr)) {
-                       if (wait_loops++ == RCU_QS_ACTIVE_ATTEMPTS) {
-                               wait_gp(index);
+               list_for_each_entry_safe(index, tmp, &registry, head) {
+                       if (!rcu_gp_ongoing(&index->ctr))
+                               list_move(&index->head, &qsreaders);
+               }
+
+               if (list_empty(&registry)) {
+                       if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+                               /* Read reader_gp before write futex */
+                               smp_mb();
+                               uatomic_set(&gp_futex, 0);
+                       }
+                       break;
+               } else {
+                       if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+                               wait_gp();
                        } else {
 #ifndef HAS_INCOHERENT_CACHES
                                cpu_relax();
@@ -137,6 +149,8 @@ static void wait_for_quiescent_state(void)
                        }
                }
        }
+       /* put back the reader list in the registry */
+       list_splice(&qsreaders, &registry);
 }
 
 /*
@@ -264,38 +278,6 @@ void rcu_read_unlock(void)
        _rcu_read_unlock();
 }
 
-void *rcu_dereference(void *p)
-{
-       return _rcu_dereference(p);
-}
-
-void *rcu_assign_pointer_sym(void **p, void *v)
-{
-       wmb();
-       return STORE_SHARED(p, v);
-}
-
-void *rcu_cmpxchg_pointer_sym(void **p, void *old, void *_new)
-{
-       wmb();
-       return uatomic_cmpxchg(p, old, _new);
-}
-
-void *rcu_xchg_pointer_sym(void **p, void *v)
-{
-       wmb();
-       return uatomic_xchg(p, v);
-}
-
-void *rcu_publish_content_sym(void **p, void *v)
-{
-       void *oldptr;
-
-       oldptr = _rcu_xchg_pointer(p, v);
-       synchronize_rcu();
-       return oldptr;
-}
-
 void rcu_quiescent_state(void)
 {
        _rcu_quiescent_state();
@@ -333,3 +315,8 @@ void rcu_unregister_thread(void)
        list_del(&urcu_reader.head);
        internal_urcu_unlock();
 }
+
+void rcu_exit(void)
+{
+       assert(list_empty(&registry));
+}
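
Note: the writer-side change above only works together with a matching reader-side wakeup. Once the grace-period loop has decremented gp_futex and blocked in wait_gp(), a reader that reaches a quiescent state (or goes offline) must reset the futex and wake the writer. That path is not part of this diff; the sketch below is a rough illustration, with a hypothetical helper name wake_up_gp(), reusing the gp_futex variable and the futex()/uatomic wrappers already used in this file.

static inline void wake_up_gp(void)
{
	/* Only act if the writer announced it is (about to go) sleeping. */
	if (uatomic_read(&gp_futex) == -1) {
		uatomic_set(&gp_futex, 0);
		/*
		 * Assumed ordering: the caller has already published its
		 * quiescent state (with the appropriate memory barrier)
		 * before invoking this helper.
		 */
		futex(&gp_futex, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
}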