Ensure LOAD_SHARED/STORE_SHARED semantics are used in QSBR RCU
author    Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
          Wed, 16 Sep 2009 16:40:27 +0000 (12:40 -0400)
committer Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
          Wed, 16 Sep 2009 16:40:27 +0000 (12:40 -0400)
Ensures all volatile accesses are indeed volatile. On architectures without
coherent caches, these accessors will also perform the proper cache flushes
when needed.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
urcu-qsbr-static.h
urcu-qsbr.c
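
For context, LOAD_SHARED()/STORE_SHARED() are liburcu's shared-memory accessors: volatile loads and stores that, on architectures with incoherent caches, also issue the required cache flush, while the underscore-prefixed variants (_LOAD_SHARED()/_STORE_SHARED()) leave the flush to the caller so it can be batched with a memory barrier. The sketch below only illustrates that idea and is not the exact liburcu definition; ACCESS_ONCE(), smp_rmc() and smp_wmc() are assumed helpers.

/*
 * Illustrative sketch only -- the real definitions live in the urcu
 * arch/system headers.
 */
#define ACCESS_ONCE(x)	(*(volatile __typeof__(x) *)&(x))

#ifndef HAS_INCOHERENT_CACHES
#define smp_rmc()	do { } while (0)	/* cache flush: no-op on coherent caches */
#define smp_wmc()	do { } while (0)
#endif

/* Volatile load; the caller is responsible for any cache flush. */
#define _LOAD_SHARED(p)		ACCESS_ONCE(p)

/* Volatile load preceded by a read-side cache flush when required. */
#define LOAD_SHARED(p)		({ smp_rmc(); _LOAD_SHARED(p); })

/* Volatile store; the caller is responsible for the cache flush. */
#define _STORE_SHARED(x, v)	do { ACCESS_ONCE(x) = (v); } while (0)

/* Volatile store followed by a write-side cache flush when required. */
#define STORE_SHARED(x, v)	do { _STORE_SHARED(x, v); smp_wmc(); } while (0)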

index 86ae7d4aca91a6dd0ac998e5bf425b054a152716..b18affeb1be6ea203b7636e5f235011cd01580b7 100644
--- a/urcu-qsbr-static.h
+++ b/urcu-qsbr-static.h
@@ -171,10 +171,12 @@ extern long __thread rcu_reader_qs_gp;
 
 static inline int rcu_gp_ongoing(long *value)
 {
+       long reader_gp;
+
        if (value == NULL)
                return 0;
-
-       return LOAD_SHARED(*value) & 1;
+       reader_gp = LOAD_SHARED(*value);
+       return (reader_gp & 1) && (reader_gp - urcu_gp_ctr < 0);
 }
 
 static inline void _rcu_read_lock(void)
@@ -189,19 +191,19 @@ static inline void _rcu_read_unlock(void)
 static inline void _rcu_quiescent_state(void)
 {
        smp_mb();       
-       rcu_reader_qs_gp = ACCESS_ONCE(urcu_gp_ctr) + 1;
+       _STORE_SHARED(rcu_reader_qs_gp, _LOAD_SHARED(urcu_gp_ctr) + 1);
        smp_mb();
 }
 
 static inline void _rcu_thread_offline(void)
 {
        smp_mb();
-       rcu_reader_qs_gp = 0;
+       STORE_SHARED(rcu_reader_qs_gp, 0);
 }
 
 static inline void _rcu_thread_online(void)
 {
-       rcu_reader_qs_gp = ACCESS_ONCE(urcu_gp_ctr) + 1;
+       _STORE_SHARED(rcu_reader_qs_gp, LOAD_SHARED(urcu_gp_ctr) + 1);
        smp_mb();
 }
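
On the reader side, these inline helpers are what a QSBR reader thread calls (through the public wrappers) to announce quiescent states and to go offline around blocking code; the commit makes those per-thread counter updates real volatile stores that the writer can observe. A minimal, hypothetical reader loop under those assumptions -- rcu_register_thread(), rcu_quiescent_state(), rcu_thread_offline()/rcu_thread_online() and rcu_dereference() are the usual urcu-qsbr entry points, while shared_data and process() are made up for the example:

#include <unistd.h>
#include <urcu-qsbr.h>

struct data;
extern struct data *shared_data;	/* hypothetical RCU-protected pointer */
extern void process(struct data *p);	/* hypothetical consumer */

static void *reader_thread(void *arg)
{
	rcu_register_thread();	/* register with the QSBR grace-period machinery */

	for (;;) {
		struct data *p;

		rcu_read_lock();	/* free in QSBR, kept for clarity */
		p = rcu_dereference(shared_data);
		if (p)
			process(p);
		rcu_read_unlock();

		/*
		 * Announce a quiescent state: with this commit the counter
		 * update is a STORE_SHARED(), i.e. a volatile store (plus
		 * cache flush where needed) that synchronize_rcu() can see.
		 */
		rcu_quiescent_state();

		/* Around blocking calls, mark the thread offline instead. */
		rcu_thread_offline();
		sleep(1);
		rcu_thread_online();
	}

	rcu_unregister_thread();	/* not reached in this sketch */
	return NULL;
}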
 
index ccecab4e7b02b63d80f6c87085486dee0a5452f5..e077eac8f2b4b4bc7c44cadaf0f96d012be60fe3 100644
--- a/urcu-qsbr.c
+++ b/urcu-qsbr.c
@@ -122,8 +122,7 @@ static void wait_for_quiescent_state(void)
         */
        for (index = registry; index < registry + num_readers; index++) {
 #ifndef HAS_INCOHERENT_CACHES
-               while (rcu_gp_ongoing(index->rcu_reader_qs_gp) &&
-                      (*index->rcu_reader_qs_gp - urcu_gp_ctr < 0))
+               while (rcu_gp_ongoing(index->rcu_reader_qs_gp))
                        cpu_relax();
 #else /* #ifndef HAS_INCOHERENT_CACHES */
                int wait_loops = 0;
@@ -131,8 +130,7 @@ static void wait_for_quiescent_state(void)
                 * BUSY-LOOP. Force the reader thread to commit its
                 * rcu_reader_qs_gp update to memory if we wait for too long.
                 */
-               while (rcu_gp_ongoing(index->rcu_reader_qs_gp) &&
-                      (*index->rcu_reader_qs_gp - urcu_gp_ctr < 0)) {
+               while (rcu_gp_ongoing(index->rcu_reader_qs_gp)) {
                        if (wait_loops++ == KICK_READER_LOOPS) {
                                force_mb_single_thread(index);
                                wait_loops = 0;
@@ -160,7 +158,7 @@ void synchronize_rcu(void)
 
        internal_urcu_lock();
        force_mb_all_threads();
-       urcu_gp_ctr += 2;
+       STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr + 2);
        wait_for_quiescent_state();
        force_mb_all_threads();
        internal_urcu_unlock();
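
For reference, the net effect of the header change is that rcu_gp_ongoing() now folds in the grace-period comparison that the two busy-wait loops above used to open-code: a reader only holds up synchronize_rcu() if its counter is odd (online) and its snapshot predates the current urcu_gp_ctr, with the signed subtraction keeping the test usable across counter wraparound (relying on the same two's-complement wrap the original code assumes). A small standalone illustration of the arithmetic, with values invented for the example:

#include <assert.h>

/* Standalone illustration of the QSBR counter test; not liburcu code. */
static int gp_ongoing(long reader_gp, long gp_ctr)
{
	/*
	 * Bit 0 set: the reader is online.  Signed difference < 0: the
	 * reader's snapshot was taken before the current grace period
	 * (the subtraction, rather than a direct <, tolerates wraparound).
	 */
	return (reader_gp & 1) && (reader_gp - gp_ctr < 0);
}

int main(void)
{
	/* urcu_gp_ctr stays even; readers copy gp_ctr + 1 (odd) when online. */
	assert(!gp_ongoing(0, 102));	/* offline reader never delays the GP */
	assert(gp_ongoing(101, 102));	/* online, snapshot from the previous GP */
	assert(!gp_ongoing(103, 102));	/* online, already saw the current GP */
	return 0;
}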