Add Paul's URCU model
[urcu.git] / debug_yield.patch
diff --git a/urcu.c b/urcu.c
index 162ce00..018e09b 100644
--- a/urcu.c
+++ b/urcu.c
@@ -91,24 +91,17 @@ static void force_mb_all_threads(void)
	 */
	if (!reader_data)
		return;
-	debug_yield_write();
	sig_done = 0;
-	debug_yield_write();
	smp_mb();	/* write sig_done before sending the signals */
-	debug_yield_write();
-	for (index = reader_data; index < reader_data + num_readers; index++) {
+	for (index = reader_data; index < reader_data + num_readers; index++)
		pthread_kill(index->tid, SIGURCU);
-		debug_yield_write();
-	}
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 * BUSY-LOOP.
	 */
	while (sig_done < num_readers)
		barrier();
-	debug_yield_write();
	smp_mb();	/* read sig_done before ending the barrier */
-	debug_yield_write();
 }
 #endif
 
@@ -135,13 +128,10 @@ void synchronize_rcu(void)
	 * where new ptr points to. */
	/* Write new ptr before changing the qparity */
	force_mb_all_threads();
-	debug_yield_write();
 
	internal_urcu_lock();
-	debug_yield_write();
 
	switch_next_urcu_qparity();	/* 0 -> 1 */
-	debug_yield_write();
 
	/*
	 * Must commit qparity update to memory before waiting for parity
@@ -155,7 +145,6 @@ void synchronize_rcu(void)
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 0 */
-	debug_yield_write();
 
	/*
	 * Must finish waiting for quiescent state for parity 0 before
@@ -166,7 +155,6 @@ void synchronize_rcu(void)
	smp_mb();
 
	switch_next_urcu_qparity();	/* 1 -> 0 */
-	debug_yield_write();
 
	/*
	 * Must commit qparity update to memory before waiting for parity
@@ -180,17 +168,14 @@ void synchronize_rcu(void)
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 1 */
-	debug_yield_write();
 
	internal_urcu_unlock();
-	debug_yield_write();
 
	/* All threads should finish using the data referred to by old ptr
	 * before decrementing their urcu_active_readers count */
	/* Finish waiting for reader threads before letting the old ptr being
	 * freed. */
	force_mb_all_threads();
-	debug_yield_write();
 }
 
 void urcu_add_reader(pthread_t id)
diff --git a/urcu.h b/urcu.h
index 92b31df..1b663c7 100644
--- a/urcu.h
+++ b/urcu.h
@@ -219,13 +219,11 @@ static inline int rcu_old_gp_ongoing(long *value)
 
	if (value == NULL)
		return 0;
-	debug_yield_write();
	/*
	 * Make sure both tests below are done on the same version of *value
	 * to insure consistency.
	 */
	v = ACCESS_ONCE(*value);
-	debug_yield_write();
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
 }
@@ -234,34 +232,27 @@ static inline void rcu_read_lock(void)
 {
	long tmp;
 
-	debug_yield_read();
	tmp = urcu_active_readers;
-	debug_yield_read();
	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
		urcu_active_readers = urcu_gp_ctr;
	else
		urcu_active_readers = tmp + RCU_GP_COUNT;
-	debug_yield_read();
	/*
	 * Increment active readers count before accessing the pointer.
	 * See force_mb_all_threads().
	 */
	read_barrier();
-	debug_yield_read();
 }
 
 static inline void rcu_read_unlock(void)
 {
-	debug_yield_read();
	read_barrier();
-	debug_yield_read();
	/*
	 * Finish using rcu before decrementing the pointer.
	 * See force_mb_all_threads().
	 */
	urcu_active_readers -= RCU_GP_COUNT;
-	debug_yield_read();
 }
 
 /**
@@ -302,7 +293,6 @@ extern void synchronize_rcu(void);
 #define urcu_publish_content(p, v) \
	({ \
		void *oldptr; \
-		debug_yield_write(); \
		oldptr = rcu_xchg_pointer(p, v); \
		synchronize_rcu(); \
		oldptr; \
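
Note on the removed calls: debug_yield_read() and debug_yield_write() are race-provocation hooks. When yield debugging is enabled, each call site may voluntarily give up the CPU, widening the window for reader/writer interleavings that would otherwise be hard to hit during stress testing. Their definitions are not part of this patch; the C sketch below is only an illustration of the idea, and the YIELD_READ/YIELD_WRITE, yield_active and rand_yield names are assumptions rather than the exact urcu.h contents.

/*
 * Illustrative sketch (not the patched code): conditional yield hooks
 * used to widen race windows while stress-testing the RCU fast paths.
 */
#include <sched.h>
#include <stdlib.h>

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)

/* Which side to perturb, and a per-thread PRNG seed (assumed names). */
unsigned int yield_active;
__thread unsigned int rand_yield;

static inline void debug_yield_read(void)
{
	/* Randomly give up the CPU inside the read-side critical path. */
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			sched_yield();
}

static inline void debug_yield_write(void)
{
	/* Randomly give up the CPU between writer-side steps. */
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			sched_yield();
}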