/*
 * Userspace RCU library
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Distributed under GPLv2
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "urcu.h"

pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_BIT.
 * Also has a RCU_GP_CTR_BIT of 1, to accelerate the reader fast path.
 */
long urcu_gp_ctr = RCU_GP_COUNT;

long __thread urcu_active_readers;
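
/*
 * Reader fast path note (illustrative sketch, not part of this file):
 * rcu_read_lock() in urcu.h is expected to snapshot urcu_gp_ctr into the
 * per-thread urcu_active_readers on the outermost nesting level, and to only
 * bump the nesting count otherwise, roughly as follows (RCU_GP_CTR_NEST_MASK
 * is assumed from urcu.h):
 *
 *	long tmp = urcu_active_readers;
 *	if (!(tmp & RCU_GP_CTR_NEST_MASK))
 *		urcu_active_readers = urcu_gp_ctr;
 *	else
 *		urcu_active_readers = tmp + RCU_GP_COUNT;
 *	barrier();
 */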

/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4

struct reader_data {
	pthread_t tid;
	long *urcu_active_readers;
};

unsigned int yield_active;
unsigned int __thread rand_yield;

static struct reader_data *reader_data;
static int num_readers, alloc_readers;
static int sig_done;	/* incremented by sigurcu_handler(), see below */

void internal_urcu_lock(void)
{
	int ret;
	ret = pthread_mutex_lock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
}

void internal_urcu_unlock(void)
{
	int ret;
	ret = pthread_mutex_unlock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

/*
 * called with urcu_mutex held.
 */
static void switch_next_urcu_qparity(void)
{
	urcu_gp_ctr ^= RCU_GP_CTR_BIT;
}

#ifdef DEBUG_FULL_MB
static void force_mb_single_thread(pthread_t tid)
{
	smp_mb();
}

static void force_mb_all_threads(void)
{
	smp_mb();
}
#else

static void force_mb_single_thread(pthread_t tid)
{
	assert(reader_data);
	sig_done = 0;
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and not assume anything: we use smp_mc() to make sure the
	 * cache flush is enforced.
	 * smp_mb();	write sig_done before sending the signals
	 */
	smp_mc();	/* write sig_done before sending the signals */
	pthread_kill(tid, SIGURCU);
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 * BUSY-LOOP.
	 */
	while (LOAD_SHARED(sig_done) < 1)
		;	/* spin until the signal handler has run */
	smp_mb();	/* read sig_done before ending the barrier */
}

static void force_mb_all_threads(void)
{
	struct reader_data *index;
	/*
	 * Ask each thread to execute an smp_mb() so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (!reader_data)
		return;
	sig_done = 0;
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and not assume anything: we use smp_mc() to make sure the
	 * cache flush is enforced.
	 * smp_mb();	write sig_done before sending the signals
	 */
	smp_mc();	/* write sig_done before sending the signals */
	for (index = reader_data; index < reader_data + num_readers; index++)
		pthread_kill(index->tid, SIGURCU);
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 * BUSY-LOOP.
	 */
	while (LOAD_SHARED(sig_done) < num_readers)
		;	/* spin until every signal handler has run */
	smp_mb();	/* read sig_done before ending the barrier */
}
#endif /* !DEBUG_FULL_MB */
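
/*
 * The smp_mb() requested from each reader thread above is performed by
 * sigurcu_handler() (defined at the end of this file), which then increments
 * sig_done so the caller can wait until every handler has executed.
 */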

void wait_for_quiescent_state(void)
{
	struct reader_data *index;

	if (!reader_data)
		return;
	/*
	 * Wait for each thread urcu_active_readers count to become 0.
	 */
	for (index = reader_data; index < reader_data + num_readers; index++) {
		int wait_loops = 0;
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * urcu_active_readers update to memory if we wait for too long.
		 */
		while (rcu_old_gp_ongoing(index->urcu_active_readers)) {
			if (wait_loops++ == KICK_READER_LOOPS) {
				force_mb_single_thread(index->tid);
			}
		}
	}
}
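
/*
 * Note: rcu_old_gp_ongoing() comes from urcu.h. It is expected to return
 * non-zero while the reader's snapshot shows an ongoing read-side critical
 * section (non-zero nesting count) that started in the parity phase we are
 * waiting on, roughly (RCU_GP_CTR_NEST_MASK assumed from urcu.h):
 *
 *	long v = LOAD_SHARED(*value);
 *	return (v & RCU_GP_CTR_NEST_MASK) &&
 *		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
 */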

void synchronize_rcu(void)
{
	internal_urcu_lock();

	/* All threads should read qparity before accessing data structure
	 * where new ptr points to. Must be done within internal_urcu_lock
	 * because it iterates on reader threads. */
	/* Write new ptr before changing the qparity */
	force_mb_all_threads();

	switch_next_urcu_qparity();	/* 0 -> 1 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 0 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 */
	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 0 */

	/*
	 * Must finish waiting for quiescent state for parity 0 before
	 * committing qparity update to memory. Failure to do so could result
	 * in the writer waiting forever while new readers are always accessing
	 * data (no progress).
	 */
	switch_next_urcu_qparity();	/* 1 -> 0 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 1 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 */
	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 1 */

	/* Finish waiting for reader threads before letting the old ptr be
	 * freed. Must be done within internal_urcu_lock because it iterates on
	 * reader threads. */
	force_mb_all_threads();

	internal_urcu_unlock();
}
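
/*
 * Example writer-side usage (illustrative sketch, not part of this file):
 * publish a new version of a shared structure, wait for a grace period, then
 * reclaim the old copy. rcu_assign_pointer() is assumed to come from urcu.h;
 * struct foo and foo_ptr are hypothetical.
 *
 *	new_fp = malloc(sizeof(*new_fp));
 *	*new_fp = *foo_ptr;
 *	new_fp->value = 42;
 *	old_fp = foo_ptr;
 *	rcu_assign_pointer(foo_ptr, new_fp);	<- publish the new version
 *	synchronize_rcu();			<- wait for pre-existing readers
 *	free(old_fp);				<- now safe to reclaim
 */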

void urcu_add_reader(pthread_t id)
{
	struct reader_data *oldarray;

	if (!reader_data) {
		alloc_readers = INIT_NUM_THREADS;
		num_readers = 0;
		reader_data =
			malloc(sizeof(struct reader_data) * alloc_readers);
	}
	if (alloc_readers < num_readers + 1) {
		oldarray = reader_data;
		reader_data = malloc(sizeof(struct reader_data)
				* (alloc_readers << 1));
		memcpy(reader_data, oldarray,
			sizeof(struct reader_data) * alloc_readers);
		alloc_readers <<= 1;
		free(oldarray);
	}
	reader_data[num_readers].tid = id;
	/* reference to the TLS of _this_ reader thread. */
	reader_data[num_readers].urcu_active_readers = &urcu_active_readers;
	num_readers++;
}

/*
 * Never shrink (implementation limitation).
 * This is O(nb threads). Eventually use a hash table.
 */
void urcu_remove_reader(pthread_t id)
{
	struct reader_data *index;

	assert(reader_data != NULL);
	for (index = reader_data; index < reader_data + num_readers; index++) {
		if (pthread_equal(index->tid, id)) {
			memcpy(index, &reader_data[num_readers - 1],
				sizeof(struct reader_data));
			reader_data[num_readers - 1].tid = 0;
			reader_data[num_readers - 1].urcu_active_readers = NULL;
			num_readers--;
			return;
		}
	}
	/* Hrm not found, forgot to register ? */
	assert(0);
}

void urcu_register_thread(void)
{
	internal_urcu_lock();
	urcu_add_reader(pthread_self());
	internal_urcu_unlock();
}

void urcu_unregister_thread(void)
{
	internal_urcu_lock();
	urcu_remove_reader(pthread_self());
	internal_urcu_unlock();
}
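
/*
 * Example reader-side usage (illustrative sketch, not part of this file):
 * a reader thread registers once, then brackets every access with
 * rcu_read_lock()/rcu_read_unlock() and fetches shared pointers with
 * rcu_dereference(). These three are assumed to come from urcu.h; foo_ptr
 * is hypothetical.
 *
 *	urcu_register_thread();
 *	...
 *	rcu_read_lock();
 *	local_ptr = rcu_dereference(foo_ptr);
 *	if (local_ptr)
 *		use(local_ptr->value);
 *	rcu_read_unlock();
 *	...
 *	urcu_unregister_thread();
 */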

#ifndef DEBUG_FULL_MB
void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this smp_mb() is the only purpose of this signal handler.
	 * It punctually promotes barrier() into smp_mb() on every thread it is
	 * executed on.
	 */
	smp_mb();
	atomic_inc(&sig_done);
}

void __attribute__((constructor)) urcu_init(void)
{
	struct sigaction act;
	int ret;

	act.sa_sigaction = sigurcu_handler;
	act.sa_flags = SA_SIGINFO;	/* use the three-argument sa_sigaction handler */
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGURCU, &act, NULL);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
}

void __attribute__((destructor)) urcu_exit(void)
{
	struct sigaction act;
	int ret;

	ret = sigaction(SIGURCU, NULL, &act);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
	assert(act.sa_sigaction == sigurcu_handler);
}
#endif /* !DEBUG_FULL_MB */