/*
 * urcu.c
 *
 * Userspace RCU library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#include "urcu-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu.h"

pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
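
/*
 * urcu_mutex serializes writers: it protects the reader registry and the
 * grace period counter updates done by synchronize_rcu().
 */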

/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_BIT.
 * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
 * Written to only by writer with mutex taken. Read by both writer and readers.
 */
long urcu_gp_ctr = RCU_GP_COUNT;
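
/*
 * Layout sketch (the exact constants live in urcu-static.h; the split
 * described here is an assumption for illustration): the low-order bits
 * of urcu_gp_ctr carry a nesting count, incremented in steps of
 * RCU_GP_COUNT, while the single RCU_GP_CTR_BIT carries the grace period
 * parity. On its outermost rcu_read_lock(), a reader snapshots
 * urcu_gp_ctr into urcu_active_readers, letting the writer tell which
 * parity each reader belongs to.
 */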

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
long __thread urcu_active_readers;

/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4

struct reader_registry {
	pthread_t tid;
	long *urcu_active_readers;
	char *need_mb;
};

#ifdef DEBUG_YIELD
unsigned int yield_active;
unsigned int __thread rand_yield;
#endif

static struct reader_registry *registry;
static char __thread need_mb;
static int num_readers, alloc_readers;

void internal_urcu_lock(void)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
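	/*
	 * Spin on trylock instead of blocking: the writer holds urcu_mutex
	 * while busy-waiting for every reader to clear its need_mb flag, so
	 * a thread sleeping in pthread_mutex_lock() that misses its signal
	 * could deadlock with it. Servicing need_mb in this loop keeps both
	 * sides progressing even if signals are lost or delayed.
	 */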
	while ((ret = pthread_mutex_trylock(&urcu_mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			fprintf(stderr, "ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		if (need_mb) {
			smp_mb();
			need_mb = 0;
			smp_mb();
		}
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

void internal_urcu_unlock(void)
{
	int ret;

	ret = pthread_mutex_unlock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

/*
 * Called with urcu_mutex held.
 */
static void switch_next_urcu_qparity(void)
{
	STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
}
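
/*
 * Worked example with illustrative values: if RCU_GP_CTR_BIT were 0x10000
 * and urcu_gp_ctr were 0x10001, switch_next_urcu_qparity() would leave
 * 0x00001: the low-order count of 1 is preserved and only the parity bit
 * flips. Readers still carrying a snapshot of the old value stay
 * accounted to the old parity until they leave their critical section.
 */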

#ifdef DEBUG_FULL_MB
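/*
 * Under DEBUG_FULL_MB the read-side primitives are taken to issue real
 * smp_mb()s themselves, so there are no barrier()s to promote remotely:
 * both helpers degenerate to a local smp_mb().
 */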
#ifdef HAS_INCOHERENT_CACHES
static void force_mb_single_thread(struct reader_registry *index)
{
	smp_mb();
}
#endif /* #ifdef HAS_INCOHERENT_CACHES */

static void force_mb_all_threads(void)
{
	smp_mb();
}
#else /* #ifdef DEBUG_FULL_MB */
#ifdef HAS_INCOHERENT_CACHES
static void force_mb_single_thread(struct reader_registry *index)
{
	assert(registry);
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent caches. Let's
	 * play safe and not assume anything: we use smp_mc() to make sure
	 * the cache flush is enforced.
	 */
	*index->need_mb = 1;
	smp_mc();	/* write ->need_mb before sending the signal */
	pthread_kill(index->tid, SIGURCU);
	smp_mb();
	/*
	 * Wait for the sighandler (and thus mb()) to execute on the target
	 * thread. BUSY-LOOP.
	 */
	while (*index->need_mb) {
		poll(NULL, 0, 1);
	}
	smp_mb();	/* read ->need_mb before ending the barrier */
}
#endif /* #ifdef HAS_INCOHERENT_CACHES */

static void force_mb_all_threads(void)
{
	struct reader_registry *index;
	/*
	 * Ask each thread to execute an smp_mb() so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (!registry)
		return;
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent caches. Let's
	 * play safe and not assume anything: we use smp_mc() to make sure
	 * the cache flush is enforced.
	 */
	for (index = registry; index < registry + num_readers; index++) {
		*index->need_mb = 1;
		smp_mc();	/* write need_mb before sending the signal */
		pthread_kill(index->tid, SIGURCU);
	}
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 *
	 * Note that the pthread_kill() will never be executed on systems
	 * that correctly deliver signals in a timely manner. However, it
	 * is not uncommon for kernels to have bugs that can result in
	 * lost or unduly delayed signals.
	 *
	 * If you are seeing the below pthread_kill() executing much at
	 * all, we suggest testing the underlying kernel and filing the
	 * relevant bug report. For Linux kernels, we recommend getting
	 * the Linux Test Project (LTP).
	 */
	for (index = registry; index < registry + num_readers; index++) {
		while (*index->need_mb) {
			pthread_kill(index->tid, SIGURCU);
			poll(NULL, 0, 1);
		}
	}
	smp_mb();	/* read ->need_mb before ending the barrier */
}
#endif /* #else #ifdef DEBUG_FULL_MB */
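
/*
 * Design note: this is an asymmetric barrier scheme. Readers pay only a
 * compiler barrier() in rcu_read_lock()/rcu_read_unlock(); whenever the
 * writer needs those to act as real memory barriers, it promotes them by
 * signalling every registered reader and waiting for each to run smp_mb()
 * in sigurcu_handler(). The writer-side cost buys an almost free reader
 * fast path.
 */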

void wait_for_quiescent_state(void)
{
	struct reader_registry *index;

	if (!registry)
		return;
	/*
	 * Wait until each thread's urcu_active_readers shows that the
	 * thread has left any read-side critical section begun before the
	 * parity flip (see rcu_old_gp_ongoing()).
	 */
	for (index = registry; index < registry + num_readers; index++) {
#ifndef HAS_INCOHERENT_CACHES
		while (rcu_old_gp_ongoing(index->urcu_active_readers))
			cpu_relax();
#else /* #ifndef HAS_INCOHERENT_CACHES */
		int wait_loops = 0;
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * urcu_active_readers update to memory if we wait for too
		 * long.
		 */
		while (rcu_old_gp_ongoing(index->urcu_active_readers)) {
			if (wait_loops++ == KICK_READER_LOOPS) {
				force_mb_single_thread(index);
				wait_loops = 0;
			} else {
				cpu_relax();
			}
		}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
	}
}
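
/*
 * Note: rcu_old_gp_ongoing() lives in urcu-static.h. As used here, it is
 * expected to return true while a reader's snapshot shows a non-zero
 * nesting count taken against the previous parity; a reader outside any
 * critical section, or one that re-snapshotted the flipped urcu_gp_ctr,
 * no longer holds up the grace period.
 */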

void synchronize_rcu(void)
{
	internal_urcu_lock();

	/*
	 * All threads should read qparity before accessing the data
	 * structure pointed to by the new ptr. Must be done within
	 * internal_urcu_lock because it iterates on reader threads.
	 */
	/* Write new ptr before changing the qparity */
	force_mb_all_threads();

	switch_next_urcu_qparity();	/* 0 -> 1 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 0 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 0 */

	/*
	 * Must finish waiting for quiescent state for parity 0 before
	 * committing next qparity update to memory. Failure to do so could
	 * result in the writer waiting forever while new readers are always
	 * accessing data (no progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	switch_next_urcu_qparity();	/* 1 -> 0 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 1 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 1 */

	/*
	 * Finish waiting for reader threads before letting the old ptr be
	 * freed. Must be done within internal_urcu_lock because it iterates
	 * on reader threads.
	 */
	force_mb_all_threads();

	internal_urcu_unlock();
}
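
/*
 * Illustrative writer-side update sequence (struct foo and the shared_foo
 * pointer are hypothetical, not part of this library):
 *
 *	new_foo = malloc(sizeof(*new_foo));
 *	*new_foo = *shared_foo;
 *	new_foo->value = 42;
 *	old_foo = rcu_xchg_pointer(&shared_foo, new_foo);
 *	synchronize_rcu();
 *	free(old_foo);
 *
 * synchronize_rcu() returns only after every reader that may still hold a
 * reference to old_foo has left its read-side critical section, which is
 * what makes the free safe.
 */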

/*
 * Library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

void *rcu_dereference(void *p)
{
	return _rcu_dereference(p);
}

void *rcu_assign_pointer_sym(void **p, void *v)
{
	wmb();
	return STORE_SHARED(*p, v);
}

void *rcu_xchg_pointer_sym(void **p, void *v)
{
	wmb();
	return xchg(p, v);
}

void *rcu_publish_content_sym(void **p, void *v)
{
	void *oldptr;

	oldptr = _rcu_xchg_pointer(p, v);
	synchronize_rcu();
	return oldptr;
}
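
/*
 * Illustrative reader-side usage (struct foo and shared_foo are
 * hypothetical):
 *
 *	rcu_register_thread();
 *	...
 *	rcu_read_lock();
 *	foo = rcu_dereference(shared_foo);
 *	if (foo)
 *		do_something(foo->value);
 *	rcu_read_unlock();
 *	...
 *	rcu_unregister_thread();
 *
 * Every reader thread must register before its first read-side critical
 * section and unregister before exiting, so synchronize_rcu() can find it
 * in the registry below.
 */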

static void rcu_add_reader(pthread_t id)
{
	struct reader_registry *oldarray;

	if (!registry) {
		alloc_readers = INIT_NUM_THREADS;
		num_readers = 0;
		registry =
			malloc(sizeof(struct reader_registry) * alloc_readers);
	}
	if (alloc_readers < num_readers + 1) {
		oldarray = registry;
		registry = malloc(sizeof(struct reader_registry)
				* (alloc_readers << 1));
		memcpy(registry, oldarray,
		       sizeof(struct reader_registry) * alloc_readers);
		alloc_readers <<= 1;
		free(oldarray);
	}
	registry[num_readers].tid = id;
	/* reference to the TLS of _this_ reader thread. */
	registry[num_readers].urcu_active_readers = &urcu_active_readers;
	registry[num_readers].need_mb = &need_mb;
	num_readers++;
}
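
/*
 * Note: the registry grows by doubling and malloc() results are not
 * checked above; a failed allocation would crash the process on the next
 * dereference.
 */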

/*
 * Never shrink (implementation limitation).
 * This is O(nb threads). Eventually use a hash table.
 */
static void rcu_remove_reader(pthread_t id)
{
	struct reader_registry *index;

	assert(registry != NULL);
	for (index = registry; index < registry + num_readers; index++) {
		if (pthread_equal(index->tid, id)) {
			memcpy(index, &registry[num_readers - 1],
				sizeof(struct reader_registry));
			registry[num_readers - 1].tid = 0;
			registry[num_readers - 1].urcu_active_readers = NULL;
			num_readers--;
			return;
		}
	}
	/* Hrm not found, forgot to register? */
	assert(0);
}

void rcu_register_thread(void)
{
	internal_urcu_lock();
	rcu_add_reader(pthread_self());
	internal_urcu_unlock();
}

void rcu_unregister_thread(void)
{
	internal_urcu_lock();
	rcu_remove_reader(pthread_self());
	internal_urcu_unlock();
}

#ifndef DEBUG_FULL_MB
static void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this smp_mb() is the only purpose of this signal
	 * handler. It punctually promotes barrier() into smp_mb() on every
	 * thread it is executed on.
	 */
	smp_mb();
	need_mb = 0;
	smp_mb();
}

void __attribute__((constructor)) urcu_init(void)
{
	struct sigaction act;
	int ret;

	act.sa_sigaction = sigurcu_handler;
	act.sa_flags = SA_SIGINFO;
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGURCU, &act, NULL);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
}

void __attribute__((destructor)) urcu_exit(void)
{
	struct sigaction act;
	int ret;

	ret = sigaction(SIGURCU, NULL, &act);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
	assert(act.sa_sigaction == sigurcu_handler);
	free(registry);
}
#endif /* #ifndef DEBUG_FULL_MB */