/*
 * Userspace RCU library - batch memory reclamation
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <stdio.h>
#include <pthread.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <syscall.h>
#include <unistd.h>

#include "urcu-defer-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu-defer.h"

#define futex(...)	syscall(__NR_futex, __VA_ARGS__)
void __attribute__((destructor)) urcu_defer_exit(void);

extern void synchronize_rcu(void);
/*
 * urcu_defer_mutex nests inside defer_thread_mutex.
 */
static pthread_mutex_t urcu_defer_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t defer_thread_mutex = PTHREAD_MUTEX_INITIALIZER;

static int defer_thread_futex;
/*
 * Written to only by each individual deferer. Read by both the deferer and
 * the reclamation thread.
 */
static struct defer_queue __thread defer_queue;
/* Thread IDs of registered deferers */
#define INIT_NUM_THREADS 4

struct deferer_registry {
	pthread_t tid;
	struct defer_queue *defer_queue;
	unsigned long last_head;
};

static struct deferer_registry *registry;
static int num_deferers, alloc_deferers;

static pthread_t tid_defer;
static void internal_urcu_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		/* Mutex busy or interrupted: back off briefly and retry. */
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
static void internal_urcu_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}
/*
 * Wake-up any waiting defer thread. Called from many concurrent threads.
 */
static void wake_up_defer(void)
{
	if (unlikely(atomic_read(&defer_thread_futex) == -1)) {
		atomic_set(&defer_thread_futex, 0);
		futex(&defer_thread_futex, FUTEX_WAKE, 1,
		      NULL, NULL, 0);
	}
}
static unsigned long rcu_defer_num_callbacks(void)
{
	unsigned long num_items = 0, head;
	struct deferer_registry *index;

	internal_urcu_lock(&urcu_defer_mutex);
	for (index = registry; index < registry + num_deferers; index++) {
		head = LOAD_SHARED(index->defer_queue->head);
		num_items += head - index->defer_queue->tail;
	}
	internal_urcu_unlock(&urcu_defer_mutex);
	return num_items;
}
/*
 * Defer thread waiting. Single thread.
 */
static void wait_defer(void)
{
	atomic_dec(&defer_thread_futex);
	smp_mb();	/* Write futex before read queue */
	if (rcu_defer_num_callbacks()) {
		smp_mb();	/* Read queue before write futex */
		/* Callbacks are queued, don't wait. */
		atomic_set(&defer_thread_futex, 0);
	} else {
		smp_rmb();	/* Read queue before read futex */
		if (atomic_read(&defer_thread_futex) == -1)
			futex(&defer_thread_futex, FUTEX_WAIT, -1,
			      NULL, NULL, 0);
	}
}
/*
 * Must be called after Q.S. is reached.
 */
static void rcu_defer_barrier_queue(struct defer_queue *queue,
				    unsigned long head)
{
	unsigned long i;
	void (*fct)(void *p);
	void *p;

	/*
	 * Tail is only modified when lock is held.
	 * Head is only modified by owner thread.
	 */

	for (i = queue->tail; i != head;) {
		smp_rmb();	/* read head before q[]. */
		p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		if (unlikely(DQ_IS_FCT_BIT(p))) {
			/* Entry carries the fct bit: it encodes a new callback. */
			DQ_CLEAR_FCT_BIT(p);
			queue->last_fct_out = p;
			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		} else if (unlikely(p == DQ_FCT_MARK)) {
			/* Marker: the next entry is the new callback pointer. */
			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
			queue->last_fct_out = p;
			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
		}
		fct = queue->last_fct_out;
		fct(p);
	}
	smp_mb();	/* push tail after having used q[] */
	STORE_SHARED(queue->tail, i);
}
static void _rcu_defer_barrier_thread(void)
{
	unsigned long head, num_items;

	head = defer_queue.head;
	num_items = head - defer_queue.tail;
	if (unlikely(!num_items))
		return;
	synchronize_rcu();
	rcu_defer_barrier_queue(&defer_queue, head);
}
void rcu_defer_barrier_thread(void)
{
	internal_urcu_lock(&urcu_defer_mutex);
	_rcu_defer_barrier_thread();
	internal_urcu_unlock(&urcu_defer_mutex);
}
/*
 * rcu_defer_barrier - Execute all queued rcu callbacks.
 *
 * Execute all RCU callbacks queued before rcu_defer_barrier() execution.
 * All callbacks queued on the local thread prior to a rcu_defer_barrier()
 * call are guaranteed to be executed.
 * Callbacks queued by other threads concurrently with rcu_defer_barrier()
 * execution are not guaranteed to be executed in the current batch (they
 * could be left for the next batch). Callbacks queued by other threads are
 * only guaranteed to be executed if there is explicit synchronization between
 * the thread adding to the queue and the thread issuing the defer_barrier
 * call.
 */
void rcu_defer_barrier(void)
{
	struct deferer_registry *index;
	unsigned long num_items = 0;

	if (likely(!registry))
		return;

	internal_urcu_lock(&urcu_defer_mutex);
	for (index = registry; index < registry + num_deferers; index++) {
		index->last_head = LOAD_SHARED(index->defer_queue->head);
		num_items += index->last_head - index->defer_queue->tail;
	}
	if (likely(!num_items)) {
		/*
		 * We skip the grace period because there are no queued
		 * callbacks to execute.
		 */
		goto end;
	}
	synchronize_rcu();
	for (index = registry; index < registry + num_deferers; index++)
		rcu_defer_barrier_queue(index->defer_queue,
					index->last_head);
end:
	internal_urcu_unlock(&urcu_defer_mutex);
}
/*
 * _rcu_defer_queue - Queue a RCU callback.
 */
void _rcu_defer_queue(void (*fct)(void *p), void *p)
{
	unsigned long head, tail;

	/*
	 * Head is only modified by ourself. Tail can be modified by the
	 * reclamation thread.
	 */
	head = defer_queue.head;
	tail = LOAD_SHARED(defer_queue.tail);

	/*
	 * If the queue is full, empty it ourselves.
	 * Worst case: must allow 2 supplementary entries for the fct pointer.
	 */
	if (unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
		assert(head - tail <= DEFER_QUEUE_SIZE);
		rcu_defer_barrier_thread();
		assert(head - LOAD_SHARED(defer_queue.tail) == 0);
	}

	if (unlikely(defer_queue.last_fct_in != fct)) {
		defer_queue.last_fct_in = fct;
		if (unlikely(DQ_IS_FCT_BIT(fct) || fct == DQ_FCT_MARK)) {
			/*
			 * If the function to encode is not aligned or is the
			 * marker, write DQ_FCT_MARK followed by the function
			 * pointer.
			 */
			_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
				      DQ_FCT_MARK);
			_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
				      fct);
		} else {
			/* Aligned function pointer: tag it with the fct bit. */
			DQ_SET_FCT_BIT(fct);
			_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
				      fct);
		}
	} else {
		if (unlikely(DQ_IS_FCT_BIT(p) || p == DQ_FCT_MARK)) {
			/*
			 * If the data to encode is not aligned or is the
			 * marker, write DQ_FCT_MARK followed by the function
			 * pointer.
			 */
			_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
				      DQ_FCT_MARK);
			_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
				      fct);
		}
	}
	_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
	smp_wmb();	/* Publish new pointer before head */
			/* Write q[] before head. */
	STORE_SHARED(defer_queue.head, head);
	smp_mb();	/* Write queue head before read futex */
	/*
	 * Wake-up any waiting defer thread.
	 */
	wake_up_defer();
}
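
/*
 * Queue encoding at a glance (derived from the stores above; free_a, free_b
 * and p1..p3 are hypothetical): the callback pointer is only written when it
 * changes with respect to last_fct_in, so a run of deferrals using the same
 * callback costs one word per data pointer. Queueing (free_a, p1),
 * (free_a, p2), (free_b, p3) would produce entries roughly like:
 *
 *	q[n]   = free_a, fct bit set	(new callback, via DQ_SET_FCT_BIT)
 *	q[n+1] = p1
 *	q[n+2] = p2			(same callback: data only)
 *	q[n+3] = free_b, fct bit set	(callback changed)
 *	q[n+4] = p3
 *
 * When a pointer happens to collide with the fct bit or with DQ_FCT_MARK,
 * the DQ_FCT_MARK escape sequence written above is used instead.
 */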
void *thr_defer(void *args)
{
	for (;;) {
		pthread_testcancel();
		/*
		 * "Be green". Don't wake up the CPU if there is no RCU work
		 * to perform whatsoever. Aims at saving laptop battery life by
		 * leaving the processor in sleep state when idle.
		 */
		wait_defer();
		/* Sleeping after wait_defer to let many callbacks enqueue */
		poll(NULL, 0, 100);	/* wait for 100 ms */
		rcu_defer_barrier();
	}

	return NULL;
}
/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_defer_queue(void (*fct)(void *p), void *p)
{
	_rcu_defer_queue(fct, p);
}
static void rcu_add_deferer(pthread_t id)
{
	struct deferer_registry *oldarray;

	if (!registry) {
		alloc_deferers = INIT_NUM_THREADS;
		num_deferers = 0;
		registry =
			malloc(sizeof(struct deferer_registry) * alloc_deferers);
	}
	if (alloc_deferers < num_deferers + 1) {
		/* Double the array size, copying over the existing entries. */
		oldarray = registry;
		registry = malloc(sizeof(struct deferer_registry)
				* (alloc_deferers << 1));
		memcpy(registry, oldarray,
			sizeof(struct deferer_registry) * alloc_deferers);
		alloc_deferers <<= 1;
		free(oldarray);
	}
	registry[num_deferers].tid = id;
	/* reference to the TLS of _this_ deferer thread. */
	registry[num_deferers].defer_queue = &defer_queue;
	registry[num_deferers].last_head = 0;
	num_deferers++;
}
/*
 * Never shrink (implementation limitation).
 * This is O(nb threads). Eventually use a hash table.
 */
static void rcu_remove_deferer(pthread_t id)
{
	struct deferer_registry *index;

	assert(registry != NULL);
	for (index = registry; index < registry + num_deferers; index++) {
		if (pthread_equal(index->tid, id)) {
			/* Overwrite the entry with the last one, then clear it. */
			memcpy(index, &registry[num_deferers - 1],
				sizeof(struct deferer_registry));
			registry[num_deferers - 1].tid = 0;
			registry[num_deferers - 1].defer_queue = NULL;
			registry[num_deferers - 1].last_head = 0;
			num_deferers--;
			return;
		}
	}
	/* Hrm not found, forgot to register ? */
	assert(0);
}
static void start_defer_thread(void)
{
	int ret;

	ret = pthread_create(&tid_defer, NULL, thr_defer,
		NULL);
	assert(!ret);
}
static void stop_defer_thread(void)
{
	int ret;
	void *tret;

	pthread_cancel(tid_defer);
	/* Wake the defer thread so it reaches a cancellation point. */
	wake_up_defer();
	ret = pthread_join(tid_defer, &tret);
	assert(!ret);
}
void rcu_defer_register_thread(void)
{
	int deferers;

	internal_urcu_lock(&defer_thread_mutex);
	internal_urcu_lock(&urcu_defer_mutex);
	defer_queue.q = malloc(sizeof(void *) * DEFER_QUEUE_SIZE);
	rcu_add_deferer(pthread_self());
	deferers = num_deferers;
	internal_urcu_unlock(&urcu_defer_mutex);

	/* Start the defer thread when the first deferer registers. */
	if (deferers == 1)
		start_defer_thread();
	internal_urcu_unlock(&defer_thread_mutex);
}
void rcu_defer_unregister_thread(void)
{
	int deferers;

	internal_urcu_lock(&defer_thread_mutex);
	internal_urcu_lock(&urcu_defer_mutex);
	rcu_remove_deferer(pthread_self());
	_rcu_defer_barrier_thread();
	free(defer_queue.q);
	defer_queue.q = NULL;
	deferers = num_deferers;
	internal_urcu_unlock(&urcu_defer_mutex);

	/* Stop the defer thread when the last deferer unregisters. */
	if (deferers == 0)
		stop_defer_thread();
	internal_urcu_unlock(&defer_thread_mutex);
}
void urcu_defer_exit(void)
{
	free(registry);
}