/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu-defer.h"
-void __attribute__((destructor)) urcu_defer_exit(void);
+void __attribute__((destructor)) rcu_defer_exit(void);
extern void synchronize_rcu(void);
/*
- * urcu_defer_mutex nests inside defer_thread_mutex.
+ * rcu_defer_mutex nests inside defer_thread_mutex.
*/
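The nesting rule above can be read directly off the register/unregister paths later in this patch: whenever both locks are needed, defer_thread_mutex is taken first and released last. A minimal sketch of the required ordering (illustration only, not part of the patch):

	mutex_lock(&defer_thread_mutex);
	mutex_lock(&rcu_defer_mutex);
	/* ... manipulate the registry ... */
	mutex_unlock(&rcu_defer_mutex);
	/* start/stop the defer thread while still holding the outer lock */
	mutex_unlock(&defer_thread_mutex);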
-static pthread_mutex_t urcu_defer_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t rcu_defer_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t defer_thread_mutex = PTHREAD_MUTEX_INITIALIZER;
static int defer_thread_futex;
static LIST_HEAD(registry);
static pthread_t tid_defer;
-static void internal_urcu_lock(pthread_mutex_t *mutex)
+static void mutex_lock(pthread_mutex_t *mutex)
{
int ret;
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
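The body of mutex_lock() is elided from this hunk except for the closing #endif. The DISTRUST_SIGNALS_EXTREME guard implies two build variants; a plausible sketch of such a wrapper (an assumption, not the verbatim body; it would need <errno.h>, <poll.h>, <stdio.h> and <stdlib.h>) is:

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	/* Normal build: a failing lock is a fatal programming error. */
	ret = pthread_mutex_lock(mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	/* Paranoid build: spin on trylock rather than blocking, so a
	 * signal cannot leave the thread parked inside the lock call. */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			perror("Error in pthread mutex trylock");
			exit(-1);
		}
		poll(NULL, 0, 10);	/* back off for 10 ms */
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}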
-static void internal_urcu_unlock(pthread_mutex_t *mutex)
+static void mutex_unlock(pthread_mutex_t *mutex)
{
int ret;
unsigned long num_items = 0, head;
struct defer_queue *index;
- internal_urcu_lock(&urcu_defer_mutex);
+ mutex_lock(&rcu_defer_mutex);
list_for_each_entry(index, &registry, list) {
head = LOAD_SHARED(index->head);
num_items += head - index->tail;
}
- internal_urcu_unlock(&urcu_defer_mutex);
+ mutex_unlock(&rcu_defer_mutex);
return num_items;
}
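The pending-callback count is simply head - tail summed over every registered per-thread queue. Both counters are free-running unsigned longs, so the subtraction stays correct even after they wrap; a quick worked example (illustrative values only, assuming <assert.h> and <limits.h>):

	unsigned long head = 5, tail = ULONG_MAX - 2;
	/* head has wrapped past tail; modular arithmetic still yields
	 * the true number of queued items: */
	assert(head - tail == 8);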
void rcu_defer_barrier_thread(void)
{
- internal_urcu_lock(&urcu_defer_mutex);
+ mutex_lock(&rcu_defer_mutex);
_rcu_defer_barrier_thread();
- internal_urcu_unlock(&urcu_defer_mutex);
+ mutex_unlock(&rcu_defer_mutex);
}
/*
if (list_empty(&registry))
return;
- internal_urcu_lock(&urcu_defer_mutex);
+ mutex_lock(&rcu_defer_mutex);
list_for_each_entry(index, &registry, list) {
index->last_head = LOAD_SHARED(index->head);
num_items += index->last_head - index->tail;
list_for_each_entry(index, &registry, list)
rcu_defer_barrier_queue(index, index->last_head);
end:
- internal_urcu_unlock(&urcu_defer_mutex);
+ mutex_unlock(&rcu_defer_mutex);
}
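rcu_defer_barrier(), whose locking is shown above, flushes every registered queue. The classic caller is a shared object that must drain callbacks pointing into its own code before being unmapped; a hedged sketch with a hypothetical destructor name:

/* Hypothetical library teardown: every callback this library queued
 * must have run before its code is unloaded. */
static void __attribute__((destructor)) mylib_fini(void)
{
	rcu_defer_barrier();
}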
/*
tail = LOAD_SHARED(defer_queue.tail);
/*
- * If queue is full, empty it ourself.
+ * If the queue is full, or the flush threshold was reached (sync), empty the queue ourself.
* Worst-case: must allow 2 supplementary entries for the fct pointer.
*/
- if (unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
+ if (unlikely(sync || (head - tail >= DEFER_QUEUE_SIZE - 2))) {
assert(head - tail <= DEFER_QUEUE_SIZE);
rcu_defer_barrier_thread();
assert(head - LOAD_SHARED(defer_queue.tail) == 0);
assert(defer_queue.q == NULL);
defer_queue.q = malloc(sizeof(void *) * DEFER_QUEUE_SIZE);
- internal_urcu_lock(&defer_thread_mutex);
- internal_urcu_lock(&urcu_defer_mutex);
+ mutex_lock(&defer_thread_mutex);
+ mutex_lock(&rcu_defer_mutex);
was_empty = list_empty(&registry);
list_add(&defer_queue.list, ®istry);
- internal_urcu_unlock(&urcu_defer_mutex);
+ mutex_unlock(&rcu_defer_mutex);
if (was_empty)
start_defer_thread();
- internal_urcu_unlock(&defer_thread_mutex);
+ mutex_unlock(&defer_thread_mutex);
}
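From the caller's side, registration plus defer_rcu() replaces an inline synchronize_rcu() on the update path. A hedged sketch, assuming the defer_rcu() entry point from urcu-defer.h and the usual urcu.h pointer macros (gp and new are hypothetical):

	struct node *old;

	old = rcu_dereference(gp);	/* gp: hypothetical global pointer */
	rcu_assign_pointer(gp, new);
	/* free(old) runs after a grace period, batched with other
	 * callbacks instead of paying a synchronize_rcu() here. */
	defer_rcu(free, old);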
void rcu_defer_unregister_thread(void)
{
int is_empty;
- internal_urcu_lock(&defer_thread_mutex);
- internal_urcu_lock(&urcu_defer_mutex);
+ mutex_lock(&defer_thread_mutex);
+ mutex_lock(&rcu_defer_mutex);
list_del(&defer_queue.list);
_rcu_defer_barrier_thread();
free(defer_queue.q);
defer_queue.q = NULL;
is_empty = list_empty(&registry);
- internal_urcu_unlock(&urcu_defer_mutex);
+ mutex_unlock(&rcu_defer_mutex);
if (is_empty)
stop_defer_thread();
- internal_urcu_unlock(&defer_thread_mutex);
+ mutex_unlock(&defer_thread_mutex);
}
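Putting registration and unregistration together, a worker thread's lifecycle looks like the following (a sketch; rcu_register_thread()/rcu_unregister_thread() come from urcu.h):

void *worker(void *arg)
{
	rcu_register_thread();		/* RCU reader registration */
	rcu_defer_register_thread();	/* allocate this thread's queue,
					 * may start the defer thread */
	/* ... reads, updates, defer_rcu() calls ... */
	rcu_defer_unregister_thread();	/* flushes the queue; may stop the
					 * defer thread if registry empties */
	rcu_unregister_thread();
	return NULL;
}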
-void urcu_defer_exit(void)
+void rcu_defer_exit(void)
{
assert(list_empty(&registry));
}