projects
/
urcu.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Cleanup: cast poll() return value to void for delays
[urcu.git]
/
urcu-call-rcu-impl.h
diff --git
a/urcu-call-rcu-impl.h
b/urcu-call-rcu-impl.h
index fb3568f802f210d2015b9a42e9d20691ee59df4c..5cc02d97704514df5e45a7dd459744d7f79eb597 100644
(file)
--- a/
urcu-call-rcu-impl.h
+++ b/
urcu-call-rcu-impl.h
@@ -42,6 +42,7 @@
#include "urcu/list.h"
#include "urcu/futex.h"
#include "urcu/tls-compat.h"
#include "urcu/list.h"
#include "urcu/futex.h"
#include "urcu/tls-compat.h"
+#include "urcu/ref.h"
#include "urcu-die.h"
/* Data structure that identifies a call_rcu thread. */
#include "urcu-die.h"
/* Data structure that identifies a call_rcu thread. */
@@ -67,6 +68,7 @@
struct call_rcu_data {
struct call_rcu_completion {
int barrier_count;
int32_t futex;
struct call_rcu_completion {
int barrier_count;
int32_t futex;
+ struct urcu_ref ref;
};
struct call_rcu_completion_work {
};
struct call_rcu_completion_work {
@@ -307,7 +309,9 @@
static void *call_rcu_thread(void *arg)
cmm_smp_mb__before_uatomic_or();
uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
cmm_smp_mb__before_uatomic_or();
uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
- poll(NULL, 0, 1);
+ (void) poll(NULL, 0, 1);
+ uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSED);
+ cmm_smp_mb__after_uatomic_and();
rcu_register_thread();
}
rcu_register_thread();
}
@@ -337,7 +341,7 @@
static void *call_rcu_thread(void *arg)
if (cds_wfcq_empty(&crdp->cbs_head,
&crdp->cbs_tail)) {
call_rcu_wait(crdp);
if (cds_wfcq_empty(&crdp->cbs_head,
&crdp->cbs_tail)) {
call_rcu_wait(crdp);
-			poll(NULL, 0, 10);
+			(void) poll(NULL, 0, 10);
uatomic_dec(&crdp->futex);
/*
* Decrement futex before reading
uatomic_dec(&crdp->futex);
/*
* Decrement futex before reading
@@ -345,10 +349,10 @@
static void *call_rcu_thread(void *arg)
*/
cmm_smp_mb();
} else {
*/
cmm_smp_mb();
} else {
-				poll(NULL, 0, 10);
+				(void) poll(NULL, 0, 10);
}
} else {
}
} else {
-			poll(NULL, 0, 10);
+			(void) poll(NULL, 0, 10);
}
rcu_thread_online();
}
}
rcu_thread_online();
}
@@ -665,10 +669,10 @@
void call_rcu(struct rcu_head *head,
struct call_rcu_data *crdp;
/* Holding rcu read-side lock across use of per-cpu crdp */
struct call_rcu_data *crdp;
/* Holding rcu read-side lock across use of per-cpu crdp */
-	rcu_read_lock();
+	_rcu_read_lock();
crdp = get_call_rcu_data();
_call_rcu(head, func, crdp);
crdp = get_call_rcu_data();
_call_rcu(head, func, crdp);
-	rcu_read_unlock();
+	_rcu_read_unlock();
}
/*
}
/*
@@ -706,7 +710,7 @@
void call_rcu_data_free(struct call_rcu_data *crdp)
uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
wake_call_rcu_thread(crdp);
while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
wake_call_rcu_thread(crdp);
while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
-		poll(NULL, 0, 1);
+		(void) poll(NULL, 0, 1);
}
if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
/* Create default call rcu data if need be */
}
if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
/* Create default call rcu data if need be */
@@ -766,6 +770,15 @@
void free_all_cpu_call_rcu_data(void)
free(crdp);
}
free(crdp);
}
+static
+void free_completion(struct urcu_ref *ref)
+{
+ struct call_rcu_completion *completion;
+
+ completion = caa_container_of(ref, struct call_rcu_completion, ref);
+ free(completion);
+}
+
static
void _rcu_barrier_complete(struct rcu_head *head)
{
static
void _rcu_barrier_complete(struct rcu_head *head)
{
@@ -774,8 +787,9 @@
void _rcu_barrier_complete(struct rcu_head *head)
work = caa_container_of(head, struct call_rcu_completion_work, head);
completion = work->completion;
work = caa_container_of(head, struct call_rcu_completion_work, head);
completion = work->completion;
- uatomic_dec(&completion->barrier_count);
- call_rcu_completion_wake_up(completion);
+ if (!uatomic_sub_return(&completion->barrier_count, 1))
+ call_rcu_completion_wake_up(completion);
+ urcu_ref_put(&completion->ref, free_completion);
free(work);
}
free(work);
}
@@ -785,19 +799,19 @@
void _rcu_barrier_complete(struct rcu_head *head)
void rcu_barrier(void)
{
struct call_rcu_data *crdp;
void rcu_barrier(void)
{
struct call_rcu_data *crdp;
-	struct call_rcu_completion completion;
-	int count = 0, work_count = 0;
+	struct call_rcu_completion *completion;
+	int count = 0;
int was_online;
/* Put in offline state in QSBR. */
int was_online;
/* Put in offline state in QSBR. */
-	was_online = rcu_read_ongoing();
+	was_online = _rcu_read_ongoing();
if (was_online)
rcu_thread_offline();
/*
* Calling a rcu_barrier() within a RCU read-side critical
* section is an error.
*/
if (was_online)
rcu_thread_offline();
/*
* Calling a rcu_barrier() within a RCU read-side critical
* section is an error.
*/
-	if (rcu_read_ongoing()) {
+	if (_rcu_read_ongoing()) {
static int warned = 0;
if (!warned) {
static int warned = 0;
if (!warned) {
@@ -807,43 +821,41 @@
void rcu_barrier(void)
goto online;
}
goto online;
}
+ completion = calloc(sizeof(*completion), 1);
+ if (!completion)
+ urcu_die(errno);
+
call_rcu_lock(&call_rcu_mutex);
cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
count++;
call_rcu_lock(&call_rcu_mutex);
cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
count++;
- completion.barrier_count = count;
+ /* Referenced by rcu_barrier() and each call_rcu thread. */
+ urcu_ref_set(&completion->ref, count + 1);
+ completion->barrier_count = count;
cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
struct call_rcu_completion_work *work;
work = calloc(sizeof(*work), 1);
cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
struct call_rcu_completion_work *work;
work = calloc(sizeof(*work), 1);
- if (!work) {
- static int warned = 0;
-
- if (!warned) {
- fprintf(stderr, "[error] liburcu: unable to allocate memory for rcu_barrier()\n");
- }
- warned = 1;
- break;
- }
- work->completion = &completion;
+ if (!work)
+ urcu_die(errno);
+ work->completion = completion;
_call_rcu(&work->head, _rcu_barrier_complete, crdp);
_call_rcu(&work->head, _rcu_barrier_complete, crdp);
- work_count++;
}
call_rcu_unlock(&call_rcu_mutex);
}
call_rcu_unlock(&call_rcu_mutex);
- if (work_count != count)
- uatomic_sub(&completion.barrier_count, count - work_count);
-
/* Wait for them */
for (;;) {
/* Wait for them */
for (;;) {
-		uatomic_dec(&completion.futex);
+		uatomic_dec(&completion->futex);
/* Decrement futex before reading barrier_count */
cmm_smp_mb();
/* Decrement futex before reading barrier_count */
cmm_smp_mb();
-		if (!uatomic_read(&completion.barrier_count))
+		if (!uatomic_read(&completion->barrier_count))
break;
break;
-		call_rcu_completion_wait(&completion);
+		call_rcu_completion_wait(completion);
}
}
+
+ urcu_ref_put(&completion->ref, free_completion);
+
online:
if (was_online)
rcu_thread_online();
online:
if (was_online)
rcu_thread_online();
@@ -868,7 +880,7 @@
void call_rcu_before_fork(void)
}
cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
}
cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
-			poll(NULL, 0, 1);
+			(void) poll(NULL, 0, 1);
}
}
}
}
@@ -883,6 +895,10 @@
void call_rcu_after_fork_parent(void)
cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
+ cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
+ while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) != 0)
+ (void) poll(NULL, 0, 1);
+ }
call_rcu_unlock(&call_rcu_mutex);
}
call_rcu_unlock(&call_rcu_mutex);
}
This page took
0.030699 seconds
and
4
git commands to generate.