call_rcu threads should clear their PAUSED flag when they unpause
diff --git a/urcu.c b/urcu.c
index a5178c0054bea5f0363b571d542b98b4d6803f9e..8420ee494cef759bc2b87287168bacddc525315c 100644
--- a/urcu.c
+++ b/urcu.c
@@ -52,9 +52,9 @@
 /*
  * If a reader is really non-cooperative and refuses to commit its
  * rcu_active_readers count to memory (there is no barrier in the reader
- * per-se), kick it after a few loops waiting for it.
+ * per-se), kick it after 10 loops waiting for it.
  */
-#define KICK_READER_LOOPS 10000
+#define KICK_READER_LOOPS 10
 
 /*
  * Active attempts to check for reader Q.S. before calling futex().
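
Note: the constant drops from 10000 to 10 because its meaning changes with this patch. As the last hunk below shows, the kick is no longer driven by the raw busy-loop iteration count but by a new wait_gp_loops counter that only advances on each wait_gp() futex wait, so a much smaller threshold expresses a comparable amount of patience before kicking a non-cooperative reader.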
@@ -97,11 +97,11 @@ unsigned long rcu_gp_ctr = RCU_GP_COUNT;
  * Written to only by each individual reader. Read by both the reader and the
  * writers.
  */
-DEFINE_URCU_TLS(struct rcu_reader, rcu_reader);
+__DEFINE_URCU_TLS_GLOBAL(struct rcu_reader, rcu_reader);
 
 #ifdef DEBUG_YIELD
 unsigned int yield_active;
-DEFINE_URCU_TLS(unsigned int, rand_yield);
+__DEFINE_URCU_TLS_GLOBAL(unsigned int, rand_yield);
 #endif
 
 static CDS_LIST_HEAD(registry);
@@ -218,8 +218,11 @@ static void wait_gp(void)
 void update_counter_and_wait(void)
 {
 	CDS_LIST_HEAD(qsreaders);
-	int wait_loops = 0;
+	unsigned int wait_loops = 0;
 	struct rcu_reader *index, *tmp;
+#ifdef HAS_INCOHERENT_CACHES
+	unsigned int wait_gp_loops = 0;
+#endif /* HAS_INCOHERENT_CACHES */
 
 	/* Switch parity: 0 -> 1, 1 -> 0 */
 	CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
@@ -244,8 +247,9 @@ void update_counter_and_wait(void)
 	 * Wait for each thread URCU_TLS(rcu_reader).ctr count to become 0.
 	 */
 	for (;;) {
-		wait_loops++;
-		if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
+			wait_loops++;
+		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
 			uatomic_dec(&gp_futex);
 			/* Write futex before read reader_gp */
 			smp_mb_master(RCU_MB_GROUP);
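
The two changes above work together: wait_loops becomes unsigned and stops incrementing once it reaches RCU_QS_ACTIVE_ATTEMPTS, and the test becomes >= so the slow path stays engaged on every later iteration. Previously, nothing on the coherent-cache build ever reset the signed counter, so a long enough wait could in principle overflow it, which is undefined behaviour for a signed int in C. A minimal stand-alone sketch of the saturating-counter pattern (illustrative only, not urcu code; ATTEMPTS is a stand-in for RCU_QS_ACTIVE_ATTEMPTS):

/* saturate.c: the capped counter sticks at its threshold instead of
 * wrapping, so "counter >= THRESHOLD" stays true indefinitely. */
#include <stdio.h>

#define ATTEMPTS 100	/* stand-in for RCU_QS_ACTIVE_ATTEMPTS */

int main(void)
{
	unsigned int wait_loops = 0;
	unsigned long iter;

	for (iter = 0; iter < 1000000; iter++) {
		if (wait_loops < ATTEMPTS)
			wait_loops++;	/* saturates; never overflows */
	}
	printf("wait_loops = %u\n", wait_loops);	/* prints 100 */
	return 0;
}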
@@ -258,14 +262,14 @@ void update_counter_and_wait(void)
 
 #ifndef HAS_INCOHERENT_CACHES
 		if (cds_list_empty(&registry)) {
-			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
 				/* Read reader_gp before write futex */
 				smp_mb_master(RCU_MB_GROUP);
 				uatomic_set(&gp_futex, 0);
 			}
 			break;
 		} else {
-			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
+			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
 				wait_gp();
 			else
 				caa_cpu_relax();
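
For context on the gp_futex handshake visible in these hunks: the writer decrements gp_futex to announce that it is about to sleep, wait_gp() blocks on the futex, and once all readers are quiescent the writer resets the value to 0 (readers that exit their critical sections perform the wakeup, which is not part of this diff). A rough stand-alone illustration of that announce/wait/clear/wake shape using the raw Linux futex syscall (hypothetical demo code, not urcu's futex or atomics wrappers; build with -pthread on Linux):

/* futex_sketch.c: a waiter announces itself by decrementing the word,
 * then sleeps while it still reads -1; the waker clears it to 0 and
 * issues FUTEX_WAKE, mirroring the gp_futex protocol above. */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <pthread.h>
#include <stdio.h>

static int gp_futex;	/* 0 = nothing to wait for, -1 = waiter present */

static long futex(int *uaddr, int op, int val)
{
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

static void *waiter(void *arg)
{
	__atomic_fetch_sub(&gp_futex, 1, __ATOMIC_SEQ_CST);	/* announce */
	/* Sleep only while the value is still -1; FUTEX_WAIT rechecks the
	 * word in the kernel, so a wakeup cannot be lost. */
	while (__atomic_load_n(&gp_futex, __ATOMIC_SEQ_CST) == -1)
		futex(&gp_futex, FUTEX_WAIT, -1);
	printf("waiter released\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	usleep(10000);	/* let the waiter block (demo only) */
	__atomic_store_n(&gp_futex, 0, __ATOMIC_SEQ_CST);	/* clear flag */
	futex(&gp_futex, FUTEX_WAKE, 1);	/* wake the sleeper */
	pthread_join(t, NULL);
	return 0;
}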
@@ -277,22 +281,21 @@ void update_counter_and_wait(void)
 		 * for too long.
 		 */
 		if (cds_list_empty(&registry)) {
-			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
 				/* Read reader_gp before write futex */
 				smp_mb_master(RCU_MB_GROUP);
 				uatomic_set(&gp_futex, 0);
 			}
 			break;
 		} else {
-			switch (wait_loops) {
-			case RCU_QS_ACTIVE_ATTEMPTS:
-				wait_gp();
-				break; /* only escape switch */
-			case KICK_READER_LOOPS:
+			if (wait_gp_loops == KICK_READER_LOOPS) {
 				smp_mb_master(RCU_MB_GROUP);
-				wait_loops = 0;
-				break; /* only escape switch */
-			default:
+				wait_gp_loops = 0;
+			}
+			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+				wait_gp();
+				wait_gp_loops++;
+			} else {
 				caa_cpu_relax();
 			}
 		}
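
Putting the pieces together, on HAS_INCOHERENT_CACHES builds the loop now maintains two counters: the saturating wait_loops selects between caa_cpu_relax() spinning and the wait_gp() futex path, while wait_gp_loops counts actual futex waits and forces an smp_mb_master() kick every KICK_READER_LOOPS of them. A toy single-threaded simulation of that cadence (hypothetical scaffolding, not urcu code; constants chosen for the demo):

/* kick_sim.c: shows that the "kick" fires once per KICK_READER_LOOPS
 * futex waits, not per raw loop iteration as in the old switch. */
#include <stdio.h>

#define RCU_QS_ACTIVE_ATTEMPTS 100
#define KICK_READER_LOOPS 10

int main(void)
{
	unsigned int wait_loops = 0, wait_gp_loops = 0;
	unsigned int iter, waits = 0, kicks = 0;

	for (iter = 0; iter < 1000; iter++) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;		/* saturating counter */
		if (wait_gp_loops == KICK_READER_LOOPS) {
			kicks++;		/* stands in for smp_mb_master() */
			wait_gp_loops = 0;
		}
		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
			waits++;		/* stands in for wait_gp() */
			wait_gp_loops++;
		}
		/* else: caa_cpu_relax() in the real code */
	}
	printf("%u waits, %u kicks\n", waits, kicks);	/* 901 waits, 90 kicks */
	return 0;
}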