X-Git-Url: https://git.lttng.org/?p=urcu.git;a=blobdiff_plain;f=urcu.c;h=ac1a4d6c9314499d339fdd2a939479ec96deed5f;hp=1a276ce33663439b7cb128e1ab6bea147faec1ed;hb=bb48818526ec4317f9e6daeb0aa1cd64d528f754;hpb=2bc59bd7ce26378150512280b67599f9004732a5

diff --git a/urcu.c b/urcu.c
index 1a276ce..ac1a4d6 100644
--- a/urcu.c
+++ b/urcu.c
@@ -19,24 +19,33 @@
 
 pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
 
-/* Global quiescent period parity */
-int urcu_qparity;
+/*
+ * Global grace period counter.
+ * Contains the current RCU_GP_CTR_BIT.
+ * Also has a RCU_GP_CTR_BIT of 1, to accelerate the reader fast path.
+ */
+long urcu_gp_ctr = RCU_GP_COUNT;
 
-int __thread urcu_active_readers[2];
+long __thread urcu_active_readers;
 
 /* Thread IDs of registered readers */
 #define INIT_NUM_THREADS 4
 
 struct reader_data {
 	pthread_t tid;
-	int *urcu_active_readers;
+	long *urcu_active_readers;
 };
 
+#ifdef DEBUG_YIELD
+unsigned int yield_active;
+unsigned int __thread rand_yield;
+#endif
+
 static struct reader_data *reader_data;
 static int num_readers, alloc_readers;
 static int sig_done;
 
-void rcu_write_lock(void)
+void internal_urcu_lock(void)
 {
 	int ret;
 	ret = pthread_mutex_lock(&urcu_mutex);
@@ -46,7 +55,7 @@ void rcu_write_lock(void)
 	}
 }
 
-void rcu_write_unlock(void)
+void internal_urcu_unlock(void)
 {
 	int ret;
 
@@ -60,13 +69,17 @@ void rcu_write_unlock(void)
 /*
  * called with urcu_mutex held.
  */
-static int switch_next_urcu_qparity(void)
+static void switch_next_urcu_qparity(void)
 {
-	int old_parity = urcu_qparity;
-	urcu_qparity = 1 - old_parity;
-	return old_parity;
+	urcu_gp_ctr ^= RCU_GP_CTR_BIT;
 }
 
+#ifdef DEBUG_FULL_MB
+static void force_mb_all_threads(void)
+{
+	mb();
+}
+#else
 static void force_mb_all_threads(void)
 {
 	struct reader_data *index;
@@ -76,20 +89,28 @@ static void force_mb_all_threads(void)
 	 */
 	if (!reader_data)
 		return;
+	debug_yield_write();
 	sig_done = 0;
+	debug_yield_write();
 	mb();	/* write sig_done before sending the signals */
-	for (index = reader_data; index < reader_data + num_readers; index++)
+	debug_yield_write();
+	for (index = reader_data; index < reader_data + num_readers; index++) {
 		pthread_kill(index->tid, SIGURCU);
+		debug_yield_write();
+	}
 	/*
 	 * Wait for sighandler (and thus mb()) to execute on every thread.
 	 * BUSY-LOOP.
 	 */
 	while (sig_done < num_readers)
 		barrier();
+	debug_yield_write();
 	mb();	/* read sig_done before ending the barrier */
+	debug_yield_write();
 }
+#endif
 
-void wait_for_quiescent_state(int parity)
+void wait_for_quiescent_state(void)
 {
 	struct reader_data *index;
 
@@ -101,7 +122,7 @@ void wait_for_quiescent_state(int parity)
 		/*
 		 * BUSY-LOOP.
 		 */
-		while (index->urcu_active_readers[parity] != 0)
+		while (rcu_old_gp_ongoing(index->urcu_active_readers))
 			barrier();
 	}
 	/*
@@ -115,52 +136,30 @@
 
 static void switch_qparity(void)
 {
-	int prev_parity;
-
 	/* All threads should read qparity before accessing data structure. */
 	/* Write ptr before changing the qparity */
 	force_mb_all_threads();
-	prev_parity = switch_next_urcu_qparity();
+	debug_yield_write();
+	switch_next_urcu_qparity();
+	debug_yield_write();
 
 	/*
 	 * Wait for previous parity to be empty of readers.
 	 */
-	wait_for_quiescent_state(prev_parity);
+	wait_for_quiescent_state();
 }
 
 void synchronize_rcu(void)
 {
-	rcu_write_lock();
-	switch_qparity();
-	switch_qparity();
-	rcu_write_unlock();
-}
-
-/*
- * Return old pointer, OK to free, no more reference exist.
- * Called under rcu_write_lock.
- */
-void *urcu_publish_content(void **ptr, void *new)
-{
-	void *oldptr;
-
-	/*
-	 * We can publish the new pointer before we change the current qparity.
-	 * Readers seeing the new pointer while being in the previous qparity
-	 * window will make us wait until the end of the quiescent state before
-	 * we release the unrelated memory area. However, given we hold the
-	 * urcu_mutex, we are making sure that no further garbage collection can
-	 * occur until we release the mutex, therefore we guarantee that this
-	 * given reader will have completed its execution using the new pointer
-	 * when the next quiescent state window will be over.
-	 */
-	oldptr = *ptr;
-	*ptr = new;
-
+	debug_yield_write();
+	internal_urcu_lock();
+	debug_yield_write();
 	switch_qparity();
+	debug_yield_write();
 	switch_qparity();
-
-	return oldptr;
+	debug_yield_write();
+	internal_urcu_unlock();
+	debug_yield_write();
 }
 
 void urcu_add_reader(pthread_t id)
@@ -184,7 +183,7 @@
 	}
 	reader_data[num_readers].tid = id;
 	/* reference to the TLS of _this_ reader thread. */
-	reader_data[num_readers].urcu_active_readers = urcu_active_readers;
+	reader_data[num_readers].urcu_active_readers = &urcu_active_readers;
 	num_readers++;
 }
 
@@ -213,18 +212,19 @@
 
 void urcu_register_thread(void)
 {
-	rcu_write_lock();
+	internal_urcu_lock();
 	urcu_add_reader(pthread_self());
-	rcu_write_unlock();
+	internal_urcu_unlock();
 }
 
 void urcu_unregister_thread(void)
 {
-	rcu_write_lock();
+	internal_urcu_lock();
 	urcu_remove_reader(pthread_self());
-	rcu_write_unlock();
+	internal_urcu_unlock();
 }
 
+#ifndef DEBUG_FULL_MB
 void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
 {
 	mb();
@@ -257,3 +257,4 @@ void __attribute__((destructor)) urcu_exit(void)
 	assert(act.sa_sigaction == sigurcu_handler);
 	free(reader_data);
 }
+#endif