fix: handle EINTR correctly in get_cpu_mask_from_sysfs
[urcu.git] / tests / regression / rcutorture.h
index ac5348cd465f004179839b14986e91e0781d7125..441ff79bf349fa301b60e7ee61935e0caa58aba2 100644 (file)
@@ -1,3 +1,7 @@
+// SPDX-FileCopyrightText: 2008 Paul E. McKenney, IBM Corporation.
+//
+// SPDX-License-Identifier: GPL-2.0-or-later
+
 /*
  * rcutorture.h: simple user-level performance/stress test of RCU.
  *
  * data.  A correct RCU implementation will have all but the first two
  * numbers non-zero.
  *
+ * rcu_stress_count: Histogram of "ages" of structures seen by readers.  If any
+ * entries past the first two are non-zero, RCU is broken. The age of a newly
+ * allocated structure is zero; it becomes one when removed from reader
+ * visibility, and is incremented once per grace period subsequently -- and is
+ * freed after passing through (RCU_STRESS_PIPE_LEN-2) grace periods.  Since
+ * this test only has one true writer (there are fake writers), only buckets at
+ * indexes 0 and 1 should be non-zero.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
 #include <stdlib.h>
 #include "tap.h"
 
+#include <urcu/uatomic.h>
+
+#include "urcu-wait.h"
+
 #define NR_TESTS       1
 
 DEFINE_PER_THREAD(long long, n_reads_pt);
@@ -79,6 +95,12 @@ enum callrcu_type {
        CALLRCU_PERTHREAD,
 };
 
+enum writer_state {
+       WRITER_STATE_SYNC_RCU,
+       WRITER_STATE_CALL_RCU,
+       WRITER_STATE_POLL_RCU,
+};
+
 static enum callrcu_type callrcu_type = CALLRCU_GLOBAL;
 
 long long n_reads = 0LL;
@@ -139,10 +161,10 @@ void *rcu_read_perf_test(void *arg)
        run_on(me);
        uatomic_inc(&nthreadsrunning);
        put_thread_offline();
-       while (goflag == GOFLAG_INIT)
+       while (uatomic_read(&goflag) == GOFLAG_INIT)
                (void) poll(NULL, 0, 1);
        put_thread_online();
-       while (goflag == GOFLAG_RUN) {
+       while (uatomic_read(&goflag) == GOFLAG_RUN) {
                for (i = 0; i < RCU_READ_RUN; i++) {
                        rcu_read_lock();
                        /* rcu_read_lock_nest(); */
@@ -174,9 +196,9 @@ void *rcu_update_perf_test(void *arg __attribute__((unused)))
                }
        }
        uatomic_inc(&nthreadsrunning);
-       while (goflag == GOFLAG_INIT)
+       while (uatomic_read(&goflag) == GOFLAG_INIT)
                (void) poll(NULL, 0, 1);
-       while (goflag == GOFLAG_RUN) {
+       while (uatomic_read(&goflag) == GOFLAG_RUN) {
                synchronize_rcu();
                n_updates_local++;
        }
@@ -205,15 +227,11 @@ int perftestrun(int nthreads, int nreaders, int nupdaters)
        int t;
        int duration = 1;
 
-       cmm_smp_mb();
        while (uatomic_read(&nthreadsrunning) < nthreads)
                (void) poll(NULL, 0, 1);
-       goflag = GOFLAG_RUN;
-       cmm_smp_mb();
+       uatomic_set(&goflag, GOFLAG_RUN);
        sleep(duration);
-       cmm_smp_mb();
-       goflag = GOFLAG_STOP;
-       cmm_smp_mb();
+       uatomic_set(&goflag, GOFLAG_STOP);
        wait_all_threads();
        for_each_thread(t) {
                n_reads += per_thread(n_reads_pt, t);
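/*
 * Illustrative sketch, not part of the patch above: the start/stop handshake
 * that the hunks convert to uatomic accessors. The controller publishes
 * goflag transitions with uatomic_set() and every test thread polls the flag
 * with uatomic_read(), making the shared accesses explicit for tools such as
 * TSAN. worker_loop() and controller() are hypothetical names.
 */
#include <poll.h>
#include <unistd.h>

#include <urcu/uatomic.h>

enum { GOFLAG_INIT, GOFLAG_RUN, GOFLAG_STOP };

static int goflag = GOFLAG_INIT;

static void worker_loop(void)
{
	/* Park until the controller starts the run. */
	while (uatomic_read(&goflag) == GOFLAG_INIT)
		(void) poll(NULL, 0, 1);

	/* Do per-iteration test work until the controller stops the run. */
	while (uatomic_read(&goflag) == GOFLAG_RUN) {
		/* ... test body ... */
	}
}

static void controller(unsigned int duration)
{
	uatomic_set(&goflag, GOFLAG_RUN);	/* start all workers */
	sleep(duration);
	uatomic_set(&goflag, GOFLAG_STOP);	/* ask workers to wind down */
}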
@@ -290,10 +308,17 @@ struct rcu_stress {
        int mbtest;
 };
 
-struct rcu_stress rcu_stress_array[RCU_STRESS_PIPE_LEN] = { { 0 } };
+struct rcu_stress rcu_stress_array[RCU_STRESS_PIPE_LEN] = { { 0, 0 } };
 struct rcu_stress *rcu_stress_current;
 int rcu_stress_idx = 0;
 
+/*
+ * How many times a reader has seen something that should not be visible. It
+ * is an error if this value is different from zero at the end of the stress
+ * test.
+ *
+ * Here, the thing that should not be visible is an old pipe that has been
+ * freed (mbtest = 0).
+ */
 int n_mberror = 0;
 DEFINE_PER_THREAD(long long [RCU_STRESS_PIPE_LEN + 1], rcu_stress_count);
 
@@ -309,19 +334,25 @@ void *rcu_read_stress_test(void *arg __attribute__((unused)))
 
        rcu_register_thread();
        put_thread_offline();
-       while (goflag == GOFLAG_INIT)
+       while (uatomic_read(&goflag) == GOFLAG_INIT)
                (void) poll(NULL, 0, 1);
        put_thread_online();
-       while (goflag == GOFLAG_RUN) {
+       while (uatomic_read(&goflag) == GOFLAG_RUN) {
                rcu_read_lock();
                p = rcu_dereference(rcu_stress_current);
                if (p->mbtest == 0)
-                       n_mberror++;
+                       uatomic_inc_mo(&n_mberror, CMM_RELAXED);
                rcu_read_lock_nest();
+               /*
+                * The value of garbage is not important. This is
+                * essentially a busy loop. The atomic operation -- while not
+                * important here -- helps tools such as TSAN avoid flagging
+                * this as a race condition.
+                */
                for (i = 0; i < 100; i++)
-                       garbage++;
+                       uatomic_inc(&garbage);
                rcu_read_unlock_nest();
-               pc = p->pipe_count;
+               pc = uatomic_read(&p->pipe_count);
                rcu_read_unlock();
                if ((pc > RCU_STRESS_PIPE_LEN) || (pc < 0))
                        pc = RCU_STRESS_PIPE_LEN;
@@ -340,34 +371,30 @@ void *rcu_read_stress_test(void *arg __attribute__((unused)))
        return (NULL);
 }
 
-static pthread_mutex_t call_rcu_test_mutex = PTHREAD_MUTEX_INITIALIZER;
-static pthread_cond_t call_rcu_test_cond = PTHREAD_COND_INITIALIZER;
+static DEFINE_URCU_WAIT_QUEUE(call_rcu_waiters);
 
 static
 void rcu_update_stress_test_rcu(struct rcu_head *head __attribute__((unused)))
 {
-       int ret;
+       struct urcu_waiters waiters;
 
-       ret = pthread_mutex_lock(&call_rcu_test_mutex);
-       if (ret) {
-               errno = ret;
-               diag("pthread_mutex_lock: %s",
-                       strerror(errno));
-               abort();
-       }
-       ret = pthread_cond_signal(&call_rcu_test_cond);
-       if (ret) {
-               errno = ret;
-               diag("pthread_cond_signal: %s",
-                       strerror(errno));
-               abort();
-       }
-       ret = pthread_mutex_unlock(&call_rcu_test_mutex);
-       if (ret) {
-               errno = ret;
-               diag("pthread_mutex_unlock: %s",
-                       strerror(errno));
-               abort();
+       urcu_move_waiters(&waiters, &call_rcu_waiters);
+       urcu_wake_all_waiters(&waiters);
+}
+
+static
+void advance_writer_state(enum writer_state *state)
+{
+       switch (*state) {
+       case WRITER_STATE_SYNC_RCU:
+               *state = WRITER_STATE_CALL_RCU;
+               break;
+       case WRITER_STATE_CALL_RCU:
+               *state = WRITER_STATE_POLL_RCU;
+               break;
+       case WRITER_STATE_POLL_RCU:
+               *state = WRITER_STATE_SYNC_RCU;
+               break;
        }
 }
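/*
 * Illustrative sketch, not part of the patch above: the wait/wake handshake
 * that replaces the mutex/condvar pair. The waiting side shown here is the
 * same pattern rcu_update_stress_test() uses further down. It assumes the
 * in-tree urcu-wait.h API exactly as used in this file, and that
 * put_thread_offline()/put_thread_online() are the test-harness wrappers
 * defined elsewhere in rcutorture.h. example_* names are hypothetical.
 */
#include <urcu.h>

#include "urcu-wait.h"

static DEFINE_URCU_WAIT_QUEUE(example_waiters);

/* call_rcu callback: empty the wait queue and wake every parked thread. */
static void example_rcu_cb(struct rcu_head *head __attribute__((unused)))
{
	struct urcu_waiters waiters;

	urcu_move_waiters(&waiters, &example_waiters);
	urcu_wake_all_waiters(&waiters);
}

/* Waiting side: register on the queue, post the callback, then busy-wait. */
static void example_wait_for_cb(struct rcu_head *rh)
{
	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);

	urcu_wait_add(&example_waiters, &wait);
	call_rcu(rh, example_rcu_cb);

	/* Stay out of RCU read-side accounting while spinning. */
	put_thread_offline();
	urcu_adaptative_busy_wait(&wait);
	put_thread_online();
}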
 
@@ -375,73 +402,93 @@ static
 void *rcu_update_stress_test(void *arg __attribute__((unused)))
 {
        int i;
-       struct rcu_stress *p;
+       struct rcu_stress *p, *old_p;
        struct rcu_head rh;
+       enum writer_state writer_state = WRITER_STATE_SYNC_RCU;
 
-       while (goflag == GOFLAG_INIT)
+       rcu_register_thread();
+
+       /* Offline for poll. */
+       put_thread_offline();
+       while (uatomic_read(&goflag) == GOFLAG_INIT)
                (void) poll(NULL, 0, 1);
-       while (goflag == GOFLAG_RUN) {
+       put_thread_online();
+
+       old_p = NULL;
+       while (uatomic_read(&goflag) == GOFLAG_RUN) {
                i = rcu_stress_idx + 1;
                if (i >= RCU_STRESS_PIPE_LEN)
                        i = 0;
+
+               rcu_read_lock();
+               old_p = rcu_dereference(rcu_stress_current);
+               rcu_read_unlock();
+
+               /*
+                * Allocate a new pipe.
+                */
                p = &rcu_stress_array[i];
-               p->mbtest = 0;
-               cmm_smp_mb();
                p->pipe_count = 0;
                p->mbtest = 1;
+
                rcu_assign_pointer(rcu_stress_current, p);
                rcu_stress_idx = i;
+
+               /*
+                * Increment every pipe except the freshly allocated one. A
+                * reader should only see either the old pipe or the new
+                * pipe. This is reflected in the rcu_stress_count histogram.
+                */
                for (i = 0; i < RCU_STRESS_PIPE_LEN; i++)
                        if (i != rcu_stress_idx)
-                               rcu_stress_array[i].pipe_count++;
-               if (n_updates & 0x1)
+                               uatomic_inc(&rcu_stress_array[i].pipe_count);
+
+               switch (writer_state) {
+               case WRITER_STATE_SYNC_RCU:
                        synchronize_rcu();
-               else {
-                       int ret;
-
-                       ret = pthread_mutex_lock(&call_rcu_test_mutex);
-                       if (ret) {
-                               errno = ret;
-                               diag("pthread_mutex_lock: %s",
-                                       strerror(errno));
-                               abort();
-                       }
-                       rcu_register_thread();
+                       break;
+               case WRITER_STATE_CALL_RCU:
+               {
+                       DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
+
+                       urcu_wait_add(&call_rcu_waiters, &wait);
+
                        call_rcu(&rh, rcu_update_stress_test_rcu);
-                       rcu_unregister_thread();
-                       /*
-                        * Our MacOS X test machine with the following
-                        * config:
-                        * 15.6.0 Darwin Kernel Version 15.6.0
-                        * root:xnu-3248.60.10~1/RELEASE_X86_64
-                        * appears to have issues with liburcu-signal
-                        * signal being delivered on top of
-                        * pthread_cond_wait. It seems to make the
-                        * thread continue, and therefore corrupt the
-                        * rcu_head. Work around this issue by
-                        * unregistering the RCU read-side thread
-                        * immediately after call_rcu (call_rcu needs
-                        * us to be registered RCU readers).
-                        */
-                       ret = pthread_cond_wait(&call_rcu_test_cond,
-                                       &call_rcu_test_mutex);
-                       if (ret) {
-                               errno = ret;
-                               diag("pthread_cond_signal: %s",
-                                       strerror(errno));
-                               abort();
-                       }
-                       ret = pthread_mutex_unlock(&call_rcu_test_mutex);
-                       if (ret) {
-                               errno = ret;
-                               diag("pthread_mutex_unlock: %s",
-                                       strerror(errno));
-                               abort();
-                       }
+
+                       /* Offline for busy-wait. */
+                       put_thread_offline();
+                       urcu_adaptative_busy_wait(&wait);
+                       put_thread_online();
+                       break;
+               }
+               case WRITER_STATE_POLL_RCU:
+               {
+                       struct urcu_gp_poll_state poll_state;
+
+                       poll_state = start_poll_synchronize_rcu();
+
+                       /* Offline for poll. */
+                       put_thread_offline();
+                       while (!poll_state_synchronize_rcu(poll_state))
+                               (void) poll(NULL, 0, 1);        /* Wait for 1ms */
+                       put_thread_online();
+                       break;
                }
+               }
+               /*
+                * No reader should see the old pipe now. Set mbtest to 0
+                * to mark it as "freed".
+                */
+               if (old_p) {
+                       old_p->mbtest = 0;
+               }
+               old_p = p;
                n_updates++;
+               advance_writer_state(&writer_state);
        }
 
+       rcu_unregister_thread();
+
        return NULL;
 }
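/*
 * Illustrative sketch, not part of the patch above: the grace-period polling
 * path used by WRITER_STATE_POLL_RCU, assuming the RCU flavour under test
 * exposes start_poll_synchronize_rcu()/poll_state_synchronize_rcu() as used
 * in the hunk. example_poll_grace_period() is a hypothetical name.
 */
#include <poll.h>

#include <urcu.h>

static void example_poll_grace_period(void)
{
	struct urcu_gp_poll_state poll_state;

	/* Snapshot the grace-period state to wait for. */
	poll_state = start_poll_synchronize_rcu();

	/* Sleep in 1 ms slices until that grace period has elapsed. */
	while (!poll_state_synchronize_rcu(poll_state))
		(void) poll(NULL, 0, 1);
}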
 
@@ -457,9 +504,9 @@ void *rcu_fake_update_stress_test(void *arg __attribute__((unused)))
                        set_thread_call_rcu_data(crdp);
                }
        }
-       while (goflag == GOFLAG_INIT)
+       while (uatomic_read(&goflag) == GOFLAG_INIT)
                (void) poll(NULL, 0, 1);
-       while (goflag == GOFLAG_RUN) {
+       while (uatomic_read(&goflag) == GOFLAG_RUN) {
                synchronize_rcu();
                (void) poll(NULL, 0, 1);
        }
@@ -480,6 +527,7 @@ int stresstest(int nreaders)
        int t;
        long long *p;
        long long sum;
+       int ret;
 
        init_per_thread(n_reads_pt, 0LL);
        for_each_thread(t) {
@@ -495,13 +543,9 @@ int stresstest(int nreaders)
        create_thread(rcu_update_stress_test, NULL);
        for (i = 0; i < 5; i++)
                create_thread(rcu_fake_update_stress_test, NULL);
-       cmm_smp_mb();
-       goflag = GOFLAG_RUN;
-       cmm_smp_mb();
+       uatomic_set(&goflag, GOFLAG_RUN);
        sleep(10);
-       cmm_smp_mb();
-       goflag = GOFLAG_STOP;
-       cmm_smp_mb();
+       uatomic_set(&goflag, GOFLAG_STOP);
        wait_all_threads();
        for_each_thread(t)
                n_reads += per_thread(n_reads_pt, t);
@@ -509,11 +553,19 @@ int stresstest(int nreaders)
               n_reads, n_updates, n_mberror);
        rdiag_start();
        rdiag("rcu_stress_count:");
+       ret = 0;
        for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++) {
                sum = 0LL;
                for_each_thread(t) {
                        sum += per_thread(rcu_stress_count, t)[i];
                }
+               /*
+                * If any entries past the first two are non-zero, RCU is
+                * broken. See details above about rcu_stress_count.
+                */
+               if (i > 1 && sum != 0) {
+                       ret = -1;
+               }
                rdiag(" %lld", sum);
        }
        rdiag_end();
@@ -521,10 +573,9 @@ int stresstest(int nreaders)
                diag("Deallocating per-CPU call_rcu threads.");
                free_all_cpu_call_rcu_data();
        }
-       if (!n_mberror)
-               return 0;
-       else
-               return -1;
+       if (n_mberror)
+               ret = -1;
+       return ret;
 }
 
 /*
@@ -532,7 +583,7 @@ int stresstest(int nreaders)
  */
 
 static
-void usage(char *argv[]) __attribute__((noreturn));
+void usage(char *argv[]) __attribute__((__noreturn__));
 
 static
 void usage(char *argv[])