#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/timex.h>
-#include <linux/wbias-rwlock.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <asm/ptrace.h>
+#include <linux/wbias-rwlock.h>
/* Test with no contention duration, in seconds */
#define SINGLE_WRITER_TEST_DURATION 10
#define NR_VARS 100
#define NR_WRITERS 2
#define NR_TRYLOCK_WRITERS 1
-#define NR_READERS 4
+#define NR_NPREADERS 2
#define NR_TRYLOCK_READERS 1
/*
#define NR_TRYLOCK_INTERRUPT_READERS 0
#endif
+/*
+ * 1 : test with thread preemption readers.
+ * 0 : test only with non-preemptable thread readers.
+ */
+#define TEST_PREEMPT 1
+
+#if (TEST_PREEMPT)
+#define NR_PREADERS 2
+#else
+#define NR_PREADERS 0
+#endif
+
+
/*
* Writer iteration delay, in us. 0 for busy loop. Caution : writers can
* starve readers.
#define INTERRUPT_READER_DELAY 100
static int var[NR_VARS];
-static struct task_struct *reader_threads[NR_READERS];
+static struct task_struct *preader_threads[NR_PREADERS];
+static struct task_struct *npreader_threads[NR_NPREADERS];
static struct task_struct *trylock_reader_threads[NR_TRYLOCK_READERS];
static struct task_struct *writer_threads[NR_WRITERS];
static struct task_struct *trylock_writer_threads[NR_TRYLOCK_WRITERS];
#define wrap_read_trylock() read_trylock(&std_rw_lock)
#define wrap_read_unlock() read_unlock(&std_rw_lock)
+#define wrap_read_lock_inatomic() read_lock(&std_rw_lock)
+#define wrap_read_trylock_inatomic() read_trylock(&std_rw_lock)
+#define wrap_read_unlock_inatomic() read_unlock(&std_rw_lock)
+
#define wrap_read_lock_irq() read_lock(&std_rw_lock)
#define wrap_read_trylock_irq() read_trylock(&std_rw_lock)
#define wrap_read_unlock_irq() read_unlock(&std_rw_lock)
#define wrap_read_trylock() wbias_read_trylock(&wbiasrwlock)
#define wrap_read_unlock() wbias_read_unlock(&wbiasrwlock)
+#define wrap_read_lock_inatomic() wbias_read_lock_inatomic(&wbiasrwlock)
+#define wrap_read_trylock_inatomic() \
+ wbias_read_trylock_inatomic(&wbiasrwlock)
+#define wrap_read_unlock_inatomic() \
+ wbias_read_unlock_inatomic(&wbiasrwlock)
+
#define wrap_read_lock_irq() wbias_read_lock_irq(&wbiasrwlock)
#define wrap_read_trylock_irq() wbias_read_trylock_irq(&wbiasrwlock)
#define wrap_read_unlock_irq() wbias_read_unlock_irq(&wbiasrwlock)
#if (TEST_INTERRUPTS)
#define wrap_write_lock() wbias_write_lock_irq(&wbiasrwlock)
#define wrap_write_unlock() wbias_write_unlock_irq(&wbiasrwlock)
+#define wrap_write_trylock_else_subscribe() \
+ wbias_write_trylock_irq_else_subscribe(&wbiasrwlock)
+#define wrap_write_trylock_subscribed() \
+ wbias_write_trylock_irq_subscribed(&wbiasrwlock)
#else
+#if (TEST_PREEMPT)
#define wrap_write_lock() wbias_write_lock(&wbiasrwlock)
#define wrap_write_unlock() wbias_write_unlock(&wbiasrwlock)
+#define wrap_write_trylock_else_subscribe() \
+ wbias_write_trylock_else_subscribe(&wbiasrwlock)
+#define wrap_write_trylock_subscribed() \
+ wbias_write_trylock_subscribed(&wbiasrwlock)
+#else
+#define wrap_write_lock() wbias_write_lock_atomic(&wbiasrwlock)
+#define wrap_write_unlock() wbias_write_unlock_atomic(&wbiasrwlock)
+#define wrap_write_trylock_else_subscribe() \
+ wbias_write_trylock_atomic_else_subscribe(&wbiasrwlock)
+#define wrap_write_trylock_subscribed() \
+ wbias_write_trylock_atomic_subscribed(&wbiasrwlock)
+#endif
#endif
#endif
struct proc_dir_entry *pentry = NULL;
-static int reader_thread(void *data)
+static int p_or_np_reader_thread(const char *typename,
+ void *data, int preemptable)
{
int i;
int prev, cur;
cycles_t time1, time2, delay, delaymax = 0, delaymin = ULLONG_MAX,
delayavg = 0;
- printk("reader_thread/%lu runnning\n", (unsigned long)data);
+ printk("%s/%lu runnning\n", typename, (unsigned long)data);
do {
iter++;
- preempt_disable(); /* for get_cycles accuracy */
+ if (!preemptable)
+ preempt_disable();
rdtsc_barrier();
time1 = get_cycles();
rdtsc_barrier();
- wrap_read_lock();
+ if (!preemptable)
+ wrap_read_lock_inatomic();
+ else
+ wrap_read_lock();
rdtsc_barrier();
time2 = get_cycles();
"in thread\n", cur, prev, i, iter);
}
- wrap_read_unlock();
-
- preempt_enable(); /* for get_cycles accuracy */
+ if (!preemptable)
+ wrap_read_unlock_inatomic();
+ else
+ wrap_read_unlock();
+ if (!preemptable)
+ preempt_enable();
if (THREAD_READER_DELAY)
msleep(THREAD_READER_DELAY);
} while (!kthread_should_stop());
if (!iter) {
- printk("reader_thread/%lu iterations : %lu",
+ printk("%s/%lu iterations : %lu", typename,
(unsigned long)data, iter);
} else {
delayavg /= iter;
- printk("reader_thread/%lu iterations : %lu, "
+ printk("%s/%lu iterations : %lu, "
"lock delay [min,avg,max] %llu,%llu,%llu cycles\n",
+ typename,
(unsigned long)data, iter,
calibrate_cycles(delaymin),
calibrate_cycles(delayavg),
return 0;
}
+/*
+ * Preemptable reader thread entry point.
+ * Delegates to the common reader loop with preemptable = 1, so the loop
+ * takes the plain wrap_read_lock()/wrap_read_unlock() path and leaves
+ * preemption enabled around the critical section.
+ */
+static int preader_thread(void *data)
+{
+	return p_or_np_reader_thread("preader_thread", data, 1);
+}
+
+/*
+ * Non-preemptable reader thread entry point.
+ * Delegates to the common reader loop with preemptable = 0, so the loop
+ * disables preemption (also for get_cycles() accuracy) and uses the
+ * *_inatomic lock/unlock variants.
+ */
+static int npreader_thread(void *data)
+{
+	return p_or_np_reader_thread("npreader_thread", data, 0);
+}
+
static int trylock_reader_thread(void *data)
{
int i;
printk("writer_thread/%lu runnning\n", (unsigned long)data);
do {
iter++;
- preempt_disable(); /* for get_cycles accuracy */
+ //preempt_disable(); /* for get_cycles accuracy */
rdtsc_barrier();
time1 = get_cycles();
rdtsc_barrier();
wrap_write_unlock();
- preempt_enable(); /* for get_cycles accuracy */
+ //preempt_enable(); /* for get_cycles accuracy */
if (WRITER_DELAY > 0)
udelay(WRITER_DELAY);
} while (!kthread_should_stop());
printk("trylock_writer_thread/%lu runnning\n", (unsigned long)data);
do {
iter++;
-#if (TEST_INTERRUPTS)
- if (wbias_write_trylock_irq_else_subscribe(&wbiasrwlock))
-#else
- if (wbias_write_trylock_else_subscribe(&wbiasrwlock))
-#endif
+ if (wrap_write_trylock_else_subscribe())
goto locked;
#if (TRYLOCK_WRITERS_FAIL_ITER == -1)
for (;;) {
iter++;
-#if (TEST_INTERRUPTS)
- if (wbias_write_trylock_irq_subscribed(&wbiasrwlock))
-#else
- if (wbias_write_trylock_subscribed(&wbiasrwlock))
-#endif
+ if (wrap_write_trylock_subscribed())
goto locked;
}
#else
for (i = 0; i < TRYLOCK_WRITERS_FAIL_ITER - 1; i++) {
iter++;
-#if (TEST_INTERRUPTS)
- if (wbias_write_trylock_irq_subscribed(&wbiasrwlock))
-#else
- if (wbias_write_trylock_subscribed(&wbiasrwlock))
-#endif
+ if (wrap_write_trylock_subscribed())
goto locked;
}
#endif
for (i = 0; i < NR_VARS; i++) {
var[i] = new;
}
-#if (TEST_INTERRUPTS)
- wbias_write_unlock_irq(&wbiasrwlock);
-#else
- wbias_write_unlock(&wbiasrwlock);
-#endif
+ wrap_write_unlock();
loop:
if (TRYLOCK_WRITER_DELAY > 0)
udelay(TRYLOCK_WRITER_DELAY);
{
unsigned long i;
- for (i = 0; i < NR_READERS; i++) {
- printk("starting reader thread %lu\n", i);
- reader_threads[i] = kthread_run(reader_thread, (void *)i,
- "wbiasrwlock_reader");
- BUG_ON(!reader_threads[i]);
+ for (i = 0; i < NR_PREADERS; i++) {
+ printk("starting preemptable reader thread %lu\n", i);
+ preader_threads[i] = kthread_run(preader_thread, (void *)i,
+ "wbiasrwlock_preader");
+ BUG_ON(!preader_threads[i]);
+ }
+
+ for (i = 0; i < NR_NPREADERS; i++) {
+ printk("starting non-preemptable reader thread %lu\n", i);
+ npreader_threads[i] = kthread_run(npreader_thread, (void *)i,
+ "wbiasrwlock_npreader");
+ BUG_ON(!npreader_threads[i]);
}
for (i = 0; i < NR_TRYLOCK_READERS; i++) {
kthread_stop(writer_threads[i]);
for (i = 0; i < NR_TRYLOCK_WRITERS; i++)
kthread_stop(trylock_writer_threads[i]);
- for (i = 0; i < NR_READERS; i++)
- kthread_stop(reader_threads[i]);
+ for (i = 0; i < NR_NPREADERS; i++)
+ kthread_stop(npreader_threads[i]);
+ for (i = 0; i < NR_PREADERS; i++)
+ kthread_stop(preader_threads[i]);
for (i = 0; i < NR_TRYLOCK_READERS; i++)
kthread_stop(trylock_reader_threads[i]);
for (i = 0; i < NR_INTERRUPT_READERS; i++)
cycles_calibration_min,
cycles_calibration_avg,
cycles_calibration_max);
+ printk("\n");
printk("** Single writer test, no contention **\n");
wbias_rwlock_profile_latency_reset();
BUG_ON(!writer_threads[0]);
ssleep(SINGLE_WRITER_TEST_DURATION);
kthread_stop(writer_threads[0]);
+ printk("\n");
wbias_rwlock_profile_latency_print();
BUG_ON(!trylock_writer_threads[0]);
ssleep(SINGLE_WRITER_TEST_DURATION);
kthread_stop(trylock_writer_threads[0]);
+ printk("\n");
+
+ wbias_rwlock_profile_latency_print();
+
+ printk("** Single preemptable reader test, no contention **\n");
+ wbias_rwlock_profile_latency_reset();
+ preader_threads[0] = kthread_run(preader_thread, (void *)0,
+ "wbiasrwlock_preader");
+ BUG_ON(!preader_threads[0]);
+ ssleep(SINGLE_READER_TEST_DURATION);
+ kthread_stop(preader_threads[0]);
+ printk("\n");
wbias_rwlock_profile_latency_print();
- printk("** Single reader test, no contention **\n");
+ printk("** Single non-preemptable reader test, no contention **\n");
wbias_rwlock_profile_latency_reset();
- reader_threads[0] = kthread_run(reader_thread, (void *)0,
- "wbiasrwlock_reader");
- BUG_ON(!reader_threads[0]);
+ npreader_threads[0] = kthread_run(npreader_thread, (void *)0,
+ "wbiasrwlock_npreader");
+ BUG_ON(!npreader_threads[0]);
ssleep(SINGLE_READER_TEST_DURATION);
- kthread_stop(reader_threads[0]);
+ kthread_stop(npreader_threads[0]);
+ printk("\n");
wbias_rwlock_profile_latency_print();
- printk("** Multiple readers test, no contention **\n");
+ printk("** Multiple p/non-p readers test, no contention **\n");
wbias_rwlock_profile_latency_reset();
- for (i = 0; i < NR_READERS; i++) {
- printk("starting reader thread %lu\n", i);
- reader_threads[i] = kthread_run(reader_thread, (void *)i,
- "wbiasrwlock_reader");
- BUG_ON(!reader_threads[i]);
+ for (i = 0; i < NR_PREADERS; i++) {
+ printk("starting preader thread %lu\n", i);
+ preader_threads[i] = kthread_run(preader_thread, (void *)i,
+ "wbiasrwlock_preader");
+ BUG_ON(!preader_threads[i]);
+ }
+ for (i = 0; i < NR_NPREADERS; i++) {
+ printk("starting npreader thread %lu\n", i);
+ npreader_threads[i] = kthread_run(npreader_thread, (void *)i,
+ "wbiasrwlock_npreader");
+ BUG_ON(!npreader_threads[i]);
}
ssleep(SINGLE_READER_TEST_DURATION);
- for (i = 0; i < NR_READERS; i++)
- kthread_stop(reader_threads[i]);
+ for (i = 0; i < NR_NPREADERS; i++)
+ kthread_stop(npreader_threads[i]);
+ for (i = 0; i < NR_PREADERS; i++)
+ kthread_stop(preader_threads[i]);
+ printk("\n");
wbias_rwlock_profile_latency_print();
perform_test("wbias-rwlock-create", wbias_rwlock_create);
ssleep(TEST_DURATION);
perform_test("wbias-rwlock-stop", wbias_rwlock_stop);
-
+ printk("\n");
wbias_rwlock_profile_latency_print();
return -EPERM;
if (pentry)
pentry->proc_fops = &my_operations;
- printk("pow2cpus : %lu\n", pow2cpus);
- printk("THREAD_ROFFSET : %lX\n", THREAD_ROFFSET);
- printk("THREAD_RMASK : %lX\n", THREAD_RMASK);
- printk("SOFTIRQ_ROFFSET : %lX\n", SOFTIRQ_ROFFSET);
- printk("SOFTIRQ_RMASK : %lX\n", SOFTIRQ_RMASK);
- printk("HARDIRQ_ROFFSET : %lX\n", HARDIRQ_ROFFSET);
- printk("HARDIRQ_RMASK : %lX\n", HARDIRQ_RMASK);
- printk("SUBSCRIBERS_WOFFSET : %lX\n", SUBSCRIBERS_WOFFSET);
- printk("SUBSCRIBERS_WMASK : %lX\n", SUBSCRIBERS_WMASK);
- printk("WRITER_MUTEX : %lX\n", WRITER_MUTEX);
- printk("SOFTIRQ_WMASK : %lX\n", SOFTIRQ_WMASK);
- printk("HARDIRQ_WMASK : %lX\n", HARDIRQ_WMASK);
+ printk("PTHREAD_ROFFSET : %016lX\n", PTHREAD_ROFFSET);
+ printk("PTHREAD_RMASK : %016lX\n", PTHREAD_RMASK);
+ printk("NPTHREAD_ROFFSET : %016lX\n", NPTHREAD_ROFFSET);
+ printk("NPTHREAD_RMASK : %016lX\n", NPTHREAD_RMASK);
+ printk("SOFTIRQ_ROFFSET : %016lX\n", SOFTIRQ_ROFFSET);
+ printk("SOFTIRQ_RMASK : %016lX\n", SOFTIRQ_RMASK);
+ printk("HARDIRQ_ROFFSET : %016lX\n", HARDIRQ_ROFFSET);
+ printk("HARDIRQ_RMASK : %016lX\n", HARDIRQ_RMASK);
+ printk("PTHREAD_WOFFSET : %016lX\n", PTHREAD_WOFFSET);
+ printk("PTHREAD_WMASK : %016lX\n", PTHREAD_WMASK);
+ printk("NPTHREAD_WOFFSET : %016lX\n", NPTHREAD_WOFFSET);
+ printk("NPTHREAD_WMASK : %016lX\n", NPTHREAD_WMASK);
+ printk("WRITER_MUTEX : %016lX\n", WRITER_MUTEX);
+ printk("SOFTIRQ_WMASK : %016lX\n", SOFTIRQ_WMASK);
+ printk("HARDIRQ_WMASK : %016lX\n", HARDIRQ_WMASK);
return 0;
}