*
 * Userspace RCU library - test program (with batch reclamation)
*
- * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
*/
#define _GNU_SOURCE
+#include "../config.h"
#include <stdio.h>
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
-#include <sys/syscall.h>
#include <sched.h>
#include <errno.h>
#include <urcu/arch.h>
+#include <urcu/tls-compat.h>
+
+#ifdef __linux__
+#include <syscall.h>
+#endif
/* hardcoded number of CPUs */
#define NR_CPUS 16384
/* read-side C.S. duration, in loops */
static unsigned long rduration;
+/* write-side C.S. duration, in loops */
+static unsigned long wduration;
+
static inline void loop_sleep(unsigned long l)
{
while(l-- != 0)
- cpu_relax();
+ caa_cpu_relax();
}
static int verbose_mode;
pthread_mutex_t affinity_mutex = PTHREAD_MUTEX_INITIALIZER;
+#ifndef HAVE_CPU_SET_T
+typedef unsigned long cpu_set_t;
+# define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while(0)
+# define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while(0)
+#endif
+
static void set_affinity(void)
{
cpu_set_t mask;
if (!use_affinity)
return;
+#if HAVE_SCHED_SETAFFINITY
ret = pthread_mutex_lock(&affinity_mutex);
if (ret) {
perror("Error in pthread mutex lock");
perror("Error in pthread mutex unlock");
exit(-1);
}
+
CPU_ZERO(&mask);
CPU_SET(cpu, &mask);
+#if SCHED_SETAFFINITY_ARGS == 2
+ sched_setaffinity(0, &mask);
+#else
sched_setaffinity(0, sizeof(mask), &mask);
+#endif
+#endif /* HAVE_SCHED_SETAFFINITY */
}
/*
return !test_stop;
}
-static unsigned long long __thread nr_writes;
-static unsigned long long __thread nr_reads;
+static DEFINE_URCU_TLS(unsigned long long, nr_writes);
+static DEFINE_URCU_TLS(unsigned long long, nr_reads);
static
-unsigned long long __attribute__((aligned(CACHE_LINE_SIZE))) *tot_nr_writes;
+unsigned long long __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *tot_nr_writes;
static unsigned int nr_readers;
static unsigned int nr_writers;
struct test_array *local_ptr;
printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
- "reader", pthread_self(), (unsigned long)gettid());
+ "reader", (unsigned long) pthread_self(),
+ (unsigned long) gettid());
set_affinity();
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
rcu_read_lock();
debug_yield_read();
if (local_ptr)
assert(local_ptr->a == 8);
- if (unlikely(rduration))
+ if (caa_unlikely(rduration))
loop_sleep(rduration);
rcu_read_unlock();
- nr_reads++;
- if (unlikely(!test_duration_read()))
+ URCU_TLS(nr_reads)++;
+ if (caa_unlikely(!test_duration_read()))
break;
}
rcu_unregister_thread();
- *count = nr_reads;
+ *count = URCU_TLS(nr_reads);
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
- "reader", pthread_self(), (unsigned long)gettid());
+ "reader", (unsigned long) pthread_self(),
+ (unsigned long) gettid());
return ((void*)1);
}
*pending_reclaims[wtidx].head = old;
pending_reclaims[wtidx].head++;
- if (likely(pending_reclaims[wtidx].head - pending_reclaims[wtidx].queue
+ if (caa_likely(pending_reclaims[wtidx].head - pending_reclaims[wtidx].queue
< reclaim_batch))
return;
#endif
printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
- "writer", pthread_self(), (unsigned long)gettid());
+ "writer", (unsigned long) pthread_self(),
+ (unsigned long) gettid());
set_affinity();
while (!test_go)
{
}
- smp_mb();
+ cmm_smp_mb();
for (;;) {
#ifndef TEST_LOCAL_GC
new->a = 8;
old = rcu_xchg_pointer(&test_rcu_pointer, new);
#endif
+ if (caa_unlikely(wduration))
+ loop_sleep(wduration);
rcu_gc_reclaim(wtidx, old);
- nr_writes++;
- if (unlikely(!test_duration_write()))
+ URCU_TLS(nr_writes)++;
+ if (caa_unlikely(!test_duration_write()))
break;
- if (unlikely(wdelay))
+ if (caa_unlikely(wdelay))
loop_sleep(wdelay);
}
printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
- "writer", pthread_self(), (unsigned long)gettid());
- tot_nr_writes[wtidx] = nr_writes;
+ "writer", (unsigned long) pthread_self(),
+ (unsigned long) gettid());
+ tot_nr_writes[wtidx] = URCU_TLS(nr_writes);
return ((void*)2);
}
#endif
printf(" [-d delay] (writer period (us))");
printf(" [-c duration] (reader C.S. duration (in loops))");
+ printf(" [-e duration] (writer C.S. duration (in loops))");
printf(" [-v] (verbose output)");
printf(" [-a cpu#] [-a cpu#]... (affinity)");
printf("\n");
}
wdelay = atol(argv[++i]);
break;
+ case 'e':
+ if (argc < i + 2) {
+ show_usage(argc, argv);
+ return -1;
+ }
+ wduration = atol(argv[++i]);
+ break;
case 'v':
verbose_mode = 1;
break;
printf_verbose("Writer delay : %lu loops.\n", wdelay);
printf_verbose("Reader duration : %lu loops.\n", rduration);
printf_verbose("thread %-6s, thread id : %lx, tid %lu\n",
- "main", pthread_self(), (unsigned long)gettid());
+ "main", (unsigned long) pthread_self(),
+ (unsigned long) gettid());
tid_reader = malloc(sizeof(*tid_reader) * nr_readers);
tid_writer = malloc(sizeof(*tid_writer) * nr_writers);
tot_nr_writes = malloc(sizeof(*tot_nr_writes) * nr_writers);
pending_reclaims = malloc(sizeof(*pending_reclaims) * nr_writers);
if (reclaim_batch * sizeof(*pending_reclaims[i].queue)
- < CACHE_LINE_SIZE)
+ < CAA_CACHE_LINE_SIZE)
for (i = 0; i < nr_writers; i++)
- pending_reclaims[i].queue = calloc(1, CACHE_LINE_SIZE);
+ pending_reclaims[i].queue = calloc(1, CAA_CACHE_LINE_SIZE);
else
for (i = 0; i < nr_writers; i++)
pending_reclaims[i].queue = calloc(reclaim_batch,
exit(1);
}
- smp_mb();
+ cmm_smp_mb();
test_go = 1;
printf_verbose("total number of reads : %llu, writes %llu\n", tot_reads,
tot_writes);
- printf("SUMMARY %-25s testdur %4lu nr_readers %3u rdur %6lu "
+ printf("SUMMARY %-25s testdur %4lu nr_readers %3u rdur %6lu wdur %6lu "
"nr_writers %3u "
"wdelay %6lu nr_reads %12llu nr_writes %12llu nr_ops %12llu "
"batch %u\n",
- argv[0], duration, nr_readers, rduration,
+ argv[0], duration, nr_readers, rduration, wduration,
nr_writers, wdelay, tot_reads, tot_writes,
tot_reads + tot_writes, reclaim_batch);
free(tid_reader);