/*
 * test_qsbr.c
 *
 * Userspace RCU library - test program (QSBR flavor)
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _GNU_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <assert.h>
#include <sys/syscall.h>
#include <sched.h>

#include "arch.h"

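/*
 * gettid() has no glibc wrapper on older systems: use the _syscall0 macro
 * when available, fall back to a raw syscall(), and as a last resort use
 * getpid() (in which case every thread reports the same id, hence the
 * #warning).
 */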
#if defined(_syscall0)
_syscall0(pid_t, gettid)
#elif defined(__NR_gettid)
static inline pid_t gettid(void)
{
	return syscall(__NR_gettid);
}
#else
#warning "use pid as tid"
static inline pid_t gettid(void)
{
	return getpid();
}
#endif

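/*
 * Defining _LGPL_SOURCE before including urcu-qsbr.h exposes the inline
 * (LGPL-licensed) fast paths, so the _rcu_read_lock()/_rcu_read_unlock()/
 * _rcu_quiescent_state() calls below are inlined rather than going through
 * the shared library.
 */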
#define _LGPL_SOURCE
#include "urcu-qsbr.h"

struct test_array {
	int a;
};

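/*
 * Start/stop flags: main() sets test_go once every thread has been created,
 * and sets test_stop after the requested duration.  Threads simply poll
 * these volatile flags.
 */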
static volatile int test_go, test_stop;

static int wdelay;

static struct test_array *test_rcu_pointer;

static unsigned long duration;

/* read-side C.S. duration, in loops */
static unsigned long rduration;

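/*
 * Busy-wait for approximately 'l' iterations.  cpu_relax() comes from
 * arch.h (typically a pause/"rep; nop" hint on x86), so the reader delay is
 * spent spinning on-CPU instead of sleeping, keeping the read-side critical
 * section length under precise control.
 */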
static inline void loop_sleep(unsigned long l)
{
	while (l-- != 0)
		cpu_relax();
}

/*
 * returns 0 if test should end.
 */
static int test_duration_write(void)
{
	return !test_stop;
}

static int test_duration_read(void)
{
	return !test_stop;
}

static unsigned long long __thread nr_writes;
static unsigned long long __thread nr_reads;

static unsigned int nr_readers;
static unsigned int nr_writers;

pthread_mutex_t rcu_copy_mutex = PTHREAD_MUTEX_INITIALIZER;

void rcu_copy_mutex_lock(void)
{
	int ret;
	ret = pthread_mutex_lock(&rcu_copy_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
}

void rcu_copy_mutex_unlock(void)
{
	int ret;

	ret = pthread_mutex_unlock(&rcu_copy_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

/*
 * malloc/free are reusing memory areas too quickly, which does not let us
 * test races appropriately. Use a large circular array for allocations.
 * ARRAY_SIZE is larger than nr_writers, which ensures we never run over our tail.
 */
#define ARRAY_SIZE (1048576 * nr_writers)
#define ARRAY_POISON 0xDEADBEEF
static int array_index;
static struct test_array *test_array;

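/*
 * Allocation hands out slots round-robin under rcu_copy_mutex.  The assert
 * in test_array_alloc() checks that a recycled slot is either never used (0)
 * or was poisoned by test_array_free(), i.e. it is not still published.
 */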
static struct test_array *test_array_alloc(void)
{
	struct test_array *ret;
	int index;

	rcu_copy_mutex_lock();
	index = array_index % ARRAY_SIZE;
	assert(test_array[index].a == ARRAY_POISON ||
	       test_array[index].a == 0);
	ret = &test_array[index];
	array_index++;
	if (array_index == ARRAY_SIZE)
		array_index = 0;
	rcu_copy_mutex_unlock();
	return ret;
}

static void test_array_free(struct test_array *ptr)
{
	if (!ptr)
		return;
	rcu_copy_mutex_lock();
	ptr->a = ARRAY_POISON;
	rcu_copy_mutex_unlock();
}

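/*
 * Reader thread (QSBR flavor): _rcu_read_lock()/_rcu_read_unlock() are
 * essentially free here, since QSBR read sides are delimited implicitly.
 * What matters is that the thread registers itself and periodically
 * announces a quiescent state, which the loop below does every 1024 reads
 * via _rcu_quiescent_state().
 */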
void *thr_reader(void *_count)
{
	unsigned long long *count = _count;
	struct test_array *local_ptr;

	printf("thread_begin %s, thread id : %lx, tid %lu\n",
		"reader", (unsigned long)pthread_self(),
		(unsigned long)gettid());

	rcu_register_thread();

	while (!test_go)
	{
	}
	smp_mb();

	for (;;) {
		_rcu_read_lock();
		local_ptr = _rcu_dereference(test_rcu_pointer);
		debug_yield_read();
		if (local_ptr)
			assert(local_ptr->a == 8);
		if (unlikely(rduration))
			loop_sleep(rduration);
		_rcu_read_unlock();
		nr_reads++;
		/* QS each 1024 reads */
		if (unlikely((nr_reads & ((1 << 10) - 1)) == 0))
			_rcu_quiescent_state();
		if (unlikely(!test_duration_read()))
			break;
	}

	rcu_unregister_thread();

	*count = nr_reads;
	printf("thread_end %s, thread id : %lx, tid %lu\n",
		"reader", (unsigned long)pthread_self(),
		(unsigned long)gettid());
	return ((void*)1);
}

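/*
 * Writer thread: allocate a new element, publish it with
 * _rcu_publish_content() (which, in this library version, swaps in the new
 * pointer and waits for a grace period before returning the old one), then
 * poison and recycle the old element.  rcu_copy_mutex serializes writers.
 */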
void *thr_writer(void *_count)
{
	unsigned long long *count = _count;
	struct test_array *new, *old;

	printf("thread_begin %s, thread id : %lx, tid %lu\n",
		"writer", (unsigned long)pthread_self(),
		(unsigned long)gettid());

	while (!test_go)
	{
	}
	smp_mb();

	for (;;) {
		new = test_array_alloc();
		rcu_copy_mutex_lock();
		old = test_rcu_pointer;
		if (old)
			assert(old->a == 8);
		new->a = 8;
		old = _rcu_publish_content(&test_rcu_pointer, new);
		rcu_copy_mutex_unlock();
		/* can be done after unlock */
		if (old)
			old->a = 0;
		test_array_free(old);
		nr_writes++;
		if (unlikely(!test_duration_write()))
			break;
		if (unlikely(wdelay))
			usleep(wdelay);
	}

	printf("thread_end %s, thread id : %lx, tid %lu\n",
		"writer", (unsigned long)pthread_self(),
		(unsigned long)gettid());
	*count = nr_writes;
	return ((void*)2);
}

void show_usage(int argc, char **argv)
{
	printf("Usage : %s nr_readers nr_writers duration (s)", argv[0]);
#ifdef DEBUG_YIELD
	printf(" [-r] [-w] (yield reader and/or writer)");
#endif
	printf(" [-d delay] (writer period (us))");
	printf(" [-c duration] (reader C.S. duration (in loops))");
	printf(" [-a cpu#] [-a cpu#]... (affinity)");
	printf("\n");
}

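/*
 * Example invocation (values are arbitrary): 4 readers, 2 writers, a
 * 10-second run, 1 us writer delay, 100-loop reader critical sections,
 * all threads pinned to CPUs 0 and 1:
 *
 *   ./test_qsbr 4 2 10 -d 1 -c 100 -a 0 -a 1
 */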
cpu_set_t affinity;

int main(int argc, char **argv)
{
	int err;
	pthread_t *tid_reader, *tid_writer;
	void *tret;
	unsigned long long *count_reader, *count_writer;
	unsigned long long tot_reads = 0, tot_writes = 0;
	int i, a;
	int use_affinity = 0;

	if (argc < 4) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[1], "%u", &nr_readers);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[2], "%u", &nr_writers);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[3], "%lu", &duration);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	CPU_ZERO(&affinity);

	for (i = 4; i < argc; i++) {
		if (argv[i][0] != '-')
			continue;
		switch (argv[i][1]) {
#ifdef DEBUG_YIELD
		case 'r':
			yield_active |= YIELD_READ;
			break;
		case 'w':
			yield_active |= YIELD_WRITE;
			break;
#endif
		case 'a':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			a = atoi(argv[++i]);
			CPU_SET(a, &affinity);
			use_affinity = 1;
			printf("Adding CPU %d affinity\n", a);
			break;
		case 'c':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			rduration = atoi(argv[++i]);
			break;
		case 'd':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			wdelay = atoi(argv[++i]);
			break;
		}
	}

	printf("running test for %lu seconds, %u readers, %u writers.\n",
		duration, nr_readers, nr_writers);
	printf("Writer delay : %d us.\n", wdelay);
	printf("thread %-6s, thread id : %lx, tid %lu\n",
		"main", (unsigned long)pthread_self(), (unsigned long)gettid());

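	/*
	 * Affinity is applied to the whole process before any thread is
	 * created, so reader and writer threads inherit the CPU mask set
	 * with -a.
	 */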
	if (use_affinity
	    && sched_setaffinity(0, sizeof(affinity), &affinity) < 0) {
		perror("sched_setaffinity");
		exit(-1);
	}

	test_array = malloc(sizeof(*test_array) * ARRAY_SIZE);
	tid_reader = malloc(sizeof(*tid_reader) * nr_readers);
	tid_writer = malloc(sizeof(*tid_writer) * nr_writers);
	count_reader = malloc(sizeof(*count_reader) * nr_readers);
	count_writer = malloc(sizeof(*count_writer) * nr_writers);

	for (i = 0; i < nr_readers; i++) {
		err = pthread_create(&tid_reader[i], NULL, thr_reader,
				     &count_reader[i]);
		if (err != 0)
			exit(1);
	}
	for (i = 0; i < nr_writers; i++) {
		err = pthread_create(&tid_writer[i], NULL, thr_writer,
				     &count_writer[i]);
		if (err != 0)
			exit(1);
	}

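	/*
	 * All threads are now spinning on test_go.  The barrier orders the
	 * setup above before releasing them; after 'duration' seconds,
	 * test_stop tells every thread to wind down.
	 */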
	smp_mb();

	test_go = 1;

	sleep(duration);

	test_stop = 1;

	for (i = 0; i < nr_readers; i++) {
		err = pthread_join(tid_reader[i], &tret);
		if (err != 0)
			exit(1);
		tot_reads += count_reader[i];
	}
	for (i = 0; i < nr_writers; i++) {
		err = pthread_join(tid_writer[i], &tret);
		if (err != 0)
			exit(1);
		tot_writes += count_writer[i];
	}

	printf("total number of reads : %llu, writes %llu\n", tot_reads,
	       tot_writes);
	test_array_free(test_rcu_pointer);
	free(test_array);
	free(tid_reader);
	free(tid_writer);
	free(count_reader);
	free(count_writer);
	return 0;
}