uatomic: add memory barrier API for and/or/add/sub/inc/sub
[urcu.git] / tests / test_urcu_timing.c
1 /*
 * test_urcu_timing.c
3 *
4 * Userspace RCU library - test program
5 *
6 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 */
22
23 #include <stdio.h>
24 #include <pthread.h>
25 #include <stdlib.h>
26 #include <string.h>
27 #include <sys/types.h>
28 #include <sys/wait.h>
29 #include <unistd.h>
30 #include <stdio.h>
31 #include <assert.h>
32 #include <errno.h>
33 #include <urcu/arch.h>
34
35 #ifdef __linux__
36 #include <syscall.h>
37 #endif
38
/*
 * gettid() compatibility shim: glibc historically did not export a
 * gettid() wrapper, so build one from whatever the platform provides.
 */
#if defined(_syscall0)
/* Old-style syscall stub generation macro (legacy Linux headers). */
_syscall0(pid_t, gettid)
#elif defined(__NR_gettid)
/* Invoke the raw gettid system call directly via syscall(2). */
static inline pid_t gettid(void)
{
	return syscall(__NR_gettid);
}
#else
#warning "use pid as tid"
/* Fallback: no thread id available, use the process id instead. */
static inline pid_t gettid(void)
{
	return getpid();
}
#endif
53
54 #define _LGPL_SOURCE
55 #include <urcu.h>
56
/* Serializes writer updates to test_rcu_pointer. */
pthread_mutex_t rcu_copy_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Acquire rcu_copy_mutex, terminating the process on failure.
 *
 * pthread_mutex_lock() reports errors through its return value and does
 * NOT set errno, so the error must be printed with strerror(ret);
 * perror() would read an unrelated, stale errno.
 */
void rcu_copy_mutex_lock(void)
{
	int ret;

	ret = pthread_mutex_lock(&rcu_copy_mutex);
	if (ret) {
		fprintf(stderr, "Error in pthread mutex lock: %s\n",
			strerror(ret));
		exit(-1);
	}
}
68
69 void rcu_copy_mutex_unlock(void)
70 {
71 int ret;
72
73 ret = pthread_mutex_unlock(&rcu_copy_mutex);
74 if (ret) {
75 perror("Error in pthread mutex unlock");
76 exit(-1);
77 }
78 }
79
/* Payload published through the RCU-protected pointer.
 * Writers set a = 8 before publication; readers assert a == 8. */
struct test_array {
	int a;
};

/* Shared pointer: exchanged by writers (rcu_xchg_pointer) and
 * dereferenced by readers inside read-side critical sections. */
static struct test_array *test_rcu_pointer;
85
/* Reader iteration counts: cycle timing is taken around the whole
 * outer loop, READ_LOOP is the total number of read-side sections. */
#define OUTER_READ_LOOP 2000U
#define INNER_READ_LOOP 100000U
#define READ_LOOP ((unsigned long long)OUTER_READ_LOOP * INNER_READ_LOOP)

/* Writer iteration counts; WRITE_LOOP is the total number of updates. */
#define OUTER_WRITE_LOOP 10U
#define INNER_WRITE_LOOP 200U
#define WRITE_LOOP ((unsigned long long)OUTER_WRITE_LOOP * INNER_WRITE_LOOP)

/* Thread counts, parsed from argv[1]/argv[2] in main(). */
static int num_read;
static int num_write;

#define NR_READ num_read
#define NR_WRITE num_write

/* Per-thread elapsed-cycle counters, indexed by the thread's arg.
 * NOTE(review): the aligned attribute here applies to the pointer
 * variable itself, not to the malloc'd array it points at — confirm
 * whether cache-line alignment of the data was the actual intent. */
static cycles_t __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *reader_time;
static cycles_t __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *writer_time;
102
103 void *thr_reader(void *arg)
104 {
105 int i, j;
106 struct test_array *local_ptr;
107 cycles_t time1, time2;
108
109 printf("thread_begin %s, thread id : %lx, tid %lu\n",
110 "reader", pthread_self(), (unsigned long)gettid());
111 sleep(2);
112
113 rcu_register_thread();
114
115 time1 = caa_get_cycles();
116 for (i = 0; i < OUTER_READ_LOOP; i++) {
117 for (j = 0; j < INNER_READ_LOOP; j++) {
118 rcu_read_lock();
119 local_ptr = rcu_dereference(test_rcu_pointer);
120 if (local_ptr) {
121 assert(local_ptr->a == 8);
122 }
123 rcu_read_unlock();
124 }
125 }
126 time2 = caa_get_cycles();
127
128 rcu_unregister_thread();
129
130 reader_time[(unsigned long)arg] = time2 - time1;
131
132 sleep(2);
133 printf("thread_end %s, thread id : %lx, tid %lu\n",
134 "reader", pthread_self(), (unsigned long)gettid());
135 return ((void*)1);
136
137 }
138
139 void *thr_writer(void *arg)
140 {
141 int i, j;
142 struct test_array *new, *old;
143 cycles_t time1, time2;
144
145 printf("thread_begin %s, thread id : %lx, tid %lu\n",
146 "writer", pthread_self(), (unsigned long)gettid());
147 sleep(2);
148
149 for (i = 0; i < OUTER_WRITE_LOOP; i++) {
150 for (j = 0; j < INNER_WRITE_LOOP; j++) {
151 time1 = caa_get_cycles();
152 new = malloc(sizeof(struct test_array));
153 rcu_copy_mutex_lock();
154 old = test_rcu_pointer;
155 if (old) {
156 assert(old->a == 8);
157 }
158 new->a = 8;
159 old = rcu_xchg_pointer(&test_rcu_pointer, new);
160 rcu_copy_mutex_unlock();
161 synchronize_rcu();
162 /* can be done after unlock */
163 if (old) {
164 old->a = 0;
165 }
166 free(old);
167 time2 = caa_get_cycles();
168 writer_time[(unsigned long)arg] += time2 - time1;
169 usleep(1);
170 }
171 }
172
173 printf("thread_end %s, thread id : %lx, tid %lu\n",
174 "writer", pthread_self(), (unsigned long)gettid());
175 return ((void*)2);
176 }
177
178 int main(int argc, char **argv)
179 {
180 int err;
181 pthread_t *tid_reader, *tid_writer;
182 void *tret;
183 int i;
184 cycles_t tot_rtime = 0;
185 cycles_t tot_wtime = 0;
186
187 if (argc < 2) {
188 printf("Usage : %s nr_readers nr_writers\n", argv[0]);
189 exit(-1);
190 }
191 num_read = atoi(argv[1]);
192 num_write = atoi(argv[2]);
193
194 reader_time = malloc(sizeof(*reader_time) * num_read);
195 writer_time = malloc(sizeof(*writer_time) * num_write);
196 tid_reader = malloc(sizeof(*tid_reader) * num_read);
197 tid_writer = malloc(sizeof(*tid_writer) * num_write);
198
199 printf("thread %-6s, thread id : %lx, tid %lu\n",
200 "main", pthread_self(), (unsigned long)gettid());
201
202 for (i = 0; i < NR_READ; i++) {
203 err = pthread_create(&tid_reader[i], NULL, thr_reader,
204 (void *)(long)i);
205 if (err != 0)
206 exit(1);
207 }
208 for (i = 0; i < NR_WRITE; i++) {
209 err = pthread_create(&tid_writer[i], NULL, thr_writer,
210 (void *)(long)i);
211 if (err != 0)
212 exit(1);
213 }
214
215 sleep(10);
216
217 for (i = 0; i < NR_READ; i++) {
218 err = pthread_join(tid_reader[i], &tret);
219 if (err != 0)
220 exit(1);
221 tot_rtime += reader_time[i];
222 }
223 for (i = 0; i < NR_WRITE; i++) {
224 err = pthread_join(tid_writer[i], &tret);
225 if (err != 0)
226 exit(1);
227 tot_wtime += writer_time[i];
228 }
229 free(test_rcu_pointer);
230 printf("Time per read : %g cycles\n",
231 (double)tot_rtime / ((double)NR_READ * (double)READ_LOOP));
232 printf("Time per write : %g cycles\n",
233 (double)tot_wtime / ((double)NR_WRITE * (double)WRITE_LOOP));
234
235 free(reader_time);
236 free(writer_time);
237 free(tid_reader);
238 free(tid_writer);
239
240 return 0;
241 }
This page took 0.036554 seconds and 4 git commands to generate.