Using AM_PROG_MKDIR_P for compatibility
[urcu.git] / tests / test_perthreadlock_timing.c
CommitLineData
102d1d23
MD
1/*
 2 * test_perthreadlock_timing.c
3 *
4 * Per thread locks - test program
5 *
6 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 */
22
23#include <stdio.h>
24#include <pthread.h>
25#include <stdlib.h>
26#include <string.h>
27#include <sys/types.h>
28#include <sys/wait.h>
29#include <unistd.h>
30#include <stdio.h>
31#include <assert.h>
32#include <sys/syscall.h>
33#include <pthread.h>
833dbdb6 34
ec4e58a3 35#include <urcu/arch.h>
102d1d23
MD
36
/*
 * gettid() compatibility shim: glibc historically provided no wrapper for
 * the gettid syscall, so generate or emulate one depending on what the
 * toolchain offers.
 */
#if defined(_syscall0)
/* Legacy kernel-header macro expands to a gettid() wrapper. */
_syscall0(pid_t, gettid)
#elif defined(__NR_gettid)
/* Invoke the raw syscall directly by number. */
static inline pid_t gettid(void)
{
	return syscall(__NR_gettid);
}
#else
#warning "use pid as tid"
/* Last resort: the process id stands in for the thread id. */
static inline pid_t gettid(void)
{
	return getpid();
}
#endif
51
ec4e58a3 52#include <urcu.h>
102d1d23
MD
53
struct test_array {
	int a;
};

/* Shared data read by all readers; writers re-store the value 8. */
static struct test_array test_array = { 8 };

struct per_thread_lock {
	pthread_mutex_t lock;
} __attribute__((aligned(CACHE_LINE_SIZE)));	/* cache-line aligned */

/* One mutex per reader thread; a writer must acquire all of them. */
static struct per_thread_lock *per_thread_lock;

/* Reader iteration counts; READ_LOOP is the total reads per thread. */
#define OUTER_READ_LOOP 200U
#define INNER_READ_LOOP 100000U
#define READ_LOOP ((unsigned long long)OUTER_READ_LOOP * INNER_READ_LOOP)

/* Writer iteration counts; WRITE_LOOP is the total writes per thread. */
#define OUTER_WRITE_LOOP 10U
#define INNER_WRITE_LOOP 200U
#define WRITE_LOOP ((unsigned long long)OUTER_WRITE_LOOP * INNER_WRITE_LOOP)

/* Thread counts, set from argv in main(). */
static int num_read;
static int num_write;

#define NR_READ num_read
#define NR_WRITE num_write

/* Per-thread cycle totals, cache-line aligned; summed by main() at exit. */
static cycles_t __attribute__((aligned(CACHE_LINE_SIZE))) *reader_time;
static cycles_t __attribute__((aligned(CACHE_LINE_SIZE))) *writer_time;
102d1d23
MD
82
83void *thr_reader(void *arg)
84{
85 int i, j;
86 cycles_t time1, time2;
87 long tidx = (long)arg;
88
89 printf("thread_begin %s, thread id : %lx, tid %lu\n",
90 "reader", pthread_self(), (unsigned long)gettid());
91 sleep(2);
92
93 time1 = get_cycles();
94 for (i = 0; i < OUTER_READ_LOOP; i++) {
95 for (j = 0; j < INNER_READ_LOOP; j++) {
96 pthread_mutex_lock(&per_thread_lock[tidx].lock);
97 assert(test_array.a == 8);
98 pthread_mutex_unlock(&per_thread_lock[tidx].lock);
99 }
100 }
101 time2 = get_cycles();
102
103 reader_time[tidx] = time2 - time1;
104
105 sleep(2);
106 printf("thread_end %s, thread id : %lx, tid %lu\n",
107 "reader", pthread_self(), (unsigned long)gettid());
108 return ((void*)1);
109
110}
111
112void *thr_writer(void *arg)
113{
114 int i, j;
115 long tidx;
116 cycles_t time1, time2;
117
118 printf("thread_begin %s, thread id : %lx, tid %lu\n",
119 "writer", pthread_self(), (unsigned long)gettid());
120 sleep(2);
121
122 for (i = 0; i < OUTER_WRITE_LOOP; i++) {
123 for (j = 0; j < INNER_WRITE_LOOP; j++) {
124 time1 = get_cycles();
125 for (tidx = 0; tidx < NR_READ; tidx++) {
126 pthread_mutex_lock(&per_thread_lock[tidx].lock);
127 }
128 test_array.a = 8;
129 for (tidx = NR_READ - 1; tidx >= 0; tidx--) {
130 pthread_mutex_unlock(&per_thread_lock[tidx].lock);
131 }
132 time2 = get_cycles();
133 writer_time[(unsigned long)arg] += time2 - time1;
fc606a74 134 usleep(1);
102d1d23 135 }
102d1d23
MD
136 }
137
138 printf("thread_end %s, thread id : %lx, tid %lu\n",
139 "writer", pthread_self(), (unsigned long)gettid());
140 return ((void*)2);
141}
142
2c9689fe 143int main(int argc, char **argv)
102d1d23
MD
144{
145 int err;
2c9689fe 146 pthread_t *tid_reader, *tid_writer;
102d1d23
MD
147 void *tret;
148 int i;
149 cycles_t tot_rtime = 0;
150 cycles_t tot_wtime = 0;
151
2c9689fe
MD
152 if (argc < 2) {
153 printf("Usage : %s nr_readers nr_writers\n", argv[0]);
154 exit(-1);
155 }
156 num_read = atoi(argv[1]);
157 num_write = atoi(argv[2]);
158
159 reader_time = malloc(sizeof(*reader_time) * num_read);
160 writer_time = malloc(sizeof(*writer_time) * num_write);
161 tid_reader = malloc(sizeof(*tid_reader) * num_read);
162 tid_writer = malloc(sizeof(*tid_writer) * num_write);
163
102d1d23
MD
164 printf("thread %-6s, thread id : %lx, tid %lu\n",
165 "main", pthread_self(), (unsigned long)gettid());
166
167 per_thread_lock = malloc(sizeof(struct per_thread_lock) * NR_READ);
168
169 for (i = 0; i < NR_READ; i++) {
170 pthread_mutex_init(&per_thread_lock[i].lock, NULL);
171 }
172 for (i = 0; i < NR_READ; i++) {
173 err = pthread_create(&tid_reader[i], NULL, thr_reader,
174 (void *)(long)i);
175 if (err != 0)
176 exit(1);
177 }
178 for (i = 0; i < NR_WRITE; i++) {
179 err = pthread_create(&tid_writer[i], NULL, thr_writer,
180 (void *)(long)i);
181 if (err != 0)
182 exit(1);
183 }
184
185 sleep(10);
186
187 for (i = 0; i < NR_READ; i++) {
188 err = pthread_join(tid_reader[i], &tret);
189 if (err != 0)
190 exit(1);
191 tot_rtime += reader_time[i];
192 }
193 for (i = 0; i < NR_WRITE; i++) {
194 err = pthread_join(tid_writer[i], &tret);
195 if (err != 0)
196 exit(1);
197 tot_wtime += writer_time[i];
198 }
199 printf("Time per read : %g cycles\n",
200 (double)tot_rtime / ((double)NR_READ * (double)READ_LOOP));
201 printf("Time per write : %g cycles\n",
202 (double)tot_wtime / ((double)NR_WRITE * (double)WRITE_LOOP));
203 free(per_thread_lock);
204
2c9689fe
MD
205 free(reader_time);
206 free(writer_time);
207 free(tid_reader);
208 free(tid_writer);
209
102d1d23
MD
210 return 0;
211}
This page took 0.030734 seconds and 4 git commands to generate.