Rename likely/unlikely to caa_likely/caa_unlikely
[urcu.git] / tests / api.h

#ifndef _INCLUDE_API_H
#define _INCLUDE_API_H

#include "../config.h"

/*
 * common.h: Common Linux kernel-isms.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; but version 2 of the License only due
 * to code included from the Linux kernel.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 *
 * Much code taken from the Linux kernel.  For such code, the option
 * to redistribute under later versions of GPL might not be available.
 */

#include <urcu/compiler.h>
#include <urcu/arch.h>

/*
 * Machine parameters.
 */

#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(CAA_CACHE_LINE_SIZE)))

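/*
 * Usage sketch (illustrative, not part of the original header): pad a
 * per-thread counter out to a full cache line so that concurrent
 * writers do not false-share.  The struct and array names are made up.
 */
#if 0
struct pad_counter {
	unsigned long count;
} ____cacheline_internodealigned_in_smp;

struct pad_counter pad_counters[16];	/* 16 slots: arbitrary example size */
#endif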

/*
 * api_pthreads.h: API mapping to pthreads environment.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.  However, please note that much
 * of the code in this file derives from the Linux kernel, and that such
 * code may not be available except under GPLv2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 */

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/time.h>	/* gettimeofday(), used by get_microseconds() below */
#define __USE_GNU
#include <pthread.h>
#include <sched.h>
#include <sys/param.h>
/* #include "atomic.h" */

/*
 * Exclusive locking primitives.
 */

typedef pthread_mutex_t spinlock_t;

#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER;
#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER

static void spin_lock_init(spinlock_t *sp)
{
	if (pthread_mutex_init(sp, NULL) != 0) {
		perror("spin_lock_init:pthread_mutex_init");
		exit(-1);
	}
}

static void spin_lock(spinlock_t *sp)
{
	if (pthread_mutex_lock(sp) != 0) {
		perror("spin_lock:pthread_mutex_lock");
		exit(-1);
	}
}

static void spin_unlock(spinlock_t *sp)
{
	if (pthread_mutex_unlock(sp) != 0) {
		perror("spin_unlock:pthread_mutex_unlock");
		exit(-1);
	}
}

#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0)
#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0)

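/*
 * Usage sketch (illustrative, not part of the original header): the
 * pthread-backed "spinlock" protecting a shared counter.  The names
 * count_lock, shared_count and add_count() are made up.
 */
#if 0
DEFINE_SPINLOCK(count_lock)
unsigned long shared_count;

static void add_count(unsigned long delta)
{
	spin_lock(&count_lock);
	shared_count += delta;
	spin_unlock(&count_lock);
}
#endif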

/*
 * Thread creation/destruction primitives.
 */

typedef pthread_t thread_id_t;

#define NR_THREADS 128

#define __THREAD_ID_MAP_EMPTY ((thread_id_t) 0)
#define __THREAD_ID_MAP_WAITING ((thread_id_t) 1)
thread_id_t __thread_id_map[NR_THREADS];
spinlock_t __thread_id_map_mutex;

#define for_each_thread(t) \
	for (t = 0; t < NR_THREADS; t++)

#define for_each_running_thread(t) \
	for (t = 0; t < NR_THREADS; t++) \
		if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \
		    (__thread_id_map[t] != __THREAD_ID_MAP_WAITING))

#define for_each_tid(t, tid) \
	for (t = 0; t < NR_THREADS; t++) \
		if ((((tid) = __thread_id_map[t]) != __THREAD_ID_MAP_EMPTY) && \
		    ((tid) != __THREAD_ID_MAP_WAITING))

pthread_key_t thread_id_key;

static int __smp_thread_id(void)
{
	int i;
	thread_id_t tid = pthread_self();

	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid) {
			long v = i + 1;	/* must be non-NULL. */

			if (pthread_setspecific(thread_id_key, (void *)v) != 0) {
				perror("pthread_setspecific");
				exit(-1);
			}
			return i;
		}
	}
	spin_lock(&__thread_id_map_mutex);
	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid) {
			spin_unlock(&__thread_id_map_mutex);
			return i;
		}
	}
	spin_unlock(&__thread_id_map_mutex);
	fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n",
		(int)tid, (int)tid);
	exit(-1);
}

static int smp_thread_id(void)
{
	void *id;

	id = pthread_getspecific(thread_id_key);
	if (id == NULL)
		return __smp_thread_id();
	return (long)(id - 1);
}

static thread_id_t create_thread(void *(*func)(void *), void *arg)
{
	thread_id_t tid;
	int i;

	spin_lock(&__thread_id_map_mutex);
	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY)
			break;
	}
	if (i >= NR_THREADS) {
		spin_unlock(&__thread_id_map_mutex);
		fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
		exit(-1);
	}
	__thread_id_map[i] = __THREAD_ID_MAP_WAITING;
	spin_unlock(&__thread_id_map_mutex);
	if (pthread_create(&tid, NULL, func, arg) != 0) {
		perror("create_thread:pthread_create");
		exit(-1);
	}
	__thread_id_map[i] = tid;
	return tid;
}

static void *wait_thread(thread_id_t tid)
{
	int i;
	void *vp;

	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid)
			break;
	}
	if (i >= NR_THREADS) {
		fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n",
			(int)tid, (int)tid);
		exit(-1);
	}
	if (pthread_join(tid, &vp) != 0) {
		perror("wait_thread:pthread_join");
		exit(-1);
	}
	__thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
	return vp;
}

static void wait_all_threads(void)
{
	int i;
	thread_id_t tid;

	for (i = 1; i < NR_THREADS; i++) {
		tid = __thread_id_map[i];
		if (tid != __THREAD_ID_MAP_EMPTY &&
		    tid != __THREAD_ID_MAP_WAITING)
			(void)wait_thread(tid);
	}
}

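/*
 * Usage sketch (illustrative, not part of the original header): spawn a
 * worker with create_thread() and collect its return value with
 * wait_thread().  smp_init(), defined at the end of this file, must be
 * called first.  worker() and run_one_worker() are made-up names.
 */
#if 0
static void *worker(void *arg)
{
	long iterations = (long)arg;

	/* per-thread work would go here, e.g. keyed by smp_thread_id() */
	return (void *)iterations;
}

static void run_one_worker(void)
{
	thread_id_t tid = create_thread(worker, (void *)1000L);
	void *vp = wait_thread(tid);

	printf("worker returned %ld\n", (long)vp);
}
#endif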

#ifndef HAVE_CPU_SET_T
typedef unsigned long cpu_set_t;
# define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while (0)
# define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while (0)
#endif

static void run_on(int cpu)
{
#if HAVE_SCHED_SETAFFINITY
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	sched_setaffinity(0, &mask);
#else
	sched_setaffinity(0, sizeof(mask), &mask);
#endif
#endif /* HAVE_SCHED_SETAFFINITY */
}

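/*
 * Usage sketch (illustrative, not part of the original header): pin the
 * calling thread to a CPU derived from its slot in the thread-id map.
 * Whether this does anything depends on HAVE_SCHED_SETAFFINITY at
 * configure time; the CPU count of 4 is an arbitrary example.
 */
#if 0
static void pin_self(void)
{
	run_on(smp_thread_id() % 4);
}
#endif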

/*
 * timekeeping -- very crude -- should use MONOTONIC...
 */

long long get_microseconds(void)
{
	struct timeval tv;

	if (gettimeofday(&tv, NULL) != 0)
		abort();
	return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec;
}

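/*
 * Usage sketch (illustrative, not part of the original header): rough
 * timing of a code region.  As the comment above notes, gettimeofday()
 * is not monotonic, so treat the result as approximate.
 */
#if 0
static void time_region(void)
{
	long long t_start = get_microseconds();

	/* code under test would go here */
	printf("elapsed: %lld microseconds\n", get_microseconds() - t_start);
}
#endif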

/*
 * Per-thread variables.
 */

#define DEFINE_PER_THREAD(type, name) \
	struct { \
		__typeof__(type) v \
			__attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
	} __per_thread_##name[NR_THREADS];
#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)

#define per_thread(name, thread) __per_thread_##name[thread].v
#define __get_thread_var(name) per_thread(name, smp_thread_id())

#define init_per_thread(name, v) \
	do { \
		int __i_p_t_i; \
		for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \
			per_thread(name, __i_p_t_i) = v; \
	} while (0)

DEFINE_PER_THREAD(int, smp_processor_id);

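/*
 * Usage sketch (illustrative, not part of the original header): a
 * per-thread event counter.  Each slot sits on its own cache line, so a
 * thread can bump its own counter without false sharing; a reader sums
 * over every slot.  Call init_per_thread(n_events, 0) before use.
 * n_events, count_event() and sum_events() are made-up names.
 */
#if 0
DEFINE_PER_THREAD(long, n_events);

static void count_event(void)
{
	__get_thread_var(n_events)++;
}

static long sum_events(void)
{
	long sum = 0;
	int t;

	for_each_thread(t)
		sum += per_thread(n_events, t);
	return sum;
}
#endif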

/*
 * Bug checks.
 */

#define BUG_ON(c) do { if (!(c)) abort(); } while (0)

/*
 * Initialization -- Must be called before calling any primitives.
 */

static void smp_init(void)
{
	int i;

	spin_lock_init(&__thread_id_map_mutex);
	__thread_id_map[0] = pthread_self();
	for (i = 1; i < NR_THREADS; i++)
		__thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
	init_per_thread(smp_processor_id, 0);
	if (pthread_key_create(&thread_id_key, NULL) != 0) {
		perror("pthread_key_create");
		exit(-1);
	}
}

#endif /* _INCLUDE_API_H */
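
/*
 * Usage sketch (illustrative, not part of the original header): the
 * intended call order -- smp_init() first, then thread creation, then
 * wait_all_threads().  worker() and n_events refer to the made-up names
 * in the sketches above.
 */
#if 0
int main(void)
{
	int i;

	smp_init();
	init_per_thread(n_events, 0);
	for (i = 0; i < 4; i++)		/* 4 workers: arbitrary example count */
		create_thread(worker, (void *)1000L);
	wait_all_threads();
	return 0;
}
#endif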