8 * common.h: Common Linux kernel-isms.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; but version 2 of the License only due
13 * to code included from the Linux kernel.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 * Copyright (c) 2006 Paul E. McKenney, IBM.
26 * Much code taken from the Linux kernel. For such code, the option
27 * to redistribute under later versions of GPL might not be available.
30 #include <urcu/compiler.h>
31 #include <urcu/arch.h>
/* Align to a cache-line boundary to avoid false sharing between threads. */
#define ____cacheline_internodealigned_in_smp \
__attribute__((__aligned__(CAA_CACHE_LINE_SIZE)))
41 * api_pthreads.h: API mapping to pthreads environment.
43 * This program is free software; you can redistribute it and/or modify
44 * it under the terms of the GNU General Public License as published by
45 * the Free Software Foundation; either version 2 of the License, or
46 * (at your option) any later version. However, please note that much
47 * of the code in this file derives from the Linux kernel, and that such
48 * code may not be available except under GPLv2.
50 * This program is distributed in the hope that it will be useful,
51 * but WITHOUT ANY WARRANTY; without even the implied warranty of
52 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
53 * GNU General Public License for more details.
55 * You should have received a copy of the GNU General Public License
56 * along with this program; if not, write to the Free Software
57 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
59 * Copyright (c) 2006 Paul E. McKenney, IBM.
66 #include <sys/types.h>
70 #include <sys/param.h>
71 /* #include "atomic.h" */
/*
 * Exclusive locking primitives, mapped onto pthread mutexes.
 */

typedef pthread_mutex_t spinlock_t;

#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER;
#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER

/*
 * Initialize *sp.  Aborts the process on pthread failure, so callers
 * never need to check for errors.
 */
static void spin_lock_init(spinlock_t *sp)
{
	if (pthread_mutex_init(sp, NULL) != 0) {
		perror("spin_lock_init:pthread_mutex_init");
		exit(-1);
	}
}

/* Acquire *sp; aborts the process on pthread failure. */
static void spin_lock(spinlock_t *sp)
{
	if (pthread_mutex_lock(sp) != 0) {
		perror("spin_lock:pthread_mutex_lock");
		exit(-1);
	}
}

/* Release *sp; aborts the process on pthread failure. */
static void spin_unlock(spinlock_t *sp)
{
	if (pthread_mutex_unlock(sp) != 0) {
		perror("spin_unlock:pthread_mutex_unlock");
		exit(-1);
	}
}

/*
 * User-level code has no interrupts to disable: the "irq" variants only
 * set/clear the caller's flags variable around a plain lock/unlock.
 */
#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0)
#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0)
110 * Thread creation/destruction primitives.
113 typedef pthread_t thread_id_t
;
115 #define NR_THREADS 128
117 #define __THREAD_ID_MAP_EMPTY ((thread_id_t) 0)
118 #define __THREAD_ID_MAP_WAITING ((thread_id_t) 1)
119 thread_id_t __thread_id_map
[NR_THREADS
];
120 spinlock_t __thread_id_map_mutex
;
122 #define for_each_thread(t) \
123 for (t = 0; t < NR_THREADS; t++)
125 #define for_each_running_thread(t) \
126 for (t = 0; t < NR_THREADS; t++) \
127 if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \
128 (__thread_id_map[t] != __THREAD_ID_MAP_WAITING))
130 #define for_each_tid(t, tid) \
131 for (t = 0; t < NR_THREADS; t++) \
132 if ((((tid) = __thread_id_map[t]) != __THREAD_ID_MAP_EMPTY) && \
133 ((tid) != __THREAD_ID_MAP_WAITING))
135 pthread_key_t thread_id_key
;
137 static int __smp_thread_id(void)
140 thread_id_t tid
= pthread_self();
142 for (i
= 0; i
< NR_THREADS
; i
++) {
143 if (__thread_id_map
[i
] == tid
) {
144 long v
= i
+ 1; /* must be non-NULL. */
146 if (pthread_setspecific(thread_id_key
, (void *)v
) != 0) {
147 perror("pthread_setspecific");
153 spin_lock(&__thread_id_map_mutex
);
154 for (i
= 0; i
< NR_THREADS
; i
++) {
155 if (__thread_id_map
[i
] == tid
)
156 spin_unlock(&__thread_id_map_mutex
);
159 spin_unlock(&__thread_id_map_mutex
);
160 fprintf(stderr
, "smp_thread_id: Rogue thread, id: %d(%#x)\n",
165 static int smp_thread_id(void)
169 id
= pthread_getspecific(thread_id_key
);
171 return __smp_thread_id();
172 return (long)(id
- 1);
175 static thread_id_t
create_thread(void *(*func
)(void *), void *arg
)
180 spin_lock(&__thread_id_map_mutex
);
181 for (i
= 0; i
< NR_THREADS
; i
++) {
182 if (__thread_id_map
[i
] == __THREAD_ID_MAP_EMPTY
)
185 if (i
>= NR_THREADS
) {
186 spin_unlock(&__thread_id_map_mutex
);
187 fprintf(stderr
, "Thread limit of %d exceeded!\n", NR_THREADS
);
190 __thread_id_map
[i
] = __THREAD_ID_MAP_WAITING
;
191 spin_unlock(&__thread_id_map_mutex
);
192 if (pthread_create(&tid
, NULL
, func
, arg
) != 0) {
193 perror("create_thread:pthread_create");
196 __thread_id_map
[i
] = tid
;
200 static void *wait_thread(thread_id_t tid
)
205 for (i
= 0; i
< NR_THREADS
; i
++) {
206 if (__thread_id_map
[i
] == tid
)
209 if (i
>= NR_THREADS
){
210 fprintf(stderr
, "wait_thread: bad tid = %d(%#x)\n",
214 if (pthread_join(tid
, &vp
) != 0) {
215 perror("wait_thread:pthread_join");
218 __thread_id_map
[i
] = __THREAD_ID_MAP_EMPTY
;
222 static void wait_all_threads(void)
227 for (i
= 1; i
< NR_THREADS
; i
++) {
228 tid
= __thread_id_map
[i
];
229 if (tid
!= __THREAD_ID_MAP_EMPTY
&&
230 tid
!= __THREAD_ID_MAP_WAITING
)
231 (void)wait_thread(tid
);
#ifndef HAVE_CPU_SET_T
/* Minimal cpu_set_t fallback: one bit per CPU in an unsigned long. */
typedef unsigned long cpu_set_t;
# define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while (0)
# define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while (0)
#endif /* HAVE_CPU_SET_T */

/*
 * Bind the calling thread to the given CPU.  Compiles to a no-op when
 * sched_setaffinity() is unavailable; setaffinity errors are ignored.
 */
static void run_on(int cpu)
{
#if HAVE_SCHED_SETAFFINITY
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	/* Old glibc prototype without the size argument. */
	sched_setaffinity(0, &mask);
#else
	sched_setaffinity(0, sizeof(mask), &mask);
#endif
#endif /* HAVE_SCHED_SETAFFINITY */
}
/*
 * timekeeping -- very crude -- should use MONOTONIC...
 *
 * Return wall-clock time in microseconds since the Epoch; aborts if
 * gettimeofday() fails.  Not monotonic: subject to clock adjustments.
 */
long long get_microseconds(void)
{
	struct timeval tv;

	if (gettimeofday(&tv, NULL) != 0)
		abort();
	return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec;
}
270 * Per-thread variables.
273 #define DEFINE_PER_THREAD(type, name) \
276 __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
277 } __per_thread_##name[NR_THREADS];
278 #define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)
280 #define per_thread(name, thread) __per_thread_##name[thread].v
281 #define __get_thread_var(name) per_thread(name, smp_thread_id())
283 #define init_per_thread(name, v) \
286 for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \
287 per_thread(name, __i_p_t_i) = v; \
290 DEFINE_PER_THREAD(int, smp_processor_id
);
/*
 * NOTE(review): aborts when the condition is FALSE -- assert()-like
 * semantics, the INVERSE of the Linux kernel's BUG_ON(condition).
 * Confirm that callers in this codebase expect this polarity.
 */
#define BUG_ON(c) do { if (!(c)) abort(); } while (0)
299 * Initialization -- Must be called before calling any primitives.
302 static void smp_init(void)
306 spin_lock_init(&__thread_id_map_mutex
);
307 __thread_id_map
[0] = pthread_self();
308 for (i
= 1; i
< NR_THREADS
; i
++)
309 __thread_id_map
[i
] = __THREAD_ID_MAP_EMPTY
;
310 init_per_thread(smp_processor_id
, 0);
311 if (pthread_key_create(&thread_id_key
, NULL
) != 0) {
312 perror("pthread_key_create");