fix api.h generation on x86 and powerpc
[urcu.git] / tests / api_ppc.h
1/* MECHANICALLY GENERATED, DO NOT EDIT!!! */
2
3#define _INCLUDE_API_H
4
5/*
6 * common.h: Common Linux kernel-isms.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; but version 2 of the License only due
11 * to code included from the Linux kernel.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * Copyright (c) 2006 Paul E. McKenney, IBM.
23 *
24 * Much code taken from the Linux kernel. For such code, the option
25 * to redistribute under later versions of GPL might not be available.
26 */
27
28#include <urcu/arch.h>
29
30#ifndef __always_inline
31#define __always_inline inline
32#endif
33
34#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
35#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
36
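/*
 * Illustrative sketch (not part of the generated header): BUILD_BUG_ON()
 * turns a compile-time-constant condition into a negative-sized array when
 * the condition is true, so a violated assertion breaks the build instead
 * of failing at run time.  The struct and function names are invented for
 * the example.
 */
#if 0 /* usage sketch only */
struct example_hdr {
	unsigned int len;
	unsigned char flags;
};

static inline void example_build_checks(void)
{
	/* Fails to compile if the header struct ever grows past 8 bytes. */
	BUILD_BUG_ON(sizeof(struct example_hdr) > 8);
	/* BUILD_BUG_ON_ZERO() is the expression form; it evaluates to 0. */
	(void)BUILD_BUG_ON_ZERO(sizeof(int) < 4);
}
#endif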
37#ifdef __ASSEMBLY__
38# define stringify_in_c(...) __VA_ARGS__
39# define ASM_CONST(x) x
40#else
41/* This version of stringify will deal with commas... */
42# define __stringify_in_c(...) #__VA_ARGS__
43# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
44# define __ASM_CONST(x) x##UL
45# define ASM_CONST(x) __ASM_CONST(x)
46#endif
47
48
49/*
50 * arch-ppc64.h: Expose PowerPC atomic instructions.
51 *
52 * This program is free software; you can redistribute it and/or modify
53 * it under the terms of the GNU General Public License as published by
54 * the Free Software Foundation; but version 2 of the License only due
55 * to code included from the Linux kernel.
56 *
57 * This program is distributed in the hope that it will be useful,
58 * but WITHOUT ANY WARRANTY; without even the implied warranty of
59 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
60 * GNU General Public License for more details.
61 *
62 * You should have received a copy of the GNU General Public License
63 * along with this program; if not, write to the Free Software
64 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
65 *
66 * Copyright (c) 2006 Paul E. McKenney, IBM.
67 *
68 * Much code taken from the Linux kernel. For such code, the option
69 * to redistribute under later versions of GPL might not be available.
70 */
71
72/*
73 * Machine parameters.
74 */
75
76#define CONFIG_PPC64
77
78/*#define CACHE_LINE_SIZE 128 */
79#define ____cacheline_internodealigned_in_smp \
80 __attribute__((__aligned__(1 << 7)))
81
82#if 0 /* duplicate with arch_atomic.h */
83
84/*
85 * Atomic data structure, initialization, and access.
86 */
87
88typedef struct { volatile int counter; } atomic_t;
89
90#define ATOMIC_INIT(i) { (i) }
91
92#define atomic_read(v) ((v)->counter)
93#define atomic_set(v, i) (((v)->counter) = (i))
94
95/*
96 * Atomic operations.
97 */
98
99#define LWSYNC lwsync
100#define PPC405_ERR77(ra,rb)
101#ifdef CONFIG_SMP
102# define LWSYNC_ON_SMP stringify_in_c(LWSYNC) "\n"
103# define ISYNC_ON_SMP "\n\tisync\n"
104#else
105# define LWSYNC_ON_SMP
106# define ISYNC_ON_SMP
107#endif
108
109/*
110 * Atomic exchange
111 *
112 * Changes the memory location '*ptr' to be val and returns
113 * the previous value stored there.
114 */
115static __always_inline unsigned long
116__xchg_u32(volatile void *p, unsigned long val)
117{
118 unsigned long prev;
119
120 __asm__ __volatile__(
121 LWSYNC_ON_SMP
122"1: lwarx %0,0,%2 \n"
123 PPC405_ERR77(0,%2)
124" stwcx. %3,0,%2 \n\
125 bne- 1b"
126 ISYNC_ON_SMP
127 : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
128 : "r" (p), "r" (val)
129 : "cc", "memory");
130
131 return prev;
132}
133
134/*
135 * Atomic exchange
136 *
137 * Changes the memory location '*ptr' to be val and returns
138 * the previous value stored there.
139 */
140static __always_inline unsigned long
141__xchg_u32_local(volatile void *p, unsigned long val)
142{
143 unsigned long prev;
144
145 __asm__ __volatile__(
146"1: lwarx %0,0,%2 \n"
147 PPC405_ERR77(0,%2)
148" stwcx. %3,0,%2 \n\
149 bne- 1b"
150 : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
151 : "r" (p), "r" (val)
152 : "cc", "memory");
153
154 return prev;
155}
156
157#ifdef CONFIG_PPC64
158static __always_inline unsigned long
159__xchg_u64(volatile void *p, unsigned long val)
160{
161 unsigned long prev;
162
163 __asm__ __volatile__(
164 LWSYNC_ON_SMP
165"1: ldarx %0,0,%2 \n"
166 PPC405_ERR77(0,%2)
167" stdcx. %3,0,%2 \n\
168 bne- 1b"
169 ISYNC_ON_SMP
170 : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
171 : "r" (p), "r" (val)
172 : "cc", "memory");
173
174 return prev;
175}
176
177static __always_inline unsigned long
178__xchg_u64_local(volatile void *p, unsigned long val)
179{
180 unsigned long prev;
181
182 __asm__ __volatile__(
183"1: ldarx %0,0,%2 \n"
184 PPC405_ERR77(0,%2)
185" stdcx. %3,0,%2 \n\
186 bne- 1b"
187 : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
188 : "r" (p), "r" (val)
189 : "cc", "memory");
190
191 return prev;
192}
193#endif
194
195/*
196 * This function doesn't exist, so you'll get a linker error
197 * if something tries to do an invalid xchg().
198 */
199extern void __xchg_called_with_bad_pointer(void);
200
201static __always_inline unsigned long
202__xchg(volatile void *ptr, unsigned long x, unsigned int size)
203{
204 switch (size) {
205 case 4:
206 return __xchg_u32(ptr, x);
207#ifdef CONFIG_PPC64
208 case 8:
209 return __xchg_u64(ptr, x);
210#endif
211 }
212 __xchg_called_with_bad_pointer();
213 return x;
214}
215
216static __always_inline unsigned long
217__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
218{
219 switch (size) {
220 case 4:
221 return __xchg_u32_local(ptr, x);
222#ifdef CONFIG_PPC64
223 case 8:
224 return __xchg_u64_local(ptr, x);
225#endif
226 }
227 __xchg_called_with_bad_pointer();
228 return x;
229}
230#define xchg(ptr,x) \
231 ({ \
232 __typeof__(*(ptr)) _x_ = (x); \
233 (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
234 })
235
236#define xchg_local(ptr,x) \
237 ({ \
238 __typeof__(*(ptr)) _x_ = (x); \
239 (__typeof__(*(ptr))) __xchg_local((ptr), \
240 (unsigned long)_x_, sizeof(*(ptr))); \
241 })
242
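/*
 * Usage sketch (within this compiled-out section, for illustration only):
 * xchg() atomically stores a new value and returns the old one; the
 * xchg_local() variant drops the lwsync/isync barriers and is only ordered
 * with respect to the issuing CPU.  The function names are invented.
 */
static inline unsigned long example_swap(volatile unsigned long *slot,
					 unsigned long newval)
{
	/* Full-barrier variant: safe when other CPUs access *slot. */
	return xchg(slot, newval);
}

static inline unsigned long example_swap_local(volatile unsigned long *slot,
					       unsigned long newval)
{
	/* No lwsync/isync: use only for data private to this CPU. */
	return xchg_local(slot, newval);
}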
243/*
244 * Compare and exchange - if *p == old, set it to new,
245 * and return the old value of *p.
246 */
247#define __HAVE_ARCH_CMPXCHG 1
248
249static __always_inline unsigned long
250__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
251{
252 unsigned int prev;
253
254 __asm__ __volatile__ (
255 LWSYNC_ON_SMP
256"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
257 cmpw 0,%0,%3\n\
258 bne- 2f\n"
259 PPC405_ERR77(0,%2)
260" stwcx. %4,0,%2\n\
261 bne- 1b"
262 ISYNC_ON_SMP
263 "\n\
2642:"
265 : "=&r" (prev), "+m" (*p)
266 : "r" (p), "r" (old), "r" (new)
267 : "cc", "memory");
268
269 return prev;
270}
271
272static __always_inline unsigned long
273__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
274 unsigned long new)
275{
276 unsigned int prev;
277
278 __asm__ __volatile__ (
279"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
280 cmpw 0,%0,%3\n\
281 bne- 2f\n"
282 PPC405_ERR77(0,%2)
283" stwcx. %4,0,%2\n\
284 bne- 1b"
285 "\n\
2862:"
287 : "=&r" (prev), "+m" (*p)
288 : "r" (p), "r" (old), "r" (new)
289 : "cc", "memory");
290
291 return prev;
292}
293
294#ifdef CONFIG_PPC64
295static __always_inline unsigned long
296__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
297{
298 unsigned long prev;
299
300 __asm__ __volatile__ (
301 LWSYNC_ON_SMP
302"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
303 cmpd 0,%0,%3\n\
304 bne- 2f\n\
305 stdcx. %4,0,%2\n\
306 bne- 1b"
307 ISYNC_ON_SMP
308 "\n\
3092:"
310 : "=&r" (prev), "+m" (*p)
311 : "r" (p), "r" (old), "r" (new)
312 : "cc", "memory");
313
314 return prev;
315}
316
317static __always_inline unsigned long
318__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
319 unsigned long new)
320{
321 unsigned long prev;
322
323 __asm__ __volatile__ (
324"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
325 cmpd 0,%0,%3\n\
326 bne- 2f\n\
327 stdcx. %4,0,%2\n\
328 bne- 1b"
329 "\n\
3302:"
331 : "=&r" (prev), "+m" (*p)
332 : "r" (p), "r" (old), "r" (new)
333 : "cc", "memory");
334
335 return prev;
336}
337#endif
338
339/* This function doesn't exist, so you'll get a linker error
340 if something tries to do an invalid cmpxchg(). */
341extern void __cmpxchg_called_with_bad_pointer(void);
342
343static __always_inline unsigned long
344__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
345 unsigned int size)
346{
347 switch (size) {
348 case 4:
349 return __cmpxchg_u32(ptr, old, new);
350#ifdef CONFIG_PPC64
351 case 8:
352 return __cmpxchg_u64(ptr, old, new);
353#endif
354 }
355 __cmpxchg_called_with_bad_pointer();
356 return old;
357}
358
359static __always_inline unsigned long
360__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
361 unsigned int size)
362{
363 switch (size) {
364 case 4:
365 return __cmpxchg_u32_local(ptr, old, new);
366#ifdef CONFIG_PPC64
367 case 8:
368 return __cmpxchg_u64_local(ptr, old, new);
369#endif
370 }
371 __cmpxchg_called_with_bad_pointer();
372 return old;
373}
374
375#define cmpxchg(ptr, o, n) \
376 ({ \
377 __typeof__(*(ptr)) _o_ = (o); \
378 __typeof__(*(ptr)) _n_ = (n); \
379 (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
380 (unsigned long)_n_, sizeof(*(ptr))); \
381 })
382
383
384#define cmpxchg_local(ptr, o, n) \
385 ({ \
386 __typeof__(*(ptr)) _o_ = (o); \
387 __typeof__(*(ptr)) _n_ = (n); \
388 (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
389 (unsigned long)_n_, sizeof(*(ptr))); \
390 })
391
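/*
 * Usage sketch (still within the compiled-out section): the classic
 * compare-and-swap retry loop.  cmpxchg() returns the value actually found
 * in memory, so the update took effect only when that value equals the old
 * value the new one was computed from.  Names are invented for the example.
 */
static inline unsigned long example_fetch_and_add(volatile unsigned long *ctr,
						  unsigned long delta)
{
	unsigned long old, seen;

	do {
		old = *ctr;
		seen = cmpxchg(ctr, old, old + delta);
	} while (seen != old);
	return old;
}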
392#ifdef CONFIG_PPC64
393/*
394 * We handle most unaligned accesses in hardware. On the other hand
395 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
396 * powers of 2 writes until it reaches sufficient alignment).
397 *
398 * Based on this we disable the IP header alignment in network drivers.
399 * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
400 * cacheline alignment of buffers.
401 */
402#define NET_IP_ALIGN 0
403#define NET_SKB_PAD L1_CACHE_BYTES
404
405#define cmpxchg64(ptr, o, n) \
406 ({ \
407 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
408 cmpxchg((ptr), (o), (n)); \
409 })
410#define cmpxchg64_local(ptr, o, n) \
411 ({ \
412 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
413 cmpxchg_local((ptr), (o), (n)); \
414 })
415#endif
416
417#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
418#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
419
420/**
421 * atomic_add - add integer to atomic variable
422 * @a: integer value to add
423 * @v: pointer of type atomic_t
424 *
425 * Atomically adds @a to @v.
426 */
427static __inline__ void atomic_add(int a, atomic_t *v)
428{
429 int t;
430
431 __asm__ __volatile__(
432 "1: lwarx %0,0,%3 # atomic_add\n\
433 add %0,%2,%0 \n\
434 stwcx. %0,0,%3 \n\
435 bne- 1b"
436 : "=&r" (t), "+m" (v->counter)
437 : "r" (a), "r" (&v->counter)
438 : "cc");
439}
440
441/**
442 * atomic_sub - subtract the atomic variable
443 * @a: integer value to subtract
444 * @v: pointer of type atomic_t
445 *
446 * Atomically subtracts @a from @v.
447 */
448static __inline__ void atomic_sub(int a, atomic_t *v)
449{
450 int t;
451
452 __asm__ __volatile__(
453 "1: lwarx %0,0,%3 # atomic_sub \n\
454 subf %0,%2,%0 \n\
455 stwcx. %0,0,%3 \n\
456 bne- 1b"
457 : "=&r" (t), "+m" (v->counter)
458 : "r" (a), "r" (&v->counter)
459 : "cc");
460}
461
462static __inline__ int atomic_sub_return(int a, atomic_t *v)
463{
464 int t;
465
466 __asm__ __volatile__(
467 "lwsync\n\
468 1: lwarx %0,0,%2 # atomic_sub_return\n\
469 subf %0,%1,%0\n\
470 stwcx. %0,0,%2 \n\
471 bne- 1b \n\
472 isync"
473 : "=&r" (t)
474 : "r" (a), "r" (&v->counter)
475 : "cc", "memory");
476
477 return t;
478}
479
480/**
481 * atomic_sub_and_test - subtract value from variable and test result
482 * @i: integer value to subtract
483 * @v: pointer of type atomic_t
484 *
485 * Atomically subtracts @i from @v and returns
486 * true if the result is zero, or false for all
487 * other cases.
488 */
489static __inline__ int atomic_sub_and_test(int a, atomic_t *v)
490{
491 return atomic_sub_return(a, v) == 0;
492}
493
494/**
495 * atomic_inc - increment atomic variable
496 * @v: pointer of type atomic_t
497 *
498 * Atomically increments @v by 1.
499 */
500static __inline__ void atomic_inc(atomic_t *v)
501{
502 atomic_add(1, v);
503}
504
505/**
506 * atomic_dec - decrement atomic variable
507 * @v: pointer of type atomic_t
508 *
509 * Atomically decrements @v by 1.
510 */
511static __inline__ void atomic_dec(atomic_t *v)
512{
513 atomic_sub(1, v);
514}
515
516/**
517 * atomic_dec_and_test - decrement and test
518 * @v: pointer of type atomic_t
519 *
520 * Atomically decrements @v by 1 and
521 * returns true if the result is 0, or false for all other
522 * cases.
523 */
524static __inline__ int atomic_dec_and_test(atomic_t *v)
525{
526 return atomic_sub_and_test(1, v);
527}
528
529/**
530 * atomic_inc_and_test - increment and test
531 * @v: pointer of type atomic_t
532 *
533 * Atomically increments @v by 1
534 * and returns true if the result is zero, or false for all
535 * other cases.
536 */
537static __inline__ int atomic_inc_and_test(atomic_t *v)
538{
539 return atomic_inc_return(v) == 0;
540}
541
542/**
543 * atomic_add_return - add and return
544 * @v: pointer of type atomic_t
545 * @i: integer value to add
546 *
547 * Atomically adds @i to @v and returns @i + @v
548 */
549static __inline__ int atomic_add_return(int a, atomic_t *v)
550{
551 int t;
552
553 __asm__ __volatile__(
554 "lwsync \n\
555 1: lwarx %0,0,%2 # atomic_add_return \n\
556 add %0,%1,%0 \n\
557 stwcx. %0,0,%2 \n\
558 bne- 1b \n\
559 isync"
560 : "=&r" (t)
561 : "r" (a), "r" (&v->counter)
562 : "cc", "memory");
563
564 return t;
565}
566
567/**
568 * atomic_add_negative - add and test if negative
569 * @v: pointer of type atomic_t
570 * @i: integer value to add
571 *
572 * Atomically adds @i to @v and returns true
573 * if the result is negative, or false when
574 * result is greater than or equal to zero.
575 */
576static __inline__ int atomic_add_negative(int a, atomic_t *v)
577{
578 return atomic_add_return(a, v) < 0;
579}
580
581/**
582 * atomic_add_unless - add unless the number is a given value
583 * @v: pointer of type atomic_t
584 * @a: the amount to add to v...
585 * @u: ...unless v is equal to u.
586 *
587 * Atomically adds @a to @v, so long as it was not @u.
588 * Returns non-zero if @v was not @u, and zero otherwise.
589 */
590static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
591{
592 int t;
593
594 __asm__ __volatile__(
595 "lwsync \n\
596 1: lwarx %0,0,%1 # atomic_add_unless\n\
597 cmpw 0,%0,%3 \n\
598 beq- 2f \n\
599 add %0,%2,%0 \n\
600 stwcx. %0,0,%1 \n\
601 bne- 1b \n\
602 isync \n\
603 subf %0,%2,%0 \n\
604 2:"
605 : "=&r" (t)
606 : "r" (&v->counter), "r" (a), "r" (u)
607 : "cc", "memory");
608
609 return t != u;
610}
611
612#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
613
614#define atomic_inc_return(v) (atomic_add_return(1,v))
615#define atomic_dec_return(v) (atomic_sub_return(1,v))
616
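/*
 * Usage sketch (compiled-out): atomic_inc_not_zero() is the usual building
 * block for "take a reference only while the object is still live"
 * schemes.  The structure and function names are invented for the example.
 */
struct example_obj {
	atomic_t refcnt;	/* 0 means the object is being torn down */
};

static inline int example_obj_tryget(struct example_obj *obj)
{
	/* Non-zero only if the count was non-zero and has been bumped. */
	return atomic_inc_not_zero(&obj->refcnt);
}

static inline void example_obj_put(struct example_obj *obj)
{
	if (atomic_dec_and_test(&obj->refcnt)) {
		/* last reference dropped; tear the object down here */
	}
}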
617/* Atomic operations are not fully serializing on powerpc, so map these to smp_mb(). */
618#define smp_mb__before_atomic_dec() smp_mb()
619#define smp_mb__after_atomic_dec() smp_mb()
620#define smp_mb__before_atomic_inc() smp_mb()
621#define smp_mb__after_atomic_inc() smp_mb()
622
623#endif //0 /* duplicate with arch_atomic.h */
624
625/*
626 * api_pthreads.h: API mapping to pthreads environment.
627 *
628 * This program is free software; you can redistribute it and/or modify
629 * it under the terms of the GNU General Public License as published by
630 * the Free Software Foundation; either version 2 of the License, or
631 * (at your option) any later version. However, please note that much
632 * of the code in this file derives from the Linux kernel, and that such
633 * code may not be available except under GPLv2.
634 *
635 * This program is distributed in the hope that it will be useful,
636 * but WITHOUT ANY WARRANTY; without even the implied warranty of
637 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
638 * GNU General Public License for more details.
639 *
640 * You should have received a copy of the GNU General Public License
641 * along with this program; if not, write to the Free Software
642 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
643 *
644 * Copyright (c) 2006 Paul E. McKenney, IBM.
645 */
646
647#include <stdio.h>
648#include <stdlib.h>
649#include <errno.h>
650#include <limits.h>
651#include <sys/types.h>
652#define __USE_GNU
653#include <pthread.h>
654#include <sched.h>
655#include <sys/param.h>
656/* #include "atomic.h" */
657
658/*
659 * Compiler magic.
660 */
661#define container_of(ptr, type, member) ({ \
662 const typeof( ((type *)0)->member ) *__mptr = (ptr); \
663 (type *)( (char *)__mptr - offsetof(type,member) );})
664
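/*
 * Illustrative sketch (not part of the generated header): container_of()
 * maps a pointer to a member back to the structure embedding it; the list
 * and hlist code later in this file uses it to recover user objects from
 * embedded nodes.  The struct below is invented for the example.
 */
#if 0 /* usage sketch only */
struct example_pair {
	int key;
	int value;
};

static inline struct example_pair *example_pair_of_value(int *valp)
{
	/* Recover the enclosing struct from the address of its 'value' field. */
	return container_of(valp, struct example_pair, value);
}
#endif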
665/*
666 * Default machine parameters.
667 */
668
669#ifndef CACHE_LINE_SIZE
670/* #define CACHE_LINE_SIZE 128 */
671#endif /* #ifndef CACHE_LINE_SIZE */
672
673/*
674 * Exclusive locking primitives.
675 */
676
677typedef pthread_mutex_t spinlock_t;
678
679#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER;
680#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER
681
682static void spin_lock_init(spinlock_t *sp)
683{
684 if (pthread_mutex_init(sp, NULL) != 0) {
685 perror("spin_lock_init:pthread_mutex_init");
686 exit(-1);
687 }
688}
689
690static void spin_lock(spinlock_t *sp)
691{
692 if (pthread_mutex_lock(sp) != 0) {
693 perror("spin_lock:pthread_mutex_lock");
694 exit(-1);
695 }
696}
697
698static void spin_unlock(spinlock_t *sp)
699{
700 if (pthread_mutex_unlock(sp) != 0) {
701 perror("spin_unlock:pthread_mutex_unlock");
702 exit(-1);
703 }
704}
705
706#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0)
707#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0)
708
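/*
 * Usage sketch (not part of the generated header): the spinlock_t wrappers
 * above map the kernel locking API onto a pthread mutex, so a "spinlock"
 * protected update reads exactly like kernel code.  The lock and counter
 * names are invented for the example.
 */
#if 0 /* usage sketch only */
static spinlock_t example_lock = __SPIN_LOCK_UNLOCKED(example_lock);
static unsigned long example_counter;

static void example_counter_inc(void)
{
	spin_lock(&example_lock);
	example_counter++;
	spin_unlock(&example_lock);
}
#endif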
709/*
710 * Thread creation/destruction primitives.
711 */
712
713typedef pthread_t thread_id_t;
714
715#define NR_THREADS 128
716
717#define __THREAD_ID_MAP_EMPTY 0
718#define __THREAD_ID_MAP_WAITING 1
719thread_id_t __thread_id_map[NR_THREADS];
720spinlock_t __thread_id_map_mutex;
721
722#define for_each_thread(t) \
723 for (t = 0; t < NR_THREADS; t++)
724
725#define for_each_running_thread(t) \
726 for (t = 0; t < NR_THREADS; t++) \
727 if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \
728 (__thread_id_map[t] != __THREAD_ID_MAP_WAITING))
729
730#define for_each_tid(t, tid) \
731 for (t = 0; t < NR_THREADS; t++) \
732 if ((((tid) = __thread_id_map[t]) != __THREAD_ID_MAP_EMPTY) && \
733 ((tid) != __THREAD_ID_MAP_WAITING))
734
735pthread_key_t thread_id_key;
736
737static int __smp_thread_id(void)
738{
739 int i;
740 thread_id_t tid = pthread_self();
741
742 for (i = 0; i < NR_THREADS; i++) {
743 if (__thread_id_map[i] == tid) {
744 long v = i + 1; /* must be non-NULL. */
745
746 if (pthread_setspecific(thread_id_key, (void *)v) != 0) {
747 perror("pthread_setspecific");
748 exit(-1);
749 }
750 return i;
751 }
752 }
753 spin_lock(&__thread_id_map_mutex);
754 for (i = 0; i < NR_THREADS; i++) {
755 if (__thread_id_map[i] == tid) {
756 spin_unlock(&__thread_id_map_mutex);
757 return i;
758 }
759 }
759 spin_unlock(&__thread_id_map_mutex);
760 fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n",
761 (int)tid, (int)tid);
762 exit(-1);
763}
764
765static int smp_thread_id(void)
766{
767 void *id;
768
769 id = pthread_getspecific(thread_id_key);
770 if (id == NULL)
771 return __smp_thread_id();
772 return (long)(id - 1);
773}
774
775static thread_id_t create_thread(void *(*func)(void *), void *arg)
776{
777 thread_id_t tid;
778 int i;
779
780 spin_lock(&__thread_id_map_mutex);
781 for (i = 0; i < NR_THREADS; i++) {
782 if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY)
783 break;
784 }
785 if (i >= NR_THREADS) {
786 spin_unlock(&__thread_id_map_mutex);
787 fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
788 exit(-1);
789 }
790 __thread_id_map[i] = __THREAD_ID_MAP_WAITING;
791 spin_unlock(&__thread_id_map_mutex);
792 if (pthread_create(&tid, NULL, func, arg) != 0) {
793 perror("create_thread:pthread_create");
794 exit(-1);
795 }
796 __thread_id_map[i] = tid;
797 return tid;
798}
799
800static void *wait_thread(thread_id_t tid)
801{
802 int i;
803 void *vp;
804
805 for (i = 0; i < NR_THREADS; i++) {
806 if (__thread_id_map[i] == tid)
807 break;
808 }
809 if (i >= NR_THREADS){
810 fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n",
811 (int)tid, (int)tid);
812 exit(-1);
813 }
814 if (pthread_join(tid, &vp) != 0) {
815 perror("wait_thread:pthread_join");
816 exit(-1);
817 }
818 __thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
819 return vp;
820}
821
822static void wait_all_threads(void)
823{
824 int i;
825 thread_id_t tid;
826
827 for (i = 1; i < NR_THREADS; i++) {
828 tid = __thread_id_map[i];
829 if (tid != __THREAD_ID_MAP_EMPTY &&
830 tid != __THREAD_ID_MAP_WAITING)
831 (void)wait_thread(tid);
832 }
833}
834
835static void run_on(int cpu)
836{
837 cpu_set_t mask;
838
839 CPU_ZERO(&mask);
840 CPU_SET(cpu, &mask);
841 sched_setaffinity(0, sizeof(mask), &mask);
842}
843
844/*
845 * timekeeping -- very crude -- should use MONOTONIC...
846 */
847
848long long get_microseconds(void)
849{
850 struct timeval tv;
851
852 if (gettimeofday(&tv, NULL) != 0)
853 abort();
854 return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec;
855}
856
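/*
 * Usage sketch (not part of the generated header): timing a test phase
 * with the crude gettimeofday()-based clock above.  The phase callback is
 * a placeholder.
 */
#if 0 /* usage sketch only */
static void example_time_phase(void (*phase)(void))
{
	long long t_start, t_end;

	t_start = get_microseconds();
	phase();
	t_end = get_microseconds();
	printf("phase took %lld microseconds\n", t_end - t_start);
}
#endif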
857/*
858 * Per-thread variables.
859 */
860
861#define DEFINE_PER_THREAD(type, name) \
862 struct { \
863 __typeof__(type) v \
864 __attribute__((__aligned__(CACHE_LINE_SIZE))); \
865 } __per_thread_##name[NR_THREADS];
866#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)
867
868#define per_thread(name, thread) __per_thread_##name[thread].v
869#define __get_thread_var(name) per_thread(name, smp_thread_id())
870
871#define init_per_thread(name, v) \
872 do { \
873 int __i_p_t_i; \
874 for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \
875 per_thread(name, __i_p_t_i) = v; \
876 } while (0)
877
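/*
 * Usage sketch (not part of the generated header): a per-thread counter
 * built from the macros above.  Each slot is cache-line aligned, so a
 * thread can bump its own slot without false sharing, and a reader sums
 * all slots with per_thread().  The per-CPU macros below work the same
 * way.  Names are invented for the example.
 */
#if 0 /* usage sketch only */
DEFINE_PER_THREAD(long, example_count);

static void example_count_inc(void)
{
	__get_thread_var(example_count)++;
}

static long example_count_sum(void)
{
	long sum = 0;
	int t;

	for_each_thread(t)
		sum += per_thread(example_count, t);
	return sum;
}
#endif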
878/*
879 * CPU traversal primitives.
880 */
881
882#ifndef NR_CPUS
883#define NR_CPUS 16
884#endif /* #ifndef NR_CPUS */
885
886#define for_each_possible_cpu(cpu) \
887 for (cpu = 0; cpu < NR_CPUS; cpu++)
888#define for_each_online_cpu(cpu) \
889 for (cpu = 0; cpu < NR_CPUS; cpu++)
890
891/*
892 * Per-CPU variables.
893 */
894
895#define DEFINE_PER_CPU(type, name) \
896 struct { \
897 __typeof__(type) v \
898 __attribute__((__aligned__(CACHE_LINE_SIZE))); \
899 } __per_cpu_##name[NR_CPUS]
900#define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name)
901
902DEFINE_PER_THREAD(int, smp_processor_id);
903
904#define per_cpu(name, thread) __per_cpu_##name[thread].v
905#define __get_cpu_var(name) per_cpu(name, smp_processor_id())
906
907#define init_per_cpu(name, v) \
908 do { \
909 int __i_p_c_i; \
910 for (__i_p_c_i = 0; __i_p_c_i < NR_CPUS; __i_p_c_i++) \
911 per_cpu(name, __i_p_c_i) = v; \
912 } while (0)
913
914/*
915 * CPU state checking (crowbarred).
916 */
917
918#define idle_cpu(cpu) 0
919#define in_softirq() 1
920#define hardirq_count() 0
921#define PREEMPT_SHIFT 0
922#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
923#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
924#define PREEMPT_BITS 8
925#define SOFTIRQ_BITS 8
926
927/*
928 * CPU hotplug.
929 */
930
931struct notifier_block {
932 int (*notifier_call)(struct notifier_block *, unsigned long, void *);
933 struct notifier_block *next;
934 int priority;
935};
936
937#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
938#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */
939#define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */
940#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
941#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
942#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
943#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task,
944 * not handling interrupts, soon dead */
945#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
946 * lock is dropped */
947
948/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
949 * operation in progress
950 */
951#define CPU_TASKS_FROZEN 0x0010
952
953#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN)
954#define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
955#define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
956#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
957#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
958#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
959#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
960
961/* Hibernation and suspend events */
962#define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */
963#define PM_POST_HIBERNATION 0x0002 /* Hibernation finished */
964#define PM_SUSPEND_PREPARE 0x0003 /* Going to suspend the system */
965#define PM_POST_SUSPEND 0x0004 /* Suspend finished */
966#define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */
967#define PM_POST_RESTORE 0x0006 /* Restore failed */
968
969#define NOTIFY_DONE 0x0000 /* Don't care */
970#define NOTIFY_OK 0x0001 /* Suits me */
971#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */
972#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002)
973 /* Bad/Veto action */
974/*
975 * Clean way to return from the notifier and stop further calls.
976 */
977#define NOTIFY_STOP (NOTIFY_OK|NOTIFY_STOP_MASK)
978
979/*
980 * Bug checks.
981 */
982
983#define BUG_ON(c) do { if (!(c)) abort(); } while (0)
984
985/*
986 * Initialization -- Must be called before calling any primitives.
987 */
988
989static void smp_init(void)
990{
991 int i;
992
993 spin_lock_init(&__thread_id_map_mutex);
994 __thread_id_map[0] = pthread_self();
995 for (i = 1; i < NR_THREADS; i++)
996 __thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
997 init_per_thread(smp_processor_id, 0);
998 if (pthread_key_create(&thread_id_key, NULL) != 0) {
999 perror("pthread_key_create");
1000 exit(-1);
1001 }
1002}
1003
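/*
 * Usage sketch (not part of the generated header): the expected calling
 * sequence for the thread helpers above -- smp_init() first, then
 * create_thread() for each worker, then wait_all_threads().  The worker
 * body is a placeholder.
 */
#if 0 /* usage sketch only */
static void *example_worker(void *arg)
{
	printf("worker %d started\n", (int)(long)arg);
	return NULL;
}

static void example_run(int nworkers)
{
	int i;

	smp_init();
	for (i = 0; i < nworkers; i++)
		create_thread(example_worker, (void *)(long)i);
	wait_all_threads();
}
#endif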
1004/* Taken from the Linux kernel source tree, so GPLv2-only!!! */
1005
1006#ifndef _LINUX_LIST_H
1007#define _LINUX_LIST_H
1008
1009#define LIST_POISON1 ((void *) 0x00100100)
1010#define LIST_POISON2 ((void *) 0x00200200)
1011
1012#define container_of(ptr, type, member) ({ \
1013 const typeof( ((type *)0)->member ) *__mptr = (ptr); \
1014 (type *)( (char *)__mptr - offsetof(type,member) );})
1015
1016#if 0
1017
1018/*
1019 * Simple doubly linked list implementation.
1020 *
1021 * Some of the internal functions ("__xxx") are useful when
1022 * manipulating whole lists rather than single entries, as
1023 * sometimes we already know the next/prev entries and we can
1024 * generate better code by using them directly rather than
1025 * using the generic single-entry routines.
1026 */
1027
1028struct list_head {
1029 struct list_head *next, *prev;
1030};
1031
1032#define LIST_HEAD_INIT(name) { &(name), &(name) }
1033
1034#define LIST_HEAD(name) \
1035 struct list_head name = LIST_HEAD_INIT(name)
1036
1037static inline void INIT_LIST_HEAD(struct list_head *list)
1038{
1039 list->next = list;
1040 list->prev = list;
1041}
1042
1043/*
1044 * Insert a new entry between two known consecutive entries.
1045 *
1046 * This is only for internal list manipulation where we know
1047 * the prev/next entries already!
1048 */
1049#ifndef CONFIG_DEBUG_LIST
1050static inline void __list_add(struct list_head *new,
1051 struct list_head *prev,
1052 struct list_head *next)
1053{
1054 next->prev = new;
1055 new->next = next;
1056 new->prev = prev;
1057 prev->next = new;
1058}
1059#else
1060extern void __list_add(struct list_head *new,
1061 struct list_head *prev,
1062 struct list_head *next);
1063#endif
1064
1065/**
1066 * list_add - add a new entry
1067 * @new: new entry to be added
1068 * @head: list head to add it after
1069 *
1070 * Insert a new entry after the specified head.
1071 * This is good for implementing stacks.
1072 */
1073static inline void list_add(struct list_head *new, struct list_head *head)
1074{
1075 __list_add(new, head, head->next);
1076}
1077
1078
1079/**
1080 * list_add_tail - add a new entry
1081 * @new: new entry to be added
1082 * @head: list head to add it before
1083 *
1084 * Insert a new entry before the specified head.
1085 * This is useful for implementing queues.
1086 */
1087static inline void list_add_tail(struct list_head *new, struct list_head *head)
1088{
1089 __list_add(new, head->prev, head);
1090}
1091
1092/*
1093 * Delete a list entry by making the prev/next entries
1094 * point to each other.
1095 *
1096 * This is only for internal list manipulation where we know
1097 * the prev/next entries already!
1098 */
1099static inline void __list_del(struct list_head * prev, struct list_head * next)
1100{
1101 next->prev = prev;
1102 prev->next = next;
1103}
1104
1105/**
1106 * list_del - deletes entry from list.
1107 * @entry: the element to delete from the list.
1108 * Note: list_empty() on entry does not return true after this, the entry is
1109 * in an undefined state.
1110 */
1111#ifndef CONFIG_DEBUG_LIST
1112static inline void list_del(struct list_head *entry)
1113{
1114 __list_del(entry->prev, entry->next);
1115 entry->next = LIST_POISON1;
1116 entry->prev = LIST_POISON2;
1117}
1118#else
1119extern void list_del(struct list_head *entry);
1120#endif
1121
1122/**
1123 * list_replace - replace old entry by new one
1124 * @old : the element to be replaced
1125 * @new : the new element to insert
1126 *
1127 * If @old was empty, it will be overwritten.
1128 */
1129static inline void list_replace(struct list_head *old,
1130 struct list_head *new)
1131{
1132 new->next = old->next;
1133 new->next->prev = new;
1134 new->prev = old->prev;
1135 new->prev->next = new;
1136}
1137
1138static inline void list_replace_init(struct list_head *old,
1139 struct list_head *new)
1140{
1141 list_replace(old, new);
1142 INIT_LIST_HEAD(old);
1143}
1144
1145/**
1146 * list_del_init - deletes entry from list and reinitialize it.
1147 * @entry: the element to delete from the list.
1148 */
1149static inline void list_del_init(struct list_head *entry)
1150{
1151 __list_del(entry->prev, entry->next);
1152 INIT_LIST_HEAD(entry);
1153}
1154
1155/**
1156 * list_move - delete from one list and add as another's head
1157 * @list: the entry to move
1158 * @head: the head that will precede our entry
1159 */
1160static inline void list_move(struct list_head *list, struct list_head *head)
1161{
1162 __list_del(list->prev, list->next);
1163 list_add(list, head);
1164}
1165
1166/**
1167 * list_move_tail - delete from one list and add as another's tail
1168 * @list: the entry to move
1169 * @head: the head that will follow our entry
1170 */
1171static inline void list_move_tail(struct list_head *list,
1172 struct list_head *head)
1173{
1174 __list_del(list->prev, list->next);
1175 list_add_tail(list, head);
1176}
1177
1178/**
1179 * list_is_last - tests whether @list is the last entry in list @head
1180 * @list: the entry to test
1181 * @head: the head of the list
1182 */
1183static inline int list_is_last(const struct list_head *list,
1184 const struct list_head *head)
1185{
1186 return list->next == head;
1187}
1188
1189/**
1190 * list_empty - tests whether a list is empty
1191 * @head: the list to test.
1192 */
1193static inline int list_empty(const struct list_head *head)
1194{
1195 return head->next == head;
1196}
1197
1198/**
1199 * list_empty_careful - tests whether a list is empty and not being modified
1200 * @head: the list to test
1201 *
1202 * Description:
1203 * tests whether a list is empty _and_ checks that no other CPU might be
1204 * in the process of modifying either member (next or prev)
1205 *
1206 * NOTE: using list_empty_careful() without synchronization
1207 * can only be safe if the only activity that can happen
1208 * to the list entry is list_del_init(). Eg. it cannot be used
1209 * if another CPU could re-list_add() it.
1210 */
1211static inline int list_empty_careful(const struct list_head *head)
1212{
1213 struct list_head *next = head->next;
1214 return (next == head) && (next == head->prev);
1215}
1216
1217/**
1218 * list_is_singular - tests whether a list has just one entry.
1219 * @head: the list to test.
1220 */
1221static inline int list_is_singular(const struct list_head *head)
1222{
1223 return !list_empty(head) && (head->next == head->prev);
1224}
1225
1226static inline void __list_cut_position(struct list_head *list,
1227 struct list_head *head, struct list_head *entry)
1228{
1229 struct list_head *new_first = entry->next;
1230 list->next = head->next;
1231 list->next->prev = list;
1232 list->prev = entry;
1233 entry->next = list;
1234 head->next = new_first;
1235 new_first->prev = head;
1236}
1237
1238/**
1239 * list_cut_position - cut a list into two
1240 * @list: a new list to add all removed entries
1241 * @head: a list with entries
1242 * @entry: an entry within head, could be the head itself
1243 * and if so we won't cut the list
1244 *
1245 * This helper moves the initial part of @head, up to and
1246 * including @entry, from @head to @list. You should
1247 * pass on @entry an element you know is on @head. @list
1248 * should be an empty list or a list you do not care about
1249 * losing its data.
1250 *
1251 */
1252static inline void list_cut_position(struct list_head *list,
1253 struct list_head *head, struct list_head *entry)
1254{
1255 if (list_empty(head))
1256 return;
1257 if (list_is_singular(head) &&
1258 (head->next != entry && head != entry))
1259 return;
1260 if (entry == head)
1261 INIT_LIST_HEAD(list);
1262 else
1263 __list_cut_position(list, head, entry);
1264}
1265
1266static inline void __list_splice(const struct list_head *list,
1267 struct list_head *prev,
1268 struct list_head *next)
1269{
1270 struct list_head *first = list->next;
1271 struct list_head *last = list->prev;
1272
1273 first->prev = prev;
1274 prev->next = first;
1275
1276 last->next = next;
1277 next->prev = last;
1278}
1279
1280/**
1281 * list_splice - join two lists, this is designed for stacks
1282 * @list: the new list to add.
1283 * @head: the place to add it in the first list.
1284 */
1285static inline void list_splice(const struct list_head *list,
1286 struct list_head *head)
1287{
1288 if (!list_empty(list))
1289 __list_splice(list, head, head->next);
1290}
1291
1292/**
1293 * list_splice_tail - join two lists, each list being a queue
1294 * @list: the new list to add.
1295 * @head: the place to add it in the first list.
1296 */
1297static inline void list_splice_tail(struct list_head *list,
1298 struct list_head *head)
1299{
1300 if (!list_empty(list))
1301 __list_splice(list, head->prev, head);
1302}
1303
1304/**
1305 * list_splice_init - join two lists and reinitialise the emptied list.
1306 * @list: the new list to add.
1307 * @head: the place to add it in the first list.
1308 *
1309 * The list at @list is reinitialised
1310 */
1311static inline void list_splice_init(struct list_head *list,
1312 struct list_head *head)
1313{
1314 if (!list_empty(list)) {
1315 __list_splice(list, head, head->next);
1316 INIT_LIST_HEAD(list);
1317 }
1318}
1319
1320/**
1321 * list_splice_tail_init - join two lists and reinitialise the emptied list
1322 * @list: the new list to add.
1323 * @head: the place to add it in the first list.
1324 *
1325 * Each of the lists is a queue.
1326 * The list at @list is reinitialised
1327 */
1328static inline void list_splice_tail_init(struct list_head *list,
1329 struct list_head *head)
1330{
1331 if (!list_empty(list)) {
1332 __list_splice(list, head->prev, head);
1333 INIT_LIST_HEAD(list);
1334 }
1335}
1336
1337/**
1338 * list_entry - get the struct for this entry
1339 * @ptr: the &struct list_head pointer.
1340 * @type: the type of the struct this is embedded in.
1341 * @member: the name of the list_struct within the struct.
1342 */
1343#define list_entry(ptr, type, member) \
1344 container_of(ptr, type, member)
1345
1346/**
1347 * list_first_entry - get the first element from a list
1348 * @ptr: the list head to take the element from.
1349 * @type: the type of the struct this is embedded in.
1350 * @member: the name of the list_struct within the struct.
1351 *
1352 * Note, that list is expected to be not empty.
1353 */
1354#define list_first_entry(ptr, type, member) \
1355 list_entry((ptr)->next, type, member)
1356
1357/**
1358 * list_for_each - iterate over a list
1359 * @pos: the &struct list_head to use as a loop cursor.
1360 * @head: the head for your list.
1361 */
1362#define list_for_each(pos, head) \
1363 for (pos = (head)->next; prefetch(pos->next), pos != (head); \
1364 pos = pos->next)
1365
1366/**
1367 * __list_for_each - iterate over a list
1368 * @pos: the &struct list_head to use as a loop cursor.
1369 * @head: the head for your list.
1370 *
1371 * This variant differs from list_for_each() in that it's the
1372 * simplest possible list iteration code, no prefetching is done.
1373 * Use this for code that knows the list to be very short (empty
1374 * or 1 entry) most of the time.
1375 */
1376#define __list_for_each(pos, head) \
1377 for (pos = (head)->next; pos != (head); pos = pos->next)
1378
1379/**
1380 * list_for_each_prev - iterate over a list backwards
1381 * @pos: the &struct list_head to use as a loop cursor.
1382 * @head: the head for your list.
1383 */
1384#define list_for_each_prev(pos, head) \
1385 for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
1386 pos = pos->prev)
1387
1388/**
1389 * list_for_each_safe - iterate over a list safe against removal of list entry
1390 * @pos: the &struct list_head to use as a loop cursor.
1391 * @n: another &struct list_head to use as temporary storage
1392 * @head: the head for your list.
1393 */
1394#define list_for_each_safe(pos, n, head) \
1395 for (pos = (head)->next, n = pos->next; pos != (head); \
1396 pos = n, n = pos->next)
1397
1398/**
1399 * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
1400 * @pos: the &struct list_head to use as a loop cursor.
1401 * @n: another &struct list_head to use as temporary storage
1402 * @head: the head for your list.
1403 */
1404#define list_for_each_prev_safe(pos, n, head) \
1405 for (pos = (head)->prev, n = pos->prev; \
1406 prefetch(pos->prev), pos != (head); \
1407 pos = n, n = pos->prev)
1408
1409/**
1410 * list_for_each_entry - iterate over list of given type
1411 * @pos: the type * to use as a loop cursor.
1412 * @head: the head for your list.
1413 * @member: the name of the list_struct within the struct.
1414 */
1415#define list_for_each_entry(pos, head, member) \
1416 for (pos = list_entry((head)->next, typeof(*pos), member); \
1417 prefetch(pos->member.next), &pos->member != (head); \
1418 pos = list_entry(pos->member.next, typeof(*pos), member))
1419
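/*
 * Usage sketch (this list section is compiled out, so this is for
 * illustration only): a user structure embeds a struct list_head, and
 * list_for_each_entry() walks the list while container_of() recovers each
 * enclosing element.  The element type is invented for the example.
 */
struct example_elem {
	int value;
	struct list_head list;	/* embedded linkage */
};

static inline int example_sum_list(struct list_head *head)
{
	struct example_elem *e;
	int sum = 0;

	list_for_each_entry(e, head, list)
		sum += e->value;
	return sum;
}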
1420/**
1421 * list_for_each_entry_reverse - iterate backwards over list of given type.
1422 * @pos: the type * to use as a loop cursor.
1423 * @head: the head for your list.
1424 * @member: the name of the list_struct within the struct.
1425 */
1426#define list_for_each_entry_reverse(pos, head, member) \
1427 for (pos = list_entry((head)->prev, typeof(*pos), member); \
1428 prefetch(pos->member.prev), &pos->member != (head); \
1429 pos = list_entry(pos->member.prev, typeof(*pos), member))
1430
1431/**
1432 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
1433 * @pos: the type * to use as a start point
1434 * @head: the head of the list
1435 * @member: the name of the list_struct within the struct.
1436 *
1437 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
1438 */
1439#define list_prepare_entry(pos, head, member) \
1440 ((pos) ? : list_entry(head, typeof(*pos), member))
1441
1442/**
1443 * list_for_each_entry_continue - continue iteration over list of given type
1444 * @pos: the type * to use as a loop cursor.
1445 * @head: the head for your list.
1446 * @member: the name of the list_struct within the struct.
1447 *
1448 * Continue to iterate over list of given type, continuing after
1449 * the current position.
1450 */
1451#define list_for_each_entry_continue(pos, head, member) \
1452 for (pos = list_entry(pos->member.next, typeof(*pos), member); \
1453 prefetch(pos->member.next), &pos->member != (head); \
1454 pos = list_entry(pos->member.next, typeof(*pos), member))
1455
1456/**
1457 * list_for_each_entry_continue_reverse - iterate backwards from the given point
1458 * @pos: the type * to use as a loop cursor.
1459 * @head: the head for your list.
1460 * @member: the name of the list_struct within the struct.
1461 *
1462 * Start to iterate over list of given type backwards, continuing after
1463 * the current position.
1464 */
1465#define list_for_each_entry_continue_reverse(pos, head, member) \
1466 for (pos = list_entry(pos->member.prev, typeof(*pos), member); \
1467 prefetch(pos->member.prev), &pos->member != (head); \
1468 pos = list_entry(pos->member.prev, typeof(*pos), member))
1469
1470/**
1471 * list_for_each_entry_from - iterate over list of given type from the current point
1472 * @pos: the type * to use as a loop cursor.
1473 * @head: the head for your list.
1474 * @member: the name of the list_struct within the struct.
1475 *
1476 * Iterate over list of given type, continuing from current position.
1477 */
1478#define list_for_each_entry_from(pos, head, member) \
1479 for (; prefetch(pos->member.next), &pos->member != (head); \
1480 pos = list_entry(pos->member.next, typeof(*pos), member))
1481
1482/**
1483 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
1484 * @pos: the type * to use as a loop cursor.
1485 * @n: another type * to use as temporary storage
1486 * @head: the head for your list.
1487 * @member: the name of the list_struct within the struct.
1488 */
1489#define list_for_each_entry_safe(pos, n, head, member) \
1490 for (pos = list_entry((head)->next, typeof(*pos), member), \
1491 n = list_entry(pos->member.next, typeof(*pos), member); \
1492 &pos->member != (head); \
1493 pos = n, n = list_entry(n->member.next, typeof(*n), member))
1494
1495/**
1496 * list_for_each_entry_safe_continue
1497 * @pos: the type * to use as a loop cursor.
1498 * @n: another type * to use as temporary storage
1499 * @head: the head for your list.
1500 * @member: the name of the list_struct within the struct.
1501 *
1502 * Iterate over list of given type, continuing after current point,
1503 * safe against removal of list entry.
1504 */
1505#define list_for_each_entry_safe_continue(pos, n, head, member) \
1506 for (pos = list_entry(pos->member.next, typeof(*pos), member), \
1507 n = list_entry(pos->member.next, typeof(*pos), member); \
1508 &pos->member != (head); \
1509 pos = n, n = list_entry(n->member.next, typeof(*n), member))
1510
1511/**
1512 * list_for_each_entry_safe_from
1513 * @pos: the type * to use as a loop cursor.
1514 * @n: another type * to use as temporary storage
1515 * @head: the head for your list.
1516 * @member: the name of the list_struct within the struct.
1517 *
1518 * Iterate over list of given type from current point, safe against
1519 * removal of list entry.
1520 */
1521#define list_for_each_entry_safe_from(pos, n, head, member) \
1522 for (n = list_entry(pos->member.next, typeof(*pos), member); \
1523 &pos->member != (head); \
1524 pos = n, n = list_entry(n->member.next, typeof(*n), member))
1525
1526/**
1527 * list_for_each_entry_safe_reverse
1528 * @pos: the type * to use as a loop cursor.
1529 * @n: another type * to use as temporary storage
1530 * @head: the head for your list.
1531 * @member: the name of the list_struct within the struct.
1532 *
1533 * Iterate backwards over list of given type, safe against removal
1534 * of list entry.
1535 */
1536#define list_for_each_entry_safe_reverse(pos, n, head, member) \
1537 for (pos = list_entry((head)->prev, typeof(*pos), member), \
1538 n = list_entry(pos->member.prev, typeof(*pos), member); \
1539 &pos->member != (head); \
1540 pos = n, n = list_entry(n->member.prev, typeof(*n), member))
1541
1542#endif //0
1543
1544/*
1545 * Double linked lists with a single pointer list head.
1546 * Mostly useful for hash tables where the two pointer list head is
1547 * too wasteful.
1548 * You lose the ability to access the tail in O(1).
1549 */
1550
1551struct hlist_head {
1552 struct hlist_node *first;
1553};
1554
1555struct hlist_node {
1556 struct hlist_node *next, **pprev;
1557};
1558
1559#define HLIST_HEAD_INIT { .first = NULL }
1560#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
1561#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
1562static inline void INIT_HLIST_NODE(struct hlist_node *h)
1563{
1564 h->next = NULL;
1565 h->pprev = NULL;
1566}
1567
1568static inline int hlist_unhashed(const struct hlist_node *h)
1569{
1570 return !h->pprev;
1571}
1572
1573static inline int hlist_empty(const struct hlist_head *h)
1574{
1575 return !h->first;
1576}
1577
1578static inline void __hlist_del(struct hlist_node *n)
1579{
1580 struct hlist_node *next = n->next;
1581 struct hlist_node **pprev = n->pprev;
1582 *pprev = next;
1583 if (next)
1584 next->pprev = pprev;
1585}
1586
1587static inline void hlist_del(struct hlist_node *n)
1588{
1589 __hlist_del(n);
1590 n->next = LIST_POISON1;
1591 n->pprev = LIST_POISON2;
1592}
1593
1594static inline void hlist_del_init(struct hlist_node *n)
1595{
1596 if (!hlist_unhashed(n)) {
1597 __hlist_del(n);
1598 INIT_HLIST_NODE(n);
1599 }
1600}
1601
1602static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
1603{
1604 struct hlist_node *first = h->first;
1605 n->next = first;
1606 if (first)
1607 first->pprev = &n->next;
1608 h->first = n;
1609 n->pprev = &h->first;
1610}
1611
1612/* next must be != NULL */
1613static inline void hlist_add_before(struct hlist_node *n,
1614 struct hlist_node *next)
1615{
1616 n->pprev = next->pprev;
1617 n->next = next;
1618 next->pprev = &n->next;
1619 *(n->pprev) = n;
1620}
1621
1622static inline void hlist_add_after(struct hlist_node *n,
1623 struct hlist_node *next)
1624{
1625 next->next = n->next;
1626 n->next = next;
1627 next->pprev = &n->next;
1628
1629 if(next->next)
1630 next->next->pprev = &next->next;
1631}
1632
1633/*
1634 * Move a list from one list head to another. Fixup the pprev
1635 * reference of the first entry if it exists.
1636 */
1637static inline void hlist_move_list(struct hlist_head *old,
1638 struct hlist_head *new)
1639{
1640 new->first = old->first;
1641 if (new->first)
1642 new->first->pprev = &new->first;
1643 old->first = NULL;
1644}
1645
1646#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
1647
1648#define hlist_for_each(pos, head) \
1649 for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
1650 pos = pos->next)
1651
1652#define hlist_for_each_safe(pos, n, head) \
1653 for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
1654 pos = n)
1655
1656/**
1657 * hlist_for_each_entry - iterate over list of given type
1658 * @tpos: the type * to use as a loop cursor.
1659 * @pos: the &struct hlist_node to use as a loop cursor.
1660 * @head: the head for your list.
1661 * @member: the name of the hlist_node within the struct.
1662 */
1663#define hlist_for_each_entry(tpos, pos, head, member) \
1664 for (pos = (head)->first; \
1665 pos && ({ prefetch(pos->next); 1;}) && \
1666 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
1667 pos = pos->next)
1668
1669/**
1670 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
1671 * @tpos: the type * to use as a loop cursor.
1672 * @pos: the &struct hlist_node to use as a loop cursor.
1673 * @member: the name of the hlist_node within the struct.
1674 */
1675#define hlist_for_each_entry_continue(tpos, pos, member) \
1676 for (pos = (pos)->next; \
1677 pos && ({ prefetch(pos->next); 1;}) && \
1678 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
1679 pos = pos->next)
1680
1681/**
1682 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
1683 * @tpos: the type * to use as a loop cursor.
1684 * @pos: the &struct hlist_node to use as a loop cursor.
1685 * @member: the name of the hlist_node within the struct.
1686 */
1687#define hlist_for_each_entry_from(tpos, pos, member) \
1688 for (; pos && ({ prefetch(pos->next); 1;}) && \
1689 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
1690 pos = pos->next)
1691
1692/**
1693 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
1694 * @tpos: the type * to use as a loop cursor.
1695 * @pos: the &struct hlist_node to use as a loop cursor.
1696 * @n: another &struct hlist_node to use as temporary storage
1697 * @head: the head for your list.
1698 * @member: the name of the hlist_node within the struct.
1699 */
1700#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
1701 for (pos = (head)->first; \
1702 pos && ({ n = pos->next; 1; }) && \
1703 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
1704 pos = n)
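/*
 * Usage sketch (not part of the generated header): a small fixed-size hash
 * table built on hlist_head/hlist_node, filled with hlist_add_head() and
 * searched with hlist_for_each_entry().  All names are invented for the
 * example.
 */
#if 0 /* usage sketch only */
#define EXAMPLE_HASH_SIZE 16

struct example_node {
	int key;
	struct hlist_node hash;	/* embedded linkage */
};

static struct hlist_head example_table[EXAMPLE_HASH_SIZE];

static void example_insert(struct example_node *node)
{
	unsigned int b = (unsigned int)node->key % EXAMPLE_HASH_SIZE;

	hlist_add_head(&node->hash, &example_table[b]);
}

static struct example_node *example_lookup(int key)
{
	unsigned int b = (unsigned int)key % EXAMPLE_HASH_SIZE;
	struct example_node *node;
	struct hlist_node *pos;

	hlist_for_each_entry(node, pos, &example_table[b], hash)
		if (node->key == key)
			return node;
	return NULL;
}
#endif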
1705
1706#endif