fix api.h generation on x86 and powerpc
[urcu.git] / tests / api_x86.h
/* MECHANICALLY GENERATED, DO NOT EDIT!!! */

#define _INCLUDE_API_H

/*
 * common.h: Common Linux kernel-isms.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; but version 2 of the License only due
 * to code included from the Linux kernel.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 *
 * Much code taken from the Linux kernel.  For such code, the option
 * to redistribute under later versions of GPL might not be available.
 */

#include <urcu/arch.h>

#ifndef __always_inline
#define __always_inline	inline
#endif

#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
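/*
 * Usage sketch (illustrative, not part of the generated header): these
 * macros turn a compile-time condition into a char array whose size is
 * negative when the condition holds, so the compiler rejects the build.
 * The struct name below is hypothetical.
 *
 *	struct foo { int a; int b; };
 *	BUILD_BUG_ON(sizeof(struct foo) > 8);	// build fails if foo grows
 */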

#ifdef __ASSEMBLY__
# define stringify_in_c(...)	__VA_ARGS__
# define ASM_CONST(x)		x
#else
/* This version of stringify will deal with commas... */
# define __stringify_in_c(...)	#__VA_ARGS__
# define stringify_in_c(...)	__stringify_in_c(__VA_ARGS__) " "
# define __ASM_CONST(x)		x##UL
# define ASM_CONST(x)		__ASM_CONST(x)
#endif


/*
 * arch-i386.h: Expose x86 atomic instructions.  80486 and better only.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, but version 2 only due to inclusion
 * of Linux-kernel code.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 *
 * Much code taken from the Linux kernel.  For such code, the option
 * to redistribute under later versions of GPL might not be available.
 */

/*
 * Machine parameters.
 */

/* #define CACHE_LINE_SIZE 64 */
#define ____cacheline_internodealigned_in_smp \
	__attribute__((__aligned__(1 << 6)))

#define LOCK_PREFIX "lock ; "

#if 0 /* duplicate with arch_atomic.h */

/*
 * Atomic data structure, initialization, and access.
 */

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)  { (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))

/*
 * Atomic operations.
 */

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "addl %1,%0"
		:"+m" (v->counter)
		:"ir" (i));
}

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic_sub(int i, atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "subl %1,%0"
		:"+m" (v->counter)
		:"ir" (i));
}

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "subl %2,%0; sete %1"
		:"+m" (v->counter), "=qm" (c)
		:"ir" (i) : "memory");
	return c;
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "incl %0"
		:"+m" (v->counter));
}

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __inline__ void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK_PREFIX "decl %0"
		:"+m" (v->counter));
}

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "decl %0; sete %1"
		:"+m" (v->counter), "=qm" (c)
		: : "memory");
	return c != 0;
}
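/*
 * Usage sketch (illustrative only, assuming this #if 0 block were
 * enabled): a reference count built from these primitives.  The names
 * refcount and release_object are hypothetical.
 *
 *	atomic_t refcount = ATOMIC_INIT(1);
 *
 *	void get_object(void)
 *	{
 *		atomic_inc(&refcount);
 *	}
 *
 *	void put_object(void)
 *	{
 *		if (atomic_dec_and_test(&refcount))
 *			release_object();	// last reference dropped
 *	}
 */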

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "incl %0; sete %1"
		:"+m" (v->counter), "=qm" (c)
		: : "memory");
	return c != 0;
}

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __inline__ int atomic_add_negative(int i, atomic_t *v)
{
	unsigned char c;

	__asm__ __volatile__(
		LOCK_PREFIX "addl %2,%0; sets %1"
		:"+m" (v->counter), "=qm" (c)
		:"ir" (i) : "memory");
	return c;
}

/**
 * atomic_add_return - add and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __inline__ int atomic_add_return(int i, atomic_t *v)
{
	int __i;

	__i = i;
	__asm__ __volatile__(
		LOCK_PREFIX "xaddl %0, %1;"
		:"=r"(i)
		:"m"(v->counter), "0"(i));
	/* xaddl leaves the old counter value in %0, so old + @i is new. */
	return i + __i;
}

static __inline__ int atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i,v);
}

static inline unsigned int
cmpxchg(volatile long *ptr, long oldval, long newval)
{
	unsigned long retval;

	asm("# cmpxchg\n"
	    "lock; cmpxchgl %4,(%2)\n"
	    "# end atomic_cmpxchg4"
	    : "=a" (retval), "=m" (*ptr)
	    : "r" (ptr), "0" (oldval), "r" (newval), "m" (*ptr)
	    : "cc");
	return (retval);
}

#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	for (;;) {						\
		if (unlikely(c == (u)))				\
			break;					\
		old = atomic_cmpxchg((v), c, c + (a));		\
		if (likely(old == c))				\
			break;					\
		c = old;					\
	}							\
	c != (u);						\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
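/*
 * Usage sketch (illustrative only): atomic_add_unless() is the classic
 * compare-and-swap retry loop, and atomic_inc_not_zero() is its most
 * common instantiation -- taking a reference only if the object is not
 * already dying.  The names below are hypothetical.
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	// object already being torn down
 *	return obj;
 */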

#define atomic_inc_return(v)  (atomic_add_return(1,v))
#define atomic_dec_return(v)  (atomic_sub_return(1,v))

/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)),"m" (*addr) : "memory")

#define atomic_set_mask(mask, addr) \
__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
: : "r" (mask),"m" (*(addr)) : "memory")

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif //0

/*
 * api_pthreads.h: API mapping to pthreads environment.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.  However, please note that much
 * of the code in this file derives from the Linux kernel, and that such
 * code may not be available except under GPLv2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2006 Paul E. McKenney, IBM.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>	/* for offsetof(), used by container_of() */
#include <errno.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/time.h>	/* for gettimeofday(), used by get_microseconds() */
#define __USE_GNU
#include <pthread.h>
#include <sched.h>
#include <sys/param.h>
/* #include "atomic.h" */

/*
 * Compiler magic.
 */
#define container_of(ptr, type, member) ({			\
	const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
	(type *)( (char *)__mptr - offsetof(type,member) );})
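/*
 * Usage sketch (illustrative only): given a pointer to a member,
 * container_of() recovers a pointer to the enclosing structure.  The
 * struct and variable names are hypothetical.
 *
 *	struct widget {
 *		int id;
 *		struct hlist_node hash;
 *	};
 *
 *	struct hlist_node *p = ...;
 *	struct widget *w = container_of(p, struct widget, hash);
 */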

/*
 * Default machine parameters.
 */

#ifndef CACHE_LINE_SIZE
/* #define CACHE_LINE_SIZE 128 */
#endif /* #ifndef CACHE_LINE_SIZE */

/*
 * Exclusive locking primitives.
 */

typedef pthread_mutex_t spinlock_t;

#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER;
#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER

static void spin_lock_init(spinlock_t *sp)
{
	if (pthread_mutex_init(sp, NULL) != 0) {
		perror("spin_lock_init:pthread_mutex_init");
		exit(-1);
	}
}

static void spin_lock(spinlock_t *sp)
{
	if (pthread_mutex_lock(sp) != 0) {
		perror("spin_lock:pthread_mutex_lock");
		exit(-1);
	}
}

static void spin_unlock(spinlock_t *sp)
{
	if (pthread_mutex_unlock(sp) != 0) {
		perror("spin_unlock:pthread_mutex_unlock");
		exit(-1);
	}
}
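/*
 * Usage sketch (illustrative only): kernel-style spinlock calls mapped
 * onto a pthread mutex.  The lock name and the counter it protects are
 * hypothetical.
 *
 *	DEFINE_SPINLOCK(counter_lock);
 *	long counter;
 *
 *	spin_lock(&counter_lock);
 *	counter++;
 *	spin_unlock(&counter_lock);
 */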

#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0)
#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0)

/*
 * Thread creation/destruction primitives.
 */

typedef pthread_t thread_id_t;

#define NR_THREADS 128

#define __THREAD_ID_MAP_EMPTY 0
#define __THREAD_ID_MAP_WAITING 1
thread_id_t __thread_id_map[NR_THREADS];
spinlock_t __thread_id_map_mutex;

#define for_each_thread(t) \
	for (t = 0; t < NR_THREADS; t++)

#define for_each_running_thread(t) \
	for (t = 0; t < NR_THREADS; t++) \
		if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \
		    (__thread_id_map[t] != __THREAD_ID_MAP_WAITING))

pthread_key_t thread_id_key;

static int __smp_thread_id(void)
{
	int i;
	thread_id_t tid = pthread_self();

	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid) {
			long v = i + 1;  /* must be non-NULL. */

			if (pthread_setspecific(thread_id_key, (void *)v) != 0) {
				perror("pthread_setspecific");
				exit(-1);
			}
			return i;
		}
	}
	spin_lock(&__thread_id_map_mutex);
	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid) {
			spin_unlock(&__thread_id_map_mutex);
			return i;
		}
	}
	spin_unlock(&__thread_id_map_mutex);
	fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n",
		(int)tid, (int)tid);
	exit(-1);
}

static int smp_thread_id(void)
{
	void *id;

	id = pthread_getspecific(thread_id_key);
	if (id == NULL)
		return __smp_thread_id();
	return (long)id - 1;
}

static thread_id_t create_thread(void *(*func)(void *), void *arg)
{
	thread_id_t tid;
	int i;

	spin_lock(&__thread_id_map_mutex);
	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY)
			break;
	}
	if (i >= NR_THREADS) {
		spin_unlock(&__thread_id_map_mutex);
		fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
		exit(-1);
	}
	__thread_id_map[i] = __THREAD_ID_MAP_WAITING;
	spin_unlock(&__thread_id_map_mutex);
	if (pthread_create(&tid, NULL, func, arg) != 0) {
		perror("create_thread:pthread_create");
		exit(-1);
	}
	__thread_id_map[i] = tid;
	return tid;
}

static void *wait_thread(thread_id_t tid)
{
	int i;
	void *vp;

	for (i = 0; i < NR_THREADS; i++) {
		if (__thread_id_map[i] == tid)
			break;
	}
	if (i >= NR_THREADS) {
		fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n",
			(int)tid, (int)tid);
		exit(-1);
	}
	if (pthread_join(tid, &vp) != 0) {
		perror("wait_thread:pthread_join");
		exit(-1);
	}
	__thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
	return vp;
}

static void wait_all_threads(void)
{
	int i;
	thread_id_t tid;

	for (i = 1; i < NR_THREADS; i++) {
		tid = __thread_id_map[i];
		if (tid != __THREAD_ID_MAP_EMPTY &&
		    tid != __THREAD_ID_MAP_WAITING)
			(void)wait_thread(tid);
	}
}
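/*
 * Usage sketch (illustrative only): spawn a handful of workers and
 * reap them all.  The worker function and thread count are
 * hypothetical.
 *
 *	void *worker(void *arg)
 *	{
 *		// ... per-thread work, indexed by smp_thread_id() ...
 *		return NULL;
 *	}
 *
 *	smp_init();			// must precede create_thread()
 *	for (i = 0; i < 4; i++)
 *		create_thread(worker, NULL);
 *	wait_all_threads();
 */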

static void run_on(int cpu)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	sched_setaffinity(0, sizeof(mask), &mask);
}

/*
 * timekeeping -- very crude -- should use MONOTONIC...
 */

long long get_microseconds(void)
{
	struct timeval tv;

	if (gettimeofday(&tv, NULL) != 0)
		abort();
	return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec;
}
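/*
 * Usage sketch (illustrative only): bracket a measured region with two
 * calls and report the delta.  As the comment above notes, this is
 * wall-clock time, so it is vulnerable to clock adjustments.
 *
 *	long long t0 = get_microseconds();
 *	do_work();			// hypothetical workload
 *	printf("took %lld us\n", get_microseconds() - t0);
 */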

/*
 * Per-thread variables.
 */

#define DEFINE_PER_THREAD(type, name) \
	struct { \
		__typeof__(type) v \
			__attribute__((__aligned__(CACHE_LINE_SIZE))); \
	} __per_thread_##name[NR_THREADS];
#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)

#define per_thread(name, thread) __per_thread_##name[thread].v
#define __get_thread_var(name) per_thread(name, smp_thread_id())

#define init_per_thread(name, v) \
	do { \
		int __i_p_t_i; \
		for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \
			per_thread(name, __i_p_t_i) = v; \
	} while (0)
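/*
 * Usage sketch (illustrative only): each thread gets its own
 * cache-line-aligned slot, so threads can update their counters
 * without bouncing cache lines.  The counter name is hypothetical.
 *
 *	DEFINE_PER_THREAD(long, n_ops);
 *
 *	init_per_thread(n_ops, 0);
 *	__get_thread_var(n_ops)++;		// in each worker thread
 *	for_each_thread(t)
 *		total += per_thread(n_ops, t);	// after wait_all_threads()
 */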

/*
 * CPU traversal primitives.
 */

#ifndef NR_CPUS
#define NR_CPUS 16
#endif /* #ifndef NR_CPUS */

#define for_each_possible_cpu(cpu) \
	for (cpu = 0; cpu < NR_CPUS; cpu++)
#define for_each_online_cpu(cpu) \
	for (cpu = 0; cpu < NR_CPUS; cpu++)

/*
 * Per-CPU variables.
 */

#define DEFINE_PER_CPU(type, name) \
	struct { \
		__typeof__(type) v \
			__attribute__((__aligned__(CACHE_LINE_SIZE))); \
	} __per_cpu_##name[NR_CPUS]
#define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name)

DEFINE_PER_THREAD(int, smp_processor_id);

#define per_cpu(name, thread) __per_cpu_##name[thread].v
#define __get_cpu_var(name) per_cpu(name, smp_processor_id())

#define init_per_cpu(name, v) \
	do { \
		int __i_p_c_i; \
		for (__i_p_c_i = 0; __i_p_c_i < NR_CPUS; __i_p_c_i++) \
			per_cpu(name, __i_p_c_i) = v; \
	} while (0)
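/*
 * Usage sketch (illustrative only): per-CPU data mirrors the
 * per-thread scheme above, indexed by the emulated smp_processor_id.
 * The variable name is hypothetical.
 *
 *	DEFINE_PER_CPU(long, n_events);
 *
 *	init_per_cpu(n_events, 0);
 *	for_each_possible_cpu(cpu)
 *		total += per_cpu(n_events, cpu);
 */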

/*
 * CPU state checking (crowbarred).
 */

#define idle_cpu(cpu) 0
#define in_softirq() 1
#define hardirq_count() 0
#define PREEMPT_SHIFT   0
#define SOFTIRQ_SHIFT   (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT   (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define PREEMPT_BITS    8
#define SOFTIRQ_BITS    8

/*
 * CPU hotplug.
 */

struct notifier_block {
	int (*notifier_call)(struct notifier_block *, unsigned long, void *);
	struct notifier_block *next;
	int priority;
};

#define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
#define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
#define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
#define CPU_DYING		0x0008 /* CPU (unsigned)v not running any task,
					* not handling interrupts, soon dead */
#define CPU_POST_DEAD		0x0009 /* CPU (unsigned)v dead, cpu_hotplug
					* lock is dropped */

/* Used for CPU hotplug events occurring while tasks are frozen due to a
 * suspend operation in progress
 */
#define CPU_TASKS_FROZEN	0x0010

#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN	(CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
#define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN)

/* Hibernation and suspend events */
#define PM_HIBERNATION_PREPARE	0x0001 /* Going to hibernate */
#define PM_POST_HIBERNATION	0x0002 /* Hibernation finished */
#define PM_SUSPEND_PREPARE	0x0003 /* Going to suspend the system */
#define PM_POST_SUSPEND		0x0004 /* Suspend finished */
#define PM_RESTORE_PREPARE	0x0005 /* Going to restore a saved image */
#define PM_POST_RESTORE		0x0006 /* Restore failed */

#define NOTIFY_DONE		0x0000		/* Don't care */
#define NOTIFY_OK		0x0001		/* Suits me */
#define NOTIFY_STOP_MASK	0x8000		/* Don't call further */
#define NOTIFY_BAD		(NOTIFY_STOP_MASK|0x0002)
						/* Bad/Veto action */
/*
 * Clean way to return from the notifier and stop further calls.
 */
#define NOTIFY_STOP		(NOTIFY_OK|NOTIFY_STOP_MASK)

/*
 * Bug checks.
 */

/* Matches Linux-kernel semantics: abort if the condition is true. */
#define BUG_ON(c) do { if (c) abort(); } while (0)

/*
 * Initialization -- Must be called before calling any primitives.
 */

static void smp_init(void)
{
	int i;

	spin_lock_init(&__thread_id_map_mutex);
	__thread_id_map[0] = pthread_self();
	for (i = 1; i < NR_THREADS; i++)
		__thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
	init_per_thread(smp_processor_id, 0);
	if (pthread_key_create(&thread_id_key, NULL) != 0) {
		perror("pthread_key_create");
		exit(-1);
	}
}

/* Taken from the Linux kernel source tree, so GPLv2-only!!! */

#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

#define LIST_POISON1  ((void *) 0x00100100)
#define LIST_POISON2  ((void *) 0x00200200)

#define container_of(ptr, type, member) ({			\
	const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
	(type *)( (char *)__mptr - offsetof(type,member) );})

#if 0

/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)

static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
#ifndef CONFIG_DEBUG_LIST
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}
#else
extern void __list_add(struct list_head *new,
		       struct list_head *prev,
		       struct list_head *next);
#endif

/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}


/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}

/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
	next->prev = prev;
	prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty() on entry does not return true after this, the entry is
 * in an undefined state.
 */
#ifndef CONFIG_DEBUG_LIST
static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->next = LIST_POISON1;
	entry->prev = LIST_POISON2;
}
#else
extern void list_del(struct list_head *entry);
#endif

/**
 * list_replace - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * If @old was empty, it will be overwritten.
 */
static inline void list_replace(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
}

static inline void list_replace_init(struct list_head *old,
				     struct list_head *new)
{
	list_replace(old, new);
	INIT_LIST_HEAD(old);
}

/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	INIT_LIST_HEAD(entry);
}

/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add_tail(list, head);
}

/**
 * list_is_last - tests whether @list is the last entry in list @head
 * @list: the entry to test
 * @head: the head of the list
 */
static inline int list_is_last(const struct list_head *list,
			       const struct list_head *head)
{
	return list->next == head;
}

/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(const struct list_head *head)
{
	return head->next == head;
}

/**
 * list_empty_careful - tests whether a list is empty and not being modified
 * @head: the list to test
 *
 * Description:
 * tests whether a list is empty _and_ checks that no other CPU might be
 * in the process of modifying either member (next or prev)
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). Eg. it cannot be used
 * if another CPU could re-list_add() it.
 */
static inline int list_empty_careful(const struct list_head *head)
{
	struct list_head *next = head->next;
	return (next == head) && (next == head->prev);
}

/**
 * list_is_singular - tests whether a list has just one entry.
 * @head: the list to test.
 */
static inline int list_is_singular(const struct list_head *head)
{
	return !list_empty(head) && (head->next == head->prev);
}

static inline void __list_cut_position(struct list_head *list,
		struct list_head *head, struct list_head *entry)
{
	struct list_head *new_first = entry->next;
	list->next = head->next;
	list->next->prev = list;
	list->prev = entry;
	entry->next = list;
	head->next = new_first;
	new_first->prev = head;
}

/**
 * list_cut_position - cut a list into two
 * @list: a new list to add all removed entries
 * @head: a list with entries
 * @entry: an entry within head, could be the head itself
 *	and if so we won't cut the list
 *
 * This helper moves the initial part of @head, up to and
 * including @entry, from @head to @list. You should
 * pass on @entry an element you know is on @head. @list
 * should be an empty list or a list you do not care about
 * losing its data.
 *
 */
static inline void list_cut_position(struct list_head *list,
		struct list_head *head, struct list_head *entry)
{
	if (list_empty(head))
		return;
	if (list_is_singular(head) &&
		(head->next != entry && head != entry))
		return;
	if (entry == head)
		INIT_LIST_HEAD(list);
	else
		__list_cut_position(list, head, entry);
}

static inline void __list_splice(const struct list_head *list,
				 struct list_head *prev,
				 struct list_head *next)
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 * list_splice - join two lists, this is designed for stacks
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice(const struct list_head *list,
			       struct list_head *head)
{
	if (!list_empty(list))
		__list_splice(list, head, head->next);
}

/**
 * list_splice_tail - join two lists, each list being a queue
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice_tail(struct list_head *list,
				    struct list_head *head)
{
	if (!list_empty(list))
		__list_splice(list, head->prev, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised
 */
static inline void list_splice_init(struct list_head *list,
				    struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head, head->next);
		INIT_LIST_HEAD(list);
	}
}

/**
 * list_splice_tail_init - join two lists and reinitialise the emptied list
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * Each of the lists is a queue.
 * The list at @list is reinitialised
 */
static inline void list_splice_tail_init(struct list_head *list,
					 struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head->prev, head);
		INIT_LIST_HEAD(list);
	}
}

/**
 * list_entry - get the struct for this entry
 * @ptr:	the &struct list_head pointer.
 * @type:	the type of the struct this is embedded in.
 * @member:	the name of the list_struct within the struct.
 */
#define list_entry(ptr, type, member) \
	container_of(ptr, type, member)

/**
 * list_first_entry - get the first element from a list
 * @ptr:	the list head to take the element from.
 * @type:	the type of the struct this is embedded in.
 * @member:	the name of the list_struct within the struct.
 *
 * Note, that list is expected to be not empty.
 */
#define list_first_entry(ptr, type, member) \
	list_entry((ptr)->next, type, member)

/**
 * list_for_each - iterate over a list
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 */
#define list_for_each(pos, head) \
	for (pos = (head)->next; prefetch(pos->next), pos != (head); \
	     pos = pos->next)

/**
 * __list_for_each - iterate over a list
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)

/**
 * list_for_each_prev - iterate over a list backwards
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 */
#define list_for_each_prev(pos, head) \
	for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
	     pos = pos->prev)

/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos:	the &struct list_head to use as a loop cursor.
 * @n:		another &struct list_head to use as temporary storage
 * @head:	the head for your list.
 */
#define list_for_each_safe(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
	     pos = n, n = pos->next)

/**
 * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
 * @pos:	the &struct list_head to use as a loop cursor.
 * @n:		another &struct list_head to use as temporary storage
 * @head:	the head for your list.
 */
#define list_for_each_prev_safe(pos, n, head) \
	for (pos = (head)->prev, n = pos->prev; \
	     prefetch(pos->prev), pos != (head); \
	     pos = n, n = pos->prev)

/**
 * list_for_each_entry - iterate over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 */
#define list_for_each_entry(pos, head, member)				\
	for (pos = list_entry((head)->next, typeof(*pos), member);	\
	     prefetch(pos->member.next), &pos->member != (head);	\
	     pos = list_entry(pos->member.next, typeof(*pos), member))
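/*
 * Usage sketch (illustrative only, this block being under #if 0):
 * embed a list_head in your own struct, then iterate by type.  The
 * struct and list names are hypothetical.
 *
 *	struct widget {
 *		int id;
 *		struct list_head node;
 *	};
 *	LIST_HEAD(widgets);
 *	struct widget *w;
 *
 *	list_add_tail(&w->node, &widgets);	// enqueue
 *	list_for_each_entry(w, &widgets, node)
 *		printf("%d\n", w->id);
 */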

/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member)			\
	for (pos = list_entry((head)->prev, typeof(*pos), member);	\
	     prefetch(pos->member.prev), &pos->member != (head);	\
	     pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
 * @pos:	the type * to use as a start point
 * @head:	the head of the list
 * @member:	the name of the list_struct within the struct.
 *
 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
 */
#define list_prepare_entry(pos, head, member) \
	((pos) ? : list_entry(head, typeof(*pos), member))

/**
 * list_for_each_entry_continue - continue iteration over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define list_for_each_entry_continue(pos, head, member)			\
	for (pos = list_entry(pos->member.next, typeof(*pos), member);	\
	     prefetch(pos->member.next), &pos->member != (head);	\
	     pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_continue_reverse - iterate backwards from the given point
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Start to iterate over list of given type backwards, continuing after
 * the current position.
 */
#define list_for_each_entry_continue_reverse(pos, head, member)		\
	for (pos = list_entry(pos->member.prev, typeof(*pos), member);	\
	     prefetch(pos->member.prev), &pos->member != (head);	\
	     pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_for_each_entry_from - iterate over list of given type from the current point
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing from current position.
 */
#define list_for_each_entry_from(pos, head, member)			\
	for (; prefetch(pos->member.next), &pos->member != (head);	\
	     pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = list_entry((head)->next, typeof(*pos), member),	\
		n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_continue - continue list iteration safe against removal
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing after current point,
 * safe against removal of list entry.
 */
#define list_for_each_entry_safe_continue(pos, n, head, member)		\
	for (pos = list_entry(pos->member.next, typeof(*pos), member),	\
		n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_from - iterate over list from current point safe against removal
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate over list of given type from current point, safe against
 * removal of list entry.
 */
#define list_for_each_entry_safe_from(pos, n, head, member)		\
	for (n = list_entry(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
 * @pos:	the type * to use as a loop cursor.
 * @n:		another type * to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the list_struct within the struct.
 *
 * Iterate backwards over list of given type, safe against removal
 * of list entry.
 */
#define list_for_each_entry_safe_reverse(pos, n, head, member)		\
	for (pos = list_entry((head)->prev, typeof(*pos), member),	\
		n = list_entry(pos->member.prev, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = list_entry(n->member.prev, typeof(*n), member))

#endif //0

/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */

struct hlist_head {
	struct hlist_node *first;
};

struct hlist_node {
	struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
	h->next = NULL;
	h->pprev = NULL;
}

static inline int hlist_unhashed(const struct hlist_node *h)
{
	return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
	return !h->first;
}

static inline void __hlist_del(struct hlist_node *n)
{
	struct hlist_node *next = n->next;
	struct hlist_node **pprev = n->pprev;
	*pprev = next;
	if (next)
		next->pprev = pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
	__hlist_del(n);
	n->next = LIST_POISON1;
	n->pprev = LIST_POISON2;
}

static inline void hlist_del_init(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		INIT_HLIST_NODE(n);
	}
}

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;
	n->next = first;
	if (first)
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
				    struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	next->pprev = &n->next;
	*(n->pprev) = n;
}

static inline void hlist_add_after(struct hlist_node *n,
				   struct hlist_node *next)
{
	next->next = n->next;
	n->next = next;
	next->pprev = &n->next;

	if (next->next)
		next->next->pprev = &next->next;
}

/*
 * Move a list from one list head to another. Fixup the pprev
 * reference of the first entry if it exists.
 */
static inline void hlist_move_list(struct hlist_head *old,
				   struct hlist_head *new)
{
	new->first = old->first;
	if (new->first)
		new->first->pprev = &new->first;
	old->first = NULL;
}

#define hlist_entry(ptr, type, member) container_of(ptr,type,member)

#define hlist_for_each(pos, head) \
	for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
	     pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
	     pos = n)

/**
 * hlist_for_each_entry - iterate over list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member)			 \
	for (pos = (head)->first;					 \
	     pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)
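/*
 * Usage sketch (illustrative only): hlist heads are the usual choice
 * for hash-table buckets, since each bucket costs one pointer.  The
 * table size and struct are hypothetical.
 *
 *	struct hlist_head buckets[64];
 *	struct widget {
 *		int key;
 *		struct hlist_node hash;
 *	};
 *	struct widget *w;
 *	struct hlist_node *pos;
 *
 *	hlist_add_head(&w->hash, &buckets[w->key & 63]);
 *	hlist_for_each_entry(w, pos, &buckets[key & 63], hash)
 *		if (w->key == key)
 *			return w;
 */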

/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member)		 \
	for (pos = (pos)->next;						 \
	     pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member)			 \
	for (; pos && ({ prefetch(pos->next); 1;}) &&			 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @n:		another &struct hlist_node to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member)		 \
	for (pos = (head)->first;					 \
	     pos && ({ n = pos->next; 1; }) &&				 \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = n)

#endif