#ifndef _URCU_H
#define _URCU_H

/*
 * urcu.h
 *
 * Userspace RCU header
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Credit to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * for inspiration drawn from the Linux kernel RCU and rcu-preempt.
 *
 * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
 * and rcu_dereference primitives come from the Linux kernel.
 *
 * Distributed under GPLv2
 */

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

/* x86 32/64 specific */
#define mb() asm volatile("mfence" ::: "memory")
#define rmb() asm volatile("lfence" ::: "memory")
#define wmb() asm volatile("sfence" ::: "memory")

/* Atomically increment the integer pointed to by v (x86 "lock; incl"). */
static inline void atomic_inc(int *v)
{
	asm volatile("lock; incl %0"
		     : "+m" (*v));
}

/* Nop everywhere except on Alpha. */
#define smp_read_barrier_depends()

/*
 * Prevent the compiler from merging or refetching accesses. The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering. One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time. Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
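
/*
 * Usage sketch (illustrative only, not part of the API): force the compiler
 * to reload a shared variable on every loop iteration instead of caching it
 * in a register. The names "need_exit" and "do_work" are hypothetical.
 *
 *	extern int need_exit;
 *
 *	void worker(void)
 *	{
 *		while (!ACCESS_ONCE(need_exit))
 *			do_work();
 *	}
 */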

/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section. This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p) ({ \
	typeof(p) _________p1 = ACCESS_ONCE(p); \
	smp_read_barrier_depends(); \
	(_________p1); \
})
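
/*
 * Usage sketch (illustrative): "gp" is a hypothetical RCU-protected global
 * pointer. A reader fetches it through rcu_dereference() rather than a
 * plain load:
 *
 *	struct mydata *p = rcu_dereference(gp);
 *
 * A complete read-side critical section is sketched after
 * rcu_read_unlock() below.
 */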

#define SIGURCU SIGUSR1

/* Global quiescent period parity */
extern int urcu_qparity;

extern int __thread urcu_active_readers[2];

static inline int get_urcu_qparity(void)
{
	return urcu_qparity;
}

/*
 * The variable that urcu_parity points to should be declared on the
 * caller's stack.
 */
static inline void rcu_read_lock(int *urcu_parity)
{
	*urcu_parity = get_urcu_qparity();
	urcu_active_readers[*urcu_parity]++;
	/*
	 * Increment active readers count before accessing the pointer.
	 * See force_mb_all_threads().
	 */
	barrier();
}

static inline void rcu_read_unlock(int *urcu_parity)
{
	barrier();
	/*
	 * Finish using the RCU-protected data before decrementing the
	 * active readers count.
	 * See force_mb_all_threads().
	 */
	urcu_active_readers[*urcu_parity]--;
}
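
/*
 * Usage sketch (illustrative): a complete read-side critical section. The
 * parity variable lives on the reader's stack, as noted above. The names
 * "gp", "struct mydata" and "do_something" are hypothetical.
 *
 *	void reader_access(void)
 *	{
 *		struct mydata *p;
 *		int parity;
 *
 *		rcu_read_lock(&parity);
 *		p = rcu_dereference(gp);
 *		if (p)
 *			do_something(p->value);
 *		rcu_read_unlock(&parity);
 *	}
 */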

extern void *urcu_publish_content(void **ptr, void *new);
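
/*
 * Usage sketch (illustrative): a writer publishing a new version of an
 * RCU-protected structure. This assumes urcu_publish_content() replaces
 * *ptr with new and returns the previous pointer only once pre-existing
 * readers can no longer hold a reference to it, so the caller may free it;
 * see the implementation for the exact guarantee. "gp" and "struct mydata"
 * are hypothetical names.
 *
 *	void writer_update(struct mydata *new_version)
 *	{
 *		struct mydata *old;
 *
 *		old = urcu_publish_content((void **)&gp, new_version);
 *		free(old);
 *	}
 */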

/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);
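
/*
 * Usage sketch (illustrative): each reader thread registers itself before
 * its first read-side critical section and unregisters before exiting.
 * "reader_access" refers to the sketch above; "stop_requested" is a
 * hypothetical termination flag.
 *
 *	void *reader_thread(void *arg)
 *	{
 *		urcu_register_thread();
 *		while (!ACCESS_ONCE(stop_requested))
 *			reader_access();
 *		urcu_unregister_thread();
 *		return NULL;
 *	}
 */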

#endif /* _URCU_H */