#ifndef _URCU_H
#define _URCU_H

/*
 * urcu.h
 *
 * Userspace RCU header
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Credit to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * for inspiration coming from the Linux kernel RCU and rcu-preempt.
 *
 * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
 * and rcu_dereference primitives come from the Linux kernel.
 *
 * Distributed under GPLv2
 */

#include <stdlib.h>

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

/* x86 32/64 specific */
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")

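/*
 * Note: barrier() constrains only the compiler, while mb()/rmb()/wmb()
 * also emit fence instructions constraining the CPU.  A minimal sketch
 * (the "data" and "flag" variables are hypothetical): the writer orders
 * its payload store before the flag store.
 *
 *	data = 42;
 *	wmb();		// payload must be visible before the flag
 *	flag = 1;
 */
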
/* Atomically increment *v (x86 lock-prefixed incl). */
static inline void atomic_inc(int *v)
{
	asm volatile("lock; incl %0"
		     : "+m" (*v));
}

/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()

/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time.  Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

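/*
 * Illustrative sketch, not part of this header: polling a flag that
 * another thread sets.  Without ACCESS_ONCE() the compiler may hoist the
 * load out of the loop and spin forever on a stale value.  The "ready"
 * variable is a hypothetical example name.
 *
 *	extern int ready;
 *
 *	while (!ACCESS_ONCE(ready))
 *		sched_yield();
 */
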
/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section. This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p)     ({ \
				typeof(p) _________p1 = ACCESS_ONCE(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})

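/*
 * Illustrative read-side sketch, not part of this header ("struct mydata"
 * and "shared_ptr" are hypothetical names): the reader brackets the access
 * with rcu_read_lock()/rcu_read_unlock(), defined below, and fetches the
 * shared pointer through rcu_dereference().
 *
 *	struct mydata { int a; };
 *	extern struct mydata *shared_ptr;
 *
 *	rcu_read_lock();
 *	struct mydata *p = rcu_dereference(shared_ptr);
 *	int a = p ? p->a : -1;
 *	rcu_read_unlock();
 */
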
#define SIGURCU SIGUSR1

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>

#define YIELD_READ (1 << 0)
#define YIELD_WRITE (1 << 1)

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			sched_yield();
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			sched_yield();
}

static inline void debug_yield_init(void)
{
	/* Seed the per-thread PRNG; needs <time.h> and <pthread.h>. */
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

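/*
 * Usage note: one way to exercise the race-provocation helpers above is to
 * build with -DDEBUG_YIELD and, in the test program, set
 *
 *	yield_active = YIELD_READ | YIELD_WRITE;
 *
 * so that readers and writers randomly call sched_yield() around their
 * accesses.
 */
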
/*
 * Limiting the nesting level to 256 to keep instructions small in the read
 * fast-path.
 */
#define RCU_GP_COUNT		(1U << 0)
#define RCU_GP_CTR_BIT		(1U << 8)
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)

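/*
 * Worked example, derived from the masks above: bits 0-7 of a reader's
 * counter hold its read-side nesting count (stepped by RCU_GP_COUNT);
 * bit 8 (RCU_GP_CTR_BIT) is the grace-period phase bit taken from
 * urcu_gp_ctr.  A reader nested twice while the phase bit is set holds
 * (1U << 8) + 2 = 0x102.
 */
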
/* Global quiescent period counter with low-order bits unused. */
extern int urcu_gp_ctr;

extern int __thread urcu_active_readers;

/*
 * Returns nonzero if the reader owning *value is within a read-side
 * critical section (nonzero nesting count) that started before the
 * current grace period (phase bit differs from urcu_gp_ctr).
 */
static inline int rcu_old_gp_ongoing(int *value)
{
	int v;

	if (value == NULL)
		return 0;
	debug_yield_write();
	v = ACCESS_ONCE(*value);
	debug_yield_write();
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ ACCESS_ONCE(urcu_gp_ctr)) & RCU_GP_CTR_BIT);
}

/*
 * Marks entry into a read-side critical section.  On outermost entry,
 * snapshot the global counter; on nested calls, just add RCU_GP_COUNT to
 * the nesting count.
 */
static inline void rcu_read_lock(void)
{
	int tmp;

	debug_yield_read();
	tmp = urcu_active_readers;
	debug_yield_read();
	if (!(tmp & RCU_GP_CTR_NEST_MASK))
		urcu_active_readers = urcu_gp_ctr + RCU_GP_COUNT;
	else
		urcu_active_readers = tmp + RCU_GP_COUNT;
	debug_yield_read();
	/*
	 * Increment active readers count before accessing the pointer.
	 * See force_mb_all_threads().
	 */
	barrier();
	debug_yield_read();
}

static inline void rcu_read_unlock(void)
{
	debug_yield_read();
	barrier();
	debug_yield_read();
	/*
	 * Finish using rcu before decrementing the active readers count.
	 * See force_mb_all_threads().
	 */
	urcu_active_readers -= RCU_GP_COUNT;
	debug_yield_read();
}

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections. Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment.  More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v)	\
	({				\
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL))	\
			wmb();		\
		(p) = (v);		\
	})

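/*
 * Illustrative publication sketch, not part of this header, reusing the
 * hypothetical names from the read-side sketch above: initialize the
 * structure fully before publishing the pointer.
 *
 *	struct mydata *new = malloc(sizeof(*new));
 *	new->a = 42;
 *	rcu_assign_pointer(shared_ptr, new);
 */
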
extern void *urcu_publish_content(void **ptr, void *new);
extern void synchronize_rcu(void);

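/*
 * Illustrative reclaim sketch, not part of this header: the writer
 * replaces the shared pointer, waits for a grace period so that all
 * pre-existing readers have left their critical sections, then frees the
 * old structure.  Judging by its signature, urcu_publish_content() bundles
 * the replace-and-wait steps and hands back the previous pointer.
 *
 *	struct mydata *old = shared_ptr;
 *	rcu_assign_pointer(shared_ptr, new);
 *	synchronize_rcu();
 *	free(old);
 */
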
/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);

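/*
 * Illustrative reader-thread sketch, not part of this header: each reader
 * thread registers itself before its first read-side critical section and
 * unregisters before exiting.
 *
 *	void *reader_thread(void *arg)
 *	{
 *		int i;
 *
 *		urcu_register_thread();
 *		for (i = 0; i < 1000; i++) {
 *			rcu_read_lock();
 *			... access RCU-protected data ...
 *			rcu_read_unlock();
 *		}
 *		urcu_unregister_thread();
 *		return NULL;
 *	}
 */
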
#endif /* _URCU_H */