/*
 * buffers.c
 * LTTng userspace tracer buffering system
 *
 * Copyright (C) 2009 - Pierre-Marc Fournier (pierre-marc dot fournier at polymtl dot ca)
 * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <unistd.h>
#include <sys/mman.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <fcntl.h>
#include <ust/kernelcompat.h>
#include <kcompat/kref.h>
#include "buffers.h"
#include "channels.h"
#include "tracer.h"
#include "tracercore.h"
#include "usterr.h"

struct ltt_reserve_switch_offsets {
	long begin, end, old;
	long begin_switch, end_switch_current, end_switch_old;
	size_t before_hdr_pad, size;
};


static DEFINE_MUTEX(ust_buffers_channels_mutex);
static LIST_HEAD(ust_buffers_channels);

static int get_n_cpus(void)
{
	int result;
	static int n_cpus = 0;

	if(n_cpus) {
		return n_cpus;
	}

	/* On Linux, when some processors are offline
	 * _SC_NPROCESSORS_CONF counts the offline
	 * processors, whereas _SC_NPROCESSORS_ONLN
	 * does not. If we used _SC_NPROCESSORS_ONLN,
	 * getcpu() could return a value greater than
	 * this sysconf, in which case the arrays
	 * indexed by processor would overflow.
	 */
	result = sysconf(_SC_NPROCESSORS_CONF);
	if(result == -1) {
		return -1;
	}

	n_cpus = result;

	return result;
}

/* _ust_buffers_write()
 *
 * @buf: destination buffer
 * @offset: offset in destination
 * @src: source buffer
 * @len: length of source
 * @cpy: already copied
 */

void _ust_buffers_write(struct ust_buffer *buf, size_t offset,
	const void *src, size_t len, ssize_t cpy)
{
	do {
		len -= cpy;
		src += cpy;
		offset += cpy;

		WARN_ON(offset >= buf->buf_size);

		cpy = min_t(size_t, len, buf->buf_size - offset);
		ust_buffers_do_copy(buf->buf_data + offset, src, cpy);
	} while (unlikely(len != cpy));
}

static int ust_buffers_init_buffer(struct ust_trace *trace,
	struct ust_channel *ltt_chan,
	struct ust_buffer *buf,
	unsigned int n_subbufs);

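/*
 * Allocate the data area of one buffer as a page-aligned System V shared
 * memory segment, attach it, and immediately mark the segment for destruction
 * so it is reclaimed once every user has detached. Returns 0 on success,
 * -1 on error.
 */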
static int ust_buffers_alloc_buf(struct ust_buffer *buf, size_t *size)
{
	void *ptr;
	int result;

	*size = PAGE_ALIGN(*size);

	result = buf->shmid = shmget(getpid(), *size, IPC_CREAT | IPC_EXCL | 0700);
	if(result == -1 && errno == EINVAL) {
		ERR("shmget() returned EINVAL; maybe /proc/sys/kernel/shmmax should be increased.");
		return -1;
	}
	else if(result == -1) {
		PERROR("shmget");
		return -1;
	}

	/* FIXME: should have matching call to shmdt */
	ptr = shmat(buf->shmid, NULL, 0);
	if(ptr == (void *) -1) {
		perror("shmat");
		goto destroy_shmem;
	}

	/* Already mark the shared memory for destruction. This will occur only
	 * when all users have detached.
	 */
	result = shmctl(buf->shmid, IPC_RMID, NULL);
	if(result == -1) {
		perror("shmctl");
		return -1;
	}

	buf->buf_data = ptr;
	buf->buf_size = *size;

	return 0;

	destroy_shmem:
	result = shmctl(buf->shmid, IPC_RMID, NULL);
	if(result == -1) {
		perror("shmctl");
	}

	return -1;
}

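/*
 * Allocate the shared-memory data area for the per-cpu buffer of @channel
 * and take a reference on the channel for the lifetime of the buffer.
 */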
int ust_buffers_create_buf(struct ust_channel *channel, int cpu)
{
	int result;
	struct ust_buffer *buf = channel->buf[cpu];

	buf->cpu = cpu;
	result = ust_buffers_alloc_buf(buf, &channel->alloc_size);
	if(result)
		return -1;

	buf->chan = channel;
	kref_get(&channel->kref);
	return 0;
}

static void ust_buffers_destroy_channel(struct kref *kref)
{
	struct ust_channel *chan = container_of(kref, struct ust_channel, kref);
	free(chan);
}

static void ust_buffers_destroy_buf(struct ust_buffer *buf)
{
	struct ust_channel *chan = buf->chan;
	int result;

	result = munmap(buf->buf_data, buf->buf_size);
	if(result == -1) {
		PERROR("munmap");
	}

	//ust// chan->buf[buf->cpu] = NULL;
	free(buf);
	kref_put(&chan->kref, ust_buffers_destroy_channel);
}

/* called from kref_put */
static void ust_buffers_remove_buf(struct kref *kref)
{
	struct ust_buffer *buf = container_of(kref, struct ust_buffer, kref);
	ust_buffers_destroy_buf(buf);
}

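/*
 * Create and initialize the buffer of @chan for @cpu: allocate its data
 * area and initialize its kref and bookkeeping fields. Returns 0 on
 * success, -1 on error.
 */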
int ust_buffers_open_buf(struct ust_channel *chan, int cpu)
{
	int result;

	result = ust_buffers_create_buf(chan, cpu);
	if (result == -1)
		return -1;

	kref_init(&chan->buf[cpu]->kref);

	result = ust_buffers_init_buffer(chan->trace, chan, chan->buf[cpu], chan->subbuf_cnt);
	if(result == -1)
		return -1;

	return 0;

	/* FIXME: decrementally destroy on error? */
}

/**
 * ust_buffers_close_buf - close a channel buffer
 * @buf: buffer
 */
static void ust_buffers_close_buf(struct ust_buffer *buf)
{
	kref_put(&buf->kref, ust_buffers_remove_buf);
}

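/*
 * Open one buffer per cpu for @chan. @subbuf_size and @subbuf_cnt must be
 * non-zero powers of two; on failure, the buffers that were already opened
 * are closed again.
 */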
int ust_buffers_channel_open(struct ust_channel *chan, size_t subbuf_size, size_t subbuf_cnt)
{
	int i;
	int result;

	if(subbuf_size == 0 || subbuf_cnt == 0)
		return -1;

	/* Check that the subbuffer size is larger than a page. */
	WARN_ON_ONCE(subbuf_size < PAGE_SIZE);

	/*
	 * Make sure the number of subbuffers and subbuffer size are power of 2.
	 */
	WARN_ON_ONCE(hweight32(subbuf_size) != 1);
	WARN_ON(hweight32(subbuf_cnt) != 1);

	chan->version = UST_CHANNEL_VERSION;
	chan->subbuf_cnt = subbuf_cnt;
	chan->subbuf_size = subbuf_size;
	chan->subbuf_size_order = get_count_order(subbuf_size);
	chan->alloc_size = subbuf_size * subbuf_cnt;

	kref_init(&chan->kref);

	mutex_lock(&ust_buffers_channels_mutex);
	for(i=0; i<chan->n_cpus; i++) {
		result = ust_buffers_open_buf(chan, i);
		if (result == -1)
			goto error;
	}
	list_add(&chan->list, &ust_buffers_channels);
	mutex_unlock(&ust_buffers_channels_mutex);

	return 0;

	/* Jump directly inside the loop to close the buffers that were already
	 * opened. */
	for(; i>=0; i--) {
		ust_buffers_close_buf(chan->buf[i]);
error:
		do {} while(0);
	}

	kref_put(&chan->kref, ust_buffers_destroy_channel);
	mutex_unlock(&ust_buffers_channels_mutex);
	return -1;
}

void ust_buffers_channel_close(struct ust_channel *chan)
{
	int i;
	if(!chan)
		return;

	mutex_lock(&ust_buffers_channels_mutex);
	for(i=0; i<chan->n_cpus; i++) {
		/* FIXME: if we make it here, then all buffers were necessarily allocated. Moreover, we don't
		 * initialize to NULL so we cannot use this check. Should we? */
		//ust//	if (chan->buf[i])
			ust_buffers_close_buf(chan->buf[i]);
	}

	list_del(&chan->list);
	kref_put(&chan->kref, ust_buffers_destroy_channel);
	mutex_unlock(&ust_buffers_channels_mutex);
}

/*
 * -------
 */

static void ust_buffers_destroy_buffer(struct ust_channel *ltt_chan, int cpu);

static void ltt_force_switch(struct ust_buffer *buf,
		enum force_switch_mode mode);

/*
 * Trace callbacks
 */
static void ltt_buffer_begin(struct ust_buffer *buf,
			u64 tsc, unsigned int subbuf_idx)
{
	struct ust_channel *channel = buf->chan;
	struct ltt_subbuffer_header *header =
		(struct ltt_subbuffer_header *)
			ust_buffers_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);

	header->cycle_count_begin = tsc;
	header->data_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
	header->sb_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
	/* FIXME: add memory barrier? */
	ltt_write_trace_header(channel->trace, header);
}

/*
 * offset is assumed to never be 0 here : never deliver a completely empty
 * subbuffer. The lost size is between 0 and subbuf_size-1.
 */
static notrace void ltt_buffer_end(struct ust_buffer *buf,
		u64 tsc, unsigned int offset, unsigned int subbuf_idx)
{
	struct ltt_subbuffer_header *header =
		(struct ltt_subbuffer_header *)
			ust_buffers_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);
	u32 data_size = SUBBUF_OFFSET(offset - 1, buf->chan) + 1;

	header->data_size = data_size;
	header->sb_size = PAGE_ALIGN(data_size);
	header->cycle_count_end = tsc;
	header->events_lost = local_read(&buf->events_lost);
	header->subbuf_corrupt = local_read(&buf->corrupted_subbuffers);
	if(unlikely(header->events_lost > 0)) {
		DBG("Some events (%d) were lost in %s_%d", header->events_lost, buf->chan->channel_name, buf->cpu);
	}
}

/*
 * This function should not be called from NMI interrupt context
 */
static notrace void ltt_buf_unfull(struct ust_buffer *buf,
		unsigned int subbuf_idx,
		long offset)
{
}

/*
 * Promote compiler barrier to a smp_mb().
 * For the specific LTTng case, this IPI call should be removed if the
 * architecture does not reorder writes. This should eventually be provided by
 * a separate architecture-specific infrastructure.
 */
//ust// static void remote_mb(void *info)
//ust// {
//ust// 	smp_mb();
//ust// }

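/*
 * Consumer side: try to get exclusive read access to the next sub-buffer.
 * On success, returns 0 and stores the current consumed count in *consumed;
 * returns -EAGAIN if the sub-buffer is not fully committed yet or if it is
 * the one currently being written to.
 */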
int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed)
{
	struct ust_channel *channel = buf->chan;
	long consumed_old, consumed_idx, commit_count, write_offset;
//ust//	int retval;

	consumed_old = atomic_long_read(&buf->consumed);
	consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
	commit_count = local_read(&buf->commit_count[consumed_idx].cc_sb);
	/*
	 * Make sure we read the commit count before reading the buffer
	 * data and the write offset. Correct consumed offset ordering
	 * wrt commit count is ensured by the use of cmpxchg to update
	 * the consumed offset.
	 * smp_call_function_single can fail if the remote CPU is offline,
	 * this is OK because then there is no wmb to execute there.
	 * If our thread is executing on the same CPU as the one the buffer
	 * belongs to, we don't have to synchronize it at all. If we are
	 * migrated, the scheduler will take care of the memory barriers.
	 * Normally, smp_call_function_single() should ensure program order when
	 * executing the remote function, which implies that it surrounds the
	 * function execution with :
	 * smp_mb()
	 * send IPI
	 * csd_lock_wait
	 * recv IPI
	 * smp_mb()
	 * exec. function
	 * smp_mb()
	 * csd unlock
	 * smp_mb()
	 *
	 * However, smp_call_function_single() does not seem to clearly execute
	 * such barriers. It depends on spinlock semantics to provide the barrier
	 * before executing the IPI and, when busy-looping, csd_lock_wait only
	 * executes smp_mb() when it has to wait for the other CPU.
	 *
	 * I don't trust this code. Therefore, let's add the smp_mb() sequence
	 * required ourselves, even if duplicated. It has no performance impact
	 * anyway.
	 *
	 * smp_mb() is needed because smp_rmb() and smp_wmb() only order read vs
	 * read and write vs write. They do not ensure core synchronization. We
	 * really have to ensure total order between the 3 barriers running on
	 * the 2 CPUs.
	 */
//ust// #ifdef LTT_NO_IPI_BARRIER
	/*
	 * Local rmb to match the remote wmb to read the commit count before the
	 * buffer data and the write offset.
	 */
	smp_rmb();
//ust// #else
//ust//	if (raw_smp_processor_id() != buf->cpu) {
//ust//		smp_mb();	/* Total order with IPI handler smp_mb() */
//ust//		smp_call_function_single(buf->cpu, remote_mb, NULL, 1);
//ust//		smp_mb();	/* Total order with IPI handler smp_mb() */
//ust//	}
//ust// #endif

	write_offset = local_read(&buf->offset);
	/*
	 * Check that the subbuffer we are trying to consume has been
	 * already fully committed.
	 */
	if (((commit_count - buf->chan->subbuf_size)
	     & channel->commit_count_mask)
	    - (BUFFER_TRUNC(consumed_old, buf->chan)
	       >> channel->n_subbufs_order)
	    != 0) {
		return -EAGAIN;
	}
	/*
	 * Check that we are not about to read the same subbuffer in
	 * which the writer head is.
	 */
	if ((SUBBUF_TRUNC(write_offset, buf->chan)
	   - SUBBUF_TRUNC(consumed_old, buf->chan))
	   == 0) {
		return -EAGAIN;
	}

	/* FIXME: is this ok to disable the reading feature? */
//ust//	retval = update_read_sb_index(buf, consumed_idx);
//ust//	if (retval)
//ust//		return retval;

	*consumed = consumed_old;

	return 0;
}

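/*
 * Consumer side: release the sub-buffer obtained with
 * ust_buffers_get_subbuf() by advancing the consumed count with a cmpxchg.
 * Returns -EIO if the writer pushed the reader in the meantime, meaning the
 * data that was just read cannot be trusted.
 */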
int ust_buffers_put_subbuf(struct ust_buffer *buf, unsigned long uconsumed_old)
{
	long consumed_new, consumed_old;

	consumed_old = atomic_long_read(&buf->consumed);
	consumed_old = consumed_old & (~0xFFFFFFFFL);
	consumed_old = consumed_old | uconsumed_old;
	consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);

//ust//	spin_lock(&ltt_buf->full_lock);
	if (atomic_long_cmpxchg(&buf->consumed, consumed_old,
				consumed_new)
	    != consumed_old) {
		/* We have been pushed by the writer : the last
		 * buffer read _is_ corrupted! It can also
		 * happen if this is a buffer we never got. */
//ust//		spin_unlock(&ltt_buf->full_lock);
		return -EIO;
	} else {
		/* tell the client that buffer is now unfull */
		int index;
		long data;
		index = SUBBUF_INDEX(consumed_old, buf->chan);
		data = BUFFER_OFFSET(consumed_old, buf->chan);
		ltt_buf_unfull(buf, index, data);
//ust//		spin_unlock(&ltt_buf->full_lock);
	}
	return 0;
}

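/*
 * Minimal consumer-side sketch of how the two functions above pair up. This
 * is illustrative only: "buf" is assumed to be a valid struct ust_buffer of
 * an open channel, and a real consumer also waits on data_ready_fd_read
 * before attempting to read.
 *
 *	long consumed;
 *
 *	if (ust_buffers_get_subbuf(buf, &consumed) == 0) {
 *		void *subbuf = ust_buffers_offset_address(buf,
 *			SUBBUF_INDEX(consumed, buf->chan) * buf->chan->subbuf_size);
 *		... read the sub-buffer contents via subbuf ...
 *		ust_buffers_put_subbuf(buf, consumed);
 *	}
 */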
//ust// static void switch_buffer(unsigned long data)
//ust// {
//ust// 	struct ltt_channel_buf_struct *ltt_buf =
//ust// 		(struct ltt_channel_buf_struct *)data;
//ust// 	struct rchan_buf *buf = ltt_buf->rbuf;
//ust//
//ust// 	if (buf)
//ust// 		ltt_force_switch(buf, FORCE_ACTIVE);
//ust//
//ust// 	ltt_buf->switch_timer.expires += ltt_buf->switch_timer_interval;
//ust// 	add_timer_on(&ltt_buf->switch_timer, smp_processor_id());
//ust// }
//ust//
//ust// static void start_switch_timer(struct ltt_channel_struct *ltt_channel)
//ust// {
//ust// 	struct rchan *rchan = ltt_channel->trans_channel_data;
//ust// 	int cpu;
//ust//
//ust// 	if (!ltt_channel->switch_timer_interval)
//ust// 		return;
//ust//
//ust// 	// TODO : hotplug
//ust// 	for_each_online_cpu(cpu) {
//ust// 		struct ltt_channel_buf_struct *ltt_buf;
//ust// 		struct rchan_buf *buf;
//ust//
//ust// 		buf = rchan->buf[cpu];
//ust// 		ltt_buf = buf->chan_private;
//ust// 		buf->random_access = 1;
//ust// 		ltt_buf->switch_timer_interval =
//ust// 			ltt_channel->switch_timer_interval;
//ust// 		init_timer(&ltt_buf->switch_timer);
//ust// 		ltt_buf->switch_timer.function = switch_buffer;
//ust// 		ltt_buf->switch_timer.expires = jiffies +
//ust// 			ltt_buf->switch_timer_interval;
//ust// 		ltt_buf->switch_timer.data = (unsigned long)ltt_buf;
//ust// 		add_timer_on(&ltt_buf->switch_timer, cpu);
//ust// 	}
//ust// }
//ust//
//ust// /*
//ust//  * Cannot use del_timer_sync with add_timer_on, so use an IPI to locally
//ust//  * delete the timer.
//ust//  */
//ust// static void stop_switch_timer_ipi(void *info)
//ust// {
//ust// 	struct ltt_channel_buf_struct *ltt_buf =
//ust// 		(struct ltt_channel_buf_struct *)info;
//ust//
//ust// 	del_timer(&ltt_buf->switch_timer);
//ust// }
//ust//
//ust// static void stop_switch_timer(struct ltt_channel_struct *ltt_channel)
//ust// {
//ust// 	struct rchan *rchan = ltt_channel->trans_channel_data;
//ust// 	int cpu;
//ust//
//ust// 	if (!ltt_channel->switch_timer_interval)
//ust// 		return;
//ust//
//ust// 	// TODO : hotplug
//ust// 	for_each_online_cpu(cpu) {
//ust// 		struct ltt_channel_buf_struct *ltt_buf;
//ust// 		struct rchan_buf *buf;
//ust//
//ust// 		buf = rchan->buf[cpu];
//ust// 		ltt_buf = buf->chan_private;
//ust// 		smp_call_function(stop_switch_timer_ipi, ltt_buf, 1);
//ust// 		buf->random_access = 0;
//ust// 	}
//ust// }

//ust// static void ust_buffers_print_written(struct ust_channel *chan,
//ust// 		long cons_off, unsigned int cpu)
//ust// {
//ust// 	struct ust_buffer *buf = chan->buf[cpu];
//ust// 	long cons_idx, events_count;
//ust//
//ust// 	cons_idx = SUBBUF_INDEX(cons_off, chan);
//ust// 	events_count = local_read(&buf->commit_count[cons_idx].events);
//ust//
//ust// 	if (events_count)
//ust// 		printk(KERN_INFO
//ust// 			"channel %s: %lu events written (cpu %u, index %lu)\n",
//ust// 			chan->channel_name, events_count, cpu, cons_idx);
//ust// }

static void ltt_relay_print_subbuffer_errors(
		struct ust_channel *channel,
		long cons_off, int cpu)
{
	struct ust_buffer *ltt_buf = channel->buf[cpu];
	long cons_idx, commit_count, commit_count_sb, write_offset;

	cons_idx = SUBBUF_INDEX(cons_off, channel);
	commit_count = local_read(&ltt_buf->commit_count[cons_idx].cc);
	commit_count_sb = local_read(&ltt_buf->commit_count[cons_idx].cc_sb);

	/*
	 * No need to order commit_count and write_offset reads because we
	 * execute after trace is stopped when there are no readers left.
	 */
	write_offset = local_read(&ltt_buf->offset);
	WARN("LTT : unread channel %s offset is %ld "
		"and cons_off : %ld (cpu %d)\n",
		channel->channel_name, write_offset, cons_off, cpu);
	/* Check each sub-buffer for non filled commit count */
	if (((commit_count - channel->subbuf_size) & channel->commit_count_mask)
	    - (BUFFER_TRUNC(cons_off, channel) >> channel->n_subbufs_order) != 0) {
		ERR("LTT : %s : subbuffer %lu has non filled "
			"commit count [cc, cc_sb] [%lu,%lu].\n",
			channel->channel_name, cons_idx, commit_count, commit_count_sb);
	}
	ERR("LTT : %s : commit count : %lu, subbuf size %zd\n",
			channel->channel_name, commit_count,
			channel->subbuf_size);
}

static void ltt_relay_print_errors(struct ust_trace *trace,
		struct ust_channel *channel, int cpu)
{
	struct ust_buffer *ltt_buf;
	long cons_off;

	/*
	 * Can be called in the error path of allocation when
	 * trans_channel_data is not yet set.
	 */
	if (!channel)
		return;
	ltt_buf = channel->buf[cpu];

//ust//	for (cons_off = 0; cons_off < rchan->alloc_size;
//ust//	     cons_off = SUBBUF_ALIGN(cons_off, rchan))
//ust//		ust_buffers_print_written(ltt_chan, cons_off, cpu);
	for (cons_off = atomic_long_read(&ltt_buf->consumed);
			(SUBBUF_TRUNC(local_read(&ltt_buf->offset),
				      channel)
			 - cons_off) > 0;
			cons_off = SUBBUF_ALIGN(cons_off, channel))
		ltt_relay_print_subbuffer_errors(channel, cons_off, cpu);
}

static void ltt_relay_print_buffer_errors(struct ust_channel *channel, int cpu)
{
	struct ust_trace *trace = channel->trace;
	struct ust_buffer *ltt_buf = channel->buf[cpu];

	if (local_read(&ltt_buf->events_lost))
		ERR("channel %s: %ld events lost (cpu %d)",
			channel->channel_name,
			local_read(&ltt_buf->events_lost), cpu);
	if (local_read(&ltt_buf->corrupted_subbuffers))
		ERR("channel %s : %ld corrupted subbuffers (cpu %d)",
			channel->channel_name,
			local_read(&ltt_buf->corrupted_subbuffers), cpu);

	ltt_relay_print_errors(trace, channel, cpu);
}

static void ltt_relay_release_channel(struct kref *kref)
{
	struct ust_channel *ltt_chan = container_of(kref,
			struct ust_channel, kref);
	free(ltt_chan->buf);
}

/*
 * Create ltt buffer.
 */
//ust// static int ltt_relay_create_buffer(struct ust_trace *trace,
//ust// 		struct ltt_channel_struct *ltt_chan, struct rchan_buf *buf,
//ust// 		unsigned int cpu, unsigned int n_subbufs)
//ust// {
//ust// 	struct ltt_channel_buf_struct *ltt_buf =
//ust// 		percpu_ptr(ltt_chan->buf, cpu);
//ust// 	unsigned int j;
//ust//
//ust// 	ltt_buf->commit_count =
//ust// 		kzalloc_node(sizeof(ltt_buf->commit_count) * n_subbufs,
//ust// 				GFP_KERNEL, cpu_to_node(cpu));
//ust// 	if (!ltt_buf->commit_count)
//ust// 		return -ENOMEM;
//ust// 	kref_get(&trace->kref);
//ust// 	kref_get(&trace->ltt_transport_kref);
//ust// 	kref_get(&ltt_chan->kref);
//ust// 	local_set(&ltt_buf->offset, ltt_subbuffer_header_size());
//ust// 	atomic_long_set(&ltt_buf->consumed, 0);
//ust// 	atomic_long_set(&ltt_buf->active_readers, 0);
//ust// 	for (j = 0; j < n_subbufs; j++)
//ust// 		local_set(&ltt_buf->commit_count[j], 0);
//ust// 	init_waitqueue_head(&ltt_buf->write_wait);
//ust// 	atomic_set(&ltt_buf->wakeup_readers, 0);
//ust// 	spin_lock_init(&ltt_buf->full_lock);
//ust//
//ust// 	ltt_buffer_begin_callback(buf, trace->start_tsc, 0);
//ust// 	/* atomic_add made on local variable on data that belongs to
//ust// 	 * various CPUs : ok because tracing not started (for this cpu). */
//ust// 	local_add(ltt_subbuffer_header_size(), &ltt_buf->commit_count[0]);
//ust//
//ust// 	local_set(&ltt_buf->events_lost, 0);
//ust// 	local_set(&ltt_buf->corrupted_subbuffers, 0);
//ust//
//ust// 	return 0;
//ust// }

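/*
 * Initialize the bookkeeping of an already allocated buffer: commit
 * counters, consumed/offset counters, the first sub-buffer header, and the
 * data-ready pipe used to notify the consumer.
 */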
static int ust_buffers_init_buffer(struct ust_trace *trace,
		struct ust_channel *ltt_chan, struct ust_buffer *buf,
		unsigned int n_subbufs)
{
	unsigned int j;
	int fds[2];
	int result;

	buf->commit_count =
		zmalloc(sizeof(*buf->commit_count) * n_subbufs);
	if (!buf->commit_count)
		return -ENOMEM;
	kref_get(&trace->kref);
	kref_get(&trace->ltt_transport_kref);
	kref_get(&ltt_chan->kref);
	local_set(&buf->offset, ltt_subbuffer_header_size());
	atomic_long_set(&buf->consumed, 0);
	atomic_long_set(&buf->active_readers, 0);
	for (j = 0; j < n_subbufs; j++) {
		local_set(&buf->commit_count[j].cc, 0);
		local_set(&buf->commit_count[j].cc_sb, 0);
	}
//ust//	init_waitqueue_head(&buf->write_wait);
//ust//	atomic_set(&buf->wakeup_readers, 0);
//ust//	spin_lock_init(&buf->full_lock);

	ltt_buffer_begin(buf, trace->start_tsc, 0);

	local_add(ltt_subbuffer_header_size(), &buf->commit_count[0].cc);

	local_set(&buf->events_lost, 0);
	local_set(&buf->corrupted_subbuffers, 0);

	result = pipe(fds);
	if(result == -1) {
		PERROR("pipe");
		return -1;
	}
	buf->data_ready_fd_read = fds[0];
	buf->data_ready_fd_write = fds[1];

	/* FIXME: do we actually need this? */
	result = fcntl(fds[0], F_SETFL, O_NONBLOCK);
	if(result == -1) {
		PERROR("fcntl");
	}

//ust//	buf->commit_seq = malloc(sizeof(buf->commit_seq) * n_subbufs);
//ust//	if(!ltt_buf->commit_seq) {
//ust//		return -1;
//ust//	}
	memset(buf->commit_seq, 0, sizeof(buf->commit_seq[0]) * n_subbufs);

	/* FIXME: decrementally destroy on error */

	return 0;
}

/* FIXME: use this function */
static void ust_buffers_destroy_buffer(struct ust_channel *ltt_chan, int cpu)
{
	struct ust_trace *trace = ltt_chan->trace;
	struct ust_buffer *ltt_buf = ltt_chan->buf[cpu];

	kref_put(&ltt_chan->trace->ltt_transport_kref,
		ltt_release_transport);
	ltt_relay_print_buffer_errors(ltt_chan, cpu);
//ust//	free(ltt_buf->commit_seq);
	kfree(ltt_buf->commit_count);
	ltt_buf->commit_count = NULL;
	kref_put(&ltt_chan->kref, ltt_relay_release_channel);
	kref_put(&trace->kref, ltt_release_trace);
//ust//	wake_up_interruptible(&trace->kref_wq);
}

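/*
 * Allocate one page-sized shared memory segment per cpu to hold the struct
 * ust_buffer control structure itself; like the data area, each segment is
 * created with shmget() and marked for destruction on last detach.
 */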
static int ust_buffers_alloc_channel_buf_structs(struct ust_channel *chan)
{
	void *ptr;
	int result;
	size_t size;
	int i;

	size = PAGE_ALIGN(1);

	for(i=0; i<chan->n_cpus; i++) {

		result = chan->buf_struct_shmids[i] = shmget(getpid(), size, IPC_CREAT | IPC_EXCL | 0700);
		if(result == -1) {
			PERROR("shmget");
			goto destroy_previous;
		}

		/* FIXME: should have matching call to shmdt */
		ptr = shmat(chan->buf_struct_shmids[i], NULL, 0);
		if(ptr == (void *) -1) {
			perror("shmat");
			goto destroy_shm;
		}

		/* Already mark the shared memory for destruction. This will occur only
		 * when all users have detached.
		 */
		result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL);
		if(result == -1) {
			perror("shmctl");
			goto destroy_previous;
		}

		chan->buf[i] = ptr;
	}

	return 0;

	/* Jumping inside this loop occurs from within the other loop above with i as
	 * counter, so it unallocates the structures for the cpu = current_i down to
	 * zero. */
	for(; i>=0; i--) {
		destroy_shm:
		result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL);
		if(result == -1) {
			perror("shmctl");
		}

		destroy_previous:
		continue;
	}

	return -1;
}

/*
 * Create channel.
 */
static int ust_buffers_create_channel(const char *trace_name, struct ust_trace *trace,
	const char *channel_name, struct ust_channel *ltt_chan,
	unsigned int subbuf_size, unsigned int n_subbufs, int overwrite)
{
	int result;

	kref_init(&ltt_chan->kref);

	ltt_chan->trace = trace;
	ltt_chan->overwrite = overwrite;
	ltt_chan->n_subbufs_order = get_count_order(n_subbufs);
	ltt_chan->commit_count_mask = (~0UL >> ltt_chan->n_subbufs_order);
	ltt_chan->n_cpus = get_n_cpus();
//ust//	ltt_chan->buf = percpu_alloc_mask(sizeof(struct ltt_channel_buf_struct), GFP_KERNEL, cpu_possible_map);
	ltt_chan->buf = (void *) malloc(ltt_chan->n_cpus * sizeof(void *));
	if(ltt_chan->buf == NULL) {
		goto error;
	}
	ltt_chan->buf_struct_shmids = (int *) malloc(ltt_chan->n_cpus * sizeof(int));
	if(ltt_chan->buf_struct_shmids == NULL)
		goto free_buf;

	result = ust_buffers_alloc_channel_buf_structs(ltt_chan);
	if(result != 0) {
		goto free_buf_struct_shmids;
	}

	result = ust_buffers_channel_open(ltt_chan, subbuf_size, n_subbufs);
	if (result != 0) {
		ERR("Cannot open channel for trace %s", trace_name);
		goto unalloc_buf_structs;
	}

	return 0;

unalloc_buf_structs:
	/* FIXME: put a call here to unalloc the buf structs! */

free_buf_struct_shmids:
	free(ltt_chan->buf_struct_shmids);

free_buf:
	free(ltt_chan->buf);

error:
	return -1;
}

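/*
 * Hypothetical call-site sketch for the function above (the variable names
 * and parameter values are assumptions, not taken from the real callers):
 * create a non-overwrite channel with eight page-sized sub-buffers.
 *
 *	result = ust_buffers_create_channel(trace_name, trace, "ust", chan,
 *			4096, 8, 0);
 */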
/*
 * LTTng channel flush function.
 *
 * Must be called when no tracing is active in the channel, because of
 * accesses across CPUs.
 */
static notrace void ltt_relay_buffer_flush(struct ust_buffer *buf)
{
	int result;

//ust//	buf->finalized = 1;
	ltt_force_switch(buf, FORCE_FLUSH);

	result = write(buf->data_ready_fd_write, "1", 1);
	if(result == -1) {
		PERROR("write (in ltt_relay_buffer_flush)");
		ERR("this should never happen!");
	}
}

static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel)
{
//ust//	unsigned int i;
//ust//	struct rchan *rchan = ltt_channel->trans_channel_data;
//ust//
//ust//	for_each_possible_cpu(i) {
//ust//		struct ltt_channel_buf_struct *ltt_buf =
//ust//			percpu_ptr(ltt_channel->buf, i);
//ust//
//ust//		if (atomic_read(&ltt_buf->wakeup_readers) == 1) {
//ust//			atomic_set(&ltt_buf->wakeup_readers, 0);
//ust//			wake_up_interruptible(&rchan->buf[i]->read_wait);
//ust//		}
//ust//	}
}

static void ltt_relay_finish_buffer(struct ust_channel *channel, unsigned int cpu)
{
//	int result;

	if (channel->buf[cpu]) {
		struct ust_buffer *buf = channel->buf[cpu];
		ltt_relay_buffer_flush(buf);
//ust//		ltt_relay_wake_writers(ltt_buf);
		/* closing the pipe tells the consumer the buffer is finished */

		//result = write(ltt_buf->data_ready_fd_write, "D", 1);
		//if(result == -1) {
		//	PERROR("write (in ltt_relay_finish_buffer)");
		//	ERR("this should never happen!");
		//}
		close(buf->data_ready_fd_write);
	}
}

static void ltt_relay_finish_channel(struct ust_channel *channel)
{
	unsigned int i;

	for(i=0; i<channel->n_cpus; i++) {
		ltt_relay_finish_buffer(channel, i);
	}
}

static void ltt_relay_remove_channel(struct ust_channel *channel)
{
	ust_buffers_channel_close(channel);
	kref_put(&channel->kref, ltt_relay_release_channel);
}

//ust// /*
//ust//  * Returns :
//ust//  * 0 if ok
//ust//  * !0 if execution must be aborted.
//ust//  */
//ust// static inline int ltt_relay_try_reserve(
//ust// 		struct ust_channel *channel, struct ust_buffer *buf,
//ust// 		struct ltt_reserve_switch_offsets *offsets, size_t data_size,
//ust// 		u64 *tsc, unsigned int *rflags, int largest_align)
//ust// {
//ust// 	offsets->begin = local_read(&buf->offset);
//ust// 	offsets->old = offsets->begin;
//ust// 	offsets->begin_switch = 0;
//ust// 	offsets->end_switch_current = 0;
//ust// 	offsets->end_switch_old = 0;
//ust//
//ust// 	*tsc = trace_clock_read64();
//ust// 	if (last_tsc_overflow(buf, *tsc))
//ust// 		*rflags = LTT_RFLAG_ID_SIZE_TSC;
//ust//
//ust// 	if (SUBBUF_OFFSET(offsets->begin, buf->chan) == 0) {
//ust// 		offsets->begin_switch = 1;	/* For offsets->begin */
//ust// 	} else {
//ust// 		offsets->size = ust_get_header_size(channel,
//ust// 				offsets->begin, data_size,
//ust// 				&offsets->before_hdr_pad, *rflags);
//ust// 		offsets->size += ltt_align(offsets->begin + offsets->size,
//ust// 				largest_align)
//ust// 				+ data_size;
//ust// 		if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
//ust// 				> buf->chan->subbuf_size) {
//ust// 			offsets->end_switch_old = 1;	/* For offsets->old */
//ust// 			offsets->begin_switch = 1;	/* For offsets->begin */
//ust// 		}
//ust// 	}
//ust// 	if (offsets->begin_switch) {
//ust// 		long subbuf_index;
//ust//
//ust// 		if (offsets->end_switch_old)
//ust// 			offsets->begin = SUBBUF_ALIGN(offsets->begin,
//ust// 					buf->chan);
//ust// 		offsets->begin = offsets->begin + ltt_subbuffer_header_size();
//ust// 		/* Test new buffer integrity */
//ust// 		subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
//ust// 		offsets->reserve_commit_diff =
//ust// 			(BUFFER_TRUNC(offsets->begin, buf->chan)
//ust// 			>> channel->n_subbufs_order)
//ust// 			- (local_read(&buf->commit_count[subbuf_index])
//ust// 				& channel->commit_count_mask);
//ust// 		if (offsets->reserve_commit_diff == 0) {
//ust// 			long consumed;
//ust//
//ust// 			consumed = atomic_long_read(&buf->consumed);
//ust//
//ust// 			/* Next buffer not corrupted. */
//ust// 			if (!channel->overwrite &&
//ust// 				(SUBBUF_TRUNC(offsets->begin, buf->chan)
//ust// 				- SUBBUF_TRUNC(consumed, buf->chan))
//ust// 				>= channel->alloc_size) {
//ust//
//ust// 				long consumed_idx = SUBBUF_INDEX(consumed, buf->chan);
//ust// 				long commit_count = local_read(&buf->commit_count[consumed_idx]);
//ust// 				if(((commit_count - buf->chan->subbuf_size) & channel->commit_count_mask) - (BUFFER_TRUNC(consumed, buf->chan) >> channel->n_subbufs_order) != 0) {
//ust// 					WARN("Event dropped. Caused by non-committed event.");
//ust// 				}
//ust// 				else {
//ust// 					WARN("Event dropped. Caused by non-consumed buffer.");
//ust// 				}
//ust// 				/*
//ust// 				 * We do not overwrite non consumed buffers
//ust// 				 * and we are full : event is lost.
//ust// 				 */
//ust// 				local_inc(&buf->events_lost);
//ust// 				return -1;
//ust// 			} else {
//ust// 				/*
//ust// 				 * next buffer not corrupted, we are either in
//ust// 				 * overwrite mode or the buffer is not full.
//ust// 				 * It's safe to write in this new subbuffer.
//ust// 				 */
//ust// 			}
//ust// 		} else {
//ust// 			/*
//ust// 			 * Next subbuffer corrupted. Force pushing reader even
//ust// 			 * in normal mode. It's safe to write in this new
//ust// 			 * subbuffer.
//ust// 			 */
//ust// 		}
//ust// 		offsets->size = ust_get_header_size(channel,
//ust// 				offsets->begin, data_size,
//ust// 				&offsets->before_hdr_pad, *rflags);
//ust// 		offsets->size += ltt_align(offsets->begin + offsets->size,
//ust// 				largest_align)
//ust// 				+ data_size;
//ust// 		if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
//ust// 				> buf->chan->subbuf_size) {
//ust// 			/*
//ust// 			 * Event too big for subbuffers, report error, don't
//ust// 			 * complete the sub-buffer switch.
//ust// 			 */
//ust// 			local_inc(&buf->events_lost);
//ust// 			return -1;
//ust// 		} else {
//ust// 			/*
//ust// 			 * We just made a successful buffer switch and the event
//ust// 			 * fits in the new subbuffer. Let's write.
//ust// 			 */
//ust// 		}
//ust// 	} else {
//ust// 		/*
//ust// 		 * Event fits in the current buffer and we are not on a switch
//ust// 		 * boundary. It's safe to write.
//ust// 		 */
//ust// 	}
//ust// 	offsets->end = offsets->begin + offsets->size;
//ust//
//ust// 	if ((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0) {
//ust// 		/*
//ust// 		 * The offset_end will fall at the very beginning of the next
//ust// 		 * subbuffer.
//ust// 		 */
//ust// 		offsets->end_switch_current = 1;	/* For offsets->begin */
//ust// 	}
//ust// 	return 0;
//ust// }
//ust//
//ust// /*
//ust//  * Returns :
//ust//  * 0 if ok
//ust//  * !0 if execution must be aborted.
//ust//  */
//ust// static inline int ltt_relay_try_switch(
//ust// 		enum force_switch_mode mode,
//ust// 		struct ust_channel *channel,
//ust// 		struct ust_buffer *buf,
//ust// 		struct ltt_reserve_switch_offsets *offsets,
//ust// 		u64 *tsc)
//ust// {
//ust// 	long subbuf_index;
//ust//
//ust// 	offsets->begin = local_read(&buf->offset);
//ust// 	offsets->old = offsets->begin;
//ust// 	offsets->begin_switch = 0;
//ust// 	offsets->end_switch_old = 0;
//ust//
//ust// 	*tsc = trace_clock_read64();
//ust//
//ust// 	if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
//ust// 		offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
//ust// 		offsets->end_switch_old = 1;
//ust// 	} else {
//ust// 		/* we do not have to switch : buffer is empty */
//ust// 		return -1;
//ust// 	}
//ust// 	if (mode == FORCE_ACTIVE)
//ust// 		offsets->begin += ltt_subbuffer_header_size();
//ust// 	/*
//ust// 	 * Always begin_switch in FORCE_ACTIVE mode.
//ust// 	 * Test new buffer integrity
//ust// 	 */
//ust// 	subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
//ust// 	offsets->reserve_commit_diff =
//ust// 		(BUFFER_TRUNC(offsets->begin, buf->chan)
//ust// 		>> channel->n_subbufs_order)
//ust// 		- (local_read(&buf->commit_count[subbuf_index])
//ust// 			& channel->commit_count_mask);
//ust// 	if (offsets->reserve_commit_diff == 0) {
//ust// 		/* Next buffer not corrupted. */
//ust// 		if (mode == FORCE_ACTIVE
//ust// 		    && !channel->overwrite
//ust// 		    && offsets->begin - atomic_long_read(&buf->consumed)
//ust// 		       >= channel->alloc_size) {
//ust// 			/*
//ust// 			 * We do not overwrite non consumed buffers and we are
//ust// 			 * full : ignore switch while tracing is active.
//ust// 			 */
//ust// 			return -1;
//ust// 		}
//ust// 	} else {
//ust// 		/*
//ust// 		 * Next subbuffer corrupted. Force pushing reader even in normal
//ust// 		 * mode
//ust// 		 */
//ust// 	}
//ust// 	offsets->end = offsets->begin;
//ust// 	return 0;
//ust// }
//ust//
//ust// static inline void ltt_reserve_push_reader(
//ust// 		struct ust_channel *channel,
//ust// 		struct ust_buffer *buf,
//ust// 		struct ltt_reserve_switch_offsets *offsets)
//ust// {
//ust// 	long consumed_old, consumed_new;
//ust//
//ust// 	do {
//ust// 		consumed_old = atomic_long_read(&buf->consumed);
//ust// 		/*
//ust// 		 * If buffer is in overwrite mode, push the reader consumed
//ust// 		 * count if the write position has reached it and we are not
//ust// 		 * at the first iteration (don't push the reader farther than
//ust// 		 * the writer). This operation can be done concurrently by many
//ust// 		 * writers in the same buffer, the writer being at the farthest
//ust// 		 * write position sub-buffer index in the buffer being the one
//ust// 		 * which will win this loop.
//ust// 		 * If the buffer is not in overwrite mode, pushing the reader
//ust// 		 * only happens if a sub-buffer is corrupted.
//ust// 		 */
//ust// 		if ((SUBBUF_TRUNC(offsets->end-1, buf->chan)
//ust// 		   - SUBBUF_TRUNC(consumed_old, buf->chan))
//ust// 		   >= channel->alloc_size)
//ust// 			consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
//ust// 		else {
//ust// 			consumed_new = consumed_old;
//ust// 			break;
//ust// 		}
//ust// 	} while (atomic_long_cmpxchg(&buf->consumed, consumed_old,
//ust// 			consumed_new) != consumed_old);
//ust//
//ust// 	if (consumed_old != consumed_new) {
//ust// 		/*
//ust// 		 * Reader pushed : we are the winner of the push, we can
//ust// 		 * therefore reequilibrate reserve and commit. Atomic increment
//ust// 		 * of the commit count permits other writers to play around
//ust// 		 * with this variable before us. We keep track of
//ust// 		 * corrupted_subbuffers even in overwrite mode :
//ust// 		 * we never want to write over a non completely committed
//ust// 		 * sub-buffer : possible causes : the buffer size is too low
//ust// 		 * compared to the unordered data input, or there is a writer
//ust// 		 * that died between the reserve and the commit.
//ust// 		 */
//ust// 		if (offsets->reserve_commit_diff) {
//ust// 			/*
//ust// 			 * We have to alter the sub-buffer commit count.
//ust// 			 * We do not deliver the previous subbuffer, given it
//ust// 			 * was either corrupted or not consumed (overwrite
//ust// 			 * mode).
//ust// 			 */
//ust// 			local_add(offsets->reserve_commit_diff,
//ust// 				&buf->commit_count[
//ust// 					SUBBUF_INDEX(offsets->begin,
//ust// 						buf->chan)]);
//ust// 			if (!channel->overwrite
//ust// 				|| offsets->reserve_commit_diff
//ust// 				!= channel->subbuf_size) {
//ust// 				/*
//ust// 				 * The reserve commit diff was not subbuf_size :
//ust// 				 * it means the subbuffer was partly written to
//ust// 				 * and is therefore corrupted. If it is multiple
//ust// 				 * of subbuffer size and we are in flight
//ust// 				 * recorder mode, we are skipping over a whole
//ust// 				 * subbuffer.
//ust// 				 */
//ust// 				local_inc(&buf->corrupted_subbuffers);
//ust// 			}
//ust// 		}
//ust// 	}
//ust// }
//ust//
//ust// /**
//ust//  * ltt_relay_reserve_slot - Atomic slot reservation in a LTTng buffer.
//ust//  * @trace: the trace structure to log to.
//ust//  * @ltt_channel: channel structure
//ust//  * @transport_data: data structure specific to ltt relay
//ust//  * @data_size: size of the variable length data to log.
//ust//  * @slot_size: pointer to total size of the slot (out)
//ust//  * @buf_offset : pointer to reserved buffer offset (out)
//ust//  * @tsc: pointer to the tsc at the slot reservation (out)
//ust//  * @cpu: cpuid
//ust//  *
//ust//  * Return : -ENOSPC if not enough space, else returns 0.
//ust//  * It will take care of sub-buffer switching.
//ust//  */
//ust// static notrace int ltt_relay_reserve_slot(struct ust_trace *trace,
//ust// 		struct ust_channel *channel, void **transport_data,
//ust// 		size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
//ust// 		unsigned int *rflags, int largest_align, int cpu)
//ust// {
//ust// 	struct ust_buffer *buf = *transport_data = channel->buf[cpu];
//ust// 	struct ltt_reserve_switch_offsets offsets;
//ust//
//ust// 	offsets.reserve_commit_diff = 0;
//ust// 	offsets.size = 0;
//ust//
//ust// 	/*
//ust// 	 * Perform retryable operations.
//ust// 	 */
//ust// 	if (ltt_nesting > 4) {
//ust// 		local_inc(&buf->events_lost);
//ust// 		return -EPERM;
//ust// 	}
//ust// 	do {
//ust// 		if (ltt_relay_try_reserve(channel, buf, &offsets, data_size, tsc, rflags,
//ust// 				largest_align))
//ust// 			return -ENOSPC;
//ust// 	} while (local_cmpxchg(&buf->offset, offsets.old,
//ust// 			offsets.end) != offsets.old);
//ust//
//ust// 	/*
//ust// 	 * Atomically update last_tsc. This update races against concurrent
//ust// 	 * atomic updates, but the race will always cause supplementary full TSC
//ust// 	 * events, never the opposite (missing a full TSC event when it would be
//ust// 	 * needed).
//ust// 	 */
//ust// 	save_last_tsc(buf, *tsc);
//ust//
//ust// 	/*
//ust// 	 * Push the reader if necessary
//ust// 	 */
//ust// 	ltt_reserve_push_reader(channel, buf, &offsets);
//ust//
//ust// 	/*
//ust// 	 * Switch old subbuffer if needed.
//ust// 	 */
//ust// 	if (offsets.end_switch_old)
//ust// 		ltt_reserve_switch_old_subbuf(channel, buf, &offsets, tsc);
//ust//
//ust// 	/*
//ust// 	 * Populate new subbuffer.
//ust// 	 */
//ust// 	if (offsets.begin_switch)
//ust// 		ltt_reserve_switch_new_subbuf(channel, buf, &offsets, tsc);
//ust//
//ust// 	if (offsets.end_switch_current)
//ust// 		ltt_reserve_end_switch_current(channel, buf, &offsets, tsc);
//ust//
//ust// 	*slot_size = offsets.size;
//ust// 	*buf_offset = offsets.begin + offsets.before_hdr_pad;
//ust// 	return 0;
//ust// }
//ust//
//ust// /*
//ust//  * Force a sub-buffer switch for a per-cpu buffer. This operation is
//ust//  * completely reentrant : can be called while tracing is active with
//ust//  * absolutely no lock held.
//ust//  *
//ust//  * Note, however, that as a local_cmpxchg is used for some atomic
//ust//  * operations, this function must be called from the CPU which owns the buffer
//ust//  * for a ACTIVE flush.
//ust//  */
//ust// static notrace void ltt_force_switch(struct ust_buffer *buf,
//ust// 		enum force_switch_mode mode)
//ust// {
//ust// 	struct ust_channel *channel = buf->chan;
//ust// 	struct ltt_reserve_switch_offsets offsets;
//ust// 	u64 tsc;
//ust//
//ust// 	offsets.reserve_commit_diff = 0;
//ust// 	offsets.size = 0;
//ust//
//ust// 	/*
//ust// 	 * Perform retryable operations.
//ust// 	 */
//ust// 	do {
//ust// 		if (ltt_relay_try_switch(mode, channel, buf, &offsets, &tsc))
//ust// 			return;
//ust// 	} while (local_cmpxchg(&buf->offset, offsets.old,
//ust// 			offsets.end) != offsets.old);
//ust//
//ust// 	/*
//ust// 	 * Atomically update last_tsc. This update races against concurrent
//ust// 	 * atomic updates, but the race will always cause supplementary full TSC
//ust// 	 * events, never the opposite (missing a full TSC event when it would be
//ust// 	 * needed).
//ust// 	 */
//ust// 	save_last_tsc(buf, tsc);
//ust//
//ust// 	/*
//ust// 	 * Push the reader if necessary
//ust// 	 */
//ust// 	if (mode == FORCE_ACTIVE)
//ust// 		ltt_reserve_push_reader(channel, buf, &offsets);
//ust//
//ust// 	/*
//ust// 	 * Switch old subbuffer if needed.
//ust// 	 */
//ust// 	if (offsets.end_switch_old)
//ust// 		ltt_reserve_switch_old_subbuf(channel, buf, &offsets, &tsc);
//ust//
//ust// 	/*
//ust// 	 * Populate new subbuffer.
//ust// 	 */
//ust// 	if (mode == FORCE_ACTIVE)
//ust// 		ltt_reserve_switch_new_subbuf(channel, buf, &offsets, &tsc);
//ust// }

1324 | /* | |
b73a4c47 PMF |
1325 | * ltt_reserve_switch_old_subbuf: switch old subbuffer |
1326 | * | |
1327 | * Concurrency safe because we are the last and only thread to alter this | |
1328 | * sub-buffer. As long as it is not delivered and read, no other thread can | |
1329 | * alter the offset, alter the reserve_count or call the | |
1330 | * client_buffer_end_callback on this sub-buffer. | |
1331 | * | |
1332 | * The only remaining threads could be the ones with pending commits. They will | |
1333 | * have to do the deliver themselves. Not concurrency safe in overwrite mode. | |
1334 | * We detect corrupted subbuffers with commit and reserve counts. We keep a | |
1335 | * corrupted sub-buffers count and push the readers across these sub-buffers. | |
1336 | * | |
1337 | * Not concurrency safe if a writer is stalled in a subbuffer and another writer | |
1338 | * switches in, finding out it's corrupted. The result will be than the old | |
1339 | * (uncommited) subbuffer will be declared corrupted, and that the new subbuffer | |
1340 | * will be declared corrupted too because of the commit count adjustment. | |
1341 | * | |
1342 | * Note : offset_old should never be 0 here. | |
b5b073e2 | 1343 | */ |
b73a4c47 PMF |
1344 | static void ltt_reserve_switch_old_subbuf( |
1345 | struct ust_channel *chan, struct ust_buffer *buf, | |
1346 | struct ltt_reserve_switch_offsets *offsets, u64 *tsc) | |
b5b073e2 | 1347 | { |
b73a4c47 PMF |
1348 | long oldidx = SUBBUF_INDEX(offsets->old - 1, chan); |
1349 | long commit_count, padding_size; | |
b5b073e2 | 1350 | |
b73a4c47 PMF |
1351 | padding_size = chan->subbuf_size |
1352 | - (SUBBUF_OFFSET(offsets->old - 1, chan) + 1); | |
1353 | ltt_buffer_end(buf, *tsc, offsets->old, oldidx); | |
b5b073e2 | 1354 | |
b73a4c47 PMF |
1355 | /* |
1356 | * Must write slot data before incrementing commit count. | |
1357 | * This compiler barrier is upgraded into a smp_wmb() by the IPI | |
1358 | * sent by get_subbuf() when it does its smp_rmb(). | |
1359 | */ | |
1360 | barrier(); | |
1361 | local_add(padding_size, | |
1362 | &buf->commit_count[oldidx].cc); | |
1363 | commit_count = local_read(&buf->commit_count[oldidx].cc); | |
1364 | ltt_check_deliver(chan, buf, offsets->old - 1, commit_count, oldidx); | |
1e8c9e7b | 1365 | ltt_write_commit_counter(chan, buf, oldidx, |
b73a4c47 PMF |
1366 | offsets->old, commit_count, padding_size); |
1367 | } | |
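/*
 * Illustration (not part of the original file): a minimal, standalone sketch
 * of the power-of-two sub-buffer arithmetic used by the switch code above.
 * The example_* macros are hypothetical stand-ins for SUBBUF_OFFSET() and
 * SUBBUF_INDEX(); the real definitions live in the UST headers.  Kept under
 * "#if 0" so it is never compiled as part of this file.
 */
#if 0
#include <stdio.h>

#define EXAMPLE_SUBBUF_SIZE 4096UL                 /* bytes per sub-buffer */
#define EXAMPLE_N_SUBBUFS   4UL                    /* sub-buffers per buffer */
#define EXAMPLE_BUF_SIZE    (EXAMPLE_SUBBUF_SIZE * EXAMPLE_N_SUBBUFS)

#define example_subbuf_offset(off) ((off) & (EXAMPLE_SUBBUF_SIZE - 1))
#define example_subbuf_index(off)  (((off) & (EXAMPLE_BUF_SIZE - 1)) / EXAMPLE_SUBBUF_SIZE)

int main(void)
{
	/* Write offset after the last reserve: 100 bytes into sub-buffer 1. */
	unsigned long old = EXAMPLE_SUBBUF_SIZE + 100UL;
	unsigned long oldidx = example_subbuf_index(old - 1);
	unsigned long padding = EXAMPLE_SUBBUF_SIZE
			- (example_subbuf_offset(old - 1) + 1);

	/* Prints "subbuf 1, end padding 3996". */
	printf("subbuf %lu, end padding %lu\n", oldidx, padding);
	return 0;
}
#endif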
b5b073e2 | 1368 | |
b73a4c47 PMF |
1369 | /* |
1370 | * ltt_reserve_switch_new_subbuf: Populate new subbuffer. | |
1371 | * | |
1372 | * This code can be executed unordered: writers may already have written to the | |
1373 | * sub-buffer before this code gets executed, so caution is required. The commit | |
1374 | * makes sure that this code is executed before the delivery of this sub-buffer. | |
1375 | */ | |
1376 | static void ltt_reserve_switch_new_subbuf( | |
1377 | struct ust_channel *chan, struct ust_buffer *buf, | |
1378 | struct ltt_reserve_switch_offsets *offsets, u64 *tsc) | |
1379 | { | |
1380 | long beginidx = SUBBUF_INDEX(offsets->begin, chan); | |
1381 | long commit_count; | |
b5b073e2 | 1382 | |
b73a4c47 | 1383 | ltt_buffer_begin(buf, *tsc, beginidx); |
b5b073e2 | 1384 | |
b73a4c47 PMF |
1385 | /* |
1386 | * Must write slot data before incrementing commit count. | |
1387 | * This compiler barrier is upgraded into a smp_wmb() by the IPI | |
1388 | * sent by get_subbuf() when it does its smp_rmb(). | |
1389 | */ | |
1390 | barrier(); | |
1391 | local_add(ltt_subbuffer_header_size(), | |
1392 | &buf->commit_count[beginidx].cc); | |
1393 | commit_count = local_read(&buf->commit_count[beginidx].cc); | |
1394 | /* Check if the written buffer has to be delivered */ | |
1395 | ltt_check_deliver(chan, buf, offsets->begin, commit_count, beginidx); | |
1e8c9e7b | 1396 | ltt_write_commit_counter(chan, buf, beginidx, |
b73a4c47 PMF |
1397 | offsets->begin, commit_count, ltt_subbuffer_header_size()); |
1398 | } | |
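/*
 * Illustration (not part of the original file): a hedged sketch of the
 * delivery condition that ltt_check_deliver() is assumed to test, namely
 * that the bytes committed into a sub-buffer during the current pass add
 * up to a full sub-buffer.  All names below are local to the example; the
 * real check also involves the channel's commit count mask.
 */
#if 0
#include <stdio.h>

#define EXAMPLE_SUBBUF_SIZE 4096UL

/*
 * commit_count: total bytes ever committed into this sub-buffer.
 * pass: number of complete trips writers already made around the buffer.
 */
static int example_subbuf_full(unsigned long commit_count, unsigned long pass)
{
	return commit_count - pass * EXAMPLE_SUBBUF_SIZE == EXAMPLE_SUBBUF_SIZE;
}

int main(void)
{
	/* Third pass: deliverable once the counter reaches 3 * 4096 bytes. */
	printf("%d\n", example_subbuf_full(2 * EXAMPLE_SUBBUF_SIZE + 4000, 2)); /* 0 */
	printf("%d\n", example_subbuf_full(3 * EXAMPLE_SUBBUF_SIZE, 2));        /* 1 */
	return 0;
}
#endif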
b5b073e2 | 1399 | |
b73a4c47 PMF |
1400 | /* |
1401 | * ltt_reserve_end_switch_current: finish switching current subbuffer | |
1402 | * | |
1403 | * Concurrency safe because we are the last and only thread to alter this | |
1404 | * sub-buffer. As long as it is not delivered and read, no other thread can | |
1405 | * alter the offset, alter the reserve_count or call the | |
1406 | * client_buffer_end_callback on this sub-buffer. | |
1407 | * | |
1408 | * The only remaining threads could be the ones with pending commits. They will | |
1409 | * have to perform the delivery themselves. Not concurrency safe in overwrite mode. | |
1410 | * We detect corrupted subbuffers with commit and reserve counts. We keep a | |
1411 | * corrupted sub-buffers count and push the readers across these sub-buffers. | |
1412 | * | |
1413 | * Not concurrency safe if a writer is stalled in a subbuffer and another writer | |
1414 | * switches in, finding out it's corrupted. The result will be that the old | |
1415 | * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer | |
1416 | * will be declared corrupted too because of the commit count adjustment. | |
1417 | */ | |
1418 | static void ltt_reserve_end_switch_current( | |
1419 | struct ust_channel *chan, | |
1420 | struct ust_buffer *buf, | |
1421 | struct ltt_reserve_switch_offsets *offsets, u64 *tsc) | |
1422 | { | |
1423 | long endidx = SUBBUF_INDEX(offsets->end - 1, chan); | |
1424 | long commit_count, padding_size; | |
1425 | ||
1426 | padding_size = chan->subbuf_size | |
1427 | - (SUBBUF_OFFSET(offsets->end - 1, chan) + 1); | |
1428 | ||
1429 | ltt_buffer_end(buf, *tsc, offsets->end, endidx); | |
1430 | ||
1431 | /* | |
1432 | * Must write slot data before incrementing commit count. | |
1433 | * This compiler barrier is upgraded into a smp_wmb() by the IPI | |
1434 | * sent by get_subbuf() when it does its smp_rmb(). | |
1435 | */ | |
1436 | barrier(); | |
1437 | local_add(padding_size, | |
1438 | &buf->commit_count[endidx].cc); | |
1439 | commit_count = local_read(&buf->commit_count[endidx].cc); | |
1440 | ltt_check_deliver(chan, buf, | |
1441 | offsets->end - 1, commit_count, endidx); | |
1e8c9e7b | 1442 | ltt_write_commit_counter(chan, buf, endidx, |
b73a4c47 | 1443 | offsets->end, commit_count, padding_size); |
b5b073e2 PMF |
1444 | } |
1445 | ||
1446 | /* | |
1447 | * Returns : | |
1448 | * 0 if ok | |
1449 | * !0 if execution must be aborted. | |
1450 | */ | |
b73a4c47 | 1451 | static int ltt_relay_try_switch_slow( |
b5b073e2 | 1452 | enum force_switch_mode mode, |
b73a4c47 | 1453 | struct ust_channel *chan, |
b5b073e2 PMF |
1454 | struct ust_buffer *buf, |
1455 | struct ltt_reserve_switch_offsets *offsets, | |
1456 | u64 *tsc) | |
1457 | { | |
1458 | long subbuf_index; | |
b73a4c47 | 1459 | long reserve_commit_diff; |
b5b073e2 PMF |
1460 | |
1461 | offsets->begin = local_read(&buf->offset); | |
1462 | offsets->old = offsets->begin; | |
1463 | offsets->begin_switch = 0; | |
1464 | offsets->end_switch_old = 0; | |
1465 | ||
1466 | *tsc = trace_clock_read64(); | |
1467 | ||
1468 | if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) { | |
1469 | offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan); | |
1470 | offsets->end_switch_old = 1; | |
1471 | } else { | |
1472 | /* we do not have to switch : buffer is empty */ | |
1473 | return -1; | |
1474 | } | |
1475 | if (mode == FORCE_ACTIVE) | |
1476 | offsets->begin += ltt_subbuffer_header_size(); | |
1477 | /* | |
1478 | * Always begin_switch in FORCE_ACTIVE mode. | |
1479 | * Test new buffer integrity | |
1480 | */ | |
1481 | subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan); | |
b73a4c47 | 1482 | reserve_commit_diff = |
b5b073e2 | 1483 | (BUFFER_TRUNC(offsets->begin, buf->chan) |
b73a4c47 PMF |
1484 | >> chan->n_subbufs_order) |
1485 | - (local_read(&buf->commit_count[subbuf_index].cc_sb) | |
1486 | & chan->commit_count_mask); | |
1487 | if (reserve_commit_diff == 0) { | |
b5b073e2 PMF |
1488 | /* Next buffer not corrupted. */ |
1489 | if (mode == FORCE_ACTIVE | |
b73a4c47 | 1490 | && !chan->overwrite |
b5b073e2 | 1491 | && offsets->begin - atomic_long_read(&buf->consumed) |
b73a4c47 | 1492 | >= chan->alloc_size) { |
b5b073e2 PMF |
1493 | /* |
1494 | * We do not overwrite non-consumed buffers and we are | |
1495 | * full : ignore switch while tracing is active. | |
1496 | */ | |
1497 | return -1; | |
1498 | } | |
1499 | } else { | |
1500 | /* | |
1501 | * Next subbuffer corrupted. Force pushing reader even in normal | |
1502 | * mode | |
1503 | */ | |
1504 | } | |
1505 | offsets->end = offsets->begin; | |
1506 | return 0; | |
1507 | } | |
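/*
 * Illustration (not part of the original file): standalone sketch of the
 * reserve_commit_diff test above, under the assumption that each completed
 * pass over a sub-buffer eventually adds exactly subbuf_size to its commit
 * counter (header + payload + end padding).  The commit count mask used by
 * the real code is omitted; all names are local to the example.
 */
#if 0
#include <stdio.h>

#define EXAMPLE_SUBBUF_SIZE     4096UL
#define EXAMPLE_N_SUBBUFS       4UL
#define EXAMPLE_N_SUBBUFS_ORDER 2UL     /* log2(EXAMPLE_N_SUBBUFS) */
#define EXAMPLE_BUF_SIZE        (EXAMPLE_SUBBUF_SIZE * EXAMPLE_N_SUBBUFS)

/* Offset truncated to the start of the current pass over the whole buffer. */
#define example_buffer_trunc(off) ((off) & ~(EXAMPLE_BUF_SIZE - 1))

int main(void)
{
	unsigned long pass = 3;                         /* starting the 4th pass */
	unsigned long begin = pass * EXAMPLE_BUF_SIZE;  /* reserving in sub-buffer 0 */

	/* Fully committed history: subbuf_size per previous pass. */
	unsigned long commit_ok = pass * EXAMPLE_SUBBUF_SIZE;
	/* A writer stalled 128 bytes short of the end in an earlier pass. */
	unsigned long commit_stalled = pass * EXAMPLE_SUBBUF_SIZE - 128;

	long diff_ok = (long)((example_buffer_trunc(begin) >> EXAMPLE_N_SUBBUFS_ORDER)
			- commit_ok);
	long diff_stalled = (long)((example_buffer_trunc(begin) >> EXAMPLE_N_SUBBUFS_ORDER)
			- commit_stalled);

	printf("clean sub-buffer: diff = %ld\n", diff_ok);       /* 0   */
	printf("stalled writer:   diff = %ld\n", diff_stalled);  /* 128 */
	return 0;
}
#endif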
1508 | ||
b5b073e2 | 1509 | /* |
b73a4c47 PMF |
1510 | * Force a sub-buffer switch for a per-cpu buffer. This operation is |
1511 | * completely reentrant : can be called while tracing is active with | |
1512 | * absolutely no lock held. | |
b5b073e2 | 1513 | * |
b73a4c47 PMF |
1514 | * Note, however, that as a local_cmpxchg is used for some atomic |
1515 | * operations, this function must be called from the CPU which owns the buffer | |
1516 | * for an ACTIVE flush. | |
b5b073e2 | 1517 | */ |
b73a4c47 PMF |
1518 | void ltt_force_switch_lockless_slow(struct ust_buffer *buf, |
1519 | enum force_switch_mode mode) | |
b5b073e2 | 1520 | { |
b73a4c47 | 1521 | struct ust_channel *chan = buf->chan; |
b5b073e2 | 1522 | struct ltt_reserve_switch_offsets offsets; |
b73a4c47 | 1523 | u64 tsc; |
b5b073e2 | 1524 | |
b5b073e2 PMF |
1525 | offsets.size = 0; |
1526 | ||
10dd3941 | 1527 | DBG("Switching (forced) %s_%d", chan->channel_name, buf->cpu); |
b5b073e2 PMF |
1528 | /* |
1529 | * Perform retryable operations. | |
1530 | */ | |
b5b073e2 | 1531 | do { |
b73a4c47 PMF |
1532 | if (ltt_relay_try_switch_slow(mode, chan, buf, |
1533 | &offsets, &tsc)) | |
1534 | return; | |
b5b073e2 PMF |
1535 | } while (local_cmpxchg(&buf->offset, offsets.old, |
1536 | offsets.end) != offsets.old); | |
1537 | ||
1538 | /* | |
1539 | * Atomically update last_tsc. This update races against concurrent | |
1540 | * atomic updates, but the race will always cause supplementary full TSC | |
1541 | * events, never the opposite (missing a full TSC event when it would be | |
1542 | * needed). | |
1543 | */ | |
b73a4c47 | 1544 | save_last_tsc(buf, tsc); |
b5b073e2 PMF |
1545 | |
1546 | /* | |
1547 | * Push the reader if necessary | |
1548 | */ | |
b73a4c47 PMF |
1549 | if (mode == FORCE_ACTIVE) { |
1550 | ltt_reserve_push_reader(chan, buf, offsets.end - 1); | |
1551 | //ust// ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.end - 1, chan)); | |
1552 | } | |
b5b073e2 PMF |
1553 | |
1554 | /* | |
1555 | * Switch old subbuffer if needed. | |
1556 | */ | |
b73a4c47 PMF |
1557 | if (offsets.end_switch_old) { |
1558 | //ust// ltt_clear_noref_flag(rchan, buf, SUBBUF_INDEX(offsets.old - 1, rchan)); | |
1559 | ltt_reserve_switch_old_subbuf(chan, buf, &offsets, &tsc); | |
1560 | } | |
b5b073e2 PMF |
1561 | |
1562 | /* | |
1563 | * Populate new subbuffer. | |
1564 | */ | |
b73a4c47 PMF |
1565 | if (mode == FORCE_ACTIVE) |
1566 | ltt_reserve_switch_new_subbuf(chan, buf, &offsets, &tsc); | |
1567 | } | |
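/*
 * Illustration (not part of the original file): minimal sketch of the
 * lockless "snapshot, compute, publish with cmpxchg, retry on conflict"
 * pattern used by the switch and reserve paths above.  It uses the GCC
 * __sync_val_compare_and_swap() builtin instead of local_cmpxchg(), purely
 * for the sake of a standalone example.
 */
#if 0
#include <stdio.h>

static unsigned long example_offset;            /* shared write position */

static unsigned long example_reserve(unsigned long size)
{
	unsigned long old, new_off;

	do {
		old = example_offset;           /* snapshot current state */
		new_off = old + size;           /* compute the desired state */
		/* Publish only if nobody moved the offset in the meantime. */
	} while (__sync_val_compare_and_swap(&example_offset, old, new_off) != old);

	return old;                             /* start of the reserved slot */
}

int main(void)
{
	printf("slot at %lu\n", example_reserve(64));   /* 0  */
	printf("slot at %lu\n", example_reserve(32));   /* 64 */
	return 0;
}
#endif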
b5b073e2 | 1568 | |
b73a4c47 PMF |
1569 | /* |
1570 | * Returns : | |
1571 | * 0 if ok | |
1572 | * !0 if execution must be aborted. | |
1573 | */ | |
1574 | static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffer *buf, | |
1575 | struct ltt_reserve_switch_offsets *offsets, size_t data_size, | |
1576 | u64 *tsc, unsigned int *rflags, int largest_align) | |
1577 | { | |
1578 | long reserve_commit_diff; | |
b5b073e2 | 1579 | |
b73a4c47 PMF |
1580 | offsets->begin = local_read(&buf->offset); |
1581 | offsets->old = offsets->begin; | |
1582 | offsets->begin_switch = 0; | |
1583 | offsets->end_switch_current = 0; | |
1584 | offsets->end_switch_old = 0; | |
1585 | ||
1586 | *tsc = trace_clock_read64(); | |
1587 | if (last_tsc_overflow(buf, *tsc)) | |
1588 | *rflags = LTT_RFLAG_ID_SIZE_TSC; | |
1589 | ||
1590 | if (unlikely(SUBBUF_OFFSET(offsets->begin, buf->chan) == 0)) { | |
1591 | offsets->begin_switch = 1; /* For offsets->begin */ | |
1592 | } else { | |
1593 | offsets->size = ust_get_header_size(chan, | |
1594 | offsets->begin, data_size, | |
1595 | &offsets->before_hdr_pad, *rflags); | |
1596 | offsets->size += ltt_align(offsets->begin + offsets->size, | |
1597 | largest_align) | |
1598 | + data_size; | |
1599 | if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan) + | |
1600 | offsets->size) > buf->chan->subbuf_size)) { | |
1601 | offsets->end_switch_old = 1; /* For offsets->old */ | |
1602 | offsets->begin_switch = 1; /* For offsets->begin */ | |
1603 | } | |
1604 | } | |
1605 | if (unlikely(offsets->begin_switch)) { | |
1606 | long subbuf_index; | |
1607 | ||
1608 | /* | |
1609 | * We are typically not filling the previous buffer completely. | |
1610 | */ | |
1611 | if (likely(offsets->end_switch_old)) | |
1612 | offsets->begin = SUBBUF_ALIGN(offsets->begin, | |
1613 | buf->chan); | |
1614 | offsets->begin = offsets->begin + ltt_subbuffer_header_size(); | |
1615 | /* Test new buffer integrity */ | |
1616 | subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan); | |
1617 | reserve_commit_diff = | |
1618 | (BUFFER_TRUNC(offsets->begin, buf->chan) | |
1619 | >> chan->n_subbufs_order) | |
1620 | - (local_read(&buf->commit_count[subbuf_index].cc_sb) | |
1621 | & chan->commit_count_mask); | |
1622 | if (likely(reserve_commit_diff == 0)) { | |
1623 | /* Next buffer not corrupted. */ | |
1624 | if (unlikely(!chan->overwrite && | |
1625 | (SUBBUF_TRUNC(offsets->begin, buf->chan) | |
1626 | - SUBBUF_TRUNC(atomic_long_read( | |
1627 | &buf->consumed), | |
1628 | buf->chan)) | |
1629 | >= chan->alloc_size)) { | |
1630 | /* | |
1631 | * We do not overwrite non-consumed buffers | |
1632 | * and we are full : event is lost. | |
1633 | */ | |
1634 | local_inc(&buf->events_lost); | |
1635 | return -1; | |
1636 | } else { | |
1637 | /* | |
1638 | * next buffer not corrupted, we are either in | |
1639 | * overwrite mode or the buffer is not full. | |
1640 | * It's safe to write in this new subbuffer. | |
1641 | */ | |
1642 | } | |
1643 | } else { | |
1644 | /* | |
1645 | * Next subbuffer corrupted. Drop event in normal and | |
1646 | * overwrite mode. Caused by either a writer OOPS or | |
1647 | * too many nested writes over a reserve/commit pair. | |
1648 | */ | |
1649 | local_inc(&buf->events_lost); | |
1650 | return -1; | |
1651 | } | |
1652 | offsets->size = ust_get_header_size(chan, | |
1653 | offsets->begin, data_size, | |
1654 | &offsets->before_hdr_pad, *rflags); | |
1655 | offsets->size += ltt_align(offsets->begin + offsets->size, | |
1656 | largest_align) | |
1657 | + data_size; | |
1658 | if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan) | |
1659 | + offsets->size) > buf->chan->subbuf_size)) { | |
1660 | /* | |
1661 | * Event too big for subbuffers, report error, don't | |
1662 | * complete the sub-buffer switch. | |
1663 | */ | |
1664 | local_inc(&buf->events_lost); | |
1665 | return -1; | |
1666 | } else { | |
1667 | /* | |
1668 | * We just made a successful buffer switch and the event | |
1669 | * fits in the new subbuffer. Let's write. | |
1670 | */ | |
1671 | } | |
1672 | } else { | |
1673 | /* | |
1674 | * Event fits in the current buffer and we are not on a switch | |
1675 | * boundary. It's safe to write. | |
1676 | */ | |
1677 | } | |
1678 | offsets->end = offsets->begin + offsets->size; | |
1679 | ||
1680 | if (unlikely((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0)) { | |
1681 | /* | |
1682 | * The offset_end will fall at the very beginning of the next | |
1683 | * subbuffer. | |
1684 | */ | |
1685 | offsets->end_switch_current = 1; /* For offsets->begin */ | |
1686 | } | |
b5b073e2 PMF |
1687 | return 0; |
1688 | } | |
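/*
 * Illustration (not part of the original file): sketch of the slot size
 * computed above, i.e. event header, then padding so the payload starts on
 * its natural alignment, then the payload.  example_align() is a local
 * stand-in for ltt_align(), which is assumed to return the number of
 * padding bytes needed to reach a power-of-two alignment; the header size
 * below is just an illustrative value.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

static size_t example_align(size_t offset, size_t alignment)
{
	/* Padding bytes needed to round "offset" up to "alignment". */
	return (-offset) & (alignment - 1);
}

int main(void)
{
	size_t begin = 4102;         /* current write offset inside the buffer */
	size_t header_size = 6;      /* e.g. 32-bit id_time plus 16-bit event id */
	size_t data_size = 24;       /* payload bytes */
	size_t largest_align = 8;    /* strictest alignment among payload fields */

	size_t size = header_size;
	size += example_align(begin + size, largest_align) + data_size;

	/* 4102 + 6 = 4108, 4 padding bytes to reach 4112, then 24 bytes: 34. */
	printf("slot size = %zu\n", size);
	return 0;
}
#endif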
1689 | ||
b73a4c47 PMF |
1690 | /** |
1691 | * ltt_relay_reserve_slot_lockless_slow - Atomic slot reservation in a buffer. | |
1692 | * @trace: the trace structure to log to. | |
1693 | * @chan: channel structure | |
1694 | * @transport_data: data structure specific to ltt relay | |
1695 | * @data_size: size of the variable length data to log. | |
1696 | * @slot_size: pointer to total size of the slot (out) | |
1697 | * @buf_offset : pointer to reserved buffer offset (out) | |
1698 | * @tsc: pointer to the tsc at the slot reservation (out) | |
1699 | * @cpu: cpuid | |
b5b073e2 | 1700 | * |
b73a4c47 PMF |
1701 | * Return : -ENOSPC if not enough space, else returns 0. |
1702 | * It will take care of sub-buffer switching. | |
b5b073e2 | 1703 | */ |
b73a4c47 PMF |
1704 | int ltt_reserve_slot_lockless_slow(struct ust_trace *trace, |
1705 | struct ust_channel *chan, void **transport_data, | |
1706 | size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc, | |
1707 | unsigned int *rflags, int largest_align, int cpu) | |
b5b073e2 | 1708 | { |
b73a4c47 | 1709 | struct ust_buffer *buf = chan->buf[cpu]; |
b5b073e2 | 1710 | struct ltt_reserve_switch_offsets offsets; |
b5b073e2 | 1711 | |
b5b073e2 PMF |
1712 | offsets.size = 0; |
1713 | ||
b5b073e2 | 1714 | do { |
b73a4c47 PMF |
1715 | if (unlikely(ltt_relay_try_reserve_slow(chan, buf, &offsets, |
1716 | data_size, tsc, rflags, largest_align))) | |
1717 | return -ENOSPC; | |
1718 | } while (unlikely(local_cmpxchg(&buf->offset, offsets.old, | |
1719 | offsets.end) != offsets.old)); | |
b5b073e2 PMF |
1720 | |
1721 | /* | |
1722 | * Atomically update last_tsc. This update races against concurrent | |
1723 | * atomic updates, but the race will always cause supplementary full TSC | |
1724 | * events, never the opposite (missing a full TSC event when it would be | |
1725 | * needed). | |
1726 | */ | |
b73a4c47 | 1727 | save_last_tsc(buf, *tsc); |
b5b073e2 PMF |
1728 | |
1729 | /* | |
1730 | * Push the reader if necessary | |
1731 | */ | |
b73a4c47 PMF |
1732 | ltt_reserve_push_reader(chan, buf, offsets.end - 1); |
1733 | ||
1734 | /* | |
1735 | * Clear noref flag for this subbuffer. | |
1736 | */ | |
1737 | //ust// ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.end - 1, chan)); | |
b5b073e2 PMF |
1738 | |
1739 | /* | |
1740 | * Switch old subbuffer if needed. | |
1741 | */ | |
b73a4c47 PMF |
1742 | if (unlikely(offsets.end_switch_old)) { |
1743 | //ust// ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.old - 1, chan)); | |
1744 | ltt_reserve_switch_old_subbuf(chan, buf, &offsets, tsc); | |
10dd3941 | 1745 | DBG("Switching %s_%d", chan->channel_name, cpu); |
b73a4c47 | 1746 | } |
b5b073e2 PMF |
1747 | |
1748 | /* | |
1749 | * Populate new subbuffer. | |
1750 | */ | |
b73a4c47 PMF |
1751 | if (unlikely(offsets.begin_switch)) |
1752 | ltt_reserve_switch_new_subbuf(chan, buf, &offsets, tsc); | |
1753 | ||
1754 | if (unlikely(offsets.end_switch_current)) | |
1755 | ltt_reserve_end_switch_current(chan, buf, &offsets, tsc); | |
1756 | ||
1757 | *slot_size = offsets.size; | |
1758 | *buf_offset = offsets.begin + offsets.before_hdr_pad; | |
1759 | return 0; | |
b5b073e2 PMF |
1760 | } |
1761 | ||
b5b073e2 PMF |
1762 | static struct ltt_transport ust_relay_transport = { |
1763 | .name = "ustrelay", | |
1764 | .ops = { | |
1765 | .create_channel = ust_buffers_create_channel, | |
1766 | .finish_channel = ltt_relay_finish_channel, | |
1767 | .remove_channel = ltt_relay_remove_channel, | |
1768 | .wakeup_channel = ltt_relay_async_wakeup_chan, | |
b5b073e2 PMF |
1769 | }, |
1770 | }; | |
1771 | ||
b5b073e2 PMF |
1772 | static char initialized = 0; |
1773 | ||
1774 | void __attribute__((constructor)) init_ustrelay_transport(void) | |
1775 | { | |
1776 | if(!initialized) { | |
1777 | ltt_transport_register(&ust_relay_transport); | |
1778 | initialized = 1; | |
1779 | } | |
1780 | } | |
1781 | ||
b73a4c47 | 1782 | static void __attribute__((destructor)) ust_buffers_exit(void) |
b5b073e2 PMF |
1783 | { |
1784 | ltt_transport_unregister(&ust_relay_transport); | |
1785 | } | |
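/*
 * Illustration (not part of the original file): the constructor/destructor
 * registration pattern used above.  A function marked
 * __attribute__((constructor)) runs when the library is loaded (before
 * main() for a directly linked program) and a destructor runs at unload or
 * exit; the flag keeps registration idempotent if the constructor were ever
 * invoked twice.
 */
#if 0
#include <stdio.h>

static int example_registered;

static void __attribute__((constructor)) example_register(void)
{
	if (!example_registered) {
		printf("transport registered\n");
		example_registered = 1;
	}
}

static void __attribute__((destructor)) example_unregister(void)
{
	printf("transport unregistered\n");
}

int main(void)
{
	printf("main body\n");
	return 0;
}
#endif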
b73a4c47 PMF |
1786 | |
1787 | size_t ltt_write_event_header_slow(struct ust_trace *trace, | |
1788 | struct ust_channel *channel, | |
1789 | struct ust_buffer *buf, long buf_offset, | |
1790 | u16 eID, u32 event_size, | |
1791 | u64 tsc, unsigned int rflags) | |
1792 | { | |
1793 | struct ltt_event_header header; | |
1794 | u16 small_size; | |
1795 | ||
1796 | switch (rflags) { | |
1797 | case LTT_RFLAG_ID_SIZE_TSC: | |
1798 | header.id_time = 29 << LTT_TSC_BITS; | |
1799 | break; | |
1800 | case LTT_RFLAG_ID_SIZE: | |
1801 | header.id_time = 30 << LTT_TSC_BITS; | |
1802 | break; | |
1803 | case LTT_RFLAG_ID: | |
1804 | header.id_time = 31 << LTT_TSC_BITS; | |
1805 | break; | |
1806 | } | |
1807 | ||
1808 | header.id_time |= (u32)tsc & LTT_TSC_MASK; | |
1809 | ust_buffers_write(buf, buf_offset, &header, sizeof(header)); | |
1810 | buf_offset += sizeof(header); | |
1811 | ||
1812 | switch (rflags) { | |
1813 | case LTT_RFLAG_ID_SIZE_TSC: | |
1814 | small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE); | |
1815 | ust_buffers_write(buf, buf_offset, | |
1816 | &eID, sizeof(u16)); | |
1817 | buf_offset += sizeof(u16); | |
1818 | ust_buffers_write(buf, buf_offset, | |
1819 | &small_size, sizeof(u16)); | |
1820 | buf_offset += sizeof(u16); | |
1821 | if (small_size == LTT_MAX_SMALL_SIZE) { | |
1822 | ust_buffers_write(buf, buf_offset, | |
1823 | &event_size, sizeof(u32)); | |
1824 | buf_offset += sizeof(u32); | |
1825 | } | |
1826 | buf_offset += ltt_align(buf_offset, sizeof(u64)); | |
1827 | ust_buffers_write(buf, buf_offset, | |
1828 | &tsc, sizeof(u64)); | |
1829 | buf_offset += sizeof(u64); | |
1830 | break; | |
1831 | case LTT_RFLAG_ID_SIZE: | |
1832 | small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE); | |
1833 | ust_buffers_write(buf, buf_offset, | |
1834 | &eID, sizeof(u16)); | |
1835 | buf_offset += sizeof(u16); | |
1836 | ust_buffers_write(buf, buf_offset, | |
1837 | &small_size, sizeof(u16)); | |
1838 | buf_offset += sizeof(u16); | |
1839 | if (small_size == LTT_MAX_SMALL_SIZE) { | |
1840 | ust_buffers_write(buf, buf_offset, | |
1841 | &event_size, sizeof(u32)); | |
1842 | buf_offset += sizeof(u32); | |
1843 | } | |
1844 | break; | |
1845 | case LTT_RFLAG_ID: | |
1846 | ust_buffers_write(buf, buf_offset, | |
1847 | &eID, sizeof(u16)); | |
1848 | buf_offset += sizeof(u16); | |
1849 | break; | |
1850 | } | |
1851 | ||
1852 | return buf_offset; | |
1853 | } |
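/*
 * Illustration (not part of the original file): sketch of the 32-bit compact
 * header word written above.  The top bits carry either a small event id or
 * one of the escape codes 29/30/31 and the low LTT_TSC_BITS carry a
 * truncated timestamp.  The bit width used here is an assumption for the
 * example only; the real constants come from the LTTng headers.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_TSC_BITS 27U
#define EXAMPLE_TSC_MASK ((1U << EXAMPLE_TSC_BITS) - 1)

static uint32_t example_pack_id_time(uint32_t id_or_code, uint64_t tsc)
{
	return (id_or_code << EXAMPLE_TSC_BITS) | ((uint32_t)tsc & EXAMPLE_TSC_MASK);
}

int main(void)
{
	uint64_t tsc = 0x123456789abcULL;
	/* 31 is the escape code used when only the extended event id follows
	 * (the LTT_RFLAG_ID case in the switch above). */
	uint32_t word = example_pack_id_time(31, tsc);

	printf("id_time = 0x%08x (code %u, truncated tsc 0x%07x)\n",
	       (unsigned)word,
	       (unsigned)(word >> EXAMPLE_TSC_BITS),
	       (unsigned)(word & EXAMPLE_TSC_MASK));
	return 0;
}
#endif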