libust: Fix multiple fd close during fork v2
[ust.git] / libust / buffers.c
/*
 * buffers.c
 * LTTng userspace tracer buffering system
 *
 * Copyright (C) 2009 - Pierre-Marc Fournier (pierre-marc dot fournier at polymtl dot ca)
 * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <unistd.h>
#include <sys/mman.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <fcntl.h>
#include <stdlib.h>

#include <ust/clock.h>

#include "buffers.h"
#include "channels.h"
#include "tracer.h"
#include "tracercore.h"
#include "usterr.h"

struct ltt_reserve_switch_offsets {
        long begin, end, old;
        long begin_switch, end_switch_current, end_switch_old;
        size_t before_hdr_pad, size;
};

static DEFINE_MUTEX(ust_buffers_channels_mutex);
static CDS_LIST_HEAD(ust_buffers_channels);

static int get_n_cpus(void)
{
        int result;
        static int n_cpus = 0;

        if(!n_cpus) {
                /* On Linux, when some processors are offline
                 * _SC_NPROCESSORS_CONF counts the offline
                 * processors, whereas _SC_NPROCESSORS_ONLN
                 * does not. If we used _SC_NPROCESSORS_ONLN,
                 * getcpu() could return a value greater than
                 * this sysconf, in which case the arrays
                 * indexed by processor would overflow.
                 */
                result = sysconf(_SC_NPROCESSORS_CONF);
                if(result == -1) {
                        return -1;
                }

                n_cpus = result;
        }

        return n_cpus;
}
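
/*
 * Illustrative, standalone sketch (not part of the tracer) of why
 * _SC_NPROCESSORS_CONF is used above: with offlined CPUs the two sysconf
 * counts diverge, and per-cpu arrays sized by the online count could be
 * overflowed by getcpu(). Plain POSIX, no UST assumptions.
 */
#if 0
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        long conf = sysconf(_SC_NPROCESSORS_CONF);      /* configured CPUs */
        long onln = sysconf(_SC_NPROCESSORS_ONLN);      /* currently online */

        printf("configured: %ld, online: %ld\n", conf, onln);
        /* Per-cpu arrays must be sized by 'conf', not 'onln'. */
        return 0;
}
#endif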

/**
 * _ust_buffers_strncpy_fixup - Fix an incomplete string in an ltt_relay buffer.
 * @buf: buffer
 * @offset: offset within the buffer
 * @len: length to write
 * @copied: string actually copied
 * @terminated: does the string end with \0?
 *
 * Fills the string with "X" if incomplete.
 */
void _ust_buffers_strncpy_fixup(struct ust_buffer *buf, size_t offset,
                size_t len, size_t copied, int terminated)
{
        size_t buf_offset, cpy;

        if (copied == len) {
                /*
                 * Deal with non-terminated string.
                 */
                assert(!terminated);
                offset += copied - 1;
                buf_offset = BUFFER_OFFSET(offset, buf->chan);
                /*
                 * Underlying layer should never ask for writes across
                 * subbuffers.
                 */
                assert(buf_offset
                       < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
                ust_buffers_do_memset(buf->buf_data + buf_offset, '\0', 1);
                return;
        }

        /*
         * Deal with incomplete string.
         * Overwrite string's \0 with X too.
         */
        cpy = copied - 1;
        assert(terminated);
        len -= cpy;
        offset += cpy;
        buf_offset = BUFFER_OFFSET(offset, buf->chan);

        /*
         * Underlying layer should never ask for writes across subbuffers.
         */
        assert(buf_offset
               < buf->chan->subbuf_size*buf->chan->subbuf_cnt);

        ust_buffers_do_memset(buf->buf_data + buf_offset, 'X', len);

        /*
         * Overwrite last 'X' with '\0'.
         */
        offset += len - 1;
        buf_offset = BUFFER_OFFSET(offset, buf->chan);
        /*
         * Underlying layer should never ask for writes across subbuffers.
         */
        assert(buf_offset
               < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
        ust_buffers_do_memset(buf->buf_data + buf_offset, '\0', 1);
}
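
/*
 * Worked example of the fixup above, assuming an 8-byte write slot
 * (len = 8): strncpy copied "abc" plus its terminator (copied = 4,
 * terminated = 1). Then cpy = 3, so the remaining 5 bytes starting at the
 * string's '\0' are filled with 'X' and the last one is turned back into
 * '\0'. The slot ends up as "abcXXXX\0": fixed length, still
 * NULL-terminated, with the padding clearly recognizable.
 */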

static void ltt_buffer_begin(struct ust_buffer *buf,
                u64 tsc, unsigned int subbuf_idx)
{
        struct ust_channel *channel = buf->chan;
        struct ltt_subbuffer_header *header =
                (struct ltt_subbuffer_header *)
                        ust_buffers_offset_address(buf,
                                subbuf_idx * buf->chan->subbuf_size);

        header->cycle_count_begin = tsc;
        header->data_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
        header->sb_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
        /* FIXME: add memory barrier? */
        ltt_write_trace_header(channel->trace, header);
}

static int map_buf_data(struct ust_buffer *buf, size_t *size)
{
        void *ptr;
        int result;

        *size = PAGE_ALIGN(*size);

        result = buf->shmid = shmget(getpid(), *size, IPC_CREAT | IPC_EXCL | 0700);
        if (result < 0 && errno == EINVAL) {
                ERR("shmget() returned EINVAL; maybe /proc/sys/kernel/shmmax should be increased.");
                return -1;
        } else if (result < 0) {
                PERROR("shmget");
                return -1;
        }

        ptr = shmat(buf->shmid, NULL, 0);
        if (ptr == (void *) -1) {
                perror("shmat");
                goto destroy_shmem;
        }

        /* Already mark the shared memory for destruction. This will occur only
         * when all users have detached.
         */
        result = shmctl(buf->shmid, IPC_RMID, NULL);
        if(result == -1) {
                perror("shmctl");
                return -1;
        }

        buf->buf_data = ptr;
        buf->buf_size = *size;

        return 0;

destroy_shmem:
        result = shmctl(buf->shmid, IPC_RMID, NULL);
        if(result == -1) {
                perror("shmctl");
        }

        return -1;
}
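
/*
 * Standalone sketch of the SysV shm idiom used by map_buf_data() above:
 * create an exclusive segment, attach it, then immediately mark it with
 * IPC_RMID so the kernel reclaims it as soon as the last attached process
 * detaches (or dies), even after a crash. The getpid() key mirrors the code
 * above; a real caller would pick a key that cannot collide.
 */
#if 0
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <unistd.h>

static void *grab_segment(size_t size)
{
        int id = shmget(getpid(), size, IPC_CREAT | IPC_EXCL | 0700);
        if (id < 0) {
                perror("shmget");
                return NULL;
        }
        void *p = shmat(id, NULL, 0);
        /* Mark for destruction right away: segment lives until last shmdt(). */
        shmctl(id, IPC_RMID, NULL);
        if (p == (void *) -1) {
                perror("shmat");
                return NULL;
        }
        return p;
}
#endif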

static int open_buf(struct ust_channel *chan, int cpu)
{
        int result, fds[2];
        unsigned int j;
        struct ust_trace *trace = chan->trace;
        struct ust_buffer *buf = chan->buf[cpu];
        unsigned int n_subbufs = chan->subbuf_cnt;

        result = map_buf_data(buf, &chan->alloc_size);
        if (result < 0)
                return -1;

        buf->commit_count =
                zmalloc(sizeof(*buf->commit_count) * n_subbufs);
        if (!buf->commit_count)
                goto unmap_buf;

        result = pipe(fds);
        if (result < 0) {
                PERROR("pipe");
                goto free_commit_count;
        }
        buf->data_ready_fd_read = fds[0];
        buf->data_ready_fd_write = fds[1];

        buf->cpu = cpu;
        buf->chan = chan;

        uatomic_set(&buf->offset, ltt_subbuffer_header_size());
        uatomic_set(&buf->consumed, 0);
        uatomic_set(&buf->active_readers, 0);
        for (j = 0; j < n_subbufs; j++) {
                uatomic_set(&buf->commit_count[j].cc, 0);
                uatomic_set(&buf->commit_count[j].cc_sb, 0);
        }

        ltt_buffer_begin(buf, trace->start_tsc, 0);

        uatomic_add(&buf->commit_count[0].cc, ltt_subbuffer_header_size());

        uatomic_set(&buf->events_lost, 0);
        uatomic_set(&buf->corrupted_subbuffers, 0);

        memset(buf->commit_seq, 0, sizeof(buf->commit_seq[0]) * n_subbufs);

        return 0;

free_commit_count:
        free(buf->commit_count);

unmap_buf:
        if (shmdt(buf->buf_data) < 0) {
                PERROR("shmdt failed");
        }

        return -1;
}

static void ltt_relay_print_buffer_errors(struct ust_channel *chan, int cpu);

static void close_buf(struct ust_buffer *buf)
{
        struct ust_channel *chan = buf->chan;
        int cpu = buf->cpu;
        int result;

        result = shmdt(buf->buf_data);
        if (result < 0) {
                PERROR("shmdt");
        }

        free(buf->commit_count);

        result = close(buf->data_ready_fd_read);
        if (result < 0) {
                PERROR("close");
        }

        result = close(buf->data_ready_fd_write);
        if (result < 0 && errno != EBADF) {
                PERROR("close");
        }

        /* FIXME: this spews out errors, are they real?
         * ltt_relay_print_buffer_errors(chan, cpu); */
}
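
/*
 * Consumer-side sketch of the data_ready pipe set up in open_buf() and torn
 * down above. What this file guarantees is that closing the write end (here
 * and in ltt_relay_finish_buffer() below) makes the consumer's blocking
 * read() return 0, i.e. end of stream; the assumption that each ready
 * subbuffer is announced by writing a byte to data_ready_fd_write comes
 * from the fd naming, not from this file. The EBADF tolerance above matches
 * the commit subject ("fix multiple fd close during fork"): the descriptor
 * may legitimately have been closed once already.
 */
#if 0
#include <unistd.h>

/* Returns 1 when data is ready, 0 once the writer has closed the pipe. */
static int wait_data_ready(int data_ready_fd_read)
{
        char c;

        return read(data_ready_fd_read, &c, 1) == 1;
}
#endif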

static int open_channel(struct ust_channel *chan, size_t subbuf_size,
                size_t subbuf_cnt)
{
        int i;
        int result;

        if(subbuf_size == 0 || subbuf_cnt == 0)
                return -1;

        /* Check that the subbuffer size is at least as large as a page. */
        WARN_ON_ONCE(subbuf_size < PAGE_SIZE);

        /*
         * Make sure the number of subbuffers and the subbuffer size are
         * powers of 2.
         */
        WARN_ON_ONCE(hweight32(subbuf_size) != 1);
        WARN_ON(hweight32(subbuf_cnt) != 1);

        chan->version = UST_CHANNEL_VERSION;
        chan->subbuf_cnt = subbuf_cnt;
        chan->subbuf_size = subbuf_size;
        chan->subbuf_size_order = get_count_order(subbuf_size);
        chan->alloc_size = subbuf_size * subbuf_cnt;

        pthread_mutex_lock(&ust_buffers_channels_mutex);
        for (i=0; i < chan->n_cpus; i++) {
                result = open_buf(chan, i);
                if (result == -1)
                        goto error;
        }
        cds_list_add(&chan->list, &ust_buffers_channels);
        pthread_mutex_unlock(&ust_buffers_channels_mutex);

        return 0;

        /* Error handling */
error:
        for(i--; i >= 0; i--)
                close_buf(chan->buf[i]);

        pthread_mutex_unlock(&ust_buffers_channels_mutex);
        return -1;
}
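
/*
 * Note on the checks above: hweight32() is a population count, so
 * "hweight32(x) == 1" is exactly the "x is a power of 2" test. An
 * equivalent branch-free form in plain C, shown for illustration:
 */
#if 0
static inline int is_power_of_two(unsigned long x)
{
        return x != 0 && (x & (x - 1)) == 0;
}
#endif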

static void close_channel(struct ust_channel *chan)
{
        int i;
        if(!chan)
                return;

        pthread_mutex_lock(&ust_buffers_channels_mutex);
        for(i=0; i<chan->n_cpus; i++) {
                /* FIXME: if we make it here, then all buffers were necessarily allocated. Moreover, we don't
                 * initialize to NULL so we cannot use this check. Should we? */
//ust//         if (chan->buf[i])
                close_buf(chan->buf[i]);
        }

        cds_list_del(&chan->list);

        pthread_mutex_unlock(&ust_buffers_channels_mutex);
}

static void ltt_force_switch(struct ust_buffer *buf,
                enum force_switch_mode mode);

/*
 * offset is assumed never to be 0 here: never deliver a completely empty
 * subbuffer. The lost size is between 0 and subbuf_size-1.
 */
static notrace void ltt_buffer_end(struct ust_buffer *buf,
                u64 tsc, unsigned int offset, unsigned int subbuf_idx)
{
        struct ltt_subbuffer_header *header =
                (struct ltt_subbuffer_header *)
                        ust_buffers_offset_address(buf,
                                subbuf_idx * buf->chan->subbuf_size);
        u32 data_size = SUBBUF_OFFSET(offset - 1, buf->chan) + 1;

        header->data_size = data_size;
        header->sb_size = PAGE_ALIGN(data_size);
        header->cycle_count_end = tsc;
        header->events_lost = uatomic_read(&buf->events_lost);
        header->subbuf_corrupt = uatomic_read(&buf->corrupted_subbuffers);
        if(unlikely(header->events_lost > 0)) {
                DBG("Some events (%d) were lost in %s_%d", header->events_lost, buf->chan->channel_name, buf->cpu);
        }
}

/*
 * This function should not be called from NMI interrupt context
 */
static notrace void ltt_buf_unfull(struct ust_buffer *buf,
                unsigned int subbuf_idx,
                long offset)
{
}

/*
 * Promote compiler cmm_barrier() to a smp_mb().
 * For the specific LTTng case, this IPI call should be removed if the
 * architecture does not reorder writes. This should eventually be provided by
 * a separate architecture-specific infrastructure.
 */
//ust// static void remote_mb(void *info)
//ust// {
//ust//         smp_mb();
//ust// }

int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed)
{
        struct ust_channel *channel = buf->chan;
        long consumed_old, consumed_idx, commit_count, write_offset;
//ust// int retval;

        consumed_old = uatomic_read(&buf->consumed);
        consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
        commit_count = uatomic_read(&buf->commit_count[consumed_idx].cc_sb);
        /*
         * Make sure we read the commit count before reading the buffer
         * data and the write offset. Correct consumed offset ordering
         * wrt commit count is ensured by the use of cmpxchg to update
         * the consumed offset.
         * smp_call_function_single can fail if the remote CPU is offline,
         * this is OK because then there is no wmb to execute there.
         * If our thread is executing on the same CPU as the one the buffer
         * belongs to, we don't have to synchronize it at all. If we are
         * migrated, the scheduler will take care of the memory cmm_barriers.
         * Normally, smp_call_function_single() should ensure program order when
         * executing the remote function, which implies that it surrounds the
         * function execution with:
         * smp_mb()
         * send IPI
         * csd_lock_wait
         * recv IPI
         * smp_mb()
         * exec. function
         * smp_mb()
         * csd unlock
         * smp_mb()
         *
         * However, smp_call_function_single() does not seem to clearly execute
         * such barriers. It depends on spinlock semantics to provide the barrier
         * before executing the IPI and, when busy-looping, csd_lock_wait only
         * executes smp_mb() when it has to wait for the other CPU.
         *
         * I don't trust this code. Therefore, let's add the smp_mb() sequence
         * required ourselves, even if duplicated. It has no performance impact
         * anyway.
         *
         * smp_mb() is needed because cmm_smp_rmb() and cmm_smp_wmb() only order read vs
         * read and write vs write. They do not ensure core synchronization. We
         * really have to ensure total order between the 3 cmm_barriers running on
         * the 2 CPUs.
         */
//ust// #ifdef LTT_NO_IPI_BARRIER
        /*
         * Local rmb to match the remote wmb to read the commit count before the
         * buffer data and the write offset.
         */
        cmm_smp_rmb();
//ust// #else
//ust// if (raw_smp_processor_id() != buf->cpu) {
//ust//         smp_mb();       /* Total order with IPI handler smp_mb() */
//ust//         smp_call_function_single(buf->cpu, remote_mb, NULL, 1);
//ust//         smp_mb();       /* Total order with IPI handler smp_mb() */
//ust// }
//ust// #endif

        write_offset = uatomic_read(&buf->offset);
        /*
         * Check that the subbuffer we are trying to consume has been
         * already fully committed.
         */
        if (((commit_count - buf->chan->subbuf_size)
             & channel->commit_count_mask)
            - (BUFFER_TRUNC(consumed_old, buf->chan)
               >> channel->n_subbufs_order)
            != 0) {
                return -EAGAIN;
        }
        /*
         * Check that we are not about to read the same subbuffer in
         * which the writer head is.
         */
        if ((SUBBUF_TRUNC(write_offset, buf->chan)
             - SUBBUF_TRUNC(consumed_old, buf->chan))
            == 0) {
                return -EAGAIN;
        }

        /* FIXME: is this ok to disable the reading feature? */
//ust// retval = update_read_sb_index(buf, consumed_idx);
//ust// if (retval)
//ust//         return retval;

        *consumed = consumed_old;

        return 0;
}

int ust_buffers_put_subbuf(struct ust_buffer *buf, unsigned long uconsumed_old)
{
        long consumed_new, consumed_old;

        consumed_old = uatomic_read(&buf->consumed);
        consumed_old = consumed_old & (~0xFFFFFFFFL);
        consumed_old = consumed_old | uconsumed_old;
        consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);

//ust// spin_lock(&ltt_buf->full_lock);
        if (uatomic_cmpxchg(&buf->consumed, consumed_old,
                        consumed_new)
                        != consumed_old) {
                /* We have been pushed by the writer: the last
                 * buffer read _is_ corrupted! It can also
                 * happen if this is a buffer we never got. */
//ust//         spin_unlock(&ltt_buf->full_lock);
                return -EIO;
        } else {
                /* tell the client that buffer is now unfull */
                int index;
                long data;
                index = SUBBUF_INDEX(consumed_old, buf->chan);
                data = BUFFER_OFFSET(consumed_old, buf->chan);
                ltt_buf_unfull(buf, index, data);
//ust//         spin_unlock(&ltt_buf->full_lock);
        }
        return 0;
}
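
/*
 * Illustrative pairing of the two entry points above, as a consumer loop.
 * consume_subbuf() is a hypothetical callback standing in for whatever
 * actually reads the subbuffer contents; it is not part of this file.
 * -EAGAIN from get_subbuf just means no fully committed subbuffer is
 * available yet; -EIO from put_subbuf means the writer pushed us and the
 * data we just read must be discarded.
 */
#if 0
static int drain_buf(struct ust_buffer *buf)
{
        long consumed;
        int ret;

        for (;;) {
                ret = ust_buffers_get_subbuf(buf, &consumed);
                if (ret == -EAGAIN)
                        return 0;       /* nothing committed yet, retry later */
                if (ret < 0)
                        return ret;

                consume_subbuf(buf, consumed);  /* hypothetical reader */

                ret = ust_buffers_put_subbuf(buf, consumed);
                if (ret < 0)
                        return ret;     /* -EIO: pushed by the writer */
        }
}
#endif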

static void ltt_relay_print_subbuffer_errors(
                struct ust_channel *channel,
                long cons_off, int cpu)
{
        struct ust_buffer *ltt_buf = channel->buf[cpu];
        long cons_idx, commit_count, commit_count_sb, write_offset;

        cons_idx = SUBBUF_INDEX(cons_off, channel);
        commit_count = uatomic_read(&ltt_buf->commit_count[cons_idx].cc);
        commit_count_sb = uatomic_read(&ltt_buf->commit_count[cons_idx].cc_sb);

        /*
         * No need to order commit_count and write_offset reads because we
         * execute after trace is stopped when there are no readers left.
         */
        write_offset = uatomic_read(&ltt_buf->offset);
        WARN("LTT : unread channel %s offset is %ld "
             "and cons_off : %ld (cpu %d)\n",
             channel->channel_name, write_offset, cons_off, cpu);
        /* Check each sub-buffer for non filled commit count */
        if (((commit_count - channel->subbuf_size) & channel->commit_count_mask)
            - (BUFFER_TRUNC(cons_off, channel) >> channel->n_subbufs_order) != 0) {
                ERR("LTT : %s : subbuffer %lu has non filled "
                    "commit count [cc, cc_sb] [%lu,%lu].\n",
                    channel->channel_name, cons_idx, commit_count, commit_count_sb);
        }
        ERR("LTT : %s : commit count : %lu, subbuf size %zd\n",
            channel->channel_name, commit_count,
            channel->subbuf_size);
}

static void ltt_relay_print_errors(struct ust_trace *trace,
                struct ust_channel *channel, int cpu)
{
        struct ust_buffer *ltt_buf;
        long cons_off;

        /*
         * Can be called in the error path of allocation when
         * trans_channel_data is not yet set; check the channel before
         * dereferencing it.
         */
        if (!channel)
                return;
        ltt_buf = channel->buf[cpu];

//ust// for (cons_off = 0; cons_off < rchan->alloc_size;
//ust//      cons_off = SUBBUF_ALIGN(cons_off, rchan))
//ust//         ust_buffers_print_written(ltt_chan, cons_off, cpu);
        for (cons_off = uatomic_read(&ltt_buf->consumed);
             (SUBBUF_TRUNC(uatomic_read(&ltt_buf->offset),
                           channel)
              - cons_off) > 0;
             cons_off = SUBBUF_ALIGN(cons_off, channel))
                ltt_relay_print_subbuffer_errors(channel, cons_off, cpu);
}

static void ltt_relay_print_buffer_errors(struct ust_channel *channel, int cpu)
{
        struct ust_trace *trace = channel->trace;
        struct ust_buffer *ltt_buf = channel->buf[cpu];

        if (uatomic_read(&ltt_buf->events_lost))
                ERR("channel %s: %ld events lost (cpu %d)",
                    channel->channel_name,
                    uatomic_read(&ltt_buf->events_lost), cpu);
        if (uatomic_read(&ltt_buf->corrupted_subbuffers))
                ERR("channel %s : %ld corrupted subbuffers (cpu %d)",
                    channel->channel_name,
                    uatomic_read(&ltt_buf->corrupted_subbuffers), cpu);

        ltt_relay_print_errors(trace, channel, cpu);
}

static int map_buf_structs(struct ust_channel *chan)
{
        void *ptr;
        int result;
        size_t size;
        int i;

        size = PAGE_ALIGN(1);

        for(i=0; i<chan->n_cpus; i++) {

                result = chan->buf_struct_shmids[i] = shmget(getpid(), size, IPC_CREAT | IPC_EXCL | 0700);
                if(result == -1) {
                        PERROR("shmget");
                        goto destroy_previous;
                }

                ptr = shmat(chan->buf_struct_shmids[i], NULL, 0);
                if(ptr == (void *) -1) {
                        perror("shmat");
                        goto destroy_shm;
                }

                /* Already mark the shared memory for destruction. This will occur only
                 * when all users have detached.
                 */
                result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL);
                if(result == -1) {
                        perror("shmctl");
                        goto destroy_previous;
                }

                chan->buf[i] = ptr;
        }

        return 0;

        /* Jumping inside this loop occurs from within the other loop above with i as
         * counter, so it deallocates the structures for cpu = current_i down to
         * zero. */
        for(; i>=0; i--) {
                destroy_shm:
                result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL);
                if(result == -1) {
                        perror("shmctl");
                }

                destroy_previous:
                continue;
        }

        return -1;
}

static int unmap_buf_structs(struct ust_channel *chan)
{
        int i;

        for (i=0; i < chan->n_cpus; i++) {
                if (shmdt(chan->buf[i]) < 0) {
                        PERROR("shmdt");
                }
        }
        return 0;
}

/*
 * Create channel.
 */
static int create_channel(const char *trace_name, struct ust_trace *trace,
                const char *channel_name, struct ust_channel *chan,
                unsigned int subbuf_size, unsigned int n_subbufs, int overwrite)
{
        int i, result;

        chan->trace = trace;
        chan->overwrite = overwrite;
        chan->n_subbufs_order = get_count_order(n_subbufs);
        chan->commit_count_mask = (~0UL >> chan->n_subbufs_order);
        chan->n_cpus = get_n_cpus();

        /* These mappings should ideally be per-cpu; if somebody can do that
         * from userspace, that would be cool!
         */
        chan->buf = (void *) zmalloc(chan->n_cpus * sizeof(void *));
        if(chan->buf == NULL) {
                goto error;
        }
        chan->buf_struct_shmids = (int *) zmalloc(chan->n_cpus * sizeof(int));
        if(chan->buf_struct_shmids == NULL)
                goto free_buf;

        result = map_buf_structs(chan);
        if(result != 0) {
                goto free_buf_struct_shmids;
        }

        result = open_channel(chan, subbuf_size, n_subbufs);
        if (result != 0) {
                ERR("Cannot open channel for trace %s", trace_name);
                goto unmap_buf_structs;
        }

        return 0;

unmap_buf_structs:
        for (i=0; i < chan->n_cpus; i++) {
                if (shmdt(chan->buf[i]) < 0) {
                        PERROR("shmdt bufstruct");
                }
        }

free_buf_struct_shmids:
        free(chan->buf_struct_shmids);

free_buf:
        free(chan->buf);

error:
        return -1;
}

static void remove_channel(struct ust_channel *chan)
{
        close_channel(chan);

        unmap_buf_structs(chan);

        free(chan->buf_struct_shmids);

        free(chan->buf);
}

static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel)
{
//ust// unsigned int i;
//ust// struct rchan *rchan = ltt_channel->trans_channel_data;
//ust//
//ust// for_each_possible_cpu(i) {
//ust//         struct ltt_channel_buf_struct *ltt_buf =
//ust//                 percpu_ptr(ltt_channel->buf, i);
//ust//
//ust//         if (uatomic_read(&ltt_buf->wakeup_readers) == 1) {
//ust//                 uatomic_set(&ltt_buf->wakeup_readers, 0);
//ust//                 wake_up_interruptible(&rchan->buf[i]->read_wait);
//ust//         }
//ust// }
}

static void ltt_relay_finish_buffer(struct ust_channel *channel, unsigned int cpu)
{
//      int result;

        if (channel->buf[cpu]) {
                struct ust_buffer *buf = channel->buf[cpu];
                ltt_force_switch(buf, FORCE_FLUSH);

                /* closing the pipe tells the consumer the buffer is finished */
                close(buf->data_ready_fd_write);
        }
}

static void finish_channel(struct ust_channel *channel)
{
        unsigned int i;

        for(i=0; i<channel->n_cpus; i++) {
                ltt_relay_finish_buffer(channel, i);
        }
}

/*
 * ltt_reserve_switch_old_subbuf: switch old subbuffer
 *
 * Concurrency safe because we are the last and only thread to alter this
 * sub-buffer. As long as it is not delivered and read, no other thread can
 * alter the offset, alter the reserve_count or call the
 * client_buffer_end_callback on this sub-buffer.
 *
 * The only remaining threads could be the ones with pending commits. They will
 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
 * We detect corrupted subbuffers with commit and reserve counts. We keep a
 * corrupted sub-buffers count and push the readers across these sub-buffers.
 *
 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
 * switches in, finding out it's corrupted. The result will be that the old
 * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
 * will be declared corrupted too because of the commit count adjustment.
 *
 * Note: offset_old should never be 0 here.
 */
static void ltt_reserve_switch_old_subbuf(
                struct ust_channel *chan, struct ust_buffer *buf,
                struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
        long oldidx = SUBBUF_INDEX(offsets->old - 1, chan);
        long commit_count, padding_size;

        padding_size = chan->subbuf_size
                        - (SUBBUF_OFFSET(offsets->old - 1, chan) + 1);
        ltt_buffer_end(buf, *tsc, offsets->old, oldidx);

        /*
         * Must write slot data before incrementing commit count.
         * This compiler barrier is upgraded into a cmm_smp_wmb() by the IPI
         * sent by get_subbuf() when it does its cmm_smp_rmb().
         */
        cmm_smp_wmb();
        uatomic_add(&buf->commit_count[oldidx].cc, padding_size);
        commit_count = uatomic_read(&buf->commit_count[oldidx].cc);
        ltt_check_deliver(chan, buf, offsets->old - 1, commit_count, oldidx);
        ltt_write_commit_counter(chan, buf, oldidx,
                        offsets->old, commit_count, padding_size);
}

/*
 * ltt_reserve_switch_new_subbuf: Populate new subbuffer.
 *
 * This code can be executed unordered: writers may already have written to the
 * sub-buffer before this code gets executed, caution. The commit makes sure
 * that this code is executed before the deliver of this sub-buffer.
 */
static void ltt_reserve_switch_new_subbuf(
                struct ust_channel *chan, struct ust_buffer *buf,
                struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
        long beginidx = SUBBUF_INDEX(offsets->begin, chan);
        long commit_count;

        ltt_buffer_begin(buf, *tsc, beginidx);

        /*
         * Must write slot data before incrementing commit count.
         * This compiler barrier is upgraded into a cmm_smp_wmb() by the IPI
         * sent by get_subbuf() when it does its cmm_smp_rmb().
         */
        cmm_smp_wmb();
        uatomic_add(&buf->commit_count[beginidx].cc, ltt_subbuffer_header_size());
        commit_count = uatomic_read(&buf->commit_count[beginidx].cc);
        /* Check if the written buffer has to be delivered */
        ltt_check_deliver(chan, buf, offsets->begin, commit_count, beginidx);
        ltt_write_commit_counter(chan, buf, beginidx,
                        offsets->begin, commit_count, ltt_subbuffer_header_size());
}

/*
 * ltt_reserve_end_switch_current: finish switching current subbuffer
 *
 * Concurrency safe because we are the last and only thread to alter this
 * sub-buffer. As long as it is not delivered and read, no other thread can
 * alter the offset, alter the reserve_count or call the
 * client_buffer_end_callback on this sub-buffer.
 *
 * The only remaining threads could be the ones with pending commits. They will
 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
 * We detect corrupted subbuffers with commit and reserve counts. We keep a
 * corrupted sub-buffers count and push the readers across these sub-buffers.
 *
 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
 * switches in, finding out it's corrupted. The result will be that the old
 * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
 * will be declared corrupted too because of the commit count adjustment.
 */
static void ltt_reserve_end_switch_current(
                struct ust_channel *chan,
                struct ust_buffer *buf,
                struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
        long endidx = SUBBUF_INDEX(offsets->end - 1, chan);
        long commit_count, padding_size;

        padding_size = chan->subbuf_size
                        - (SUBBUF_OFFSET(offsets->end - 1, chan) + 1);

        ltt_buffer_end(buf, *tsc, offsets->end, endidx);

        /*
         * Must write slot data before incrementing commit count.
         * This compiler barrier is upgraded into a cmm_smp_wmb() by the IPI
         * sent by get_subbuf() when it does its cmm_smp_rmb().
         */
        cmm_smp_wmb();
        uatomic_add(&buf->commit_count[endidx].cc, padding_size);
        commit_count = uatomic_read(&buf->commit_count[endidx].cc);
        ltt_check_deliver(chan, buf,
                        offsets->end - 1, commit_count, endidx);
        ltt_write_commit_counter(chan, buf, endidx,
                        offsets->end, commit_count, padding_size);
}

/*
 * Returns:
 *      0 if ok
 *      !0 if execution must be aborted.
 */
static int ltt_relay_try_switch_slow(
                enum force_switch_mode mode,
                struct ust_channel *chan,
                struct ust_buffer *buf,
                struct ltt_reserve_switch_offsets *offsets,
                u64 *tsc)
{
        long subbuf_index;
        long reserve_commit_diff;

        offsets->begin = uatomic_read(&buf->offset);
        offsets->old = offsets->begin;
        offsets->begin_switch = 0;
        offsets->end_switch_old = 0;

        *tsc = trace_clock_read64();

        if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
                offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
                offsets->end_switch_old = 1;
        } else {
                /* we do not have to switch: buffer is empty */
                return -1;
        }
        if (mode == FORCE_ACTIVE)
                offsets->begin += ltt_subbuffer_header_size();
        /*
         * Always begin_switch in FORCE_ACTIVE mode.
         * Test new buffer integrity.
         */
        subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
        reserve_commit_diff =
                (BUFFER_TRUNC(offsets->begin, buf->chan)
                 >> chan->n_subbufs_order)
                - (uatomic_read(&buf->commit_count[subbuf_index].cc_sb)
                   & chan->commit_count_mask);
        if (reserve_commit_diff == 0) {
                /* Next buffer not corrupted. */
                if (mode == FORCE_ACTIVE
                    && !chan->overwrite
                    && offsets->begin - uatomic_read(&buf->consumed)
                       >= chan->alloc_size) {
                        /*
                         * We do not overwrite non consumed buffers and we are
                         * full: ignore switch while tracing is active.
                         */
                        return -1;
                }
        } else {
                /*
                 * Next subbuffer corrupted. Force pushing reader even in normal
                 * mode.
                 */
        }
        offsets->end = offsets->begin;
        return 0;
}

/*
 * Force a sub-buffer switch for a per-cpu buffer. This operation is
 * completely reentrant: it can be called while tracing is active with
 * absolutely no lock held.
 */
void ltt_force_switch_lockless_slow(struct ust_buffer *buf,
                enum force_switch_mode mode)
{
        struct ust_channel *chan = buf->chan;
        struct ltt_reserve_switch_offsets offsets;
        u64 tsc;

        offsets.size = 0;

        DBG("Switching (forced) %s_%d", chan->channel_name, buf->cpu);
        /*
         * Perform retryable operations.
         */
        do {
                if (ltt_relay_try_switch_slow(mode, chan, buf,
                                &offsets, &tsc))
                        return;
        } while (uatomic_cmpxchg(&buf->offset, offsets.old,
                        offsets.end) != offsets.old);

        /*
         * Atomically update last_tsc. This update races against concurrent
         * atomic updates, but the race will always cause supplementary full TSC
         * events, never the opposite (missing a full TSC event when it would be
         * needed).
         */
        save_last_tsc(buf, tsc);

        /*
         * Push the reader if necessary.
         */
        if (mode == FORCE_ACTIVE) {
                ltt_reserve_push_reader(chan, buf, offsets.end - 1);
//ust//         ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.end - 1, chan));
        }

        /*
         * Switch old subbuffer if needed.
         */
        if (offsets.end_switch_old) {
//ust//         ltt_clear_noref_flag(rchan, buf, SUBBUF_INDEX(offsets.old - 1, rchan));
                ltt_reserve_switch_old_subbuf(chan, buf, &offsets, &tsc);
        }

        /*
         * Populate new subbuffer.
         */
        if (mode == FORCE_ACTIVE)
                ltt_reserve_switch_new_subbuf(chan, buf, &offsets, &tsc);
}
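
/*
 * The switch/reserve paths above share one lockless idiom: compute a
 * tentative new write position, then publish it with a single cmpxchg on
 * buf->offset, retrying from scratch if a concurrent writer moved the
 * offset in the meantime. A minimal sketch of that pattern, with
 * compute_next_offset() as a hypothetical stand-in for the try_* helpers:
 */
#if 0
static void reserve_example(struct ust_buffer *buf)
{
        long old, new;

        do {
                old = uatomic_read(&buf->offset);
                new = compute_next_offset(buf, old);    /* hypothetical helper */
        } while (uatomic_cmpxchg(&buf->offset, old, new) != old);
        /* From here on, [old, new) is reserved exclusively for this writer. */
}
#endif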

/*
 * Returns:
 *      0 if ok
 *      !0 if execution must be aborted.
 */
static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffer *buf,
                struct ltt_reserve_switch_offsets *offsets, size_t data_size,
                u64 *tsc, unsigned int *rflags, int largest_align)
{
        long reserve_commit_diff;

        offsets->begin = uatomic_read(&buf->offset);
        offsets->old = offsets->begin;
        offsets->begin_switch = 0;
        offsets->end_switch_current = 0;
        offsets->end_switch_old = 0;

        *tsc = trace_clock_read64();
        if (last_tsc_overflow(buf, *tsc))
                *rflags = LTT_RFLAG_ID_SIZE_TSC;

        if (unlikely(SUBBUF_OFFSET(offsets->begin, buf->chan) == 0)) {
                offsets->begin_switch = 1;              /* For offsets->begin */
        } else {
                offsets->size = ust_get_header_size(chan,
                                offsets->begin, data_size,
                                &offsets->before_hdr_pad, *rflags);
                offsets->size += ltt_align(offsets->begin + offsets->size,
                                           largest_align)
                                 + data_size;
                if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan) +
                              offsets->size) > buf->chan->subbuf_size)) {
                        offsets->end_switch_old = 1;    /* For offsets->old */
                        offsets->begin_switch = 1;      /* For offsets->begin */
                }
        }
        if (unlikely(offsets->begin_switch)) {
                long subbuf_index;

                /*
                 * We are typically not filling the previous buffer completely.
                 */
                if (likely(offsets->end_switch_old))
                        offsets->begin = SUBBUF_ALIGN(offsets->begin,
                                                      buf->chan);
                offsets->begin = offsets->begin + ltt_subbuffer_header_size();
                /* Test new buffer integrity */
                subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
                reserve_commit_diff =
                        (BUFFER_TRUNC(offsets->begin, buf->chan)
                         >> chan->n_subbufs_order)
                        - (uatomic_read(&buf->commit_count[subbuf_index].cc_sb)
                           & chan->commit_count_mask);
                if (likely(reserve_commit_diff == 0)) {
                        /* Next buffer not corrupted. */
                        if (unlikely(!chan->overwrite &&
                                     (SUBBUF_TRUNC(offsets->begin, buf->chan)
                                      - SUBBUF_TRUNC(uatomic_read(
                                                        &buf->consumed),
                                                     buf->chan))
                                     >= chan->alloc_size)) {
                                /*
                                 * We do not overwrite non consumed buffers
                                 * and we are full: event is lost.
                                 */
                                uatomic_inc(&buf->events_lost);
                                return -1;
                        } else {
                                /*
                                 * Next buffer not corrupted, we are either in
                                 * overwrite mode or the buffer is not full.
                                 * It's safe to write in this new subbuffer.
                                 */
                        }
                } else {
                        /*
                         * Next subbuffer corrupted. Drop event in normal and
                         * overwrite mode. Caused by either a writer OOPS or
                         * too many nested writes over a reserve/commit pair.
                         */
                        uatomic_inc(&buf->events_lost);
                        return -1;
                }
                offsets->size = ust_get_header_size(chan,
                                offsets->begin, data_size,
                                &offsets->before_hdr_pad, *rflags);
                offsets->size += ltt_align(offsets->begin + offsets->size,
                                           largest_align)
                                 + data_size;
                if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan)
                              + offsets->size) > buf->chan->subbuf_size)) {
                        /*
                         * Event too big for subbuffers, report error, don't
                         * complete the sub-buffer switch.
                         */
                        uatomic_inc(&buf->events_lost);
                        return -1;
                } else {
                        /*
                         * We just made a successful buffer switch and the event
                         * fits in the new subbuffer. Let's write.
                         */
                }
        } else {
                /*
                 * Event fits in the current buffer and we are not on a switch
                 * boundary. It's safe to write.
                 */
        }
        offsets->end = offsets->begin + offsets->size;

        if (unlikely((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0)) {
                /*
                 * The offset_end will fall at the very beginning of the next
                 * subbuffer.
                 */
                offsets->end_switch_current = 1;        /* For offsets->begin */
        }
        return 0;
}

/**
 * ltt_reserve_slot_lockless_slow - Atomic slot reservation in a buffer.
 * @chan: channel structure
 * @trace: the trace structure to log to.
 * @data_size: size of the variable-length data to log.
 * @largest_align: largest alignment within the data to log
 * @cpu: cpuid
 * @ret_buf: pointer to the buffer used for the reservation (out)
 * @slot_size: pointer to total size of the slot (out)
 * @buf_offset: pointer to reserved buffer offset (out)
 * @tsc: pointer to the tsc at the slot reservation (out)
 * @rflags: reservation flags (header specificity)
 *
 * Return: -ENOSPC if not enough space, else returns 0.
 * It will take care of sub-buffer switching.
 */
int ltt_reserve_slot_lockless_slow(struct ust_channel *chan,
                struct ust_trace *trace, size_t data_size,
                int largest_align, int cpu,
                struct ust_buffer **ret_buf,
                size_t *slot_size, long *buf_offset,
                u64 *tsc, unsigned int *rflags)
{
        struct ust_buffer *buf = *ret_buf = chan->buf[cpu];
        struct ltt_reserve_switch_offsets offsets;

        offsets.size = 0;

        do {
                if (unlikely(ltt_relay_try_reserve_slow(chan, buf, &offsets,
                                data_size, tsc, rflags, largest_align)))
                        return -ENOSPC;
        } while (unlikely(uatomic_cmpxchg(&buf->offset, offsets.old,
                        offsets.end) != offsets.old));

        /*
         * Atomically update last_tsc. This update races against concurrent
         * atomic updates, but the race will always cause supplementary full TSC
         * events, never the opposite (missing a full TSC event when it would be
         * needed).
         */
        save_last_tsc(buf, *tsc);

        /*
         * Push the reader if necessary.
         */
        ltt_reserve_push_reader(chan, buf, offsets.end - 1);

        /*
         * Clear noref flag for this subbuffer.
         */
//ust// ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.end - 1, chan));

        /*
         * Switch old subbuffer if needed.
         */
        if (unlikely(offsets.end_switch_old)) {
//ust//         ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.old - 1, chan));
                ltt_reserve_switch_old_subbuf(chan, buf, &offsets, tsc);
                DBG("Switching %s_%d", chan->channel_name, cpu);
        }

        /*
         * Populate new subbuffer.
         */
        if (unlikely(offsets.begin_switch))
                ltt_reserve_switch_new_subbuf(chan, buf, &offsets, tsc);

        if (unlikely(offsets.end_switch_current))
                ltt_reserve_end_switch_current(chan, buf, &offsets, tsc);

        *slot_size = offsets.size;
        *buf_offset = offsets.begin + offsets.before_hdr_pad;
        return 0;
}

static struct ltt_transport ust_relay_transport = {
        .name = "ustrelay",
        .ops = {
                .create_channel = create_channel,
                .finish_channel = finish_channel,
                .remove_channel = remove_channel,
                .wakeup_channel = ltt_relay_async_wakeup_chan,
        },
};

static char initialized = 0;

void __attribute__((constructor)) init_ustrelay_transport(void)
{
        if(!initialized) {
                ltt_transport_register(&ust_relay_transport);
                initialized = 1;
        }
}

static void __attribute__((destructor)) ust_buffers_exit(void)
{
        ltt_transport_unregister(&ust_relay_transport);
}

size_t ltt_write_event_header_slow(struct ust_channel *channel,
                struct ust_buffer *buf, long buf_offset,
                u16 eID, u32 event_size,
                u64 tsc, unsigned int rflags)
{
        struct ltt_event_header header;
        u16 small_size;

        switch (rflags) {
        case LTT_RFLAG_ID_SIZE_TSC:
                header.id_time = 29 << LTT_TSC_BITS;
                break;
        case LTT_RFLAG_ID_SIZE:
                header.id_time = 30 << LTT_TSC_BITS;
                break;
        case LTT_RFLAG_ID:
                header.id_time = 31 << LTT_TSC_BITS;
                break;
        default:
                WARN_ON_ONCE(1);
                header.id_time = 0;
                break;
        }

        header.id_time |= (u32)tsc & LTT_TSC_MASK;
        ust_buffers_write(buf, buf_offset, &header, sizeof(header));
        buf_offset += sizeof(header);

        switch (rflags) {
        case LTT_RFLAG_ID_SIZE_TSC:
                small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE);
                ust_buffers_write(buf, buf_offset,
                        &eID, sizeof(u16));
                buf_offset += sizeof(u16);
                ust_buffers_write(buf, buf_offset,
                        &small_size, sizeof(u16));
                buf_offset += sizeof(u16);
                if (small_size == LTT_MAX_SMALL_SIZE) {
                        ust_buffers_write(buf, buf_offset,
                                &event_size, sizeof(u32));
                        buf_offset += sizeof(u32);
                }
                buf_offset += ltt_align(buf_offset, sizeof(u64));
                ust_buffers_write(buf, buf_offset,
                        &tsc, sizeof(u64));
                buf_offset += sizeof(u64);
                break;
        case LTT_RFLAG_ID_SIZE:
                small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE);
                ust_buffers_write(buf, buf_offset,
                        &eID, sizeof(u16));
                buf_offset += sizeof(u16);
                ust_buffers_write(buf, buf_offset,
                        &small_size, sizeof(u16));
                buf_offset += sizeof(u16);
                if (small_size == LTT_MAX_SMALL_SIZE) {
                        ust_buffers_write(buf, buf_offset,
                                &event_size, sizeof(u32));
                        buf_offset += sizeof(u32);
                }
                break;
        case LTT_RFLAG_ID:
                ust_buffers_write(buf, buf_offset,
                        &eID, sizeof(u16));
                buf_offset += sizeof(u16);
                break;
        }

        return buf_offset;
}