libust/buffers.c
1 /*
2 * buffers.c
3 * LTTng userspace tracer buffering system
4 *
5 * Copyright (C) 2009 - Pierre-Marc Fournier (pierre-marc dot fournier at polymtl dot ca)
6 * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include <unistd.h>
24 #include <sys/mman.h>
25 #include <sys/ipc.h>
26 #include <sys/shm.h>
27 #include <fcntl.h>
28 #include <ust/kernelcompat.h>
29 #include <kcompat/kref.h>
30 #include <stdlib.h>
31 #include "buffers.h"
32 #include "channels.h"
33 #include "tracer.h"
34 #include "tracercore.h"
35 #include "usterr.h"
36
37 struct ltt_reserve_switch_offsets {
38 long begin, end, old;
39 long begin_switch, end_switch_current, end_switch_old;
40 size_t before_hdr_pad, size;
41 };
42
43
44 static DEFINE_MUTEX(ust_buffers_channels_mutex);
45 static LIST_HEAD(ust_buffers_channels);
46
47 static int get_n_cpus(void)
48 {
49 int result;
50 static int n_cpus = 0;
51
52 if(!n_cpus) {
53 /* On Linux, when some processors are offline
54 * _SC_NPROCESSORS_CONF counts the offline
55 * processors, whereas _SC_NPROCESSORS_ONLN
56 * does not. If we used _SC_NPROCESSORS_ONLN,
57 * getcpu() could return a value greater than
58 * this sysconf, in which case the arrays
59 * indexed by processor would overflow.
60 */
61 result = sysconf(_SC_NPROCESSORS_CONF);
62 if(result == -1) {
63 return -1;
64 }
65
66 n_cpus = result;
67 }
68
69 return n_cpus;
70 }
71
72 /* _ust_buffers_write()
73 *
74 * @buf: destination buffer
75 * @offset: offset in destination
76 * @src: source buffer
77 * @len: length of source
78 * @cpy: number of bytes already copied
79 */
80
81 void _ust_buffers_write(struct ust_buffer *buf, size_t offset,
82 const void *src, size_t len, ssize_t cpy)
83 {
84 do {
85 len -= cpy;
86 src += cpy;
87 offset += cpy;
88
89 WARN_ON(offset >= buf->buf_size);
90
91 cpy = min_t(size_t, len, buf->buf_size - offset);
92 ust_buffers_do_copy(buf->buf_data + offset, src, cpy);
93 } while (unlikely(len != cpy));
94 }
95
96 static int ust_buffers_init_buffer(struct ust_trace *trace,
97 struct ust_channel *ltt_chan,
98 struct ust_buffer *buf,
99 unsigned int n_subbufs);
100
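/*
 * Allocate the buffer memory as a System V shared memory segment keyed on the
 * current PID, rounding the requested size up to a page multiple.  The
 * segment is attached with shmat() and immediately marked IPC_RMID so that it
 * is destroyed once every user has detached, presumably so that an external
 * consumer process can attach to the same buffer.
 */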
101 static int ust_buffers_alloc_buf(struct ust_buffer *buf, size_t *size)
102 {
103 void *ptr;
104 int result;
105
106 *size = PAGE_ALIGN(*size);
107
108 result = buf->shmid = shmget(getpid(), *size, IPC_CREAT | IPC_EXCL | 0700);
109 if(result == -1 && errno == EINVAL) {
110 ERR("shmget() returned EINVAL; maybe /proc/sys/kernel/shmmax should be increased.");
111 return -1;
112 }
113 else if(result == -1) {
114 PERROR("shmget");
115 return -1;
116 }
117
118 /* FIXME: should have matching call to shmdt */
119 ptr = shmat(buf->shmid, NULL, 0);
120 if(ptr == (void *) -1) {
121 perror("shmat");
122 goto destroy_shmem;
123 }
124
125 /* Already mark the shared memory for destruction. This will occur only
126 * when all users have detached.
127 */
128 result = shmctl(buf->shmid, IPC_RMID, NULL);
129 if(result == -1) {
130 perror("shmctl");
131 return -1;
132 }
133
134 buf->buf_data = ptr;
135 buf->buf_size = *size;
136
137 return 0;
138
139 destroy_shmem:
140 result = shmctl(buf->shmid, IPC_RMID, NULL);
141 if(result == -1) {
142 perror("shmctl");
143 }
144
145 return -1;
146 }
147
148 int ust_buffers_create_buf(struct ust_channel *channel, int cpu)
149 {
150 int result;
151 struct ust_buffer *buf = channel->buf[cpu];
152
153 buf->cpu = cpu;
154 result = ust_buffers_alloc_buf(buf, &channel->alloc_size);
155 if(result)
156 return -1;
157
158 buf->chan = channel;
159 kref_get(&channel->kref);
160 return 0;
161 }
162
163 static void ust_buffers_destroy_channel(struct kref *kref)
164 {
165 struct ust_channel *chan = container_of(kref, struct ust_channel, kref);
166 free(chan);
167 }
168
169 static void ust_buffers_destroy_buf(struct ust_buffer *buf)
170 {
171 struct ust_channel *chan = buf->chan;
172 int result;
173
174 result = munmap(buf->buf_data, buf->buf_size);
175 if(result == -1) {
176 PERROR("munmap");
177 }
178
179 //ust// chan->buf[buf->cpu] = NULL;
180 free(buf);
181 kref_put(&chan->kref, ust_buffers_destroy_channel);
182 }
183
184 /* called from kref_put */
185 static void ust_buffers_remove_buf(struct kref *kref)
186 {
187 struct ust_buffer *buf = container_of(kref, struct ust_buffer, kref);
188 ust_buffers_destroy_buf(buf);
189 }
190
191 int ust_buffers_open_buf(struct ust_channel *chan, int cpu)
192 {
193 int result;
194
195 result = ust_buffers_create_buf(chan, cpu);
196 if (result == -1)
197 return -1;
198
199 kref_init(&chan->buf[cpu]->kref);
200
201 result = ust_buffers_init_buffer(chan->trace, chan, chan->buf[cpu], chan->subbuf_cnt);
202 if(result == -1)
203 return -1;
204
205 return 0;
206
207 /* FIXME: decrementally destroy on error? */
208 }
209
210 /**
211 * ust_buffers_close_buf - close a channel buffer
212 * @buf: buffer
213 */
214 static void ust_buffers_close_buf(struct ust_buffer *buf)
215 {
216 kref_put(&buf->kref, ust_buffers_remove_buf);
217 }
218
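/*
 * Open all per-cpu buffers of a channel.  Returns -1 if either the sub-buffer
 * size or the sub-buffer count is zero; warnings are emitted if they are not
 * powers of two or if the sub-buffer size is smaller than a page.  Buffers
 * already opened are torn down through the error path below.
 */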
219 int ust_buffers_channel_open(struct ust_channel *chan, size_t subbuf_size, size_t subbuf_cnt)
220 {
221 int i;
222 int result;
223
224 if(subbuf_size == 0 || subbuf_cnt == 0)
225 return -1;
226
227 /* Check that the subbuffer size is at least one page. */
228 WARN_ON_ONCE(subbuf_size < PAGE_SIZE);
229
230 /*
231 * Make sure the number of subbuffers and the subbuffer size are powers of 2.
232 */
233 WARN_ON_ONCE(hweight32(subbuf_size) != 1);
234 WARN_ON(hweight32(subbuf_cnt) != 1);
235
236 chan->version = UST_CHANNEL_VERSION;
237 chan->subbuf_cnt = subbuf_cnt;
238 chan->subbuf_size = subbuf_size;
239 chan->subbuf_size_order = get_count_order(subbuf_size);
240 chan->alloc_size = subbuf_size * subbuf_cnt;
241
242 kref_init(&chan->kref);
243
244 mutex_lock(&ust_buffers_channels_mutex);
245 for(i=0; i<chan->n_cpus; i++) {
246 result = ust_buffers_open_buf(chan, i);
247 if (result == -1)
248 goto error;
249 }
250 list_add(&chan->list, &ust_buffers_channels);
251 mutex_unlock(&ust_buffers_channels_mutex);
252
253 return 0;
254
255 /* Jump directly inside the loop to close the buffers that were already
256 * opened. */
257 for(; i>=0; i--) {
258 ust_buffers_close_buf(chan->buf[i]);
259 error:
260 do {} while(0);
261 }
262
263 kref_put(&chan->kref, ust_buffers_destroy_channel);
264 mutex_unlock(&ust_buffers_channels_mutex);
265 return -1;
266 }
267
268 void ust_buffers_channel_close(struct ust_channel *chan)
269 {
270 int i;
271 if(!chan)
272 return;
273
274 mutex_lock(&ust_buffers_channels_mutex);
275 for(i=0; i<chan->n_cpus; i++) {
276 /* FIXME: if we make it here, then all buffers were necessarily allocated. Moreover, we don't
277 * initialize them to NULL, so we cannot use this check. Should we? */
278 //ust// if (chan->buf[i])
279 ust_buffers_close_buf(chan->buf[i]);
280 }
281
282 list_del(&chan->list);
283 kref_put(&chan->kref, ust_buffers_destroy_channel);
284 mutex_unlock(&ust_buffers_channels_mutex);
285 }
286
287 /*
288 * -------
289 */
290
291 static void ust_buffers_destroy_buffer(struct ust_channel *ltt_chan, int cpu);
292
293 static void ltt_force_switch(struct ust_buffer *buf,
294 enum force_switch_mode mode);
295
296 /*
297 * Trace callbacks
298 */
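/*
 * Initialize the header of the sub-buffer at subbuf_idx: record the begin
 * timestamp, poison data_size and sb_size with 0xFFFFFFFF so that crashed
 * buffers can be recognized, and write the trace header.
 */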
299 static void ltt_buffer_begin(struct ust_buffer *buf,
300 u64 tsc, unsigned int subbuf_idx)
301 {
302 struct ust_channel *channel = buf->chan;
303 struct ltt_subbuffer_header *header =
304 (struct ltt_subbuffer_header *)
305 ust_buffers_offset_address(buf,
306 subbuf_idx * buf->chan->subbuf_size);
307
308 header->cycle_count_begin = tsc;
309 header->data_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
310 header->sb_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
311 /* FIXME: add memory barrier? */
312 ltt_write_trace_header(channel->trace, header);
313 }
314
315 /*
316 * offset is assumed to never be 0 here : never deliver a completely empty
317 * subbuffer. The lost size is between 0 and subbuf_size-1.
318 */
319 static notrace void ltt_buffer_end(struct ust_buffer *buf,
320 u64 tsc, unsigned int offset, unsigned int subbuf_idx)
321 {
322 struct ltt_subbuffer_header *header =
323 (struct ltt_subbuffer_header *)
324 ust_buffers_offset_address(buf,
325 subbuf_idx * buf->chan->subbuf_size);
326 u32 data_size = SUBBUF_OFFSET(offset - 1, buf->chan) + 1;
327
328 header->data_size = data_size;
329 header->sb_size = PAGE_ALIGN(data_size);
330 header->cycle_count_end = tsc;
331 header->events_lost = uatomic_read(&buf->events_lost);
332 header->subbuf_corrupt = uatomic_read(&buf->corrupted_subbuffers);
333 if(unlikely(header->events_lost > 0)) {
334 DBG("Some events (%d) were lost in %s_%d", header->events_lost, buf->chan->channel_name, buf->cpu);
335 }
336 }
337
338 /*
339 * This function should not be called from NMI interrupt context
340 */
341 static notrace void ltt_buf_unfull(struct ust_buffer *buf,
342 unsigned int subbuf_idx,
343 long offset)
344 {
345 }
346
347 /*
348 * Promote compiler barrier to a smp_mb().
349 * For the specific LTTng case, this IPI call should be removed if the
350 * architecture does not reorder writes. This should eventually be provided by
351 * a separate architecture-specific infrastructure.
352 */
353 //ust// static void remote_mb(void *info)
354 //ust// {
355 //ust// smp_mb();
356 //ust// }
357
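/*
 * Reserve the sub-buffer at the current consumed position for reading.
 * Fails with -EAGAIN if that sub-buffer is not fully committed yet or if the
 * writer is still writing into it.  On success, the consumed count to pass
 * back to ust_buffers_put_subbuf() is returned through *consumed.
 *
 * A consumer might use the get/put pair roughly like this (illustrative
 * sketch only; the actual consumer lives outside this file):
 *
 *	long consumed;
 *
 *	if (ust_buffers_get_subbuf(buf, &consumed) == 0) {
 *		... read sub-buffer SUBBUF_INDEX(consumed, buf->chan) ...
 *		ust_buffers_put_subbuf(buf, consumed);
 *	}
 */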
358 int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed)
359 {
360 struct ust_channel *channel = buf->chan;
361 long consumed_old, consumed_idx, commit_count, write_offset;
362 //ust// int retval;
363
364 consumed_old = uatomic_read(&buf->consumed);
365 consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
366 commit_count = uatomic_read(&buf->commit_count[consumed_idx].cc_sb);
367 /*
368 * Make sure we read the commit count before reading the buffer
369 * data and the write offset. Correct consumed offset ordering
370 * wrt commit count is ensured by the use of cmpxchg to update
371 * the consumed offset.
372 * smp_call_function_single can fail if the remote CPU is offline,
373 * this is OK because then there is no wmb to execute there.
374 * If our thread is executing on the same CPU as the one the buffer
375 * belongs to, we don't have to synchronize at all. If we are
376 * migrated, the scheduler will take care of the memory barriers.
377 * Normally, smp_call_function_single() should ensure program order when
378 * executing the remote function, which implies that it surrounds the
379 * function execution with :
380 * smp_mb()
381 * send IPI
382 * csd_lock_wait
383 * recv IPI
384 * smp_mb()
385 * exec. function
386 * smp_mb()
387 * csd unlock
388 * smp_mb()
389 *
390 * However, smp_call_function_single() does not seem to clearly execute
391 * such barriers. It depends on spinlock semantics to provide the barrier
392 * before executing the IPI and, when busy-looping, csd_lock_wait only
393 * executes smp_mb() when it has to wait for the other CPU.
394 *
395 * I don't trust this code. Therefore, let's add the smp_mb() sequence
396 * required ourselves, even if duplicated. It has no performance impact
397 * anyway.
398 *
399 * smp_mb() is needed because smp_rmb() and smp_wmb() only order read vs
400 * read and write vs write. They do not ensure core synchronization. We
401 * really have to ensure total order between the 3 barriers running on
402 * the 2 CPUs.
403 */
404 //ust// #ifdef LTT_NO_IPI_BARRIER
405 /*
406 * Local rmb to match the remote wmb to read the commit count before the
407 * buffer data and the write offset.
408 */
409 smp_rmb();
410 //ust// #else
411 //ust// if (raw_smp_processor_id() != buf->cpu) {
412 //ust// smp_mb(); /* Total order with IPI handler smp_mb() */
413 //ust// smp_call_function_single(buf->cpu, remote_mb, NULL, 1);
414 //ust// smp_mb(); /* Total order with IPI handler smp_mb() */
415 //ust// }
416 //ust// #endif
417
418 write_offset = uatomic_read(&buf->offset);
419 /*
420 * Check that the subbuffer we are trying to consume has already
421 * been fully committed.
422 */
423 if (((commit_count - buf->chan->subbuf_size)
424 & channel->commit_count_mask)
425 - (BUFFER_TRUNC(consumed_old, buf->chan)
426 >> channel->n_subbufs_order)
427 != 0) {
428 return -EAGAIN;
429 }
430 /*
431 * Check that we are not about to read the same subbuffer in
432 * which the writer head is.
433 */
434 if ((SUBBUF_TRUNC(write_offset, buf->chan)
435 - SUBBUF_TRUNC(consumed_old, buf->chan))
436 == 0) {
437 return -EAGAIN;
438 }
439
440 /* FIXME: is this ok to disable the reading feature? */
441 //ust// retval = update_read_sb_index(buf, consumed_idx);
442 //ust// if (retval)
443 //ust// return retval;
444
445 *consumed = consumed_old;
446
447 return 0;
448 }
449
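/*
 * Release a sub-buffer previously obtained with ust_buffers_get_subbuf() and
 * move the consumed counter to the next sub-buffer with a cmpxchg.  Returns
 * -EIO if the writer pushed the reader in the meantime, in which case the
 * sub-buffer that was just read must be considered corrupted.
 */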
450 int ust_buffers_put_subbuf(struct ust_buffer *buf, unsigned long uconsumed_old)
451 {
452 long consumed_new, consumed_old;
453
454 consumed_old = uatomic_read(&buf->consumed);
455 consumed_old = consumed_old & (~0xFFFFFFFFL);
456 consumed_old = consumed_old | uconsumed_old;
457 consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
458
459 //ust// spin_lock(&ltt_buf->full_lock);
460 if (uatomic_cmpxchg(&buf->consumed, consumed_old,
461 consumed_new)
462 != consumed_old) {
463 /* We have been pushed by the writer : the last
464 * buffer read _is_ corrupted! It can also
465 * happen if this is a buffer we never got. */
466 //ust// spin_unlock(&ltt_buf->full_lock);
467 return -EIO;
468 } else {
469 /* tell the client that buffer is now unfull */
470 int index;
471 long data;
472 index = SUBBUF_INDEX(consumed_old, buf->chan);
473 data = BUFFER_OFFSET(consumed_old, buf->chan);
474 ltt_buf_unfull(buf, index, data);
475 //ust// spin_unlock(&ltt_buf->full_lock);
476 }
477 return 0;
478 }
479
480 static void ltt_relay_print_subbuffer_errors(
481 struct ust_channel *channel,
482 long cons_off, int cpu)
483 {
484 struct ust_buffer *ltt_buf = channel->buf[cpu];
485 long cons_idx, commit_count, commit_count_sb, write_offset;
486
487 cons_idx = SUBBUF_INDEX(cons_off, channel);
488 commit_count = uatomic_read(&ltt_buf->commit_count[cons_idx].cc);
489 commit_count_sb = uatomic_read(&ltt_buf->commit_count[cons_idx].cc_sb);
490
491 /*
492 * No need to order commit_count and write_offset reads because we
493 * execute after trace is stopped when there are no readers left.
494 */
495 write_offset = uatomic_read(&ltt_buf->offset);
496 WARN( "LTT : unread channel %s offset is %ld "
497 "and cons_off : %ld (cpu %d)\n",
498 channel->channel_name, write_offset, cons_off, cpu);
499 /* Check each sub-buffer for an unfilled commit count */
500 if (((commit_count - channel->subbuf_size) & channel->commit_count_mask)
501 - (BUFFER_TRUNC(cons_off, channel) >> channel->n_subbufs_order) != 0) {
502 ERR("LTT : %s : subbuffer %lu has non filled "
503 "commit count [cc, cc_sb] [%lu,%lu].\n",
504 channel->channel_name, cons_idx, commit_count, commit_count_sb);
505 }
506 ERR("LTT : %s : commit count : %lu, subbuf size %zd\n",
507 channel->channel_name, commit_count,
508 channel->subbuf_size);
509 }
510
511 static void ltt_relay_print_errors(struct ust_trace *trace,
512 struct ust_channel *channel, int cpu)
513 {
514 struct ust_buffer *ltt_buf;
515 long cons_off;
516
517 /*
518 * Can be called in the error path of allocation when
519 * trans_channel_data is not yet set.
520 */
521 if (!channel)
522 return;
523 ltt_buf = channel->buf[cpu];
524 //ust// for (cons_off = 0; cons_off < rchan->alloc_size;
525 //ust// cons_off = SUBBUF_ALIGN(cons_off, rchan))
526 //ust// ust_buffers_print_written(ltt_chan, cons_off, cpu);
527 for (cons_off = uatomic_read(&ltt_buf->consumed);
528 (SUBBUF_TRUNC(uatomic_read(&ltt_buf->offset),
529 channel)
530 - cons_off) > 0;
531 cons_off = SUBBUF_ALIGN(cons_off, channel))
532 ltt_relay_print_subbuffer_errors(channel, cons_off, cpu);
533 }
534
535 static void ltt_relay_print_buffer_errors(struct ust_channel *channel, int cpu)
536 {
537 struct ust_trace *trace = channel->trace;
538 struct ust_buffer *ltt_buf = channel->buf[cpu];
539
540 if (uatomic_read(&ltt_buf->events_lost))
541 ERR("channel %s: %ld events lost (cpu %d)",
542 channel->channel_name,
543 uatomic_read(&ltt_buf->events_lost), cpu);
544 if (uatomic_read(&ltt_buf->corrupted_subbuffers))
545 ERR("channel %s : %ld corrupted subbuffers (cpu %d)",
546 channel->channel_name,
547 uatomic_read(&ltt_buf->corrupted_subbuffers), cpu);
548
549 ltt_relay_print_errors(trace, channel, cpu);
550 }
551
552 static void ltt_relay_release_channel(struct kref *kref)
553 {
554 struct ust_channel *ltt_chan = container_of(kref,
555 struct ust_channel, kref);
556 free(ltt_chan->buf);
557 }
558
559 /*
560 * Create ltt buffer.
561 */
562 //ust// static int ltt_relay_create_buffer(struct ust_trace *trace,
563 //ust// struct ltt_channel_struct *ltt_chan, struct rchan_buf *buf,
564 //ust// unsigned int cpu, unsigned int n_subbufs)
565 //ust// {
566 //ust// struct ltt_channel_buf_struct *ltt_buf =
567 //ust// percpu_ptr(ltt_chan->buf, cpu);
568 //ust// unsigned int j;
569 //ust//
570 //ust// ltt_buf->commit_count =
571 //ust// kzalloc_node(sizeof(ltt_buf->commit_count) * n_subbufs,
572 //ust// GFP_KERNEL, cpu_to_node(cpu));
573 //ust// if (!ltt_buf->commit_count)
574 //ust// return -ENOMEM;
575 //ust// kref_get(&trace->kref);
576 //ust// kref_get(&trace->ltt_transport_kref);
577 //ust// kref_get(&ltt_chan->kref);
578 //ust// uatomic_set(&ltt_buf->offset, ltt_subbuffer_header_size());
579 //ust// uatomic_set(&ltt_buf->consumed, 0);
580 //ust// uatomic_set(&ltt_buf->active_readers, 0);
581 //ust// for (j = 0; j < n_subbufs; j++)
582 //ust// uatomic_set(&ltt_buf->commit_count[j], 0);
583 //ust// init_waitqueue_head(&ltt_buf->write_wait);
584 //ust// uatomic_set(&ltt_buf->wakeup_readers, 0);
585 //ust// spin_lock_init(&ltt_buf->full_lock);
586 //ust//
587 //ust// ltt_buffer_begin_callback(buf, trace->start_tsc, 0);
588 //ust// /* atomic_add made on local variable on data that belongs to
589 //ust// * various CPUs : ok because tracing not started (for this cpu). */
590 //ust// uatomic_add(&ltt_buf->commit_count[0], ltt_subbuffer_header_size());
591 //ust//
592 //ust// uatomic_set(&ltt_buf->events_lost, 0);
593 //ust// uatomic_set(&ltt_buf->corrupted_subbuffers, 0);
594 //ust//
595 //ust// return 0;
596 //ust// }
597
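/*
 * Initialize a freshly allocated buffer: allocate the per-sub-buffer commit
 * counters, take references on the trace and the channel, write the header of
 * the first sub-buffer, and create the "data ready" pipe used to signal the
 * consumer that data is available.
 */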
598 static int ust_buffers_init_buffer(struct ust_trace *trace,
599 struct ust_channel *ltt_chan, struct ust_buffer *buf,
600 unsigned int n_subbufs)
601 {
602 unsigned int j;
603 int fds[2];
604 int result;
605
606 buf->commit_count =
607 zmalloc(sizeof(*buf->commit_count) * n_subbufs);
608 if (!buf->commit_count)
609 return -ENOMEM;
610 kref_get(&trace->kref);
611 kref_get(&trace->ltt_transport_kref);
612 kref_get(&ltt_chan->kref);
613 uatomic_set(&buf->offset, ltt_subbuffer_header_size());
614 uatomic_set(&buf->consumed, 0);
615 uatomic_set(&buf->active_readers, 0);
616 for (j = 0; j < n_subbufs; j++) {
617 uatomic_set(&buf->commit_count[j].cc, 0);
618 uatomic_set(&buf->commit_count[j].cc_sb, 0);
619 }
620 //ust// init_waitqueue_head(&buf->write_wait);
621 //ust// uatomic_set(&buf->wakeup_readers, 0);
622 //ust// spin_lock_init(&buf->full_lock);
623
624 ltt_buffer_begin(buf, trace->start_tsc, 0);
625
626 uatomic_add(&buf->commit_count[0].cc, ltt_subbuffer_header_size());
627
628 uatomic_set(&buf->events_lost, 0);
629 uatomic_set(&buf->corrupted_subbuffers, 0);
630
631 result = pipe(fds);
632 if(result == -1) {
633 PERROR("pipe");
634 return -1;
635 }
636 buf->data_ready_fd_read = fds[0];
637 buf->data_ready_fd_write = fds[1];
638
639 /* FIXME: do we actually need this? */
640 result = fcntl(fds[0], F_SETFL, O_NONBLOCK);
641 if(result == -1) {
642 PERROR("fcntl");
643 }
644
645 //ust// buf->commit_seq = malloc(sizeof(buf->commit_seq) * n_subbufs);
646 //ust// if(!ltt_buf->commit_seq) {
647 //ust// return -1;
648 //ust// }
649 memset(buf->commit_seq, 0, sizeof(buf->commit_seq[0]) * n_subbufs);
650
651 /* FIXME: decrementally destroy on error */
652
653 return 0;
654 }
655
656 /* FIXME: use this function */
657 static void ust_buffers_destroy_buffer(struct ust_channel *ltt_chan, int cpu)
658 {
659 struct ust_trace *trace = ltt_chan->trace;
660 struct ust_buffer *ltt_buf = ltt_chan->buf[cpu];
661
662 kref_put(&ltt_chan->trace->ltt_transport_kref,
663 ltt_release_transport);
664 ltt_relay_print_buffer_errors(ltt_chan, cpu);
665 //ust// free(ltt_buf->commit_seq);
666 free(ltt_buf->commit_count);
667 ltt_buf->commit_count = NULL;
668 kref_put(&ltt_chan->kref, ltt_relay_release_channel);
669 kref_put(&trace->kref, ltt_release_trace);
670 //ust// wake_up_interruptible(&trace->kref_wq);
671 }
672
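/*
 * Allocate one page-sized System V shared memory segment per cpu to hold the
 * struct ust_buffer itself (chan->buf[i]).  As with the data buffers, each
 * segment is marked IPC_RMID right away so that it disappears once the last
 * user detaches.
 */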
673 static int ust_buffers_alloc_channel_buf_structs(struct ust_channel *chan)
674 {
675 void *ptr;
676 int result;
677 size_t size;
678 int i;
679
680 size = PAGE_ALIGN(1);
681
682 for(i=0; i<chan->n_cpus; i++) {
683
684 result = chan->buf_struct_shmids[i] = shmget(getpid(), size, IPC_CREAT | IPC_EXCL | 0700);
685 if(result == -1) {
686 PERROR("shmget");
687 goto destroy_previous;
688 }
689
690 /* FIXME: should have matching call to shmdt */
691 ptr = shmat(chan->buf_struct_shmids[i], NULL, 0);
692 if(ptr == (void *) -1) {
693 perror("shmat");
694 goto destroy_shm;
695 }
696
697 /* Already mark the shared memory for destruction. This will occur only
698 * when all users have detached.
699 */
700 result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL);
701 if(result == -1) {
702 perror("shmctl");
703 goto destroy_previous;
704 }
705
706 chan->buf[i] = ptr;
707 }
708
709 return 0;
710
711 /* Jumping inside this loop occurs from within the other loop above, with i as
712 * the counter, so it deallocates the structures for cpus current_i down to
713 * zero. */
714 for(; i>=0; i--) {
715 destroy_shm:
716 result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL);
717 if(result == -1) {
718 perror("shmctl");
719 }
720
721 destroy_previous:
722 continue;
723 }
724
725 return -1;
726 }
727
728 /*
729 * Create channel.
730 */
731 static int ust_buffers_create_channel(const char *trace_name, struct ust_trace *trace,
732 const char *channel_name, struct ust_channel *ltt_chan,
733 unsigned int subbuf_size, unsigned int n_subbufs, int overwrite)
734 {
735 int result;
736
737 kref_init(&ltt_chan->kref);
738
739 ltt_chan->trace = trace;
740 ltt_chan->overwrite = overwrite;
741 ltt_chan->n_subbufs_order = get_count_order(n_subbufs);
742 ltt_chan->commit_count_mask = (~0UL >> ltt_chan->n_subbufs_order);
743 ltt_chan->n_cpus = get_n_cpus();
744 //ust// ltt_chan->buf = percpu_alloc_mask(sizeof(struct ltt_channel_buf_struct), GFP_KERNEL, cpu_possible_map);
745 ltt_chan->buf = (void *) malloc(ltt_chan->n_cpus * sizeof(void *));
746 if(ltt_chan->buf == NULL) {
747 goto error;
748 }
749 ltt_chan->buf_struct_shmids = (int *) malloc(ltt_chan->n_cpus * sizeof(int));
750 if(ltt_chan->buf_struct_shmids == NULL)
751 goto free_buf;
752
753 result = ust_buffers_alloc_channel_buf_structs(ltt_chan);
754 if(result != 0) {
755 goto free_buf_struct_shmids;
756 }
757
758 result = ust_buffers_channel_open(ltt_chan, subbuf_size, n_subbufs);
759 if (result != 0) {
760 ERR("Cannot open channel for trace %s", trace_name);
761 goto unalloc_buf_structs;
762 }
763
764 return 0;
765
766 unalloc_buf_structs:
767 /* FIXME: put a call here to deallocate the buf structs! */
768
769 free_buf_struct_shmids:
770 free(ltt_chan->buf_struct_shmids);
771
772 free_buf:
773 free(ltt_chan->buf);
774
775 error:
776 return -1;
777 }
778
779 /*
780 * LTTng channel flush function.
781 *
782 * Must be called when no tracing is active in the channel, because of
783 * accesses across CPUs.
784 */
785 static notrace void ltt_relay_buffer_flush(struct ust_buffer *buf)
786 {
787 int result;
788
789 //ust// buf->finalized = 1;
790 ltt_force_switch(buf, FORCE_FLUSH);
791
792 result = write(buf->data_ready_fd_write, "1", 1);
793 if(result == -1) {
794 PERROR("write (in ltt_relay_buffer_flush)");
795 ERR("this should never happen!");
796 }
797 }
798
799 static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel)
800 {
801 //ust// unsigned int i;
802 //ust// struct rchan *rchan = ltt_channel->trans_channel_data;
803 //ust//
804 //ust// for_each_possible_cpu(i) {
805 //ust// struct ltt_channel_buf_struct *ltt_buf =
806 //ust// percpu_ptr(ltt_channel->buf, i);
807 //ust//
808 //ust// if (uatomic_read(&ltt_buf->wakeup_readers) == 1) {
809 //ust// uatomic_set(&ltt_buf->wakeup_readers, 0);
810 //ust// wake_up_interruptible(&rchan->buf[i]->read_wait);
811 //ust// }
812 //ust// }
813 }
814
815 static void ltt_relay_finish_buffer(struct ust_channel *channel, unsigned int cpu)
816 {
817 // int result;
818
819 if (channel->buf[cpu]) {
820 struct ust_buffer *buf = channel->buf[cpu];
821 ltt_relay_buffer_flush(buf);
822 //ust// ltt_relay_wake_writers(ltt_buf);
823 /* closing the pipe tells the consumer the buffer is finished */
824
825 //result = write(ltt_buf->data_ready_fd_write, "D", 1);
826 //if(result == -1) {
827 // PERROR("write (in ltt_relay_finish_buffer)");
828 // ERR("this should never happen!");
829 //}
830 close(buf->data_ready_fd_write);
831 }
832 }
833
834
835 static void ltt_relay_finish_channel(struct ust_channel *channel)
836 {
837 unsigned int i;
838
839 for(i=0; i<channel->n_cpus; i++) {
840 ltt_relay_finish_buffer(channel, i);
841 }
842 }
843
844 static void ltt_relay_remove_channel(struct ust_channel *channel)
845 {
846 ust_buffers_channel_close(channel);
847 kref_put(&channel->kref, ltt_relay_release_channel);
848 }
849
850 /*
851 * ltt_reserve_switch_old_subbuf: switch old subbuffer
852 *
853 * Concurrency safe because we are the last and only thread to alter this
854 * sub-buffer. As long as it is not delivered and read, no other thread can
855 * alter the offset, alter the reserve_count or call the
856 * client_buffer_end_callback on this sub-buffer.
857 *
858 * The only remaining threads could be the ones with pending commits. They will
859 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
860 * We detect corrupted subbuffers with commit and reserve counts. We keep a
861 * corrupted sub-buffers count and push the readers across these sub-buffers.
862 *
863 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
864 * switches in, finding out it's corrupted. The result will be that the old
865 * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
866 * will be declared corrupted too because of the commit count adjustment.
867 *
868 * Note : offset_old should never be 0 here.
869 */
870 static void ltt_reserve_switch_old_subbuf(
871 struct ust_channel *chan, struct ust_buffer *buf,
872 struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
873 {
874 long oldidx = SUBBUF_INDEX(offsets->old - 1, chan);
875 long commit_count, padding_size;
876
877 padding_size = chan->subbuf_size
878 - (SUBBUF_OFFSET(offsets->old - 1, chan) + 1);
879 ltt_buffer_end(buf, *tsc, offsets->old, oldidx);
880
881 /*
882 * Must write slot data before incrementing commit count.
883 * This compiler barrier is upgraded into a smp_wmb() by the IPI
884 * sent by get_subbuf() when it does its smp_rmb().
885 */
886 barrier();
887 uatomic_add(&buf->commit_count[oldidx].cc, padding_size);
888 commit_count = uatomic_read(&buf->commit_count[oldidx].cc);
889 ltt_check_deliver(chan, buf, offsets->old - 1, commit_count, oldidx);
890 ltt_write_commit_counter(chan, buf, oldidx,
891 offsets->old, commit_count, padding_size);
892 }
893
894 /*
895 * ltt_reserve_switch_new_subbuf: Populate new subbuffer.
896 *
897 * This code can be executed unordered: writers may already have written to the
898 * sub-buffer before this code gets executed, caution. The commit makes sure
899 * that this code is executed before the delivery of this sub-buffer.
900 */
901 static void ltt_reserve_switch_new_subbuf(
902 struct ust_channel *chan, struct ust_buffer *buf,
903 struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
904 {
905 long beginidx = SUBBUF_INDEX(offsets->begin, chan);
906 long commit_count;
907
908 ltt_buffer_begin(buf, *tsc, beginidx);
909
910 /*
911 * Must write slot data before incrementing commit count.
912 * This compiler barrier is upgraded into a smp_wmb() by the IPI
913 * sent by get_subbuf() when it does its smp_rmb().
914 */
915 barrier();
916 uatomic_add(&buf->commit_count[beginidx].cc, ltt_subbuffer_header_size());
917 commit_count = uatomic_read(&buf->commit_count[beginidx].cc);
918 /* Check if the written buffer has to be delivered */
919 ltt_check_deliver(chan, buf, offsets->begin, commit_count, beginidx);
920 ltt_write_commit_counter(chan, buf, beginidx,
921 offsets->begin, commit_count, ltt_subbuffer_header_size());
922 }
923
924 /*
925 * ltt_reserve_end_switch_current: finish switching current subbuffer
926 *
927 * Concurrency safe because we are the last and only thread to alter this
928 * sub-buffer. As long as it is not delivered and read, no other thread can
929 * alter the offset, alter the reserve_count or call the
930 * client_buffer_end_callback on this sub-buffer.
931 *
932 * The only remaining threads could be the ones with pending commits. They will
933 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
934 * We detect corrupted subbuffers with commit and reserve counts. We keep a
935 * corrupted sub-buffers count and push the readers across these sub-buffers.
936 *
937 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
938 * switches in, finding out it's corrupted. The result will be that the old
939 * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
940 * will be declared corrupted too because of the commit count adjustment.
941 */
942 static void ltt_reserve_end_switch_current(
943 struct ust_channel *chan,
944 struct ust_buffer *buf,
945 struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
946 {
947 long endidx = SUBBUF_INDEX(offsets->end - 1, chan);
948 long commit_count, padding_size;
949
950 padding_size = chan->subbuf_size
951 - (SUBBUF_OFFSET(offsets->end - 1, chan) + 1);
952
953 ltt_buffer_end(buf, *tsc, offsets->end, endidx);
954
955 /*
956 * Must write slot data before incrementing commit count.
957 * This compiler barrier is upgraded into a smp_wmb() by the IPI
958 * sent by get_subbuf() when it does its smp_rmb().
959 */
960 barrier();
961 uatomic_add(&buf->commit_count[endidx].cc, padding_size);
962 commit_count = uatomic_read(&buf->commit_count[endidx].cc);
963 ltt_check_deliver(chan, buf,
964 offsets->end - 1, commit_count, endidx);
965 ltt_write_commit_counter(chan, buf, endidx,
966 offsets->end, commit_count, padding_size);
967 }
968
969 /*
970 * Returns :
971 * 0 if ok
972 * !0 if execution must be aborted.
973 */
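/*
 * Compute the offsets for a forced sub-buffer switch.  Aborts if the current
 * sub-buffer is empty (nothing to switch) or, in FORCE_ACTIVE mode on a
 * non-overwrite channel, if the buffer is full of unconsumed data.
 */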
974 static int ltt_relay_try_switch_slow(
975 enum force_switch_mode mode,
976 struct ust_channel *chan,
977 struct ust_buffer *buf,
978 struct ltt_reserve_switch_offsets *offsets,
979 u64 *tsc)
980 {
981 long subbuf_index;
982 long reserve_commit_diff;
983
984 offsets->begin = uatomic_read(&buf->offset);
985 offsets->old = offsets->begin;
986 offsets->begin_switch = 0;
987 offsets->end_switch_old = 0;
988
989 *tsc = trace_clock_read64();
990
991 if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
992 offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
993 offsets->end_switch_old = 1;
994 } else {
995 /* we do not have to switch : buffer is empty */
996 return -1;
997 }
998 if (mode == FORCE_ACTIVE)
999 offsets->begin += ltt_subbuffer_header_size();
1000 /*
1001 * Always begin_switch in FORCE_ACTIVE mode.
1002 * Test new buffer integrity
1003 */
1004 subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
1005 reserve_commit_diff =
1006 (BUFFER_TRUNC(offsets->begin, buf->chan)
1007 >> chan->n_subbufs_order)
1008 - (uatomic_read(&buf->commit_count[subbuf_index].cc_sb)
1009 & chan->commit_count_mask);
1010 if (reserve_commit_diff == 0) {
1011 /* Next buffer not corrupted. */
1012 if (mode == FORCE_ACTIVE
1013 && !chan->overwrite
1014 && offsets->begin - uatomic_read(&buf->consumed)
1015 >= chan->alloc_size) {
1016 /*
1017 * We do not overwrite non consumed buffers and we are
1018 * full : ignore switch while tracing is active.
1019 */
1020 return -1;
1021 }
1022 } else {
1023 /*
1024 * Next subbuffer corrupted. Force pushing reader even in normal
1025 * mode
1026 */
1027 }
1028 offsets->end = offsets->begin;
1029 return 0;
1030 }
1031
1032 /*
1033 * Force a sub-buffer switch for a per-cpu buffer. This operation is
1034 * completely reentrant : can be called while tracing is active with
1035 * absolutely no lock held.
1036 */
1037 void ltt_force_switch_lockless_slow(struct ust_buffer *buf,
1038 enum force_switch_mode mode)
1039 {
1040 struct ust_channel *chan = buf->chan;
1041 struct ltt_reserve_switch_offsets offsets;
1042 u64 tsc;
1043
1044 offsets.size = 0;
1045
1046 DBG("Switching (forced) %s_%d", chan->channel_name, buf->cpu);
1047 /*
1048 * Perform retryable operations.
1049 */
1050 do {
1051 if (ltt_relay_try_switch_slow(mode, chan, buf,
1052 &offsets, &tsc))
1053 return;
1054 } while (uatomic_cmpxchg(&buf->offset, offsets.old,
1055 offsets.end) != offsets.old);
1056
1057 /*
1058 * Atomically update last_tsc. This update races against concurrent
1059 * atomic updates, but the race will always cause supplementary full TSC
1060 * events, never the opposite (missing a full TSC event when it would be
1061 * needed).
1062 */
1063 save_last_tsc(buf, tsc);
1064
1065 /*
1066 * Push the reader if necessary
1067 */
1068 if (mode == FORCE_ACTIVE) {
1069 ltt_reserve_push_reader(chan, buf, offsets.end - 1);
1070 //ust// ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.end - 1, chan));
1071 }
1072
1073 /*
1074 * Switch old subbuffer if needed.
1075 */
1076 if (offsets.end_switch_old) {
1077 //ust// ltt_clear_noref_flag(rchan, buf, SUBBUF_INDEX(offsets.old - 1, rchan));
1078 ltt_reserve_switch_old_subbuf(chan, buf, &offsets, &tsc);
1079 }
1080
1081 /*
1082 * Populate new subbuffer.
1083 */
1084 if (mode == FORCE_ACTIVE)
1085 ltt_reserve_switch_new_subbuf(chan, buf, &offsets, &tsc);
1086 }
1087
1088 /*
1089 * Returns :
1090 * 0 if ok
1091 * !0 if execution must be aborted.
1092 */
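/*
 * Compute the offsets for reserving a slot of data_size bytes: detect whether
 * the record fits in the current sub-buffer, whether an old sub-buffer must be
 * switched out and/or a new one populated, and whether the event must be
 * dropped (buffer full in non-overwrite mode, corrupted next sub-buffer, or
 * event larger than a sub-buffer).
 */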
1093 static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffer *buf,
1094 struct ltt_reserve_switch_offsets *offsets, size_t data_size,
1095 u64 *tsc, unsigned int *rflags, int largest_align)
1096 {
1097 long reserve_commit_diff;
1098
1099 offsets->begin = uatomic_read(&buf->offset);
1100 offsets->old = offsets->begin;
1101 offsets->begin_switch = 0;
1102 offsets->end_switch_current = 0;
1103 offsets->end_switch_old = 0;
1104
1105 *tsc = trace_clock_read64();
1106 if (last_tsc_overflow(buf, *tsc))
1107 *rflags = LTT_RFLAG_ID_SIZE_TSC;
1108
1109 if (unlikely(SUBBUF_OFFSET(offsets->begin, buf->chan) == 0)) {
1110 offsets->begin_switch = 1; /* For offsets->begin */
1111 } else {
1112 offsets->size = ust_get_header_size(chan,
1113 offsets->begin, data_size,
1114 &offsets->before_hdr_pad, *rflags);
1115 offsets->size += ltt_align(offsets->begin + offsets->size,
1116 largest_align)
1117 + data_size;
1118 if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan) +
1119 offsets->size) > buf->chan->subbuf_size)) {
1120 offsets->end_switch_old = 1; /* For offsets->old */
1121 offsets->begin_switch = 1; /* For offsets->begin */
1122 }
1123 }
1124 if (unlikely(offsets->begin_switch)) {
1125 long subbuf_index;
1126
1127 /*
1128 * We are typically not filling the previous buffer completely.
1129 */
1130 if (likely(offsets->end_switch_old))
1131 offsets->begin = SUBBUF_ALIGN(offsets->begin,
1132 buf->chan);
1133 offsets->begin = offsets->begin + ltt_subbuffer_header_size();
1134 /* Test new buffer integrity */
1135 subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
1136 reserve_commit_diff =
1137 (BUFFER_TRUNC(offsets->begin, buf->chan)
1138 >> chan->n_subbufs_order)
1139 - (uatomic_read(&buf->commit_count[subbuf_index].cc_sb)
1140 & chan->commit_count_mask);
1141 if (likely(reserve_commit_diff == 0)) {
1142 /* Next buffer not corrupted. */
1143 if (unlikely(!chan->overwrite &&
1144 (SUBBUF_TRUNC(offsets->begin, buf->chan)
1145 - SUBBUF_TRUNC(uatomic_read(
1146 &buf->consumed),
1147 buf->chan))
1148 >= chan->alloc_size)) {
1149 /*
1150 * We do not overwrite non consumed buffers
1151 * and we are full : event is lost.
1152 */
1153 uatomic_inc(&buf->events_lost);
1154 return -1;
1155 } else {
1156 /*
1157 * next buffer not corrupted, we are either in
1158 * overwrite mode or the buffer is not full.
1159 * It's safe to write in this new subbuffer.
1160 */
1161 }
1162 } else {
1163 /*
1164 * Next subbuffer corrupted. Drop event in normal and
1165 * overwrite mode. Caused by either a writer OOPS or
1166 * too many nested writes over a reserve/commit pair.
1167 */
1168 uatomic_inc(&buf->events_lost);
1169 return -1;
1170 }
1171 offsets->size = ust_get_header_size(chan,
1172 offsets->begin, data_size,
1173 &offsets->before_hdr_pad, *rflags);
1174 offsets->size += ltt_align(offsets->begin + offsets->size,
1175 largest_align)
1176 + data_size;
1177 if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan)
1178 + offsets->size) > buf->chan->subbuf_size)) {
1179 /*
1180 * Event too big for subbuffers, report error, don't
1181 * complete the sub-buffer switch.
1182 */
1183 uatomic_inc(&buf->events_lost);
1184 return -1;
1185 } else {
1186 /*
1187 * We just made a successful buffer switch and the event
1188 * fits in the new subbuffer. Let's write.
1189 */
1190 }
1191 } else {
1192 /*
1193 * Event fits in the current buffer and we are not on a switch
1194 * boundary. It's safe to write.
1195 */
1196 }
1197 offsets->end = offsets->begin + offsets->size;
1198
1199 if (unlikely((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0)) {
1200 /*
1201 * The offset_end will fall at the very beginning of the next
1202 * subbuffer.
1203 */
1204 offsets->end_switch_current = 1; /* For offsets->begin */
1205 }
1206 return 0;
1207 }
1208
1209 /**
1210 * ltt_relay_reserve_slot_lockless_slow - Atomic slot reservation in a buffer.
1211 * @trace: the trace structure to log to.
1212 * @chan: channel structure
1213 * @transport_data: data structure specific to ltt relay
1214 * @data_size: size of the variable length data to log.
1215 * @slot_size: pointer to total size of the slot (out)
1216 * @buf_offset : pointer to reserved buffer offset (out)
1217 * @tsc: pointer to the tsc at the slot reservation (out)
1218 * @cpu: cpuid
1219 *
1220 * Return : -ENOSPC if not enough space, else returns 0.
1221 * It will take care of sub-buffer switching.
1222 */
1223 int ltt_reserve_slot_lockless_slow(struct ust_trace *trace,
1224 struct ust_channel *chan, void **transport_data,
1225 size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
1226 unsigned int *rflags, int largest_align, int cpu)
1227 {
1228 struct ust_buffer *buf = chan->buf[cpu];
1229 struct ltt_reserve_switch_offsets offsets;
1230
1231 offsets.size = 0;
1232
1233 do {
1234 if (unlikely(ltt_relay_try_reserve_slow(chan, buf, &offsets,
1235 data_size, tsc, rflags, largest_align)))
1236 return -ENOSPC;
1237 } while (unlikely(uatomic_cmpxchg(&buf->offset, offsets.old,
1238 offsets.end) != offsets.old));
1239
1240 /*
1241 * Atomically update last_tsc. This update races against concurrent
1242 * atomic updates, but the race will always cause supplementary full TSC
1243 * events, never the opposite (missing a full TSC event when it would be
1244 * needed).
1245 */
1246 save_last_tsc(buf, *tsc);
1247
1248 /*
1249 * Push the reader if necessary
1250 */
1251 ltt_reserve_push_reader(chan, buf, offsets.end - 1);
1252
1253 /*
1254 * Clear noref flag for this subbuffer.
1255 */
1256 //ust// ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.end - 1, chan));
1257
1258 /*
1259 * Switch old subbuffer if needed.
1260 */
1261 if (unlikely(offsets.end_switch_old)) {
1262 //ust// ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.old - 1, chan));
1263 ltt_reserve_switch_old_subbuf(chan, buf, &offsets, tsc);
1264 DBG("Switching %s_%d", chan->channel_name, cpu);
1265 }
1266
1267 /*
1268 * Populate new subbuffer.
1269 */
1270 if (unlikely(offsets.begin_switch))
1271 ltt_reserve_switch_new_subbuf(chan, buf, &offsets, tsc);
1272
1273 if (unlikely(offsets.end_switch_current))
1274 ltt_reserve_end_switch_current(chan, buf, &offsets, tsc);
1275
1276 *slot_size = offsets.size;
1277 *buf_offset = offsets.begin + offsets.before_hdr_pad;
1278 return 0;
1279 }
1280
1281 static struct ltt_transport ust_relay_transport = {
1282 .name = "ustrelay",
1283 .ops = {
1284 .create_channel = ust_buffers_create_channel,
1285 .finish_channel = ltt_relay_finish_channel,
1286 .remove_channel = ltt_relay_remove_channel,
1287 .wakeup_channel = ltt_relay_async_wakeup_chan,
1288 },
1289 };
1290
1291 static char initialized = 0;
1292
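/*
 * Register the "ustrelay" transport when the library is loaded (constructor)
 * and unregister it on unload (destructor).
 */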
1293 void __attribute__((constructor)) init_ustrelay_transport(void)
1294 {
1295 if(!initialized) {
1296 ltt_transport_register(&ust_relay_transport);
1297 initialized = 1;
1298 }
1299 }
1300
1301 static void __attribute__((destructor)) ust_buffers_exit(void)
1302 {
1303 ltt_transport_unregister(&ust_relay_transport);
1304 }
1305
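/*
 * Slow path for writing an event header.  Depending on rflags, the reserved
 * event IDs 29 (ID + size + full TSC), 30 (ID + size) or 31 (ID only) are
 * placed in the id_time field together with the truncated TSC, followed,
 * depending on the case, by the 16-bit event ID, the 16-bit size (extended to
 * 32 bits when it reaches LTT_MAX_SMALL_SIZE) and the full 64-bit timestamp
 * aligned on 8 bytes.
 */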
1306 size_t ltt_write_event_header_slow(struct ust_trace *trace,
1307 struct ust_channel *channel,
1308 struct ust_buffer *buf, long buf_offset,
1309 u16 eID, u32 event_size,
1310 u64 tsc, unsigned int rflags)
1311 {
1312 struct ltt_event_header header;
1313 u16 small_size;
1314
1315 switch (rflags) {
1316 case LTT_RFLAG_ID_SIZE_TSC:
1317 header.id_time = 29 << LTT_TSC_BITS;
1318 break;
1319 case LTT_RFLAG_ID_SIZE:
1320 header.id_time = 30 << LTT_TSC_BITS;
1321 break;
1322 case LTT_RFLAG_ID:
1323 header.id_time = 31 << LTT_TSC_BITS;
1324 break;
1325 }
1326
1327 header.id_time |= (u32)tsc & LTT_TSC_MASK;
1328 ust_buffers_write(buf, buf_offset, &header, sizeof(header));
1329 buf_offset += sizeof(header);
1330
1331 switch (rflags) {
1332 case LTT_RFLAG_ID_SIZE_TSC:
1333 small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE);
1334 ust_buffers_write(buf, buf_offset,
1335 &eID, sizeof(u16));
1336 buf_offset += sizeof(u16);
1337 ust_buffers_write(buf, buf_offset,
1338 &small_size, sizeof(u16));
1339 buf_offset += sizeof(u16);
1340 if (small_size == LTT_MAX_SMALL_SIZE) {
1341 ust_buffers_write(buf, buf_offset,
1342 &event_size, sizeof(u32));
1343 buf_offset += sizeof(u32);
1344 }
1345 buf_offset += ltt_align(buf_offset, sizeof(u64));
1346 ust_buffers_write(buf, buf_offset,
1347 &tsc, sizeof(u64));
1348 buf_offset += sizeof(u64);
1349 break;
1350 case LTT_RFLAG_ID_SIZE:
1351 small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE);
1352 ust_buffers_write(buf, buf_offset,
1353 &eID, sizeof(u16));
1354 buf_offset += sizeof(u16);
1355 ust_buffers_write(buf, buf_offset,
1356 &small_size, sizeof(u16));
1357 buf_offset += sizeof(u16);
1358 if (small_size == LTT_MAX_SMALL_SIZE) {
1359 ust_buffers_write(buf, buf_offset,
1360 &event_size, sizeof(u32));
1361 buf_offset += sizeof(u32);
1362 }
1363 break;
1364 case LTT_RFLAG_ID:
1365 ust_buffers_write(buf, buf_offset,
1366 &eID, sizeof(u16));
1367 buf_offset += sizeof(u16);
1368 break;
1369 }
1370
1371 return buf_offset;
1372 }