Port changes from lttng-kt
[ust.git] / libust / buffers.c
1/*
2 * buffers.c
3 * LTTng userspace tracer buffering system
4 *
5 * Copyright (C) 2009 - Pierre-Marc Fournier (pierre-marc dot fournier at polymtl dot ca)
6 * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23#include <unistd.h>
24#include <sys/mman.h>
25#include <sys/ipc.h>
26#include <sys/shm.h>
27#include <fcntl.h>
28#include <ust/kernelcompat.h>
29#include <kcompat/kref.h>
30#include "buffers.h"
31#include "channels.h"
32#include "tracer.h"
33#include "tracercore.h"
34#include "usterr.h"
35
36struct ltt_reserve_switch_offsets {
37 long begin, end, old;
38 long begin_switch, end_switch_current, end_switch_old;
39 size_t before_hdr_pad, size;
40};
41
42
43static DEFINE_MUTEX(ust_buffers_channels_mutex);
44static LIST_HEAD(ust_buffers_channels);
45
46static int get_n_cpus(void)
47{
48 int result;
49 static int n_cpus = 0;
50
51 if(n_cpus) {
52 return n_cpus;
53 }
54
55 /* On Linux, when some processors are offline
56 * _SC_NPROCESSORS_CONF counts the offline
57 * processors, whereas _SC_NPROCESSORS_ONLN
58 * does not. If we used _SC_NPROCESSORS_ONLN,
59 * getcpu() could return a value greater than
60 * this sysconf, in which case the arrays
61 * indexed by processor would overflow.
62 */
63 result = sysconf(_SC_NPROCESSORS_CONF);
64 if(result == -1) {
65 return -1;
66 }
67
68 n_cpus = result;
69
70 return result;
71}
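/*
 * Illustrative sketch (not part of the tracer): sizing a per-cpu array with
 * _SC_NPROCESSORS_CONF, as get_n_cpus() above does, so that an index obtained
 * from sched_getcpu() can never overflow it even when some processors are
 * offline. The helper names and the use of sched_getcpu() (glibc, needs
 * _GNU_SOURCE) are assumptions made for this example only.
 */
#if 0
#include <sched.h>
#include <stdlib.h>
#include <unistd.h>

static long *example_alloc_per_cpu_counters(void)
{
	long n = sysconf(_SC_NPROCESSORS_CONF);	/* counts offline cpus too */

	if (n == -1)
		return NULL;
	return calloc(n, sizeof(long));		/* one slot per possible cpu */
}

static void example_count_event(long *counters)
{
	int cpu = sched_getcpu();		/* always < _SC_NPROCESSORS_CONF */

	if (cpu >= 0)
		counters[cpu]++;
}
#endif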
72
73/* _ust_buffers_write()
74 *
75 * @buf: destination buffer
76 * @offset: offset in destination
77 * @src: source buffer
78 * @len: length of source
79 * @cpy: already copied
80 */
81
82void _ust_buffers_write(struct ust_buffer *buf, size_t offset,
83 const void *src, size_t len, ssize_t cpy)
84{
85 do {
86 len -= cpy;
87 src += cpy;
88 offset += cpy;
89
90 WARN_ON(offset >= buf->buf_size);
91
92 cpy = min_t(size_t, len, buf->buf_size - offset);
93 ust_buffers_do_copy(buf->buf_data + offset, src, cpy);
94 } while (unlikely(len != cpy));
95}
96
97static int ust_buffers_init_buffer(struct ust_trace *trace,
98 struct ust_channel *ltt_chan,
99 struct ust_buffer *buf,
100 unsigned int n_subbufs);
101
102static int ust_buffers_alloc_buf(struct ust_buffer *buf, size_t *size)
103{
104 void *ptr;
105 int result;
106
107 *size = PAGE_ALIGN(*size);
108
109 result = buf->shmid = shmget(getpid(), *size, IPC_CREAT | IPC_EXCL | 0700);
110 if(result == -1 && errno == EINVAL) {
111 ERR("shmget() returned EINVAL; maybe /proc/sys/kernel/shmmax should be increased.");
112 return -1;
113 }
114 else if(result == -1) {
115 PERROR("shmget");
116 return -1;
117 }
118
119 /* FIXME: should have matching call to shmdt */
120 ptr = shmat(buf->shmid, NULL, 0);
121 if(ptr == (void *) -1) {
122 perror("shmat");
123 goto destroy_shmem;
124 }
125
126 /* Already mark the shared memory for destruction. This will occur only
127 * when all users have detached.
128 */
129 result = shmctl(buf->shmid, IPC_RMID, NULL);
130 if(result == -1) {
131 perror("shmctl");
132 return -1;
133 }
134
135 buf->buf_data = ptr;
136 buf->buf_size = *size;
137
138 return 0;
139
140 destroy_shmem:
141 result = shmctl(buf->shmid, IPC_RMID, NULL);
142 if(result == -1) {
143 perror("shmctl");
144 }
145
146 return -1;
147}
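/*
 * Illustrative sketch (not part of the tracer): the System V shared memory
 * idiom used by ust_buffers_alloc_buf() above, reduced to its core steps.
 * shmget() creates a private segment, shmat() maps it, and shmctl(IPC_RMID)
 * marks it for destruction immediately so the kernel reclaims it as soon as
 * the last user detaches, even if the process dies without cleaning up.
 * Function and variable names here are invented for the example.
 */
#if 0
#include <sys/ipc.h>
#include <sys/shm.h>
#include <unistd.h>

static void *example_shm_alloc(size_t size)
{
	int shmid;
	void *ptr;

	shmid = shmget(getpid(), size, IPC_CREAT | IPC_EXCL | 0700);
	if (shmid == -1)
		return NULL;

	ptr = shmat(shmid, NULL, 0);
	if (ptr == (void *) -1) {
		shmctl(shmid, IPC_RMID, NULL);
		return NULL;
	}

	/* Mark for destruction now; actual removal happens on last detach. */
	if (shmctl(shmid, IPC_RMID, NULL) == -1) {
		shmdt(ptr);
		return NULL;
	}
	return ptr;
}

static void example_shm_free(void *ptr)
{
	shmdt(ptr);	/* last detach triggers destruction of the segment */
}
#endif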
148
149int ust_buffers_create_buf(struct ust_channel *channel, int cpu)
150{
151 int result;
152 struct ust_buffer *buf = channel->buf[cpu];
153
154 buf->cpu = cpu;
155 result = ust_buffers_alloc_buf(buf, &channel->alloc_size);
156 if(result)
157 return -1;
158
159 buf->chan = channel;
160 kref_get(&channel->kref);
161 return 0;
162}
163
164static void ust_buffers_destroy_channel(struct kref *kref)
165{
166 struct ust_channel *chan = container_of(kref, struct ust_channel, kref);
167 free(chan);
168}
169
170static void ust_buffers_destroy_buf(struct ust_buffer *buf)
171{
172 struct ust_channel *chan = buf->chan;
173 int result;
174
175 result = shmdt(buf->buf_data);
176 if(result == -1) {
177 PERROR("shmdt");
178 }
179
180//ust// chan->buf[buf->cpu] = NULL;
181 free(buf);
182 kref_put(&chan->kref, ust_buffers_destroy_channel);
183}
184
185/* called from kref_put */
186static void ust_buffers_remove_buf(struct kref *kref)
187{
188 struct ust_buffer *buf = container_of(kref, struct ust_buffer, kref);
189 ust_buffers_destroy_buf(buf);
190}
191
192int ust_buffers_open_buf(struct ust_channel *chan, int cpu)
193{
194 int result;
195
196 result = ust_buffers_create_buf(chan, cpu);
197 if (result == -1)
198 return -1;
199
200 kref_init(&chan->buf[cpu]->kref);
201
202 result = ust_buffers_init_buffer(chan->trace, chan, chan->buf[cpu], chan->subbuf_cnt);
203 if(result == -1)
204 return -1;
205
206 return 0;
207
208 /* FIXME: decrementally destroy on error? */
209}
210
211/**
212 * ust_buffers_close_buf - close a channel buffer
213 * @buf: buffer
214 */
215static void ust_buffers_close_buf(struct ust_buffer *buf)
216{
217 kref_put(&buf->kref, ust_buffers_remove_buf);
218}
219
220int ust_buffers_channel_open(struct ust_channel *chan, size_t subbuf_size, size_t subbuf_cnt)
221{
222 int i;
223 int result;
224
225 if(subbuf_size == 0 || subbuf_cnt == 0)
226 return -1;
227
228 /* Check that the subbuffer size is larger than a page. */
229 WARN_ON_ONCE(subbuf_size < PAGE_SIZE);
230
231 /*
232 * Make sure the number of subbuffers and subbuffer size are power of 2.
233 */
234 WARN_ON_ONCE(hweight32(subbuf_size) != 1);
235 WARN_ON(hweight32(subbuf_cnt) != 1);
236
237 chan->version = UST_CHANNEL_VERSION;
238 chan->subbuf_cnt = subbuf_cnt;
239 chan->subbuf_size = subbuf_size;
240 chan->subbuf_size_order = get_count_order(subbuf_size);
241 chan->alloc_size = subbuf_size * subbuf_cnt;
242
243 kref_init(&chan->kref);
244
245 mutex_lock(&ust_buffers_channels_mutex);
246 for(i=0; i<chan->n_cpus; i++) {
247 result = ust_buffers_open_buf(chan, i);
248 if (result == -1)
249 goto error;
250 }
251 list_add(&chan->list, &ust_buffers_channels);
252 mutex_unlock(&ust_buffers_channels_mutex);
253
254 return 0;
255
256 /* Jump directly inside the loop to close the buffers that were already
257 * opened. */
258 for(; i>=0; i--) {
259 ust_buffers_close_buf(chan->buf[i]);
260error:
261 do {} while(0);
262 }
263
264 kref_put(&chan->kref, ust_buffers_destroy_channel);
265 mutex_unlock(&ust_buffers_channels_mutex);
266 return -1;
267}
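/*
 * Illustrative sketch (not part of the tracer): the power-of-two checks done
 * by ust_buffers_channel_open() above. A size is a power of two exactly when
 * it has a single bit set, which is what the hweight32() tests verify, and
 * get_count_order() is then simply log2 of that size. The helper names below
 * are invented for the example.
 */
#if 0
static int example_is_power_of_two(unsigned int x)
{
	return x != 0 && (x & (x - 1)) == 0;	/* single bit set */
}

static int example_count_order(unsigned int x)
{
	int order = 0;

	while ((1U << order) < x)
		order++;
	return order;	/* e.g. 4096 -> 12, so subbuf_size == 1 << order */
}
#endif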
268
269void ust_buffers_channel_close(struct ust_channel *chan)
270{
271 int i;
272 if(!chan)
273 return;
274
275 mutex_lock(&ust_buffers_channels_mutex);
276 for(i=0; i<chan->n_cpus; i++) {
277 /* FIXME: if we make it here, then all buffers were necessarily allocated. Moreover, we don't
278 * initialize to NULL so we cannot use this check. Should we? */
279//ust// if (chan->buf[i])
280 ust_buffers_close_buf(chan->buf[i]);
281 }
282
283 list_del(&chan->list);
284 kref_put(&chan->kref, ust_buffers_destroy_channel);
285 mutex_unlock(&ust_buffers_channels_mutex);
286}
287
288/*
289 * -------
290 */
291
292static void ust_buffers_destroy_buffer(struct ust_channel *ltt_chan, int cpu);
293
294static void ltt_force_switch(struct ust_buffer *buf,
295 enum force_switch_mode mode);
296
297/*
298 * Trace callbacks
299 */
300static void ltt_buffer_begin(struct ust_buffer *buf,
301 u64 tsc, unsigned int subbuf_idx)
302{
303 struct ust_channel *channel = buf->chan;
304 struct ltt_subbuffer_header *header =
305 (struct ltt_subbuffer_header *)
306 ust_buffers_offset_address(buf,
307 subbuf_idx * buf->chan->subbuf_size);
308
309 header->cycle_count_begin = tsc;
310 header->lost_size = 0xFFFFFFFF; /* for debugging */
311 header->buf_size = buf->chan->subbuf_size;
312 ltt_write_trace_header(channel->trace, header);
313}
314
315/*
316 * offset is assumed to never be 0 here : never deliver a completely empty
317 * subbuffer. The lost size is between 0 and subbuf_size-1.
318 */
319static notrace void ltt_buffer_end(struct ust_buffer *buf,
320 u64 tsc, unsigned int offset, unsigned int subbuf_idx)
321{
322 struct ltt_subbuffer_header *header =
323 (struct ltt_subbuffer_header *)
324 ust_buffers_offset_address(buf,
325 subbuf_idx * buf->chan->subbuf_size);
326
327 header->lost_size = SUBBUF_OFFSET((buf->chan->subbuf_size - offset),
328 buf->chan);
329 header->cycle_count_end = tsc;
330 header->events_lost = local_read(&buf->events_lost);
331 header->subbuf_corrupt = local_read(&buf->corrupted_subbuffers);
332}
333
334/*
335 * This function should not be called from NMI interrupt context
336 */
337static notrace void ltt_buf_unfull(struct ust_buffer *buf,
338 unsigned int subbuf_idx,
339 long offset)
340{
341}
342
343/*
344 * Promote compiler barrier to a smp_mb().
345 * For the specific LTTng case, this IPI call should be removed if the
346 * architecture does not reorder writes. This should eventually be provided by
347 * a separate architecture-specific infrastructure.
348 */
349static void remote_mb(void *info)
350{
351 smp_mb();
352}
353
354int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed)
355{
356 struct ust_channel *channel = buf->chan;
357 long consumed_old, consumed_idx, commit_count, write_offset;
358//ust// int retval;
359
360 consumed_old = atomic_long_read(&buf->consumed);
361 consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
362 commit_count = local_read(&buf->commit_count[consumed_idx].cc_sb);
363 /*
364 * Make sure we read the commit count before reading the buffer
365 * data and the write offset. Correct consumed offset ordering
366 * wrt commit count is ensured by the use of cmpxchg to update
367 * the consumed offset.
368 * smp_call_function_single can fail if the remote CPU is offline,
369 * this is OK because then there is no wmb to execute there.
370 * If our thread is executing on the same CPU as the one the buffer
371 * belongs to, we don't have to synchronize it at all. If we are
372 * migrated, the scheduler will take care of the memory barriers.
373 * Normally, smp_call_function_single() should ensure program order when
374 * executing the remote function, which implies that it surrounds the
375 * function execution with :
376 * smp_mb()
377 * send IPI
378 * csd_lock_wait
379 * recv IPI
380 * smp_mb()
381 * exec. function
382 * smp_mb()
383 * csd unlock
384 * smp_mb()
385 *
386 * However, smp_call_function_single() does not seem to clearly execute
387 * such barriers. It depends on spinlock semantic to provide the barrier
388 * before executing the IPI and, when busy-looping, csd_lock_wait only
389 * executes smp_mb() when it has to wait for the other CPU.
390 *
391 * I don't trust this code. Therefore, let's add the smp_mb() sequence
392 * required ourselves, even if duplicated. It has no performance impact
393 * anyway.
394 *
395 * smp_mb() is needed because smp_rmb() and smp_wmb() only order read vs
396 * read and write vs write. They do not ensure core synchronization. We
397 * really have to ensure total order between the 3 barriers running on
398 * the 2 CPUs.
399 */
400//ust// #ifdef LTT_NO_IPI_BARRIER
401 /*
402 * Local rmb to match the remote wmb to read the commit count before the
403 * buffer data and the write offset.
404 */
405 smp_rmb();
406//ust// #else
407//ust// if (raw_smp_processor_id() != buf->cpu) {
408//ust// smp_mb(); /* Total order with IPI handler smp_mb() */
409//ust// smp_call_function_single(buf->cpu, remote_mb, NULL, 1);
410//ust// smp_mb(); /* Total order with IPI handler smp_mb() */
411//ust// }
412//ust// #endif
413
414 write_offset = local_read(&buf->offset);
415 /*
416 * Check that the subbuffer we are trying to consume has been
417 * already fully committed.
418 */
419 if (((commit_count - buf->chan->subbuf_size)
420 & channel->commit_count_mask)
421 - (BUFFER_TRUNC(consumed_old, buf->chan)
422 >> channel->n_subbufs_order)
423 != 0) {
424 return -EAGAIN;
425 }
426 /*
427 * Check that we are not about to read the same subbuffer in
428 * which the writer head is.
429 */
430 if ((SUBBUF_TRUNC(write_offset, buf->chan)
431 - SUBBUF_TRUNC(consumed_old, buf->chan))
432 == 0) {
433 return -EAGAIN;
434 }
435
436 /* FIXME: is this ok to disable the reading feature? */
437//ust// retval = update_read_sb_index(buf, consumed_idx);
438//ust// if (retval)
439//ust// return retval;
440
441 *consumed = consumed_old;
442
443 return 0;
444}
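/*
 * Illustrative sketch (not part of the tracer): the publication pattern that
 * the long comment in ust_buffers_get_subbuf() relies on. The writer makes
 * the sub-buffer contents visible before publishing the commit count; the
 * reader loads the commit count first and only then reads the data. With GCC
 * built-ins this is a release/acquire pair; the tracer itself uses smp_wmb()
 * on the write side and the smp_rmb() seen above on the read side. Names and
 * the use of the __atomic built-ins are assumptions for this example.
 */
#if 0
static unsigned long example_commit_count;
static char example_payload[64];

static void example_writer(const char *data, unsigned long count)
{
	__builtin_memcpy(example_payload, data, sizeof(example_payload));
	/* release: payload writes may not be reordered after the publish */
	__atomic_store_n(&example_commit_count, count, __ATOMIC_RELEASE);
}

static unsigned long example_reader(char *out)
{
	/* acquire: the payload reads below happen after this load */
	unsigned long count = __atomic_load_n(&example_commit_count,
					      __ATOMIC_ACQUIRE);

	__builtin_memcpy(out, example_payload, sizeof(example_payload));
	return count;
}
#endif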
445
446int ust_buffers_put_subbuf(struct ust_buffer *buf, unsigned long uconsumed_old)
447{
448 long consumed_new, consumed_old;
449
450 consumed_old = atomic_long_read(&buf->consumed);
451 consumed_old = consumed_old & (~0xFFFFFFFFL);
452 consumed_old = consumed_old | uconsumed_old;
453 consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
454
455//ust// spin_lock(&ltt_buf->full_lock);
456 if (atomic_long_cmpxchg(&buf->consumed, consumed_old,
457 consumed_new)
458 != consumed_old) {
459 /* We have been pushed by the writer : the last
460 * buffer read _is_ corrupted! It can also
461 * happen if this is a buffer we never got. */
462//ust// spin_unlock(&ltt_buf->full_lock);
463 return -EIO;
464 } else {
465 /* tell the client that buffer is now unfull */
466 int index;
467 long data;
468 index = SUBBUF_INDEX(consumed_old, buf->chan);
469 data = BUFFER_OFFSET(consumed_old, buf->chan);
470 ltt_buf_unfull(buf, index, data);
471//ust// spin_unlock(&ltt_buf->full_lock);
472 }
473 return 0;
474}
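/*
 * Illustrative sketch (not part of the tracer): the consumer-side protocol of
 * ust_buffers_put_subbuf() above. The 32-bit offset handed back by the
 * consumer is spliced into the low bits of the consumed counter, the counter
 * is advanced to the next sub-buffer boundary, and a compare-and-swap detects
 * whether the writer pushed the reader in the meantime (in which case the
 * data that was just read must be considered corrupted). The names and the
 * use of __sync_val_compare_and_swap() are assumptions for this example;
 * subbuf_size must be a power of two, as checked at channel open time.
 */
#if 0
static int example_put_subbuf(unsigned long *consumed,
			      unsigned long uconsumed_old,
			      unsigned long subbuf_size)
{
	unsigned long old, new;

	old = (*consumed & ~0xFFFFFFFFUL) | uconsumed_old;
	new = (old + subbuf_size) & ~(subbuf_size - 1);	/* like SUBBUF_ALIGN */

	if (__sync_val_compare_and_swap(consumed, old, new) != old)
		return -1;	/* pushed by the writer: last read is corrupted */
	return 0;
}
#endif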
475
476//ust// static void switch_buffer(unsigned long data)
477//ust// {
478//ust// struct ltt_channel_buf_struct *ltt_buf =
479//ust// (struct ltt_channel_buf_struct *)data;
480//ust// struct rchan_buf *buf = ltt_buf->rbuf;
481//ust//
482//ust// if (buf)
483//ust// ltt_force_switch(buf, FORCE_ACTIVE);
484//ust//
485//ust// ltt_buf->switch_timer.expires += ltt_buf->switch_timer_interval;
486//ust// add_timer_on(&ltt_buf->switch_timer, smp_processor_id());
487//ust// }
488//ust//
489//ust// static void start_switch_timer(struct ltt_channel_struct *ltt_channel)
490//ust// {
491//ust// struct rchan *rchan = ltt_channel->trans_channel_data;
492//ust// int cpu;
493//ust//
494//ust// if (!ltt_channel->switch_timer_interval)
495//ust// return;
496//ust//
497//ust// // TODO : hotplug
498//ust// for_each_online_cpu(cpu) {
499//ust// struct ltt_channel_buf_struct *ltt_buf;
500//ust// struct rchan_buf *buf;
501//ust//
502//ust// buf = rchan->buf[cpu];
503//ust// ltt_buf = buf->chan_private;
504//ust// buf->random_access = 1;
505//ust// ltt_buf->switch_timer_interval =
506//ust// ltt_channel->switch_timer_interval;
507//ust// init_timer(&ltt_buf->switch_timer);
508//ust// ltt_buf->switch_timer.function = switch_buffer;
509//ust// ltt_buf->switch_timer.expires = jiffies +
510//ust// ltt_buf->switch_timer_interval;
511//ust// ltt_buf->switch_timer.data = (unsigned long)ltt_buf;
512//ust// add_timer_on(&ltt_buf->switch_timer, cpu);
513//ust// }
514//ust// }
515//ust//
516//ust// /*
517//ust// * Cannot use del_timer_sync with add_timer_on, so use an IPI to locally
518//ust// * delete the timer.
519//ust// */
520//ust// static void stop_switch_timer_ipi(void *info)
521//ust// {
522//ust// struct ltt_channel_buf_struct *ltt_buf =
523//ust// (struct ltt_channel_buf_struct *)info;
524//ust//
525//ust// del_timer(&ltt_buf->switch_timer);
526//ust// }
527//ust//
528//ust// static void stop_switch_timer(struct ltt_channel_struct *ltt_channel)
529//ust// {
530//ust// struct rchan *rchan = ltt_channel->trans_channel_data;
531//ust// int cpu;
532//ust//
533//ust// if (!ltt_channel->switch_timer_interval)
534//ust// return;
535//ust//
536//ust// // TODO : hotplug
537//ust// for_each_online_cpu(cpu) {
538//ust// struct ltt_channel_buf_struct *ltt_buf;
539//ust// struct rchan_buf *buf;
540//ust//
541//ust// buf = rchan->buf[cpu];
542//ust// ltt_buf = buf->chan_private;
543//ust// smp_call_function(stop_switch_timer_ipi, ltt_buf, 1);
544//ust// buf->random_access = 0;
545//ust// }
546//ust// }
547
548static void ust_buffers_print_written(struct ust_channel *chan,
549 long cons_off, unsigned int cpu)
550{
551 struct ust_buffer *buf = chan->buf[cpu];
552 long cons_idx, events_count;
553
554 cons_idx = SUBBUF_INDEX(cons_off, chan);
555 events_count = local_read(&buf->commit_count[cons_idx].events);
556
557 if (events_count)
558 printk(KERN_INFO
559 "channel %s: %lu events written (cpu %u, index %lu)\n",
560 chan->channel_name, events_count, cpu, cons_idx);
561}
562
563static void ltt_relay_print_subbuffer_errors(
564 struct ust_channel *channel,
565 long cons_off, int cpu)
566{
567 struct ust_buffer *ltt_buf = channel->buf[cpu];
568 long cons_idx, commit_count, commit_count_sb, write_offset;
569
570 cons_idx = SUBBUF_INDEX(cons_off, channel);
571 commit_count = local_read(&ltt_buf->commit_count[cons_idx].cc);
572 commit_count_sb = local_read(&ltt_buf->commit_count[cons_idx].cc_sb);
573
574 /*
575 * No need to order commit_count and write_offset reads because we
576 * execute after trace is stopped when there are no readers left.
577 */
578 write_offset = local_read(&ltt_buf->offset);
579 WARN( "LTT : unread channel %s offset is %ld "
580 "and cons_off : %ld (cpu %d)\n",
581 channel->channel_name, write_offset, cons_off, cpu);
582 /* Check each sub-buffer for non filled commit count */
583 if (((commit_count - channel->subbuf_size) & channel->commit_count_mask)
584 - (BUFFER_TRUNC(cons_off, channel) >> channel->n_subbufs_order) != 0) {
585 ERR("LTT : %s : subbuffer %lu has non filled "
586 "commit count [cc, cc_sb] [%lu,%lu].\n",
587 channel->channel_name, cons_idx, commit_count, commit_count_sb);
588 }
589 ERR("LTT : %s : commit count : %lu, subbuf size %zd\n",
590 channel->channel_name, commit_count,
591 channel->subbuf_size);
592}
593
594static void ltt_relay_print_errors(struct ust_trace *trace,
595 struct ust_channel *channel, int cpu)
596{
597 struct ust_buffer *ltt_buf = channel->buf[cpu];
598 long cons_off;
599
600 /*
601 * Can be called in the error path of allocation when
602 * trans_channel_data is not yet set.
603 */
604 if (!channel)
605 return;
606
607 for (cons_off = 0; cons_off < channel->alloc_size;
608 cons_off = SUBBUF_ALIGN(cons_off, channel))
609 ust_buffers_print_written(channel, cons_off, cpu);
610 for (cons_off = atomic_long_read(&ltt_buf->consumed);
611 (SUBBUF_TRUNC(local_read(&ltt_buf->offset),
612 channel)
613 - cons_off) > 0;
614 cons_off = SUBBUF_ALIGN(cons_off, channel))
615 ltt_relay_print_subbuffer_errors(channel, cons_off, cpu);
616}
617
618static void ltt_relay_print_buffer_errors(struct ust_channel *channel, int cpu)
619{
620 struct ust_trace *trace = channel->trace;
621 struct ust_buffer *ltt_buf = channel->buf[cpu];
622
623 if (local_read(&ltt_buf->events_lost))
624 ERR("channel %s: %ld events lost (cpu %d)",
625 channel->channel_name,
626 local_read(&ltt_buf->events_lost), cpu);
627 if (local_read(&ltt_buf->corrupted_subbuffers))
628 ERR("channel %s : %ld corrupted subbuffers (cpu %d)",
629 channel->channel_name,
630 local_read(&ltt_buf->corrupted_subbuffers), cpu);
631
632 ltt_relay_print_errors(trace, channel, cpu);
633}
634
635static void ltt_relay_release_channel(struct kref *kref)
636{
637 struct ust_channel *ltt_chan = container_of(kref,
638 struct ust_channel, kref);
639 free(ltt_chan->buf);
640}
641
642/*
643 * Create ltt buffer.
644 */
645//ust// static int ltt_relay_create_buffer(struct ust_trace *trace,
646//ust// struct ltt_channel_struct *ltt_chan, struct rchan_buf *buf,
647//ust// unsigned int cpu, unsigned int n_subbufs)
648//ust// {
649//ust// struct ltt_channel_buf_struct *ltt_buf =
650//ust// percpu_ptr(ltt_chan->buf, cpu);
651//ust// unsigned int j;
652//ust//
653//ust// ltt_buf->commit_count =
654//ust// kzalloc_node(sizeof(ltt_buf->commit_count) * n_subbufs,
655//ust// GFP_KERNEL, cpu_to_node(cpu));
656//ust// if (!ltt_buf->commit_count)
657//ust// return -ENOMEM;
658//ust// kref_get(&trace->kref);
659//ust// kref_get(&trace->ltt_transport_kref);
660//ust// kref_get(&ltt_chan->kref);
661//ust// local_set(&ltt_buf->offset, ltt_subbuffer_header_size());
662//ust// atomic_long_set(&ltt_buf->consumed, 0);
663//ust// atomic_long_set(&ltt_buf->active_readers, 0);
664//ust// for (j = 0; j < n_subbufs; j++)
665//ust// local_set(&ltt_buf->commit_count[j], 0);
666//ust// init_waitqueue_head(&ltt_buf->write_wait);
667//ust// atomic_set(&ltt_buf->wakeup_readers, 0);
668//ust// spin_lock_init(&ltt_buf->full_lock);
669//ust//
670//ust// ltt_buffer_begin_callback(buf, trace->start_tsc, 0);
671//ust// /* atomic_add made on local variable on data that belongs to
672//ust// * various CPUs : ok because tracing not started (for this cpu). */
673//ust// local_add(ltt_subbuffer_header_size(), &ltt_buf->commit_count[0]);
674//ust//
675//ust// local_set(&ltt_buf->events_lost, 0);
676//ust// local_set(&ltt_buf->corrupted_subbuffers, 0);
677//ust//
678//ust// return 0;
679//ust// }
680
681static int ust_buffers_init_buffer(struct ust_trace *trace,
682 struct ust_channel *ltt_chan, struct ust_buffer *buf,
683 unsigned int n_subbufs)
684{
685 unsigned int j;
686 int fds[2];
687 int result;
688
689 buf->commit_count =
690 zmalloc(sizeof(*buf->commit_count) * n_subbufs);
691 if (!buf->commit_count)
692 return -ENOMEM;
693 kref_get(&trace->kref);
694 kref_get(&trace->ltt_transport_kref);
695 kref_get(&ltt_chan->kref);
696 local_set(&buf->offset, ltt_subbuffer_header_size());
697 atomic_long_set(&buf->consumed, 0);
698 atomic_long_set(&buf->active_readers, 0);
699 for (j = 0; j < n_subbufs; j++) {
700 local_set(&buf->commit_count[j].cc, 0);
701 local_set(&buf->commit_count[j].cc_sb, 0);
702 }
703//ust// init_waitqueue_head(&buf->write_wait);
704//ust// atomic_set(&buf->wakeup_readers, 0);
705//ust// spin_lock_init(&buf->full_lock);
706
707 ltt_buffer_begin(buf, trace->start_tsc, 0);
708
709 local_add(ltt_subbuffer_header_size(), &buf->commit_count[0].cc);
710
711 local_set(&buf->events_lost, 0);
712 local_set(&buf->corrupted_subbuffers, 0);
713
714 result = pipe(fds);
715 if(result == -1) {
716 PERROR("pipe");
717 return -1;
718 }
719 buf->data_ready_fd_read = fds[0];
720 buf->data_ready_fd_write = fds[1];
721
722 /* FIXME: do we actually need this? */
723 result = fcntl(fds[0], F_SETFL, O_NONBLOCK);
724 if(result == -1) {
725 PERROR("fcntl");
726 }
727
728//ust// buf->commit_seq = malloc(sizeof(buf->commit_seq) * n_subbufs);
729//ust// if(!ltt_buf->commit_seq) {
730//ust// return -1;
731//ust// }
732
733 /* FIXME: decrementally destroy on error */
734
735 return 0;
736}
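/*
 * Illustrative sketch (not part of the tracer): the pipe-based "data ready"
 * notification set up at the end of ust_buffers_init_buffer() above. The
 * writer keeps the write end and sends a single byte when a sub-buffer is
 * ready (or closes it to signal the end of the stream); the consumer keeps
 * the read end, which it can poll() alongside other buffers. Names are
 * invented for the example; the O_NONBLOCK on the read end mirrors the
 * FIXME'd fcntl() above.
 */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static int example_fds[2];

static int example_setup_notification(void)
{
	if (pipe(example_fds) == -1)
		return -1;
	return fcntl(example_fds[0], F_SETFL, O_NONBLOCK);
}

static void example_signal_data_ready(void)
{
	(void) write(example_fds[1], "1", 1);	/* wake up the consumer */
}

static int example_wait_for_data(int timeout_ms)
{
	struct pollfd pfd = { .fd = example_fds[0], .events = POLLIN };
	char c;

	if (poll(&pfd, 1, timeout_ms) <= 0)
		return -1;
	return read(example_fds[0], &c, 1) == 1 ? 0 : -1;
}
#endif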
737
738/* FIXME: use this function */
739static void ust_buffers_destroy_buffer(struct ust_channel *ltt_chan, int cpu)
740{
741 struct ust_trace *trace = ltt_chan->trace;
742 struct ust_buffer *ltt_buf = ltt_chan->buf[cpu];
743
744 kref_put(&ltt_chan->trace->ltt_transport_kref,
745 ltt_release_transport);
746 ltt_relay_print_buffer_errors(ltt_chan, cpu);
747//ust// free(ltt_buf->commit_seq);
748 kfree(ltt_buf->commit_count);
749 ltt_buf->commit_count = NULL;
750 kref_put(&ltt_chan->kref, ltt_relay_release_channel);
751 kref_put(&trace->kref, ltt_release_trace);
752//ust// wake_up_interruptible(&trace->kref_wq);
753}
754
755static int ust_buffers_alloc_channel_buf_structs(struct ust_channel *chan)
756{
757 void *ptr;
758 int result;
759 size_t size;
760 int i;
761
762 size = PAGE_ALIGN(1);
763
764 for(i=0; i<chan->n_cpus; i++) {
765
766 result = chan->buf_struct_shmids[i] = shmget(getpid(), size, IPC_CREAT | IPC_EXCL | 0700);
767 if(result == -1) {
768 PERROR("shmget");
769 goto destroy_previous;
770 }
771
772 /* FIXME: should have matching call to shmdt */
773 ptr = shmat(chan->buf_struct_shmids[i], NULL, 0);
774 if(ptr == (void *) -1) {
775 perror("shmat");
776 goto destroy_shm;
777 }
778
779 /* Already mark the shared memory for destruction. This will occur only
780 * when all users have detached.
781 */
782 result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL);
783 if(result == -1) {
784 perror("shmctl");
785 goto destroy_previous;
786 }
787
788 chan->buf[i] = ptr;
789 }
790
791 return 0;
792
793 /* Jumping inside this loop occurs from within the other loop above with i as
794 * counter, so it unallocates the structures for the cpu = current_i down to
795 * zero. */
796 for(; i>=0; i--) {
797 destroy_shm:
798 result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL);
799 if(result == -1) {
800 perror("shmctl");
801 }
802
803 destroy_previous:
804 continue;
805 }
806
807 return -1;
808}
809
810/*
811 * Create channel.
812 */
813static int ust_buffers_create_channel(const char *trace_name, struct ust_trace *trace,
814 const char *channel_name, struct ust_channel *ltt_chan,
815 unsigned int subbuf_size, unsigned int n_subbufs, int overwrite)
816{
817 int result;
818
819 kref_init(&ltt_chan->kref);
820
821 ltt_chan->trace = trace;
822 ltt_chan->overwrite = overwrite;
823 ltt_chan->n_subbufs_order = get_count_order(n_subbufs);
824 ltt_chan->commit_count_mask = (~0UL >> ltt_chan->n_subbufs_order);
825 ltt_chan->n_cpus = get_n_cpus();
826//ust// ltt_chan->buf = percpu_alloc_mask(sizeof(struct ltt_channel_buf_struct), GFP_KERNEL, cpu_possible_map);
827 ltt_chan->buf = (void *) malloc(ltt_chan->n_cpus * sizeof(void *));
828 if(ltt_chan->buf == NULL) {
829 goto error;
830 }
831 ltt_chan->buf_struct_shmids = (int *) malloc(ltt_chan->n_cpus * sizeof(int));
832 if(ltt_chan->buf_struct_shmids == NULL)
833 goto free_buf;
834
835 result = ust_buffers_alloc_channel_buf_structs(ltt_chan);
836 if(result != 0) {
837 goto free_buf_struct_shmids;
838 }
839
840 result = ust_buffers_channel_open(ltt_chan, subbuf_size, n_subbufs);
841 if (result != 0) {
842 ERR("Cannot open channel for trace %s", trace_name);
843 goto unalloc_buf_structs;
844 }
845
846 return 0;
847
848unalloc_buf_structs:
849 /* FIXME: put a call here to unalloc the buf structs! */
850
851free_buf_struct_shmids:
852 free(ltt_chan->buf_struct_shmids);
853
854free_buf:
855 free(ltt_chan->buf);
856
857error:
858 return -1;
859}
860
861/*
862 * LTTng channel flush function.
863 *
864 * Must be called when no tracing is active in the channel, because of
865 * accesses across CPUs.
866 */
867static notrace void ltt_relay_buffer_flush(struct ust_buffer *buf)
868{
869 int result;
870
871//ust// buf->finalized = 1;
872 ltt_force_switch(buf, FORCE_FLUSH);
873
874 result = write(buf->data_ready_fd_write, "1", 1);
875 if(result == -1) {
876 PERROR("write (in ltt_relay_buffer_flush)");
877 ERR("this should never happen!");
878 }
879}
880
881static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel)
882{
883//ust// unsigned int i;
884//ust// struct rchan *rchan = ltt_channel->trans_channel_data;
885//ust//
886//ust// for_each_possible_cpu(i) {
887//ust// struct ltt_channel_buf_struct *ltt_buf =
888//ust// percpu_ptr(ltt_channel->buf, i);
889//ust//
890//ust// if (atomic_read(&ltt_buf->wakeup_readers) == 1) {
891//ust// atomic_set(&ltt_buf->wakeup_readers, 0);
892//ust// wake_up_interruptible(&rchan->buf[i]->read_wait);
893//ust// }
894//ust// }
895}
896
897static void ltt_relay_finish_buffer(struct ust_channel *channel, unsigned int cpu)
898{
899// int result;
900
901 if (channel->buf[cpu]) {
902 struct ust_buffer *buf = channel->buf[cpu];
903 ltt_relay_buffer_flush(buf);
904//ust// ltt_relay_wake_writers(ltt_buf);
905 /* closing the pipe tells the consumer the buffer is finished */
906
907 //result = write(ltt_buf->data_ready_fd_write, "D", 1);
908 //if(result == -1) {
909 // PERROR("write (in ltt_relay_finish_buffer)");
910 // ERR("this should never happen!");
911 //}
912 close(buf->data_ready_fd_write);
913 }
914}
915
916
917static void ltt_relay_finish_channel(struct ust_channel *channel)
918{
919 unsigned int i;
920
921 for(i=0; i<channel->n_cpus; i++) {
922 ltt_relay_finish_buffer(channel, i);
923 }
924}
925
926static void ltt_relay_remove_channel(struct ust_channel *channel)
927{
928 ust_buffers_channel_close(channel);
929 kref_put(&channel->kref, ltt_relay_release_channel);
930}
931
932//ust// /*
933//ust// * Returns :
934//ust// * 0 if ok
935//ust// * !0 if execution must be aborted.
936//ust// */
937//ust// static inline int ltt_relay_try_reserve(
938//ust// struct ust_channel *channel, struct ust_buffer *buf,
939//ust// struct ltt_reserve_switch_offsets *offsets, size_t data_size,
940//ust// u64 *tsc, unsigned int *rflags, int largest_align)
941//ust// {
942//ust// offsets->begin = local_read(&buf->offset);
943//ust// offsets->old = offsets->begin;
944//ust// offsets->begin_switch = 0;
945//ust// offsets->end_switch_current = 0;
946//ust// offsets->end_switch_old = 0;
947//ust//
948//ust// *tsc = trace_clock_read64();
949//ust// if (last_tsc_overflow(buf, *tsc))
950//ust// *rflags = LTT_RFLAG_ID_SIZE_TSC;
951//ust//
952//ust// if (SUBBUF_OFFSET(offsets->begin, buf->chan) == 0) {
953//ust// offsets->begin_switch = 1; /* For offsets->begin */
954//ust// } else {
955//ust// offsets->size = ust_get_header_size(channel,
956//ust// offsets->begin, data_size,
957//ust// &offsets->before_hdr_pad, *rflags);
958//ust// offsets->size += ltt_align(offsets->begin + offsets->size,
959//ust// largest_align)
960//ust// + data_size;
961//ust// if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
962//ust// > buf->chan->subbuf_size) {
963//ust// offsets->end_switch_old = 1; /* For offsets->old */
964//ust// offsets->begin_switch = 1; /* For offsets->begin */
965//ust// }
966//ust// }
967//ust// if (offsets->begin_switch) {
968//ust// long subbuf_index;
969//ust//
970//ust// if (offsets->end_switch_old)
971//ust// offsets->begin = SUBBUF_ALIGN(offsets->begin,
972//ust// buf->chan);
973//ust// offsets->begin = offsets->begin + ltt_subbuffer_header_size();
974//ust// /* Test new buffer integrity */
975//ust// subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
976//ust// offsets->reserve_commit_diff =
977//ust// (BUFFER_TRUNC(offsets->begin, buf->chan)
978//ust// >> channel->n_subbufs_order)
979//ust// - (local_read(&buf->commit_count[subbuf_index])
980//ust// & channel->commit_count_mask);
981//ust// if (offsets->reserve_commit_diff == 0) {
982//ust// long consumed;
983//ust//
984//ust// consumed = atomic_long_read(&buf->consumed);
985//ust//
986//ust// /* Next buffer not corrupted. */
987//ust// if (!channel->overwrite &&
988//ust// (SUBBUF_TRUNC(offsets->begin, buf->chan)
989//ust// - SUBBUF_TRUNC(consumed, buf->chan))
990//ust// >= channel->alloc_size) {
991//ust//
992//ust// long consumed_idx = SUBBUF_INDEX(consumed, buf->chan);
993//ust// long commit_count = local_read(&buf->commit_count[consumed_idx]);
994//ust// if(((commit_count - buf->chan->subbuf_size) & channel->commit_count_mask) - (BUFFER_TRUNC(consumed, buf->chan) >> channel->n_subbufs_order) != 0) {
995//ust// WARN("Event dropped. Caused by non-committed event.");
996//ust// }
997//ust// else {
998//ust// WARN("Event dropped. Caused by non-consumed buffer.");
999//ust// }
1000//ust// /*
1001//ust// * We do not overwrite non consumed buffers
1002//ust// * and we are full : event is lost.
1003//ust// */
1004//ust// local_inc(&buf->events_lost);
1005//ust// return -1;
1006//ust// } else {
1007//ust// /*
1008//ust// * next buffer not corrupted, we are either in
1009//ust// * overwrite mode or the buffer is not full.
1010//ust// * It's safe to write in this new subbuffer.
1011//ust// */
1012//ust// }
1013//ust// } else {
1014//ust// /*
1015//ust// * Next subbuffer corrupted. Force pushing reader even
1016//ust// * in normal mode. It's safe to write in this new
1017//ust// * subbuffer.
1018//ust// */
1019//ust// }
1020//ust// offsets->size = ust_get_header_size(channel,
1021//ust// offsets->begin, data_size,
1022//ust// &offsets->before_hdr_pad, *rflags);
1023//ust// offsets->size += ltt_align(offsets->begin + offsets->size,
1024//ust// largest_align)
1025//ust// + data_size;
1026//ust// if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
1027//ust// > buf->chan->subbuf_size) {
1028//ust// /*
1029//ust// * Event too big for subbuffers, report error, don't
1030//ust// * complete the sub-buffer switch.
1031//ust// */
1032//ust// local_inc(&buf->events_lost);
1033//ust// return -1;
1034//ust// } else {
1035//ust// /*
1036//ust// * We just made a successful buffer switch and the event
1037//ust// * fits in the new subbuffer. Let's write.
1038//ust// */
1039//ust// }
1040//ust// } else {
1041//ust// /*
1042//ust// * Event fits in the current buffer and we are not on a switch
1043//ust// * boundary. It's safe to write.
1044//ust// */
1045//ust// }
1046//ust// offsets->end = offsets->begin + offsets->size;
1047//ust//
1048//ust// if ((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0) {
1049//ust// /*
1050//ust// * The offset_end will fall at the very beginning of the next
1051//ust// * subbuffer.
1052//ust// */
1053//ust// offsets->end_switch_current = 1; /* For offsets->begin */
1054//ust// }
1055//ust// return 0;
1056//ust// }
1057//ust//
1058//ust// /*
1059//ust// * Returns :
1060//ust// * 0 if ok
1061//ust// * !0 if execution must be aborted.
1062//ust// */
1063//ust// static inline int ltt_relay_try_switch(
1064//ust// enum force_switch_mode mode,
1065//ust// struct ust_channel *channel,
1066//ust// struct ust_buffer *buf,
1067//ust// struct ltt_reserve_switch_offsets *offsets,
1068//ust// u64 *tsc)
1069//ust// {
1070//ust// long subbuf_index;
1071//ust//
1072//ust// offsets->begin = local_read(&buf->offset);
1073//ust// offsets->old = offsets->begin;
1074//ust// offsets->begin_switch = 0;
1075//ust// offsets->end_switch_old = 0;
1076//ust//
1077//ust// *tsc = trace_clock_read64();
1078//ust//
1079//ust// if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
1080//ust// offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
1081//ust// offsets->end_switch_old = 1;
1082//ust// } else {
1083//ust// /* we do not have to switch : buffer is empty */
1084//ust// return -1;
1085//ust// }
1086//ust// if (mode == FORCE_ACTIVE)
1087//ust// offsets->begin += ltt_subbuffer_header_size();
1088//ust// /*
1089//ust// * Always begin_switch in FORCE_ACTIVE mode.
1090//ust// * Test new buffer integrity
1091//ust// */
1092//ust// subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
1093//ust// offsets->reserve_commit_diff =
1094//ust// (BUFFER_TRUNC(offsets->begin, buf->chan)
1095//ust// >> channel->n_subbufs_order)
1096//ust// - (local_read(&buf->commit_count[subbuf_index])
1097//ust// & channel->commit_count_mask);
1098//ust// if (offsets->reserve_commit_diff == 0) {
1099//ust// /* Next buffer not corrupted. */
1100//ust// if (mode == FORCE_ACTIVE
1101//ust// && !channel->overwrite
1102//ust// && offsets->begin - atomic_long_read(&buf->consumed)
1103//ust// >= channel->alloc_size) {
1104//ust// /*
1105//ust// * We do not overwrite non consumed buffers and we are
1106//ust// * full : ignore switch while tracing is active.
1107//ust// */
1108//ust// return -1;
1109//ust// }
1110//ust// } else {
1111//ust// /*
1112//ust// * Next subbuffer corrupted. Force pushing reader even in normal
1113//ust// * mode
1114//ust// */
1115//ust// }
1116//ust// offsets->end = offsets->begin;
1117//ust// return 0;
1118//ust// }
1119//ust//
1120//ust// static inline void ltt_reserve_push_reader(
1121//ust// struct ust_channel *channel,
1122//ust// struct ust_buffer *buf,
1123//ust// struct ltt_reserve_switch_offsets *offsets)
1124//ust// {
1125//ust// long consumed_old, consumed_new;
1126//ust//
1127//ust// do {
1128//ust// consumed_old = atomic_long_read(&buf->consumed);
1129//ust// /*
1130//ust// * If buffer is in overwrite mode, push the reader consumed
1131//ust// * count if the write position has reached it and we are not
1132//ust// * at the first iteration (don't push the reader farther than
1133//ust// * the writer). This operation can be done concurrently by many
1134//ust// * writers in the same buffer, the writer being at the farthest
1135//ust// * write position sub-buffer index in the buffer being the one
1136//ust// * which will win this loop.
1137//ust// * If the buffer is not in overwrite mode, pushing the reader
1138//ust// * only happens if a sub-buffer is corrupted.
1139//ust// */
1140//ust// if ((SUBBUF_TRUNC(offsets->end-1, buf->chan)
1141//ust// - SUBBUF_TRUNC(consumed_old, buf->chan))
1142//ust// >= channel->alloc_size)
1143//ust// consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
1144//ust// else {
1145//ust// consumed_new = consumed_old;
1146//ust// break;
1147//ust// }
1148//ust// } while (atomic_long_cmpxchg(&buf->consumed, consumed_old,
1149//ust// consumed_new) != consumed_old);
1150//ust//
1151//ust// if (consumed_old != consumed_new) {
1152//ust// /*
1153//ust// * Reader pushed : we are the winner of the push, we can
1154//ust// * therefore reequilibrate reserve and commit. Atomic increment
1155//ust// * of the commit count permits other writers to play around
1156//ust// * with this variable before us. We keep track of
1157//ust// * corrupted_subbuffers even in overwrite mode :
1158//ust// * we never want to write over a non completely committed
1159//ust// * sub-buffer : possible causes : the buffer size is too low
1160//ust// * compared to the unordered data input, or there is a writer
1161//ust// * that died between the reserve and the commit.
1162//ust// */
1163//ust// if (offsets->reserve_commit_diff) {
1164//ust// /*
1165//ust// * We have to alter the sub-buffer commit count.
1166//ust// * We do not deliver the previous subbuffer, given it
1167//ust// * was either corrupted or not consumed (overwrite
1168//ust// * mode).
1169//ust// */
1170//ust// local_add(offsets->reserve_commit_diff,
1171//ust// &buf->commit_count[
1172//ust// SUBBUF_INDEX(offsets->begin,
1173//ust// buf->chan)]);
1174//ust// if (!channel->overwrite
1175//ust// || offsets->reserve_commit_diff
1176//ust// != channel->subbuf_size) {
1177//ust// /*
1178//ust// * The reserve commit diff was not subbuf_size :
1179//ust// * it means the subbuffer was partly written to
1180//ust// * and is therefore corrupted. If it is multiple
1181//ust// * of subbuffer size and we are in flight
1182//ust// * recorder mode, we are skipping over a whole
1183//ust// * subbuffer.
1184//ust// */
1185//ust// local_inc(&buf->corrupted_subbuffers);
1186//ust// }
1187//ust// }
1188//ust// }
1189//ust// }
1190//ust//
1191//ust// /**
1192//ust// * ltt_relay_reserve_slot - Atomic slot reservation in a LTTng buffer.
1193//ust// * @trace: the trace structure to log to.
1194//ust// * @ltt_channel: channel structure
1195//ust// * @transport_data: data structure specific to ltt relay
1196//ust// * @data_size: size of the variable length data to log.
1197//ust// * @slot_size: pointer to total size of the slot (out)
1198//ust// * @buf_offset : pointer to reserved buffer offset (out)
1199//ust// * @tsc: pointer to the tsc at the slot reservation (out)
1200//ust// * @cpu: cpuid
1201//ust// *
1202//ust// * Return : -ENOSPC if not enough space, else returns 0.
1203//ust// * It will take care of sub-buffer switching.
1204//ust// */
1205//ust// static notrace int ltt_relay_reserve_slot(struct ust_trace *trace,
1206//ust// struct ust_channel *channel, void **transport_data,
1207//ust// size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
1208//ust// unsigned int *rflags, int largest_align, int cpu)
1209//ust// {
1210//ust// struct ust_buffer *buf = *transport_data = channel->buf[cpu];
1211//ust// struct ltt_reserve_switch_offsets offsets;
1212//ust//
1213//ust// offsets.reserve_commit_diff = 0;
1214//ust// offsets.size = 0;
1215//ust//
1216//ust// /*
1217//ust// * Perform retryable operations.
1218//ust// */
1219//ust// if (ltt_nesting > 4) {
1220//ust// local_inc(&buf->events_lost);
1221//ust// return -EPERM;
1222//ust// }
1223//ust// do {
1224//ust// if (ltt_relay_try_reserve(channel, buf, &offsets, data_size, tsc, rflags,
1225//ust// largest_align))
1226//ust// return -ENOSPC;
1227//ust// } while (local_cmpxchg(&buf->offset, offsets.old,
1228//ust// offsets.end) != offsets.old);
1229//ust//
1230//ust// /*
1231//ust// * Atomically update last_tsc. This update races against concurrent
1232//ust// * atomic updates, but the race will always cause supplementary full TSC
1233//ust// * events, never the opposite (missing a full TSC event when it would be
1234//ust// * needed).
1235//ust// */
1236//ust// save_last_tsc(buf, *tsc);
1237//ust//
1238//ust// /*
1239//ust// * Push the reader if necessary
1240//ust// */
1241//ust// ltt_reserve_push_reader(channel, buf, &offsets);
1242//ust//
1243//ust// /*
1244//ust// * Switch old subbuffer if needed.
1245//ust// */
1246//ust// if (offsets.end_switch_old)
1247//ust// ltt_reserve_switch_old_subbuf(channel, buf, &offsets, tsc);
1248//ust//
1249//ust// /*
1250//ust// * Populate new subbuffer.
1251//ust// */
1252//ust// if (offsets.begin_switch)
1253//ust// ltt_reserve_switch_new_subbuf(channel, buf, &offsets, tsc);
1254//ust//
1255//ust// if (offsets.end_switch_current)
1256//ust// ltt_reserve_end_switch_current(channel, buf, &offsets, tsc);
1257//ust//
1258//ust// *slot_size = offsets.size;
1259//ust// *buf_offset = offsets.begin + offsets.before_hdr_pad;
1260//ust// return 0;
1261//ust// }
1262//ust//
1263//ust// /*
1264//ust// * Force a sub-buffer switch for a per-cpu buffer. This operation is
1265//ust// * completely reentrant : can be called while tracing is active with
1266//ust// * absolutely no lock held.
1267//ust// *
1268//ust// * Note, however, that as a local_cmpxchg is used for some atomic
1269//ust// * operations, this function must be called from the CPU which owns the buffer
1270//ust// * for a ACTIVE flush.
1271//ust// */
1272//ust// static notrace void ltt_force_switch(struct ust_buffer *buf,
1273//ust// enum force_switch_mode mode)
1274//ust// {
1275//ust// struct ust_channel *channel = buf->chan;
1276//ust// struct ltt_reserve_switch_offsets offsets;
1277//ust// u64 tsc;
1278//ust//
1279//ust// offsets.reserve_commit_diff = 0;
1280//ust// offsets.size = 0;
1281//ust//
1282//ust// /*
1283//ust// * Perform retryable operations.
1284//ust// */
1285//ust// do {
1286//ust// if (ltt_relay_try_switch(mode, channel, buf, &offsets, &tsc))
1287//ust// return;
1288//ust// } while (local_cmpxchg(&buf->offset, offsets.old,
1289//ust// offsets.end) != offsets.old);
1290//ust//
1291//ust// /*
1292//ust// * Atomically update last_tsc. This update races against concurrent
1293//ust// * atomic updates, but the race will always cause supplementary full TSC
1294//ust// * events, never the opposite (missing a full TSC event when it would be
1295//ust// * needed).
1296//ust// */
1297//ust// save_last_tsc(buf, tsc);
1298//ust//
1299//ust// /*
1300//ust// * Push the reader if necessary
1301//ust// */
1302//ust// if (mode == FORCE_ACTIVE)
1303//ust// ltt_reserve_push_reader(channel, buf, &offsets);
1304//ust//
1305//ust// /*
1306//ust// * Switch old subbuffer if needed.
1307//ust// */
1308//ust// if (offsets.end_switch_old)
1309//ust// ltt_reserve_switch_old_subbuf(channel, buf, &offsets, &tsc);
1310//ust//
1311//ust// /*
1312//ust// * Populate new subbuffer.
1313//ust// */
1314//ust// if (mode == FORCE_ACTIVE)
1315//ust// ltt_reserve_switch_new_subbuf(channel, buf, &offsets, &tsc);
1316//ust// }
1317
1318/*
1319 * ltt_reserve_switch_old_subbuf: switch old subbuffer
1320 *
1321 * Concurrency safe because we are the last and only thread to alter this
1322 * sub-buffer. As long as it is not delivered and read, no other thread can
1323 * alter the offset, alter the reserve_count or call the
1324 * client_buffer_end_callback on this sub-buffer.
1325 *
1326 * The only remaining threads could be the ones with pending commits. They will
1327 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
1328 * We detect corrupted subbuffers with commit and reserve counts. We keep a
1329 * corrupted sub-buffers count and push the readers across these sub-buffers.
1330 *
1331 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
1332 * switches in, finding out it's corrupted. The result will be that the old
1333 * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
1334 * will be declared corrupted too because of the commit count adjustment.
1335 *
1336 * Note : offset_old should never be 0 here.
1337 */
1338static void ltt_reserve_switch_old_subbuf(
1339 struct ust_channel *chan, struct ust_buffer *buf,
1340 struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
1341{
1342 long oldidx = SUBBUF_INDEX(offsets->old - 1, chan);
1343 long commit_count, padding_size;
1344
1345 padding_size = chan->subbuf_size
1346 - (SUBBUF_OFFSET(offsets->old - 1, chan) + 1);
1347 ltt_buffer_end(buf, *tsc, offsets->old, oldidx);
1348
1349 /*
1350 * Must write slot data before incrementing commit count.
1351 * This compiler barrier is upgraded into a smp_wmb() by the IPI
1352 * sent by get_subbuf() when it does its smp_rmb().
1353 */
1354 barrier();
1355 local_add(padding_size,
1356 &buf->commit_count[oldidx].cc);
1357 commit_count = local_read(&buf->commit_count[oldidx].cc);
1358 ltt_check_deliver(chan, buf, offsets->old - 1, commit_count, oldidx);
1359 ltt_write_commit_counter(buf, oldidx,
1360 offsets->old, commit_count, padding_size);
1361}
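/*
 * Worked example for the padding computed just above (values are invented):
 * with a 4096-byte sub-buffer and a switch happening when offsets->old sits
 * at byte 4000 of that sub-buffer, SUBBUF_OFFSET(offsets->old - 1) is 3999,
 * so padding_size = 4096 - (3999 + 1) = 96: the last 96 bytes are recorded
 * as padding and added to the commit count so the sub-buffer can be
 * delivered as full.
 */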
1362
1363/*
1364 * ltt_reserve_switch_new_subbuf: Populate new subbuffer.
1365 *
1366 * This code can be executed unordered : writers may already have written to the
1367 * sub-buffer before this code gets executed, caution. The commit makes sure
1368 * that this code is executed before the deliver of this sub-buffer.
1369 */
1370static void ltt_reserve_switch_new_subbuf(
1371 struct ust_channel *chan, struct ust_buffer *buf,
1372 struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
1373{
1374 long beginidx = SUBBUF_INDEX(offsets->begin, chan);
1375 long commit_count;
1376
1377 ltt_buffer_begin(buf, *tsc, beginidx);
1378
1379 /*
1380 * Must write slot data before incrementing commit count.
1381 * This compiler barrier is upgraded into a smp_wmb() by the IPI
1382 * sent by get_subbuf() when it does its smp_rmb().
1383 */
1384 barrier();
1385 local_add(ltt_subbuffer_header_size(),
1386 &buf->commit_count[beginidx].cc);
1387 commit_count = local_read(&buf->commit_count[beginidx].cc);
1388 /* Check if the written buffer has to be delivered */
1389 ltt_check_deliver(chan, buf, offsets->begin, commit_count, beginidx);
1390 ltt_write_commit_counter(buf, beginidx,
1391 offsets->begin, commit_count, ltt_subbuffer_header_size());
1392}
1393
1394/*
1395 * ltt_reserve_end_switch_current: finish switching current subbuffer
1396 *
1397 * Concurrency safe because we are the last and only thread to alter this
1398 * sub-buffer. As long as it is not delivered and read, no other thread can
1399 * alter the offset, alter the reserve_count or call the
1400 * client_buffer_end_callback on this sub-buffer.
1401 *
1402 * The only remaining threads could be the ones with pending commits. They will
1403 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
1404 * We detect corrupted subbuffers with commit and reserve counts. We keep a
1405 * corrupted sub-buffers count and push the readers across these sub-buffers.
1406 *
1407 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
1408 * switches in, finding out it's corrupted. The result will be that the old
1409 * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
1410 * will be declared corrupted too because of the commit count adjustment.
1411 */
1412static void ltt_reserve_end_switch_current(
1413 struct ust_channel *chan,
1414 struct ust_buffer *buf,
1415 struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
1416{
1417 long endidx = SUBBUF_INDEX(offsets->end - 1, chan);
1418 long commit_count, padding_size;
1419
1420 padding_size = chan->subbuf_size
1421 - (SUBBUF_OFFSET(offsets->end - 1, chan) + 1);
1422
1423 ltt_buffer_end(buf, *tsc, offsets->end, endidx);
1424
1425 /*
1426 * Must write slot data before incrementing commit count.
1427 * This compiler barrier is upgraded into a smp_wmb() by the IPI
1428 * sent by get_subbuf() when it does its smp_rmb().
1429 */
1430 barrier();
1431 local_add(padding_size,
1432 &buf->commit_count[endidx].cc);
1433 commit_count = local_read(&buf->commit_count[endidx].cc);
1434 ltt_check_deliver(chan, buf,
1435 offsets->end - 1, commit_count, endidx);
1436 ltt_write_commit_counter(buf, endidx,
1437 offsets->end, commit_count, padding_size);
1438}
1439
1440/*
1441 * Returns :
1442 * 0 if ok
1443 * !0 if execution must be aborted.
1444 */
1445static int ltt_relay_try_switch_slow(
1446 enum force_switch_mode mode,
1447 struct ust_channel *chan,
1448 struct ust_buffer *buf,
1449 struct ltt_reserve_switch_offsets *offsets,
1450 u64 *tsc)
1451{
1452 long subbuf_index;
1453 long reserve_commit_diff;
1454
1455 offsets->begin = local_read(&buf->offset);
1456 offsets->old = offsets->begin;
1457 offsets->begin_switch = 0;
1458 offsets->end_switch_old = 0;
1459
1460 *tsc = trace_clock_read64();
1461
1462 if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
1463 offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
1464 offsets->end_switch_old = 1;
1465 } else {
1466 /* we do not have to switch : buffer is empty */
1467 return -1;
1468 }
1469 if (mode == FORCE_ACTIVE)
1470 offsets->begin += ltt_subbuffer_header_size();
1471 /*
1472 * Always begin_switch in FORCE_ACTIVE mode.
1473 * Test new buffer integrity
1474 */
1475 subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
1476 reserve_commit_diff =
1477 (BUFFER_TRUNC(offsets->begin, buf->chan)
1478 >> chan->n_subbufs_order)
1479 - (local_read(&buf->commit_count[subbuf_index].cc_sb)
1480 & chan->commit_count_mask);
1481 if (reserve_commit_diff == 0) {
1482 /* Next buffer not corrupted. */
1483 if (mode == FORCE_ACTIVE
1484 && !chan->overwrite
1485 && offsets->begin - atomic_long_read(&buf->consumed)
1486 >= chan->alloc_size) {
1487 /*
1488 * We do not overwrite non consumed buffers and we are
1489 * full : ignore switch while tracing is active.
1490 */
1491 return -1;
1492 }
1493 } else {
1494 /*
1495 * Next subbuffer corrupted. Force pushing reader even in normal
1496 * mode
1497 */
1498 }
1499 offsets->end = offsets->begin;
1500 return 0;
1501}
1502
1503/*
1504 * Force a sub-buffer switch for a per-cpu buffer. This operation is
1505 * completely reentrant : can be called while tracing is active with
1506 * absolutely no lock held.
1507 *
1508 * Note, however, that as a local_cmpxchg is used for some atomic
1509 * operations, this function must be called from the CPU which owns the buffer
1510 * for an ACTIVE flush.
1511 */
1512void ltt_force_switch_lockless_slow(struct ust_buffer *buf,
1513 enum force_switch_mode mode)
1514{
1515 struct ust_channel *chan = buf->chan;
1516 struct ltt_reserve_switch_offsets offsets;
1517 u64 tsc;
1518
1519 offsets.size = 0;
1520
1521 /*
1522 * Perform retryable operations.
1523 */
1524 do {
1525 if (ltt_relay_try_switch_slow(mode, chan, buf,
1526 &offsets, &tsc))
1527 return;
1528 } while (local_cmpxchg(&buf->offset, offsets.old,
1529 offsets.end) != offsets.old);
1530
1531 /*
1532 * Atomically update last_tsc. This update races against concurrent
1533 * atomic updates, but the race will always cause supplementary full TSC
1534 * events, never the opposite (missing a full TSC event when it would be
1535 * needed).
1536 */
1537 save_last_tsc(buf, tsc);
1538
1539 /*
1540 * Push the reader if necessary
1541 */
1542 if (mode == FORCE_ACTIVE) {
1543 ltt_reserve_push_reader(chan, buf, offsets.end - 1);
1544//ust// ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.end - 1, chan));
1545 }
1546
1547 /*
1548 * Switch old subbuffer if needed.
1549 */
1550 if (offsets.end_switch_old) {
1551//ust// ltt_clear_noref_flag(rchan, buf, SUBBUF_INDEX(offsets.old - 1, rchan));
1552 ltt_reserve_switch_old_subbuf(chan, buf, &offsets, &tsc);
1553 }
1554
1555 /*
1556 * Populate new subbuffer.
1557 */
1558 if (mode == FORCE_ACTIVE)
1559 ltt_reserve_switch_new_subbuf(chan, buf, &offsets, &tsc);
1560}
1561
1562/*
1563 * Returns :
1564 * 0 if ok
1565 * !0 if execution must be aborted.
1566 */
1567static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffer *buf,
1568 struct ltt_reserve_switch_offsets *offsets, size_t data_size,
1569 u64 *tsc, unsigned int *rflags, int largest_align)
1570{
1571 long reserve_commit_diff;
1572
1573 offsets->begin = local_read(&buf->offset);
1574 offsets->old = offsets->begin;
1575 offsets->begin_switch = 0;
1576 offsets->end_switch_current = 0;
1577 offsets->end_switch_old = 0;
1578
1579 *tsc = trace_clock_read64();
1580 if (last_tsc_overflow(buf, *tsc))
1581 *rflags = LTT_RFLAG_ID_SIZE_TSC;
1582
1583 if (unlikely(SUBBUF_OFFSET(offsets->begin, buf->chan) == 0)) {
1584 offsets->begin_switch = 1; /* For offsets->begin */
1585 } else {
1586 offsets->size = ust_get_header_size(chan,
1587 offsets->begin, data_size,
1588 &offsets->before_hdr_pad, *rflags);
1589 offsets->size += ltt_align(offsets->begin + offsets->size,
1590 largest_align)
1591 + data_size;
1592 if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan) +
1593 offsets->size) > buf->chan->subbuf_size)) {
1594 offsets->end_switch_old = 1; /* For offsets->old */
1595 offsets->begin_switch = 1; /* For offsets->begin */
1596 }
1597 }
1598 if (unlikely(offsets->begin_switch)) {
1599 long subbuf_index;
1600
1601 /*
1602 * We are typically not filling the previous buffer completely.
1603 */
1604 if (likely(offsets->end_switch_old))
1605 offsets->begin = SUBBUF_ALIGN(offsets->begin,
1606 buf->chan);
1607 offsets->begin = offsets->begin + ltt_subbuffer_header_size();
1608 /* Test new buffer integrity */
1609 subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
1610 reserve_commit_diff =
1611 (BUFFER_TRUNC(offsets->begin, buf->chan)
1612 >> chan->n_subbufs_order)
1613 - (local_read(&buf->commit_count[subbuf_index].cc_sb)
1614 & chan->commit_count_mask);
		if (likely(reserve_commit_diff == 0)) {
			/* Next buffer not corrupted. */
			if (unlikely(!chan->overwrite &&
				(SUBBUF_TRUNC(offsets->begin, buf->chan)
				 - SUBBUF_TRUNC(atomic_long_read(
							&buf->consumed),
						buf->chan))
				>= chan->alloc_size)) {
				/*
				 * We do not overwrite non-consumed buffers
				 * and we are full: the event is lost.
				 */
				local_inc(&buf->events_lost);
				return -1;
			} else {
				/*
				 * The next buffer is not corrupted and we are
				 * either in overwrite mode or the buffer is
				 * not full. It's safe to write in this new
				 * subbuffer.
				 */
			}
		} else {
			/*
			 * Next subbuffer corrupted. Drop event in normal and
			 * overwrite mode. Caused by either a writer OOPS or
			 * too many nested writes over a reserve/commit pair.
			 */
			local_inc(&buf->events_lost);
			return -1;
		}
		offsets->size = ust_get_header_size(chan,
				offsets->begin, data_size,
				&offsets->before_hdr_pad, *rflags);
		offsets->size += ltt_align(offsets->begin + offsets->size,
				largest_align)
				+ data_size;
		if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan)
				+ offsets->size) > buf->chan->subbuf_size)) {
			/*
			 * Event too big for subbuffers, report error, don't
			 * complete the sub-buffer switch.
			 */
			local_inc(&buf->events_lost);
			return -1;
		} else {
			/*
			 * We just made a successful buffer switch and the event
			 * fits in the new subbuffer. Let's write.
			 */
		}
	} else {
		/*
		 * Event fits in the current buffer and we are not on a switch
		 * boundary. It's safe to write.
		 */
	}
	offsets->end = offsets->begin + offsets->size;

	if (unlikely((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0)) {
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
		 */
		offsets->end_switch_current = 1;	/* For offsets->begin */
	}
	return 0;
}

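/*
 * Editorial sketch (not part of the original file): the slot size computed
 * above is header + alignment padding + payload, where ltt_align() is
 * assumed to return the number of padding bytes needed to bring the offset
 * up to the requested alignment.  Compiled out, for illustration only.
 */
#if 0
static size_t example_slot_size(long begin, size_t header_size,
		size_t data_size, int largest_align)
{
	size_t size = header_size;

	/* Pad so the payload starts on a largest_align boundary. */
	size += ltt_align(begin + size, largest_align);
	size += data_size;
	/* e.g. begin = 10, header = 6, align = 8, data = 4 -> 6 + 0 + 4 = 10 */
	return size;
}
#endif
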
/**
 * ltt_reserve_slot_lockless_slow - Atomic slot reservation in a buffer.
 * @trace: the trace structure to log to.
 * @chan: channel structure
 * @transport_data: data structure specific to ltt relay
 * @data_size: size of the variable length data to log.
 * @slot_size: pointer to total size of the slot (out)
 * @buf_offset: pointer to reserved buffer offset (out)
 * @tsc: pointer to the tsc at the slot reservation (out)
 * @cpu: cpuid
 *
 * Return: -ENOSPC if not enough space, else 0.
 * It will take care of sub-buffer switching.
 */
int ltt_reserve_slot_lockless_slow(struct ust_trace *trace,
		struct ust_channel *chan, void **transport_data,
		size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
		unsigned int *rflags, int largest_align, int cpu)
{
	struct ust_buffer *buf = chan->buf[cpu];
	struct ltt_reserve_switch_offsets offsets;

	offsets.size = 0;

	do {
		if (unlikely(ltt_relay_try_reserve_slow(chan, buf, &offsets,
				data_size, tsc, rflags, largest_align)))
			return -ENOSPC;
	} while (unlikely(local_cmpxchg(&buf->offset, offsets.old,
			offsets.end) != offsets.old));

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * events, never the opposite (missing a full TSC event when it would be
	 * needed).
	 */
	save_last_tsc(buf, *tsc);

	/*
	 * Push the reader if necessary
	 */
	ltt_reserve_push_reader(chan, buf, offsets.end - 1);

	/*
	 * Clear noref flag for this subbuffer.
	 */
//ust//	ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.end - 1, chan));

	/*
	 * Switch old subbuffer if needed.
	 */
	if (unlikely(offsets.end_switch_old)) {
//ust//		ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.old - 1, chan));
		ltt_reserve_switch_old_subbuf(chan, buf, &offsets, tsc);
	}

	/*
	 * Populate new subbuffer.
	 */
	if (unlikely(offsets.begin_switch))
		ltt_reserve_switch_new_subbuf(chan, buf, &offsets, tsc);

	if (unlikely(offsets.end_switch_current))
		ltt_reserve_end_switch_current(chan, buf, &offsets, tsc);

	*slot_size = offsets.size;
	*buf_offset = offsets.begin + offsets.before_hdr_pad;
	return 0;
}

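/*
 * Editorial sketch (not part of the original file): roughly how a probe
 * could chain the slow reservation path with ltt_write_event_header_slow()
 * and ust_buffers_write() below.  The event ID, the initial rflags choice
 * and the final commit step are assumptions, not the actual probe code,
 * and the block is compiled out.
 */
#if 0
static void example_trace_u32(struct ust_trace *trace,
		struct ust_channel *chan, int cpu, u16 event_id, u32 payload)
{
	void *transport_data = NULL;
	size_t slot_size;
	long buf_offset;
	u64 tsc;
	unsigned int rflags = LTT_RFLAG_ID;	/* assumed header format */

	if (ltt_reserve_slot_lockless_slow(trace, chan, &transport_data,
			sizeof(payload), &slot_size, &buf_offset, &tsc,
			&rflags, sizeof(payload), cpu))
		return;	/* -ENOSPC: the event is dropped */

	buf_offset = ltt_write_event_header_slow(trace, chan, chan->buf[cpu],
			buf_offset, event_id, sizeof(payload), tsc, rflags);
	ust_buffers_write(chan->buf[cpu], buf_offset, &payload, sizeof(payload));
	/* A commit of the reserved slot must follow; it is not shown here. */
}
#endif
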
static struct ltt_transport ust_relay_transport = {
	.name = "ustrelay",
	.ops = {
		.create_channel = ust_buffers_create_channel,
		.finish_channel = ltt_relay_finish_channel,
		.remove_channel = ltt_relay_remove_channel,
		.wakeup_channel = ltt_relay_async_wakeup_chan,
	},
};

static char initialized = 0;

void __attribute__((constructor)) init_ustrelay_transport(void)
{
	if(!initialized) {
		ltt_transport_register(&ust_relay_transport);
		initialized = 1;
	}
}

static void __attribute__((destructor)) ust_buffers_exit(void)
{
	ltt_transport_unregister(&ust_relay_transport);
}

size_t ltt_write_event_header_slow(struct ust_trace *trace,
		struct ust_channel *channel,
		struct ust_buffer *buf, long buf_offset,
		u16 eID, u32 event_size,
		u64 tsc, unsigned int rflags)
{
	struct ltt_event_header header;
	u16 small_size;

	switch (rflags) {
	case LTT_RFLAG_ID_SIZE_TSC:
		header.id_time = 29 << LTT_TSC_BITS;
		break;
	case LTT_RFLAG_ID_SIZE:
		header.id_time = 30 << LTT_TSC_BITS;
		break;
	case LTT_RFLAG_ID:
		header.id_time = 31 << LTT_TSC_BITS;
		break;
	}

	header.id_time |= (u32)tsc & LTT_TSC_MASK;
	ust_buffers_write(buf, buf_offset, &header, sizeof(header));
	buf_offset += sizeof(header);

	switch (rflags) {
	case LTT_RFLAG_ID_SIZE_TSC:
		small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE);
		ust_buffers_write(buf, buf_offset,
			&eID, sizeof(u16));
		buf_offset += sizeof(u16);
		ust_buffers_write(buf, buf_offset,
			&small_size, sizeof(u16));
		buf_offset += sizeof(u16);
		if (small_size == LTT_MAX_SMALL_SIZE) {
			ust_buffers_write(buf, buf_offset,
				&event_size, sizeof(u32));
			buf_offset += sizeof(u32);
		}
		buf_offset += ltt_align(buf_offset, sizeof(u64));
		ust_buffers_write(buf, buf_offset,
			&tsc, sizeof(u64));
		buf_offset += sizeof(u64);
		break;
	case LTT_RFLAG_ID_SIZE:
		small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE);
		ust_buffers_write(buf, buf_offset,
			&eID, sizeof(u16));
		buf_offset += sizeof(u16);
		ust_buffers_write(buf, buf_offset,
			&small_size, sizeof(u16));
		buf_offset += sizeof(u16);
		if (small_size == LTT_MAX_SMALL_SIZE) {
			ust_buffers_write(buf, buf_offset,
				&event_size, sizeof(u32));
			buf_offset += sizeof(u32);
		}
		break;
	case LTT_RFLAG_ID:
		ust_buffers_write(buf, buf_offset,
			&eID, sizeof(u16));
		buf_offset += sizeof(u16);
		break;
	}

	return buf_offset;
}
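
/*
 * Editorial sketch (not part of the original file): the number of bytes the
 * function above appends after the fixed struct ltt_event_header for each
 * rflags case, assuming the payload is smaller than LTT_MAX_SMALL_SIZE so
 * the extended 32-bit size field is not emitted.  Compiled out.
 */
#if 0
static size_t example_extra_header_bytes(unsigned int rflags, long after_header)
{
	switch (rflags) {
	case LTT_RFLAG_ID:
		return sizeof(u16);			/* event ID */
	case LTT_RFLAG_ID_SIZE:
		return 2 * sizeof(u16);			/* ID + 16-bit size */
	case LTT_RFLAG_ID_SIZE_TSC:
		/* ID + 16-bit size, pad to 8 bytes, full 64-bit TSC */
		return 2 * sizeof(u16)
			+ ltt_align(after_header + 2 * sizeof(u16), sizeof(u64))
			+ sizeof(u64);
	default:
		return 0;
	}
}
#endif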