/*
 * LTTng userspace tracer buffering system
 *
 * Copyright (C) 2009 - Pierre-Marc Fournier (pierre-marc dot fournier at polymtl dot ca)
 * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <ust/kernelcompat.h>

#include "tracercore.h"
struct ltt_reserve_switch_offsets {
	long begin, end, old;
	long begin_switch, end_switch_current, end_switch_old;
	size_t before_hdr_pad, size;
};
static DEFINE_MUTEX(ust_buffers_channels_mutex);
static LIST_HEAD(ust_buffers_channels);
static int get_n_cpus(void)
{
	int result;
	static int n_cpus = 0;

	if (!n_cpus) {
		/* On Linux, when some processors are offline
		 * _SC_NPROCESSORS_CONF counts the offline
		 * processors, whereas _SC_NPROCESSORS_ONLN
		 * does not. If we used _SC_NPROCESSORS_ONLN,
		 * getcpu() could return a value greater than
		 * this sysconf, in which case the arrays
		 * indexed by processor would overflow.
		 */
		result = sysconf(_SC_NPROCESSORS_CONF);
		if (result == -1)
			return -1;

		n_cpus = result;
	}

	return n_cpus;
}
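/*
 * Illustrative note (not part of the original file): sizing per-processor
 * arrays with get_n_cpus() is what makes them safe to index with the value
 * returned by getcpu()/sched_getcpu(), as the comment above explains.  The
 * array name below is hypothetical; ust_channel applies the same idea to its
 * n_cpus-sized buf[] array later in this file.
 *
 *	int n = get_n_cpus();
 *	struct ust_buffer **per_cpu_bufs = malloc(n * sizeof(*per_cpu_bufs));
 *	// per_cpu_bufs[sched_getcpu()] can then never overflow, even if some
 *	// of the n configured processors are currently offline.
 */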
/*
 * _ust_buffers_write()
 *
 * @buf: destination buffer
 * @offset: offset in destination
 * @src: source buffer
 * @len: length of source
 * @cpy: already copied
 */
void _ust_buffers_write(struct ust_buffer *buf, size_t offset,
	const void *src, size_t len, ssize_t cpy)
{
	do {
		len -= cpy;
		src += cpy;
		offset += cpy;

		WARN_ON(offset >= buf->buf_size);

		cpy = min_t(size_t, len, buf->buf_size - offset);
		ust_buffers_do_copy(buf->buf_data + offset, src, cpy);
	} while (unlikely(len != cpy));
}
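/*
 * Illustrative sketch (not part of the original file): callers normally use
 * the ust_buffers_write() wrapper, which ends up in the _ust_buffers_write()
 * slow path above when the copy would run past the end of buf->buf_data.
 * A typical call writes a small structure at a previously reserved offset,
 * as ltt_write_event_header_slow() does further down in this file:
 *
 *	struct ltt_event_header header;
 *	ust_buffers_write(buf, buf_offset, &header, sizeof(header));
 *	buf_offset += sizeof(header);
 */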
static int ust_buffers_init_buffer(struct ust_trace *trace,
		struct ust_channel *ltt_chan,
		struct ust_buffer *buf,
		unsigned int n_subbufs);
static int ust_buffers_alloc_buf(struct ust_buffer *buf, size_t *size)
{
	void *ptr;
	int result;

	*size = PAGE_ALIGN(*size);

	result = buf->shmid = shmget(getpid(), *size, IPC_CREAT | IPC_EXCL | 0700);
	if(result == -1 && errno == EINVAL) {
		ERR("shmget() returned EINVAL; maybe /proc/sys/kernel/shmmax should be increased.");
		return -1;
	}
	else if(result == -1) {
		PERROR("shmget");
		return -1;
	}

	/* FIXME: should have matching call to shmdt */
	ptr = shmat(buf->shmid, NULL, 0);
	if(ptr == (void *) -1) {
		PERROR("shmat");
		goto destroy_shmem;
	}

	/* Already mark the shared memory for destruction. This will occur only
	 * when all users have detached.
	 */
	result = shmctl(buf->shmid, IPC_RMID, NULL);
	if(result == -1) {
		PERROR("shmctl");
		return -1;
	}

	buf->buf_data = ptr;
	buf->buf_size = *size;

	return 0;

destroy_shmem:
	result = shmctl(buf->shmid, IPC_RMID, NULL);
	if(result == -1) {
		PERROR("shmctl");
	}

	return -1;
}
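/*
 * Note (added for clarity, not in the original file): the segment is flagged
 * with IPC_RMID right after shmat(), so it only disappears once every
 * attached process has detached.  On Linux another process can still attach
 * it by id in the meantime; a hypothetical consumer-side attach could look
 * like:
 *
 *	void *map = shmat(shmid_received_from_traced_process, NULL, 0);
 *	if (map == (void *) -1)
 *		PERROR("shmat");
 */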
int ust_buffers_create_buf(struct ust_channel *channel, int cpu)
{
	int result;
	struct ust_buffer *buf = channel->buf[cpu];

	buf->cpu = cpu;
	result = ust_buffers_alloc_buf(buf, &channel->alloc_size);
	if(result)
		return -1;

	buf->chan = channel;
	kref_get(&channel->kref);
	return 0;
}
static void ust_buffers_destroy_channel(struct kref *kref)
{
	struct ust_channel *chan = container_of(kref, struct ust_channel, kref);

	free(chan);
}
static void ust_buffers_destroy_buf(struct ust_buffer *buf)
{
	struct ust_channel *chan = buf->chan;
	int result;

	result = munmap(buf->buf_data, buf->buf_size);
	if(result == -1) {
		PERROR("munmap");
	}

//ust//	chan->buf[buf->cpu] = NULL;
	free(buf);
	kref_put(&chan->kref, ust_buffers_destroy_channel);
}
/* called from kref_put */
static void ust_buffers_remove_buf(struct kref *kref)
{
	struct ust_buffer *buf = container_of(kref, struct ust_buffer, kref);
	ust_buffers_destroy_buf(buf);
}
int ust_buffers_open_buf(struct ust_channel *chan, int cpu)
{
	int result;

	result = ust_buffers_create_buf(chan, cpu);
	if (result == -1)
		return -1;

	kref_init(&chan->buf[cpu]->kref);

	result = ust_buffers_init_buffer(chan->trace, chan, chan->buf[cpu], chan->subbuf_cnt);
	if(result == -1)
		return -1;

	return 0;

	/* FIXME: decrementally destroy on error? */
}
/*
 * ust_buffers_close_buf - close a channel buffer
 */
static void ust_buffers_close_buf(struct ust_buffer *buf)
{
	kref_put(&buf->kref, ust_buffers_remove_buf);
}
int ust_buffers_channel_open(struct ust_channel *chan, size_t subbuf_size, size_t subbuf_cnt)
{
	int i;
	int result;

	if(subbuf_size == 0 || subbuf_cnt == 0)
		return -1;

	/* Check that the subbuffer size is larger than a page. */
	WARN_ON_ONCE(subbuf_size < PAGE_SIZE);

	/*
	 * Make sure the number of subbuffers and subbuffer size are power of 2.
	 */
	WARN_ON_ONCE(hweight32(subbuf_size) != 1);
	WARN_ON(hweight32(subbuf_cnt) != 1);

	chan->version = UST_CHANNEL_VERSION;
	chan->subbuf_cnt = subbuf_cnt;
	chan->subbuf_size = subbuf_size;
	chan->subbuf_size_order = get_count_order(subbuf_size);
	chan->alloc_size = subbuf_size * subbuf_cnt;

	kref_init(&chan->kref);

	mutex_lock(&ust_buffers_channels_mutex);
	for(i=0; i<chan->n_cpus; i++) {
		result = ust_buffers_open_buf(chan, i);
		if (result == -1)
			goto error;
	}
	list_add(&chan->list, &ust_buffers_channels);
	mutex_unlock(&ust_buffers_channels_mutex);

	return 0;

	/* Jump directly inside the loop to close the buffers that were already
	 * opened. */
	for(; i>=0; i--) {
		ust_buffers_close_buf(chan->buf[i]);
error:
		continue;
	}

	kref_put(&chan->kref, ust_buffers_destroy_channel);
	mutex_unlock(&ust_buffers_channels_mutex);
	return -1;
}
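/*
 * Illustrative sketch (not part of the original file): both sizes passed to
 * ust_buffers_channel_open() must be powers of two and the sub-buffer size
 * at least a page, as the checks above warn about.  For example, assuming
 * "chan" is an ust_channel whose trace and n_cpus fields are already set up,
 * a caller could ask for 8 sub-buffers of 4096 bytes each:
 *
 *	if (ust_buffers_channel_open(chan, 4096, 8) != 0)
 *		ERR("could not open buffers for this channel");
 */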
void ust_buffers_channel_close(struct ust_channel *chan)
{
	int i;

	mutex_lock(&ust_buffers_channels_mutex);
	for(i=0; i<chan->n_cpus; i++) {
	/* FIXME: if we make it here, then all buffers were necessarily allocated. Moreover, we don't
	 * initialize to NULL so we cannot use this check. Should we? */
	//ust//	if (chan->buf[i])
		ust_buffers_close_buf(chan->buf[i]);
	}

	list_del(&chan->list);
	kref_put(&chan->kref, ust_buffers_destroy_channel);
	mutex_unlock(&ust_buffers_channels_mutex);
}
static void ust_buffers_destroy_buffer(struct ust_channel *ltt_chan, int cpu);

static void ltt_force_switch(struct ust_buffer *buf,
		enum force_switch_mode mode);
static void ltt_buffer_begin(struct ust_buffer *buf,
			u64 tsc, unsigned int subbuf_idx)
{
	struct ust_channel *channel = buf->chan;
	struct ltt_subbuffer_header *header =
		(struct ltt_subbuffer_header *)
			ust_buffers_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);

	header->cycle_count_begin = tsc;
	header->data_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
	header->sb_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
	/* FIXME: add memory barrier? */
	ltt_write_trace_header(channel->trace, header);
}
/*
 * offset is assumed to never be 0 here : never deliver a completely empty
 * subbuffer. The lost size is between 0 and subbuf_size-1.
 */
static notrace void ltt_buffer_end(struct ust_buffer *buf,
		u64 tsc, unsigned int offset, unsigned int subbuf_idx)
{
	struct ltt_subbuffer_header *header =
		(struct ltt_subbuffer_header *)
			ust_buffers_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);
	u32 data_size = SUBBUF_OFFSET(offset - 1, buf->chan) + 1;

	header->data_size = data_size;
	header->sb_size = PAGE_ALIGN(data_size);
	header->cycle_count_end = tsc;
	header->events_lost = uatomic_read(&buf->events_lost);
	header->subbuf_corrupt = uatomic_read(&buf->corrupted_subbuffers);
	if(unlikely(header->events_lost > 0)) {
		DBG("Some events (%d) were lost in %s_%d", header->events_lost, buf->chan->channel_name, buf->cpu);
	}
}
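/*
 * Note (added for clarity, not in the original file): ltt_buffer_begin()
 * stamps data_size and sb_size with 0xFFFFFFFF and ltt_buffer_end()
 * overwrites them with real values, so a post-mortem reader can tell a
 * cleanly closed sub-buffer from one left behind by a crashed process:
 *
 *	if (header->data_size == 0xFFFFFFFF)
 *		; // the writer never reached ltt_buffer_end() for this sub-buffer
 */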
/*
 * This function should not be called from NMI interrupt context
 */
static notrace void ltt_buf_unfull(struct ust_buffer *buf,
		unsigned int subbuf_idx,
		long data)
{
}

/*
 * Promote compiler barrier to a smp_mb().
 * For the specific LTTng case, this IPI call should be removed if the
 * architecture does not reorder writes. This should eventually be provided by
 * a separate architecture-specific infrastructure.
 */
//ust// static void remote_mb(void *info)
//ust// {
//ust//	smp_mb();
//ust// }
int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed)
{
	struct ust_channel *channel = buf->chan;
	long consumed_old, consumed_idx, commit_count, write_offset;

	consumed_old = uatomic_read(&buf->consumed);
	consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
	commit_count = uatomic_read(&buf->commit_count[consumed_idx].cc_sb);
	/*
	 * Make sure we read the commit count before reading the buffer
	 * data and the write offset. Correct consumed offset ordering
	 * wrt commit count is ensured by the use of cmpxchg to update
	 * the consumed offset.
	 * smp_call_function_single can fail if the remote CPU is offline,
	 * this is OK because then there is no wmb to execute there.
	 * If our thread is executing on the same CPU as the one the buffer
	 * belongs to, we don't have to synchronize it at all. If we are
	 * migrated, the scheduler will take care of the memory barriers.
	 * Normally, smp_call_function_single() should ensure program order when
	 * executing the remote function, which implies that it surrounds the
	 * function execution with :
	 * smp_mb()
	 * send IPI
	 * csd_lock_wait
	 *                recv IPI
	 *                smp_mb()
	 *                exec. function
	 *                smp_mb()
	 *                csd unlock
	 * smp_mb()
	 *
	 * However, smp_call_function_single() does not seem to clearly execute
	 * such barriers. It depends on spinlock semantic to provide the barrier
	 * before executing the IPI and, when busy-looping, csd_lock_wait only
	 * executes smp_mb() when it has to wait for the other CPU.
	 *
	 * I don't trust this code. Therefore, let's add the smp_mb() sequence
	 * required ourself, even if duplicated. It has no performance impact
	 * anyway.
	 *
	 * smp_mb() is needed because smp_rmb() and smp_wmb() only order read vs
	 * read and write vs write. They do not ensure core synchronization. We
	 * really have to ensure total order between the 3 barriers running on
	 * the two CPUs.
	 */
//ust// #ifdef LTT_NO_IPI_BARRIER
	/*
	 * Local rmb to match the remote wmb to read the commit count before the
	 * buffer data and the write offset.
	 */
	smp_rmb();
//ust// #else
//ust//	if (raw_smp_processor_id() != buf->cpu) {
//ust//		smp_mb();	/* Total order with IPI handler smp_mb() */
//ust//		smp_call_function_single(buf->cpu, remote_mb, NULL, 1);
//ust//		smp_mb();	/* Total order with IPI handler smp_mb() */
//ust//	}
//ust// #endif

	write_offset = uatomic_read(&buf->offset);
	/*
	 * Check that the subbuffer we are trying to consume has been
	 * already fully committed.
	 */
	if (((commit_count - buf->chan->subbuf_size)
	     & channel->commit_count_mask)
	    - (BUFFER_TRUNC(consumed_old, buf->chan)
	       >> channel->n_subbufs_order)
	    != 0) {
		return -EAGAIN;
	}
	/*
	 * Check that we are not about to read the same subbuffer in
	 * which the writer head is.
	 */
	if ((SUBBUF_TRUNC(write_offset, buf->chan)
	   - SUBBUF_TRUNC(consumed_old, buf->chan))
	   == 0) {
		return -EAGAIN;
	}

	/* FIXME: is this ok to disable the reading feature? */
//ust//	retval = update_read_sb_index(buf, consumed_idx);
//ust//	if (retval)
//ust//		return retval;

	*consumed = consumed_old;

	return 0;
}
int ust_buffers_put_subbuf(struct ust_buffer *buf, unsigned long uconsumed_old)
{
	long consumed_new, consumed_old;

	consumed_old = uatomic_read(&buf->consumed);
	consumed_old = consumed_old & (~0xFFFFFFFFL);
	consumed_old = consumed_old | uconsumed_old;
	consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);

//ust//	spin_lock(&ltt_buf->full_lock);
	if (uatomic_cmpxchg(&buf->consumed, consumed_old,
				consumed_new)
	    != consumed_old) {
		/* We have been pushed by the writer : the last
		 * buffer read _is_ corrupted! It can also
		 * happen if this is a buffer we never got. */
//ust//		spin_unlock(&ltt_buf->full_lock);
		return -EIO;
	} else {
		/* tell the client that buffer is now unfull */
		long index;
		long data;

		index = SUBBUF_INDEX(consumed_old, buf->chan);
		data = BUFFER_OFFSET(consumed_old, buf->chan);
		ltt_buf_unfull(buf, index, data);
//ust//		spin_unlock(&ltt_buf->full_lock);
	}
	return 0;
}
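/*
 * Illustrative sketch (not part of the original file): a consumer would pair
 * the two functions above, retrying later when ust_buffers_get_subbuf()
 * refuses to hand out a sub-buffer and treating a non-zero return from
 * ust_buffers_put_subbuf() as a sub-buffer the writer overwrote meanwhile:
 *
 *	long consumed;
 *
 *	if (ust_buffers_get_subbuf(buf, &consumed) == 0) {
 *		// ... read the sub-buffer that starts at offset "consumed" ...
 *		if (ust_buffers_put_subbuf(buf, consumed) != 0)
 *			DBG("sub-buffer was overwritten while we were reading it");
 *	}
 */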
static void ltt_relay_print_subbuffer_errors(
		struct ust_channel *channel,
		long cons_off, int cpu)
{
	struct ust_buffer *ltt_buf = channel->buf[cpu];
	long cons_idx, commit_count, commit_count_sb, write_offset;

	cons_idx = SUBBUF_INDEX(cons_off, channel);
	commit_count = uatomic_read(&ltt_buf->commit_count[cons_idx].cc);
	commit_count_sb = uatomic_read(&ltt_buf->commit_count[cons_idx].cc_sb);

	/*
	 * No need to order commit_count and write_offset reads because we
	 * execute after trace is stopped when there are no readers left.
	 */
	write_offset = uatomic_read(&ltt_buf->offset);
	WARN("LTT : unread channel %s offset is %ld "
		"and cons_off : %ld (cpu %d)\n",
		channel->channel_name, write_offset, cons_off, cpu);
	/* Check each sub-buffer for non filled commit count */
	if (((commit_count - channel->subbuf_size) & channel->commit_count_mask)
	    - (BUFFER_TRUNC(cons_off, channel) >> channel->n_subbufs_order) != 0) {
		ERR("LTT : %s : subbuffer %lu has non filled "
			"commit count [cc, cc_sb] [%lu,%lu].\n",
			channel->channel_name, cons_idx, commit_count, commit_count_sb);
	}
	ERR("LTT : %s : commit count : %lu, subbuf size %zd\n",
		channel->channel_name, commit_count,
		channel->subbuf_size);
}
static void ltt_relay_print_errors(struct ust_trace *trace,
		struct ust_channel *channel, int cpu)
{
	struct ust_buffer *ltt_buf = channel->buf[cpu];
	long cons_off;

	/*
	 * Can be called in the error path of allocation when
	 * trans_channel_data is not yet set.
	 */
//ust//	for (cons_off = 0; cons_off < rchan->alloc_size;
//ust//	     cons_off = SUBBUF_ALIGN(cons_off, rchan))
//ust//		ust_buffers_print_written(ltt_chan, cons_off, cpu);
	for (cons_off = uatomic_read(&ltt_buf->consumed);
			(SUBBUF_TRUNC(uatomic_read(&ltt_buf->offset),
				      channel)
			 - cons_off) > 0;
			cons_off = SUBBUF_ALIGN(cons_off, channel))
		ltt_relay_print_subbuffer_errors(channel, cons_off, cpu);
}
static void ltt_relay_print_buffer_errors(struct ust_channel *channel, int cpu)
{
	struct ust_trace *trace = channel->trace;
	struct ust_buffer *ltt_buf = channel->buf[cpu];

	if (uatomic_read(&ltt_buf->events_lost))
		ERR("channel %s: %ld events lost (cpu %d)",
			channel->channel_name,
			uatomic_read(&ltt_buf->events_lost), cpu);
	if (uatomic_read(&ltt_buf->corrupted_subbuffers))
		ERR("channel %s : %ld corrupted subbuffers (cpu %d)",
			channel->channel_name,
			uatomic_read(&ltt_buf->corrupted_subbuffers), cpu);

	ltt_relay_print_errors(trace, channel, cpu);
}
static void ltt_relay_release_channel(struct kref *kref)
{
	struct ust_channel *ltt_chan = container_of(kref,
			struct ust_channel, kref);

	free(ltt_chan->buf);
}
//ust// static int ltt_relay_create_buffer(struct ust_trace *trace,
//ust//		struct ltt_channel_struct *ltt_chan, struct rchan_buf *buf,
//ust//		unsigned int cpu, unsigned int n_subbufs)
//ust// {
//ust//	struct ltt_channel_buf_struct *ltt_buf =
//ust//		percpu_ptr(ltt_chan->buf, cpu);
//ust//	unsigned int j;
//ust//
//ust//	ltt_buf->commit_count =
//ust//		kzalloc_node(sizeof(ltt_buf->commit_count) * n_subbufs,
//ust//				GFP_KERNEL, cpu_to_node(cpu));
//ust//	if (!ltt_buf->commit_count)
//ust//		return -ENOMEM;
//ust//	kref_get(&trace->kref);
//ust//	kref_get(&trace->ltt_transport_kref);
//ust//	kref_get(&ltt_chan->kref);
//ust//	uatomic_set(&ltt_buf->offset, ltt_subbuffer_header_size());
//ust//	uatomic_set(&ltt_buf->consumed, 0);
//ust//	uatomic_set(&ltt_buf->active_readers, 0);
//ust//	for (j = 0; j < n_subbufs; j++)
//ust//		uatomic_set(&ltt_buf->commit_count[j], 0);
//ust//	init_waitqueue_head(&ltt_buf->write_wait);
//ust//	uatomic_set(&ltt_buf->wakeup_readers, 0);
//ust//	spin_lock_init(&ltt_buf->full_lock);
//ust//
//ust//	ltt_buffer_begin_callback(buf, trace->start_tsc, 0);
//ust//	/* atomic_add made on local variable on data that belongs to
//ust//	 * various CPUs : ok because tracing not started (for this cpu). */
//ust//	uatomic_add(&ltt_buf->commit_count[0], ltt_subbuffer_header_size());
//ust//
//ust//	uatomic_set(&ltt_buf->events_lost, 0);
//ust//	uatomic_set(&ltt_buf->corrupted_subbuffers, 0);
//ust//
//ust//	return 0;
//ust// }
static int ust_buffers_init_buffer(struct ust_trace *trace,
		struct ust_channel *ltt_chan, struct ust_buffer *buf,
		unsigned int n_subbufs)
{
	unsigned int j;
	int fds[2];
	int result;

	buf->commit_count =
		zmalloc(sizeof(*buf->commit_count) * n_subbufs);
	if (!buf->commit_count)
		return -ENOMEM;
	kref_get(&trace->kref);
	kref_get(&trace->ltt_transport_kref);
	kref_get(&ltt_chan->kref);
	uatomic_set(&buf->offset, ltt_subbuffer_header_size());
	uatomic_set(&buf->consumed, 0);
	uatomic_set(&buf->active_readers, 0);
	for (j = 0; j < n_subbufs; j++) {
		uatomic_set(&buf->commit_count[j].cc, 0);
		uatomic_set(&buf->commit_count[j].cc_sb, 0);
	}
//ust//	init_waitqueue_head(&buf->write_wait);
//ust//	uatomic_set(&buf->wakeup_readers, 0);
//ust//	spin_lock_init(&buf->full_lock);

	ltt_buffer_begin(buf, trace->start_tsc, 0);

	uatomic_add(&buf->commit_count[0].cc, ltt_subbuffer_header_size());

	uatomic_set(&buf->events_lost, 0);
	uatomic_set(&buf->corrupted_subbuffers, 0);

	result = pipe(fds);
	if (result == -1) {
		PERROR("pipe");
		return -1;
	}
	buf->data_ready_fd_read = fds[0];
	buf->data_ready_fd_write = fds[1];

	/* FIXME: do we actually need this? */
	result = fcntl(fds[0], F_SETFL, O_NONBLOCK);
	if (result == -1) {
		PERROR("fcntl");
	}

//ust//	buf->commit_seq = malloc(sizeof(buf->commit_seq) * n_subbufs);
//ust//	if(!ltt_buf->commit_seq) {
//ust//		return -1;
//ust//	}
	memset(buf->commit_seq, 0, sizeof(buf->commit_seq[0]) * n_subbufs);

	/* FIXME: decrementally destroy on error */

	return 0;
}
/* FIXME: use this function */
static void ust_buffers_destroy_buffer(struct ust_channel *ltt_chan, int cpu)
{
	struct ust_trace *trace = ltt_chan->trace;
	struct ust_buffer *ltt_buf = ltt_chan->buf[cpu];

	kref_put(&ltt_chan->trace->ltt_transport_kref,
		ltt_release_transport);
	ltt_relay_print_buffer_errors(ltt_chan, cpu);
//ust//	free(ltt_buf->commit_seq);
	free(ltt_buf->commit_count);
	ltt_buf->commit_count = NULL;
	kref_put(&ltt_chan->kref, ltt_relay_release_channel);
	kref_put(&trace->kref, ltt_release_trace);
//ust//	wake_up_interruptible(&trace->kref_wq);
}
static int ust_buffers_alloc_channel_buf_structs(struct ust_channel *chan)
{
	void *ptr;
	int result;
	size_t size;
	int i;

	size = PAGE_ALIGN(1);

	for(i=0; i<chan->n_cpus; i++) {

		result = chan->buf_struct_shmids[i] = shmget(getpid(), size, IPC_CREAT | IPC_EXCL | 0700);
		if(result == -1) {
			PERROR("shmget");
			goto destroy_previous;
		}

		/* FIXME: should have matching call to shmdt */
		ptr = shmat(chan->buf_struct_shmids[i], NULL, 0);
		if(ptr == (void *) -1) {
			PERROR("shmat");
			goto destroy_shm;
		}

		/* Already mark the shared memory for destruction. This will occur only
		 * when all users have detached.
		 */
		result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL);
		if(result == -1) {
			PERROR("shmctl");
			goto destroy_previous;
		}

		chan->buf[i] = ptr;
	}

	return 0;

	/* Jumping inside this loop occurs from within the other loop above with i as
	 * counter, so it unallocates the structures for the cpu = current_i down to
	 * cpu 0. */
	for(; i>=0; i--) {
destroy_shm:
		result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL);
		if(result == -1)
			PERROR("shmctl");

destroy_previous:
		continue;
	}

	return -1;
}
static int ust_buffers_create_channel(const char *trace_name, struct ust_trace *trace,
		const char *channel_name, struct ust_channel *ltt_chan,
		unsigned int subbuf_size, unsigned int n_subbufs, int overwrite)
{
	int result;

	kref_init(&ltt_chan->kref);

	ltt_chan->trace = trace;
	ltt_chan->overwrite = overwrite;
	ltt_chan->n_subbufs_order = get_count_order(n_subbufs);
	ltt_chan->commit_count_mask = (~0UL >> ltt_chan->n_subbufs_order);
	ltt_chan->n_cpus = get_n_cpus();
//ust//	ltt_chan->buf = percpu_alloc_mask(sizeof(struct ltt_channel_buf_struct), GFP_KERNEL, cpu_possible_map);
	ltt_chan->buf = (void *) malloc(ltt_chan->n_cpus * sizeof(void *));
	if(ltt_chan->buf == NULL) {
		goto error;
	}
	ltt_chan->buf_struct_shmids = (int *) malloc(ltt_chan->n_cpus * sizeof(int));
	if(ltt_chan->buf_struct_shmids == NULL)
		goto free_buf;

	result = ust_buffers_alloc_channel_buf_structs(ltt_chan);
	if(result != 0) {
		goto free_buf_struct_shmids;
	}

	result = ust_buffers_channel_open(ltt_chan, subbuf_size, n_subbufs);
	if (result != 0) {
		ERR("Cannot open channel for trace %s", trace_name);
		goto unalloc_buf_structs;
	}

	return 0;

unalloc_buf_structs:
	/* FIXME: put a call here to unalloc the buf structs! */

free_buf_struct_shmids:
	free(ltt_chan->buf_struct_shmids);

free_buf:
	free(ltt_chan->buf);

error:
	return -1;
}
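/*
 * Worked example (added for clarity, not in the original file): with
 * n_subbufs = 8, get_count_order(8) is 3, so on a 64-bit host
 * commit_count_mask = ~0UL >> 3 keeps the low 61 bits.  The reserve and
 * deliver checks elsewhere in this file compare commit counts through this
 * mask against BUFFER_TRUNC(...) >> n_subbufs_order, so both sides of the
 * comparison wrap at the same point.
 */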
/*
 * LTTng channel flush function.
 *
 * Must be called when no tracing is active in the channel, because of
 * accesses across CPUs.
 */
static notrace void ltt_relay_buffer_flush(struct ust_buffer *buf)
{
	int result;

//ust//	buf->finalized = 1;
	ltt_force_switch(buf, FORCE_FLUSH);

	result = write(buf->data_ready_fd_write, "1", 1);
	if(result == -1) {
		PERROR("write (in ltt_relay_buffer_flush)");
		ERR("this should never happen!");
	}
}
static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel)
{
//ust//	unsigned int i;
//ust//	struct rchan *rchan = ltt_channel->trans_channel_data;
//ust//
//ust//	for_each_possible_cpu(i) {
//ust//		struct ltt_channel_buf_struct *ltt_buf =
//ust//			percpu_ptr(ltt_channel->buf, i);
//ust//
//ust//		if (uatomic_read(&ltt_buf->wakeup_readers) == 1) {
//ust//			uatomic_set(&ltt_buf->wakeup_readers, 0);
//ust//			wake_up_interruptible(&rchan->buf[i]->read_wait);
//ust//		}
//ust//	}
}
static void ltt_relay_finish_buffer(struct ust_channel *channel, unsigned int cpu)
{
	if (channel->buf[cpu]) {
		struct ust_buffer *buf = channel->buf[cpu];
		ltt_relay_buffer_flush(buf);
//ust//		ltt_relay_wake_writers(ltt_buf);
		/* closing the pipe tells the consumer the buffer is finished */

		//result = write(ltt_buf->data_ready_fd_write, "D", 1);
		//if(result == -1) {
		//	PERROR("write (in ltt_relay_finish_buffer)");
		//	ERR("this should never happen!");
		//}
		close(buf->data_ready_fd_write);
	}
}
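/*
 * Note (added for clarity, not in the original file): since the only signal
 * here is the close() above, the consumer end of the pipe simply sees
 * end-of-file.  A hypothetical consumer loop on data_ready_fd_read would
 * treat a zero-byte read() as "this buffer is finished":
 *
 *	char c;
 *	ssize_t n = read(data_ready_fd_read, &c, 1);
 *	if (n == 0)
 *		; // writer closed its end: drain remaining sub-buffers and stop
 */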
static void ltt_relay_finish_channel(struct ust_channel *channel)
{
	unsigned int i;

	for(i=0; i<channel->n_cpus; i++) {
		ltt_relay_finish_buffer(channel, i);
	}
}
static void ltt_relay_remove_channel(struct ust_channel *channel)
{
	ust_buffers_channel_close(channel);
	kref_put(&channel->kref, ltt_relay_release_channel);
}
/*
 * ltt_reserve_switch_old_subbuf: switch old subbuffer
 *
 * Concurrency safe because we are the last and only thread to alter this
 * sub-buffer. As long as it is not delivered and read, no other thread can
 * alter the offset, alter the reserve_count or call the
 * client_buffer_end_callback on this sub-buffer.
 *
 * The only remaining threads could be the ones with pending commits. They will
 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
 * We detect corrupted subbuffers with commit and reserve counts. We keep a
 * corrupted sub-buffers count and push the readers across these sub-buffers.
 *
 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
 * switches in, finding out it's corrupted. The result will be that the old
 * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
 * will be declared corrupted too because of the commit count adjustment.
 *
 * Note : offset_old should never be 0 here.
 */
static void ltt_reserve_switch_old_subbuf(
		struct ust_channel *chan, struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long oldidx = SUBBUF_INDEX(offsets->old - 1, chan);
	long commit_count, padding_size;

	padding_size = chan->subbuf_size
			- (SUBBUF_OFFSET(offsets->old - 1, chan) + 1);
	ltt_buffer_end(buf, *tsc, offsets->old, oldidx);

	/*
	 * Must write slot data before incrementing commit count.
	 * This compiler barrier is upgraded into a smp_wmb() by the IPI
	 * sent by get_subbuf() when it does its smp_rmb().
	 */
	uatomic_add(&buf->commit_count[oldidx].cc, padding_size);
	commit_count = uatomic_read(&buf->commit_count[oldidx].cc);
	ltt_check_deliver(chan, buf, offsets->old - 1, commit_count, oldidx);
	ltt_write_commit_counter(chan, buf, oldidx,
		offsets->old, commit_count, padding_size);
}
/*
 * ltt_reserve_switch_new_subbuf: Populate new subbuffer.
 *
 * This code can be executed unordered : writers may already have written to the
 * sub-buffer before this code gets executed, caution. The commit makes sure
 * that this code is executed before the deliver of this sub-buffer.
 */
static void ltt_reserve_switch_new_subbuf(
		struct ust_channel *chan, struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long beginidx = SUBBUF_INDEX(offsets->begin, chan);
	long commit_count;

	ltt_buffer_begin(buf, *tsc, beginidx);

	/*
	 * Must write slot data before incrementing commit count.
	 * This compiler barrier is upgraded into a smp_wmb() by the IPI
	 * sent by get_subbuf() when it does its smp_rmb().
	 */
	uatomic_add(&buf->commit_count[beginidx].cc, ltt_subbuffer_header_size());
	commit_count = uatomic_read(&buf->commit_count[beginidx].cc);
	/* Check if the written buffer has to be delivered */
	ltt_check_deliver(chan, buf, offsets->begin, commit_count, beginidx);
	ltt_write_commit_counter(chan, buf, beginidx,
		offsets->begin, commit_count, ltt_subbuffer_header_size());
}
/*
 * ltt_reserve_end_switch_current: finish switching current subbuffer
 *
 * Concurrency safe because we are the last and only thread to alter this
 * sub-buffer. As long as it is not delivered and read, no other thread can
 * alter the offset, alter the reserve_count or call the
 * client_buffer_end_callback on this sub-buffer.
 *
 * The only remaining threads could be the ones with pending commits. They will
 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
 * We detect corrupted subbuffers with commit and reserve counts. We keep a
 * corrupted sub-buffers count and push the readers across these sub-buffers.
 *
 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
 * switches in, finding out it's corrupted. The result will be that the old
 * (uncommitted) subbuffer will be declared corrupted, and that the new subbuffer
 * will be declared corrupted too because of the commit count adjustment.
 */
static void ltt_reserve_end_switch_current(
		struct ust_channel *chan,
		struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long endidx = SUBBUF_INDEX(offsets->end - 1, chan);
	long commit_count, padding_size;

	padding_size = chan->subbuf_size
			- (SUBBUF_OFFSET(offsets->end - 1, chan) + 1);

	ltt_buffer_end(buf, *tsc, offsets->end, endidx);

	/*
	 * Must write slot data before incrementing commit count.
	 * This compiler barrier is upgraded into a smp_wmb() by the IPI
	 * sent by get_subbuf() when it does its smp_rmb().
	 */
	uatomic_add(&buf->commit_count[endidx].cc, padding_size);
	commit_count = uatomic_read(&buf->commit_count[endidx].cc);
	ltt_check_deliver(chan, buf,
		offsets->end - 1, commit_count, endidx);
	ltt_write_commit_counter(chan, buf, endidx,
		offsets->end, commit_count, padding_size);
}
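/*
 * Worked example (added for clarity, not in the original file): with a
 * 4096-byte sub-buffer in which 1000 bytes were used when the switch
 * happens, SUBBUF_OFFSET(offsets->end - 1, chan) + 1 is 1000, so
 * padding_size = 4096 - 1000 = 3096 bytes are accounted to the commit count
 * as padding; ltt_reserve_switch_old_subbuf() above does the same
 * computation with offsets->old.
 */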
/*
 * Returns :
 * 0 if ok
 * !0 if execution must be aborted.
 */
static int ltt_relay_try_switch_slow(
		enum force_switch_mode mode,
		struct ust_channel *chan,
		struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets,
		u64 *tsc)
{
	long subbuf_index;
	long reserve_commit_diff;

	offsets->begin = uatomic_read(&buf->offset);
	offsets->old = offsets->begin;
	offsets->begin_switch = 0;
	offsets->end_switch_old = 0;

	*tsc = trace_clock_read64();

	if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
		offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
		offsets->end_switch_old = 1;
	} else {
		/* we do not have to switch : buffer is empty */
		return -1;
	}
	if (mode == FORCE_ACTIVE)
		offsets->begin += ltt_subbuffer_header_size();
	/*
	 * Always begin_switch in FORCE_ACTIVE mode.
	 * Test new buffer integrity
	 */
	subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
	reserve_commit_diff =
		(BUFFER_TRUNC(offsets->begin, buf->chan)
		 >> chan->n_subbufs_order)
		- (uatomic_read(&buf->commit_count[subbuf_index].cc_sb)
			& chan->commit_count_mask);
	if (reserve_commit_diff == 0) {
		/* Next buffer not corrupted. */
		if (mode == FORCE_ACTIVE
		    && !chan->overwrite
		    && offsets->begin - uatomic_read(&buf->consumed)
		       >= chan->alloc_size) {
			/*
			 * We do not overwrite non consumed buffers and we are
			 * full : ignore switch while tracing is active.
			 */
			return -1;
		}
	} else {
		/*
		 * Next subbuffer corrupted. Force pushing reader even in normal
		 * mode.
		 */
	}
	offsets->end = offsets->begin;
	return 0;
}
/*
 * Force a sub-buffer switch for a per-cpu buffer. This operation is
 * completely reentrant : can be called while tracing is active with
 * absolutely no lock held.
 */
void ltt_force_switch_lockless_slow(struct ust_buffer *buf,
		enum force_switch_mode mode)
{
	struct ust_channel *chan = buf->chan;
	struct ltt_reserve_switch_offsets offsets;
	u64 tsc;

	DBG("Switching (forced) %s_%d", chan->channel_name, buf->cpu);
	/*
	 * Perform retryable operations.
	 */
	do {
		if (ltt_relay_try_switch_slow(mode, chan, buf,
				&offsets, &tsc))
			return;
	} while (uatomic_cmpxchg(&buf->offset, offsets.old,
				offsets.end) != offsets.old);

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * events, never the opposite (missing a full TSC event when it would be
	 * needed).
	 */
	save_last_tsc(buf, tsc);

	/*
	 * Push the reader if necessary
	 */
	if (mode == FORCE_ACTIVE) {
		ltt_reserve_push_reader(chan, buf, offsets.end - 1);
//ust//		ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.end - 1, chan));
	}

	/*
	 * Switch old subbuffer if needed.
	 */
	if (offsets.end_switch_old) {
//ust//		ltt_clear_noref_flag(rchan, buf, SUBBUF_INDEX(offsets.old - 1, rchan));
		ltt_reserve_switch_old_subbuf(chan, buf, &offsets, &tsc);
	}

	/*
	 * Populate new subbuffer.
	 */
	if (mode == FORCE_ACTIVE)
		ltt_reserve_switch_new_subbuf(chan, buf, &offsets, &tsc);
}
/*
 * Returns :
 * 0 if ok
 * !0 if execution must be aborted.
 */
static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, size_t data_size,
		u64 *tsc, unsigned int *rflags, int largest_align)
{
	long subbuf_index;
	long reserve_commit_diff;

	offsets->begin = uatomic_read(&buf->offset);
	offsets->old = offsets->begin;
	offsets->begin_switch = 0;
	offsets->end_switch_current = 0;
	offsets->end_switch_old = 0;

	*tsc = trace_clock_read64();
	if (last_tsc_overflow(buf, *tsc))
		*rflags = LTT_RFLAG_ID_SIZE_TSC;

	if (unlikely(SUBBUF_OFFSET(offsets->begin, buf->chan) == 0)) {
		offsets->begin_switch = 1;		/* For offsets->begin */
	} else {
		offsets->size = ust_get_header_size(chan,
					offsets->begin, data_size,
					&offsets->before_hdr_pad, *rflags);
		offsets->size += ltt_align(offsets->begin + offsets->size,
					   largest_align)
				 + data_size;
		if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan) +
			     offsets->size) > buf->chan->subbuf_size)) {
			offsets->end_switch_old = 1;	/* For offsets->old */
			offsets->begin_switch = 1;	/* For offsets->begin */
		}
	}
	if (unlikely(offsets->begin_switch)) {
		/*
		 * We are typically not filling the previous buffer completely.
		 */
		if (likely(offsets->end_switch_old))
			offsets->begin = SUBBUF_ALIGN(offsets->begin,
						      buf->chan);
		offsets->begin = offsets->begin + ltt_subbuffer_header_size();
		/* Test new buffer integrity */
		subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
		reserve_commit_diff =
			(BUFFER_TRUNC(offsets->begin, buf->chan)
			 >> chan->n_subbufs_order)
			- (uatomic_read(&buf->commit_count[subbuf_index].cc_sb)
				& chan->commit_count_mask);
		if (likely(reserve_commit_diff == 0)) {
			/* Next buffer not corrupted. */
			if (unlikely(!chan->overwrite &&
				(SUBBUF_TRUNC(offsets->begin, buf->chan)
				 - SUBBUF_TRUNC(uatomic_read(&buf->consumed),
						buf->chan))
				>= chan->alloc_size)) {
				/*
				 * We do not overwrite non consumed buffers
				 * and we are full : event is lost.
				 */
				uatomic_inc(&buf->events_lost);
				return -1;
			}
			/*
			 * next buffer not corrupted, we are either in
			 * overwrite mode or the buffer is not full.
			 * It's safe to write in this new subbuffer.
			 */
		} else {
			/*
			 * Next subbuffer corrupted. Drop event in normal and
			 * overwrite mode. Caused by either a writer OOPS or
			 * too many nested writes over a reserve/commit pair.
			 */
			uatomic_inc(&buf->events_lost);
			return -1;
		}
		offsets->size = ust_get_header_size(chan,
					offsets->begin, data_size,
					&offsets->before_hdr_pad, *rflags);
		offsets->size += ltt_align(offsets->begin + offsets->size,
					   largest_align)
				 + data_size;
		if (unlikely((SUBBUF_OFFSET(offsets->begin, buf->chan)
			     + offsets->size) > buf->chan->subbuf_size)) {
			/*
			 * Event too big for subbuffers, report error, don't
			 * complete the sub-buffer switch.
			 */
			uatomic_inc(&buf->events_lost);
			return -1;
		} else {
			/*
			 * We just made a successful buffer switch and the event
			 * fits in the new subbuffer. Let's write.
			 */
		}
	} else {
		/*
		 * Event fits in the current buffer and we are not on a switch
		 * boundary. It's safe to write.
		 */
	}
	offsets->end = offsets->begin + offsets->size;

	if (unlikely((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0)) {
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
		 */
		offsets->end_switch_current = 1;	/* For offsets->begin */
	}
	return 0;
}
/*
 * ltt_relay_reserve_slot_lockless_slow - Atomic slot reservation in a buffer.
 * @trace: the trace structure to log to.
 * @ltt_channel: channel structure
 * @transport_data: data structure specific to ltt relay
 * @data_size: size of the variable length data to log.
 * @slot_size: pointer to total size of the slot (out)
 * @buf_offset : pointer to reserved buffer offset (out)
 * @tsc: pointer to the tsc at the slot reservation (out)
 *
 * Return : -ENOSPC if not enough space, else returns 0.
 * It will take care of sub-buffer switching.
 */
int ltt_reserve_slot_lockless_slow(struct ust_trace *trace,
		struct ust_channel *chan, void **transport_data,
		size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
		unsigned int *rflags, int largest_align, int cpu)
{
	struct ust_buffer *buf = chan->buf[cpu];
	struct ltt_reserve_switch_offsets offsets;

	do {
		if (unlikely(ltt_relay_try_reserve_slow(chan, buf, &offsets,
				data_size, tsc, rflags, largest_align)))
			return -ENOSPC;
	} while (unlikely(uatomic_cmpxchg(&buf->offset, offsets.old,
			offsets.end) != offsets.old));

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * events, never the opposite (missing a full TSC event when it would be
	 * needed).
	 */
	save_last_tsc(buf, *tsc);

	/*
	 * Push the reader if necessary
	 */
	ltt_reserve_push_reader(chan, buf, offsets.end - 1);

	/*
	 * Clear noref flag for this subbuffer.
	 */
//ust//	ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.end - 1, chan));

	/*
	 * Switch old subbuffer if needed.
	 */
	if (unlikely(offsets.end_switch_old)) {
//ust//		ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(offsets.old - 1, chan));
		ltt_reserve_switch_old_subbuf(chan, buf, &offsets, tsc);
		DBG("Switching %s_%d", chan->channel_name, cpu);
	}

	/*
	 * Populate new subbuffer.
	 */
	if (unlikely(offsets.begin_switch))
		ltt_reserve_switch_new_subbuf(chan, buf, &offsets, tsc);

	if (unlikely(offsets.end_switch_current))
		ltt_reserve_end_switch_current(chan, buf, &offsets, tsc);

	*slot_size = offsets.size;
	*buf_offset = offsets.begin + offsets.before_hdr_pad;
	return 0;
}
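/*
 * Illustrative sketch (not part of the original file): a writer falling back
 * to this slow path reserves a slot, fills it, then commits it.  The names
 * data_size, cpu, trace and chan below are assumed to be in scope, and
 * largest_align = sizeof(long) is just an example value; only
 * ltt_reserve_slot_lockless_slow() itself is taken from this file.
 *
 *	size_t slot_size;
 *	long buf_offset;
 *	u64 tsc;
 *	unsigned int rflags = 0;
 *	void *transport_data;
 *
 *	if (ltt_reserve_slot_lockless_slow(trace, chan, &transport_data,
 *			data_size, &slot_size, &buf_offset, &tsc,
 *			&rflags, sizeof(long), cpu) < 0)
 *		return;		// no space: the event is dropped
 *	// write the event header and payload into the reserved slot starting
 *	// at buf_offset (e.g. with ust_buffers_write()), then commit the slot
 *	// so the sub-buffer can eventually be delivered to the reader.
 */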
static struct ltt_transport ust_relay_transport = {
	.create_channel = ust_buffers_create_channel,
	.finish_channel = ltt_relay_finish_channel,
	.remove_channel = ltt_relay_remove_channel,
	.wakeup_channel = ltt_relay_async_wakeup_chan,
};
static char initialized = 0;

void __attribute__((constructor)) init_ustrelay_transport(void)
{
	if(!initialized) {
		ltt_transport_register(&ust_relay_transport);
		initialized = 1;
	}
}

static void __attribute__((destructor)) ust_buffers_exit(void)
{
	ltt_transport_unregister(&ust_relay_transport);
}
size_t ltt_write_event_header_slow(struct ust_trace *trace,
		struct ust_channel *channel,
		struct ust_buffer *buf, long buf_offset,
		u16 eID, u32 event_size,
		u64 tsc, unsigned int rflags)
{
	struct ltt_event_header header;
	u16 small_size;

	switch (rflags) {
	case LTT_RFLAG_ID_SIZE_TSC:
		header.id_time = 29 << LTT_TSC_BITS;
		break;
	case LTT_RFLAG_ID_SIZE:
		header.id_time = 30 << LTT_TSC_BITS;
		break;
	case LTT_RFLAG_ID:
		header.id_time = 31 << LTT_TSC_BITS;
		break;
	}

	header.id_time |= (u32)tsc & LTT_TSC_MASK;
	ust_buffers_write(buf, buf_offset, &header, sizeof(header));
	buf_offset += sizeof(header);

	switch (rflags) {
	case LTT_RFLAG_ID_SIZE_TSC:
		small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE);
		ust_buffers_write(buf, buf_offset,
			&eID, sizeof(u16));
		buf_offset += sizeof(u16);
		ust_buffers_write(buf, buf_offset,
			&small_size, sizeof(u16));
		buf_offset += sizeof(u16);
		if (small_size == LTT_MAX_SMALL_SIZE) {
			ust_buffers_write(buf, buf_offset,
				&event_size, sizeof(u32));
			buf_offset += sizeof(u32);
		}
		buf_offset += ltt_align(buf_offset, sizeof(u64));
		ust_buffers_write(buf, buf_offset,
			&tsc, sizeof(u64));
		buf_offset += sizeof(u64);
		break;
	case LTT_RFLAG_ID_SIZE:
		small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE);
		ust_buffers_write(buf, buf_offset,
			&eID, sizeof(u16));
		buf_offset += sizeof(u16);
		ust_buffers_write(buf, buf_offset,
			&small_size, sizeof(u16));
		buf_offset += sizeof(u16);
		if (small_size == LTT_MAX_SMALL_SIZE) {
			ust_buffers_write(buf, buf_offset,
				&event_size, sizeof(u32));
			buf_offset += sizeof(u32);
		}
		break;
	case LTT_RFLAG_ID:
		ust_buffers_write(buf, buf_offset,
			&eID, sizeof(u16));
		buf_offset += sizeof(u16);