/**
 * ltt-type-serializer.c
 *
 * LTTng specialized type serializer.
 *
 * Copyright Mathieu Desnoyers, 2008.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
#include <urcu/rculist.h>
#include <ust/type-serializer.h>
#include <ust/core.h>
#include <ust/clock.h>
#include "tracer.h"

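/*
 * _ltt_specialized_trace - fast path for probes with a pre-serialized payload.
 *
 * Writes @data_size bytes at @serialize_private, aligned on @largest_align,
 * into every active trace that has the marker's channel. The payload layout
 * is entirely the caller's responsibility; no per-field serialization is
 * performed here.
 */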
notrace
void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
		void *serialize_private, unsigned int data_size,
		unsigned int largest_align)
{
	int ret;
	uint16_t eID;
	size_t slot_size;
	unsigned int chan_index;
	struct ust_buffer *buf;
	struct ust_channel *chan;
	struct ust_trace *trace;
	u64 tsc;
	long buf_offset;
	int cpu;
	unsigned int rflags;

	/*
	 * Nothing to do when no trace is active.
	 */
	if (unlikely(ltt_traces.num_active_traces == 0))
		return;

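	/*
	 * The RCU read-side critical section protects the iteration over the
	 * trace list below against concurrent updates.
	 */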
	rcu_read_lock();
	cpu = ust_get_cpu();

	/* Force volatile access. */
	STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) + 1);

	/*
	 * asm volatile and "memory" clobber prevent the compiler from moving
	 * instructions out of the ltt nesting count. This is required to ensure
	 * that probe side-effects which can cause recursion (e.g. unforeseen
	 * traps, divisions by 0, ...) are triggered within the incremented
	 * nesting count section.
	 */
	barrier();
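	/* Event ID and channel index come from the marker metadata. */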
	eID = mdata->event_id;
	chan_index = mdata->channel_id;

	/*
	 * Iterate over each trace. The number of active traces is typically
	 * small, so list iteration with prefetch is usually slower.
	 */
	list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
		if (unlikely(!trace->active))
			continue;
//ust//		if (unlikely(!ltt_run_filter(trace, eID)))
//ust//			continue;
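		/*
		 * Pick the reservation flags: debug builds record the event
		 * ID and size explicitly for every event; otherwise the ID
		 * is written out explicitly only when it is too large to fit
		 * in the compact event header (eID >= LTT_FREE_EVENTS).
		 */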
#ifdef CONFIG_LTT_DEBUG_EVENT_SIZE
		rflags = LTT_RFLAG_ID_SIZE;
#else
		if (unlikely(eID >= LTT_FREE_EVENTS))
			rflags = LTT_RFLAG_ID;
		else
			rflags = 0;
#endif
		/*
		 * Skip channels added after trace creation.
		 */
		if (unlikely(chan_index >= trace->nr_channels))
			continue;
		chan = &trace->channels[chan_index];
		if (!chan->active)
			continue;

		/* Reserve space: header and data. */
		ret = ltt_reserve_slot(chan, trace, data_size, largest_align,
				cpu, &buf, &slot_size, &buf_offset, &tsc,
				&rflags);
		if (unlikely(ret < 0))
			continue; /* buffer full */

		/* Out-of-order write: header and data. */
		buf_offset = ltt_write_event_header(chan, buf,
				buf_offset, eID, data_size,
				tsc, rflags);
		if (data_size) {
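			/*
			 * Align the write offset on the payload's largest
			 * member before copying it after the header.
			 */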
			buf_offset += ltt_align(buf_offset, largest_align);
			ust_buffers_write(buf, buf_offset,
					serialize_private, data_size);
			buf_offset += data_size;
		}
		/* Out-of-order commit. */
		ltt_commit_slot(chan, buf, buf_offset, data_size, slot_size);
	}
	/*
	 * asm volatile and "memory" clobber prevent the compiler from moving
	 * instructions out of the ltt nesting count. This is required to ensure
	 * that probe side-effects which can cause recursion (e.g. unforeseen
	 * traps, divisions by 0, ...) are triggered within the incremented
	 * nesting count section.
	 */
	barrier();
	STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) - 1);
	rcu_read_unlock();
}
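
/*
 * Usage sketch (illustrative only): a specialized probe lays out its payload
 * in a struct and hands it to _ltt_specialized_trace(). The struct and probe
 * below are hypothetical and not part of this file; real probes are declared
 * through the helpers in ust/type-serializer.h.
 *
 *	struct serialize_two_ints {
 *		uint32_t f1;
 *		uint32_t f2;
 *	};
 *
 *	static notrace void probe_two_ints(const struct marker *mdata,
 *			void *probe_data, uint32_t f1, uint32_t f2)
 *	{
 *		struct serialize_two_ints data = { f1, f2 };
 *
 *		_ltt_specialized_trace(mdata, probe_data, &data,
 *				sizeof(data), sizeof(uint32_t));
 *	}
 */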