Code base containing the fixes for the printf-format errors in UST (v4)
[ust.git] / libustconsumer / lowlevel.c
1 /* Copyright (C) 2009 Pierre-Marc Fournier
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18 #include <stdlib.h>
19 #include <assert.h>
20 #include <byteswap.h>
21
22 #include "ust/ustconsumer.h"
23 #include "buffers.h"
24 #include "tracer.h"
25 #include "usterr.h"
26
/* Truncate an offset down to the start of the whole buffer it falls in,
 * i.e. clear the low bits covering one full buffer's worth of bytes.
 * NOTE(review): the mask arithmetic only works if
 * subbuf_size * n_subbufs is a power of two — presumably guaranteed by
 * the buffer allocator; confirm. */
#define USTD_BUFFER_TRUNC(offset, bufinfo) \
	((offset) & (~(((bufinfo)->subbuf_size*(bufinfo)->n_subbufs)-1)))

/* Magic number stored in every LTT subbuffer header; the REV variant is
 * the same value byte-swapped, indicating the trace was produced on a
 * host of opposite endianness (see subbuffer_data_size below). */
#define LTT_MAGIC_NUMBER 0x00D6B7ED
#define LTT_REV_MAGIC_NUMBER 0xEDB7D600
33
34
35 static void ltt_relay_print_subbuffer_errors(
36 struct buffer_info *buf,
37 long cons_off, int cpu)
38 {
39 struct ust_buffer *ust_buf = buf->bufstruct_mem;
40 long cons_idx, commit_count, commit_count_mask, write_offset;
41
42 cons_idx = SUBBUF_INDEX(cons_off, buf);
43 commit_count = uatomic_read(&ust_buf->commit_seq[cons_idx]);
44 commit_count_mask = (~0UL >> get_count_order(buf->n_subbufs));
45
46 /*
47 * No need to order commit_count and write_offset reads because we
48 * execute after trace is stopped when there are no readers left.
49 */
50 write_offset = uatomic_read(&ust_buf->offset);
51 WARN( "LTT : unread channel %s offset is %ld "
52 "and cons_off : %ld (cpu %d)\n",
53 buf->channel, write_offset, cons_off, cpu);
54 /* Check each sub-buffer for non filled commit count */
55 if (((commit_count - buf->subbuf_size) & commit_count_mask)
56 - (BUFFER_TRUNC(cons_off, buf) >> get_count_order(buf->n_subbufs)) != 0) {
57 ERR("LTT : %s : subbuffer %lu has non filled "
58 "commit count [seq] [%lu].\n",
59 buf->channel, cons_idx, commit_count);
60 }
61 ERR("LTT : %s : commit count : %lu, subbuf size %d\n",
62 buf->channel, commit_count,
63 buf->subbuf_size);
64 }
65
66 static void ltt_relay_print_errors(struct buffer_info *buf, int cpu)
67 {
68 struct ust_buffer *ust_buf = buf->bufstruct_mem;
69 long cons_off;
70
71 for (cons_off = uatomic_read(&ust_buf->consumed);
72 (SUBBUF_TRUNC(uatomic_read(&ust_buf->offset), buf)
73 - cons_off) > 0;
74 cons_off = SUBBUF_ALIGN(cons_off, buf))
75 ltt_relay_print_subbuffer_errors(buf, cons_off, cpu);
76 }
77
78 static void ltt_relay_print_buffer_errors(struct buffer_info *buf, int cpu)
79 {
80 struct ust_buffer *ust_buf = buf->bufstruct_mem;
81
82 if (uatomic_read(&ust_buf->events_lost))
83 ERR("channel %s: %ld events lost (cpu %d)",
84 buf->channel,
85 uatomic_read(&ust_buf->events_lost), cpu);
86 if (uatomic_read(&ust_buf->corrupted_subbuffers))
87 ERR("channel %s : %ld corrupted subbuffers (cpu %d)",
88 buf->channel,
89 uatomic_read(&ust_buf->corrupted_subbuffers), cpu);
90
91 ltt_relay_print_errors(buf, cpu);
92 }
93
94 /* Returns the size of a subbuffer size. This is the size that
95 * will need to be written to disk.
96 *
97 * @subbuffer: pointer to the beginning of the subbuffer (the
98 * beginning of its header)
99 */
100
101 size_t subbuffer_data_size(void *subbuf)
102 {
103 struct ltt_subbuffer_header *header = subbuf;
104 int reverse;
105 u32 data_size;
106
107 if(header->magic_number == LTT_MAGIC_NUMBER) {
108 reverse = 0;
109 }
110 else if(header->magic_number == LTT_REV_MAGIC_NUMBER) {
111 reverse = 1;
112 }
113 else {
114 return -1;
115 }
116
117 data_size = header->sb_size;
118 if(reverse)
119 data_size = bswap_32(data_size);
120
121 return data_size;
122 }
123
124
125 void finish_consuming_dead_subbuffer(struct ustconsumer_callbacks *callbacks, struct buffer_info *buf)
126 {
127 struct ust_buffer *ustbuf = buf->bufstruct_mem;
128
129 long write_offset = uatomic_read(&ustbuf->offset);
130 long consumed_offset = uatomic_read(&ustbuf->consumed);
131
132 long i_subbuf;
133
134 DBG("processing dead buffer (%s)", buf->name);
135 DBG("consumed offset is %ld (%s)", consumed_offset, buf->name);
136 DBG("write offset is %ld (%s)", write_offset, buf->name);
137
138 /* First subbuf that we need to consume now. It is not modulo'd.
139 * Consumed_offset is the next byte to consume. */
140 long first_subbuf = consumed_offset / buf->subbuf_size;
141 /* Last subbuf that we need to consume now. It is not modulo'd.
142 * Write_offset is the next place to write so write_offset-1 is the
143 * last place written. */
144 long last_subbuf = (write_offset - 1) / buf->subbuf_size;
145
146 DBG("first_subbuf=%ld", first_subbuf);
147 DBG("last_subbuf=%ld", last_subbuf);
148
149 if(last_subbuf - first_subbuf >= buf->n_subbufs) {
150 DBG("an overflow has occurred, nothing can be recovered");
151 return;
152 }
153
154 /* Iterate on subbuffers to recover. */
155 for(i_subbuf = first_subbuf % buf->n_subbufs; ; i_subbuf++, i_subbuf %= buf->n_subbufs) {
156 /* commit_seq is the offset in the buffer of the end of the last sequential commit.
157 * Bytes beyond this limit cannot be recovered. This is a free-running counter. */
158 long commit_seq = uatomic_read(&ustbuf->commit_seq[i_subbuf]);
159
160 unsigned long valid_length = buf->subbuf_size;
161 long n_subbufs_order = get_count_order(buf->n_subbufs);
162 long commit_seq_mask = (~0UL >> n_subbufs_order);
163
164 struct ltt_subbuffer_header *header = (struct ltt_subbuffer_header *)((char *)buf->mem+i_subbuf*buf->subbuf_size);
165
166 /* Check if subbuf was fully written. This is from Mathieu's algorithm/paper. */
167 /* FIXME: not sure data_size = 0xffffffff when the buffer is not full. It might
168 * take the value of the header size initially */
169 if (((commit_seq - buf->subbuf_size) & commit_seq_mask)
170 - (USTD_BUFFER_TRUNC(consumed_offset, buf) >> n_subbufs_order) == 0
171 && header->data_size != 0xffffffff && header->sb_size != 0xffffffff) {
172 /* If it was, we only check the data_size. This is the amount of valid data at
173 * the beginning of the subbuffer. */
174 valid_length = header->data_size;
175 DBG("writing full subbuffer (%ld) with valid_length = %ld", i_subbuf, valid_length);
176 }
177 else {
178 /* If the subbuffer was not fully written, then we don't check data_size because
179 * it hasn't been written yet. Instead we check commit_seq and use it to choose
180 * a value for data_size. The viewer will need this value when parsing.
181 */
182
183 valid_length = commit_seq & (buf->subbuf_size-1);
184 DBG("writing unfull subbuffer (%ld) with valid_length = %ld", i_subbuf, valid_length);
185 header->data_size = valid_length;
186 header->sb_size = PAGE_ALIGN(valid_length);
187 assert(i_subbuf == (last_subbuf % buf->n_subbufs));
188 }
189
190 /* TODO: check on_read_partial_subbuffer return value */
191 if(callbacks->on_read_partial_subbuffer)
192 callbacks->on_read_partial_subbuffer(callbacks, buf, i_subbuf, valid_length);
193
194 /* Manually increment the consumed offset */
195 /* TODO ybrosseau 2011-03-02: Should only be done if the previous read was successful */
196 uatomic_add(&ustbuf->consumed, buf->subbuf_size);
197
198 if(i_subbuf == last_subbuf % buf->n_subbufs)
199 break;
200 }
201
202 ltt_relay_print_buffer_errors(buf, buf->channel_cpu);
203 }
204
This page took 0.03277 seconds and 4 git commands to generate.