initialize fields of LttTrace to ensure deterministic behavior and that valgrind does...
[lttv.git] / ltt / tracefile.c
1 /* This file is part of the Linux Trace Toolkit viewer
2 * Copyright (C) 2005 Mathieu Desnoyers
3 *
4 * Complete rewrite from the original version made by XangXiu Yang.
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License Version 2.1 as published by the Free Software Foundation.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
18 * Boston, MA 02111-1307, USA.
19 */
20
21 #ifdef HAVE_CONFIG_H
22 #include <config.h>
23 #endif
24
25 #include <stdio.h>
26 #include <fcntl.h>
27 #include <string.h>
28 #include <dirent.h>
29 #include <sys/stat.h>
30 #include <sys/types.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <math.h>
34 #include <glib.h>
35 #include <glib/gprintf.h>
36 #include <malloc.h>
37 #include <sys/mman.h>
38 #include <string.h>
39 #include <ctype.h>
40 #include <inttypes.h>
41
42 // For realpath
43 #include <limits.h>
44 #include <stdlib.h>
45
46
47 #include <ltt/ltt.h>
48 #include "ltt-private.h"
49 #include <ltt/trace.h>
50 #include <ltt/event.h>
51 #include <ltt/ltt-types.h>
52 #include <ltt/marker.h>
53
54 /* from marker.c */
55 extern long marker_update_fields_offsets(struct marker_info *info, const char *data);
56
57 /* Tracefile names used in this file */
58
59 GQuark LTT_TRACEFILE_NAME_METADATA;
60
61 #ifndef g_open
62 #define g_open open
63 #endif
64
65
66 #define __UNUSED__ __attribute__((__unused__))
67
68 #define g_info(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_INFO, format)
69
70 #ifndef g_debug
71 #define g_debug(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, format)
72 #endif
73
74 #define g_close close
75
76 /* Those macros must be called from within a function where page_size is a known
77 * variable */
78 #define PAGE_MASK (~(page_size-1))
79 #define PAGE_ALIGN(addr) (((addr)+page_size-1)&PAGE_MASK)
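/*
 * Illustration (added for clarity, hypothetical page size) : with a
 * page_size of 4096, PAGE_MASK is ~0xFFF, so
 *   PAGE_ALIGN(1)    == 4096
 *   PAGE_ALIGN(4096) == 4096
 *   PAGE_ALIGN(4097) == 8192
 * i.e. PAGE_ALIGN rounds an offset or size up to the next page boundary.
 */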
80
81 LttTrace *father_trace = NULL;
82
 83 /* set the offsets of the fields belonging to the event;
 84 this needs information about the architecture */
85 //void set_fields_offsets(LttTracefile *tf, LttEventType *event_type);
86 //size_t get_fields_offsets(LttTracefile *tf, LttEventType *event_type, void *data);
87
 88 /* map a fixed-size block of information from the file (fd) */
89 static gint map_block(LttTracefile * tf, guint block_num);
90
91 /* calculate nsec per cycles for current block */
92 #if 0
93 static guint32 calc_nsecs_per_cycle(LttTracefile * t);
94 static guint64 cycles_2_ns(LttTracefile *tf, guint64 cycles);
95 #endif //0
96
97 /* go to the next event */
98 static int ltt_seek_next_event(LttTracefile *tf);
99
100 static int open_tracefiles(LttTrace *trace, gchar *root_path,
101 gchar *relative_path);
102 static int ltt_process_metadata_tracefile(LttTracefile *tf);
103 static void ltt_tracefile_time_span_get(LttTracefile *tf,
104 LttTime *start, LttTime *end);
105 static void group_time_span_get(GQuark name, gpointer data, gpointer user_data);
106 static gint map_block(LttTracefile * tf, guint block_num);
107 static void ltt_update_event_size(LttTracefile *tf);
108
109 /* Enable event debugging */
110 static int a_event_debug = 0;
111
112 void ltt_event_debug(int state)
113 {
114 a_event_debug = state;
115 }
116
117 /* trace can be NULL
118 *
119 * Return value : 0 success, 1 bad tracefile
120 */
121 static int parse_trace_header(ltt_subbuffer_header_t *header,
122 LttTracefile *tf, LttTrace *t)
123 {
124 if (header->magic_number == LTT_MAGIC_NUMBER)
125 tf->reverse_bo = 0;
126 else if(header->magic_number == LTT_REV_MAGIC_NUMBER)
127 tf->reverse_bo = 1;
128 else /* invalid magic number, bad tracefile ! */
129 return 1;
130
131 if(t) {
132 t->ltt_major_version = header->major_version;
133 t->ltt_minor_version = header->minor_version;
134 t->arch_size = header->arch_size;
135 }
136 tf->alignment = header->alignment;
137
138 /* Get float byte order : might be different from int byte order
139 * (or is set to 0 if the trace has no float (kernel trace)) */
140 tf->float_word_order = 0;
141
142 switch(header->major_version) {
143 case 0:
144 case 1:
145 g_warning("Unsupported trace version : %hhu.%hhu",
146 header->major_version, header->minor_version);
147 return 1;
148 break;
149 case 2:
150 switch(header->minor_version) {
151 case 3:
152 {
153 struct ltt_subbuffer_header_2_3 *vheader = header;
154 tf->buffer_header_size = ltt_subbuffer_header_size();
155 tf->tscbits = 27;
156 tf->eventbits = 5;
157 tf->tsc_mask = ((1ULL << tf->tscbits) - 1);
158 tf->tsc_mask_next_bit = (1ULL << tf->tscbits);
159
160 if(t) {
161 t->start_freq = ltt_get_uint64(LTT_GET_BO(tf),
162 &vheader->start_freq);
163 t->freq_scale = ltt_get_uint32(LTT_GET_BO(tf),
164 &vheader->freq_scale);
165 if(father_trace) {
166 t->start_freq = father_trace->start_freq;
167 t->freq_scale = father_trace->freq_scale;
168 } else {
169 father_trace = t;
170 }
171 t->start_tsc = ltt_get_uint64(LTT_GET_BO(tf),
172 &vheader->cycle_count_begin);
173 t->start_monotonic = 0;
174 t->start_time.tv_sec = ltt_get_uint64(LTT_GET_BO(tf),
175 &vheader->start_time_sec);
176 t->start_time.tv_nsec = ltt_get_uint64(LTT_GET_BO(tf),
177 &vheader->start_time_usec);
178 t->start_time.tv_nsec *= 1000; /* microsec to nanosec */
179
180 t->start_time_from_tsc = ltt_time_from_uint64(
181 (double)t->start_tsc
182 * 1000000000.0 * tf->trace->freq_scale
183 / (double)t->start_freq);
184 }
185 }
186 break;
187 default:
188 g_warning("Unsupported trace version : %hhu.%hhu",
189 header->major_version, header->minor_version);
190 return 1;
191 }
192 break;
193 default:
194 g_warning("Unsupported trace version : %hhu.%hhu",
195 header->major_version, header->minor_version);
196 return 1;
197 }
198 return 0;
199 }
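/*
 * Sketch (added for clarity) : with the 2.3 header format above, every event
 * starts with a 32-bit word packing
 *   bits 31..27 : event id (eventbits = 5; values 29, 30 and 31 select the
 *                 extended LTT_RFLAG_* headers parsed in
 *                 ltt_tracefile_read_update_event())
 *   bits 26..0  : the low tscbits = 27 bits of the timestamp counter
 * tsc_mask and tsc_mask_next_bit are used later to extend this 27-bit
 * timestamp to the full 64-bit buffer TSC.
 */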
200
201
202
203 /*****************************************************************************
204 *Function name
205 * ltt_tracefile_open : open a trace file, construct a LttTracefile
206 *Input params
207 * t : the trace containing the tracefile
208 * fileName : path name of the trace file
209 * tf : the tracefile structure
210 *Return value
211 * : 0 for success, -1 otherwise.
212 ****************************************************************************/
213
214 static gint ltt_tracefile_open(LttTrace *t, gchar * fileName, LttTracefile *tf)
215 {
216 struct stat lTDFStat; /* Trace data file status */
217 ltt_subbuffer_header_t *header;
218 int page_size = getpagesize();
219
220 //open the file
221 tf->long_name = g_quark_from_string(fileName);
222 tf->trace = t;
223 tf->fd = open(fileName, O_RDONLY);
224 if(tf->fd < 0){
225 g_warning("Unable to open input data file %s\n", fileName);
226 goto end;
227 }
228
229 // Get the file's status
230 if(fstat(tf->fd, &lTDFStat) < 0){
231 g_warning("Unable to get the status of the input data file %s\n", fileName);
232 goto close_file;
233 }
234
235 // Is the file large enough to contain a trace
236 if(lTDFStat.st_size <
237 (off_t)(ltt_subbuffer_header_size())){
238 g_print("The input data file %s does not contain a trace\n", fileName);
239 goto close_file;
240 }
241
242 /* Temporarily map the buffer start header to get trace information */
243 /* Multiple of pages aligned head */
244 tf->buffer.head = mmap(0,
245 PAGE_ALIGN(ltt_subbuffer_header_size()), PROT_READ,
246 MAP_PRIVATE, tf->fd, 0);
247 if(tf->buffer.head == MAP_FAILED) {
248 perror("Error in allocating memory for buffer of tracefile");
249 goto close_file;
250 }
251 g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
252
253 header = (ltt_subbuffer_header_t *)tf->buffer.head;
254
255 if(parse_trace_header(header, tf, NULL)) {
256 g_warning("parse_trace_header error");
257 goto unmap_file;
258 }
259
260 //store the size of the file
261 tf->file_size = lTDFStat.st_size;
262 tf->buf_size = ltt_get_uint32(LTT_GET_BO(tf), &header->buf_size);
263 tf->num_blocks = tf->file_size / tf->buf_size;
264 tf->events_lost = 0;
265 tf->subbuf_corrupt = 0;
266
267 if(munmap(tf->buffer.head,
268 PAGE_ALIGN(ltt_subbuffer_header_size()))) {
269 g_warning("unmap size : %zu\n",
270 PAGE_ALIGN(ltt_subbuffer_header_size()));
271 perror("munmap error");
272 g_assert(0);
273 }
274 tf->buffer.head = NULL;
275
276 //read the first block
277 if(map_block(tf,0)) {
278 perror("Cannot map block for tracefile");
279 goto close_file;
280 }
281
282 return 0;
283
284 /* Error */
285 unmap_file:
286 if(munmap(tf->buffer.head,
287 PAGE_ALIGN(ltt_subbuffer_header_size()))) {
288 g_warning("unmap size : %zu\n",
289 PAGE_ALIGN(ltt_subbuffer_header_size()));
290 perror("munmap error");
291 g_assert(0);
292 }
293 close_file:
294 close(tf->fd);
295 end:
296 return -1;
297 }
298
299
300 /*****************************************************************************
301 *Function name
302 * ltt_tracefile_close: close a trace file,
303 *Input params
304 * t : tracefile which will be closed
305 ****************************************************************************/
306
307 static void ltt_tracefile_close(LttTracefile *t)
308 {
309 int page_size = getpagesize();
310
311 if(t->buffer.head != NULL)
312 if(munmap(t->buffer.head, PAGE_ALIGN(t->buf_size))) {
313 g_warning("unmap size : %u\n",
314 PAGE_ALIGN(t->buf_size));
315 perror("munmap error");
316 g_assert(0);
317 }
318
319 close(t->fd);
320 }
321
322 /****************************************************************************
323 * get_absolute_pathname
324 *
325 * return the unique pathname in the system
326 *
 327 * MD : Fixed this function so it uses realpath, which correctly handles
 328 * previously overlooked cases ('..' components were not resolved before).
329 *
330 ****************************************************************************/
331 void get_absolute_pathname(const gchar *pathname, gchar * abs_pathname)
332 {
333 abs_pathname[0] = '\0';
334
335 if (realpath(pathname, abs_pathname) != NULL)
336 return;
337 else
338 {
339 /* error, return the original path unmodified */
340 strcpy(abs_pathname, pathname);
341 return;
342 }
343 return;
344 }
345
346 /* Search for something like : .*_.*
347 *
348 * The left side is the name, the right side is the number.
349 * Exclude leading /.
350 * Exclude flight- prefix.
351 */
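/*
 * Examples (hypothetical names, added for illustration) :
 *   "kernel_0"        -> name "kernel", cpu number 0
 *   "flight-kernel_1" -> name "kernel", cpu number 1 ("flight-" is stripped)
 *   a name without '_' is treated as a userspace tracefile : the tid, pgid
 *   and creation time are then parsed from the path components.
 */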
352
353 static int get_tracefile_name_number(gchar *raw_name,
354 GQuark *name,
355 guint *num,
356 gulong *tid,
357 gulong *pgid,
358 guint64 *creation)
359 {
360 guint raw_name_len = strlen(raw_name);
361 gchar char_name[PATH_MAX];
362 int i;
363 int underscore_pos;
364 long int cpu_num;
365 gchar *endptr;
366 gchar *tmpptr;
367
368 /* skip leading / */
369 for(i = 0; i < raw_name_len-1;i++) {
370 if(raw_name[i] != '/')
371 break;
372 }
373 raw_name = &raw_name[i];
374 raw_name_len = strlen(raw_name);
375
376 for(i=raw_name_len-1;i>=0;i--) {
377 if(raw_name[i] == '_') break;
378 }
379 if(i==-1) { /* Either not found or name length is 0 */
380 /* This is a userspace tracefile */
381 strncpy(char_name, raw_name, raw_name_len);
382 char_name[raw_name_len] = '\0';
383 *name = g_quark_from_string(char_name);
384 *num = 0; /* unknown cpu */
385 for(i=0;i<raw_name_len;i++) {
386 if(raw_name[i] == '/') {
387 break;
388 }
389 }
390 i++;
391 for(;i<raw_name_len;i++) {
392 if(raw_name[i] == '/') {
393 break;
394 }
395 }
396 i++;
397 for(;i<raw_name_len;i++) {
398 if(raw_name[i] == '-') {
399 break;
400 }
401 }
402 if(i == raw_name_len) return -1;
403 i++;
404 tmpptr = &raw_name[i];
405 for(;i<raw_name_len;i++) {
406 if(raw_name[i] == '.') {
407 raw_name[i] = ' ';
408 break;
409 }
410 }
411 *tid = strtoul(tmpptr, &endptr, 10);
412 if(endptr == tmpptr)
413 return -1; /* No digit */
414 if(*tid == ULONG_MAX)
415 return -1; /* underflow / overflow */
416 i++;
417 tmpptr = &raw_name[i];
418 for(;i<raw_name_len;i++) {
419 if(raw_name[i] == '.') {
420 raw_name[i] = ' ';
421 break;
422 }
423 }
424 *pgid = strtoul(tmpptr, &endptr, 10);
425 if(endptr == tmpptr)
426 return -1; /* No digit */
427 if(*pgid == ULONG_MAX)
428 return -1; /* underflow / overflow */
429 i++;
430 tmpptr = &raw_name[i];
431 *creation = strtoull(tmpptr, &endptr, 10);
432 if(endptr == tmpptr)
433 return -1; /* No digit */
434 if(*creation == G_MAXUINT64)
435 return -1; /* underflow / overflow */
436 } else {
437 underscore_pos = i;
438
439 cpu_num = strtol(raw_name+underscore_pos+1, &endptr, 10);
440
441 if(endptr == raw_name+underscore_pos+1)
442 return -1; /* No digit */
443 if(cpu_num == LONG_MIN || cpu_num == LONG_MAX)
444 return -1; /* underflow / overflow */
445
446 if (!strncmp(raw_name, "flight-", sizeof("flight-") - 1)) {
447 raw_name += sizeof("flight-") - 1;
448 underscore_pos -= sizeof("flight-") - 1;
449 }
450 strncpy(char_name, raw_name, underscore_pos);
451 char_name[underscore_pos] = '\0';
452 *name = g_quark_from_string(char_name);
453 *num = cpu_num;
454 }
455
456
457 return 0;
458 }
459
460
461 GData **ltt_trace_get_tracefiles_groups(LttTrace *trace)
462 {
463 return &trace->tracefiles;
464 }
465
466
467 void compute_tracefile_group(GQuark key_id,
468 GArray *group,
469 struct compute_tracefile_group_args *args)
470 {
471 unsigned int i;
472 LttTracefile *tf;
473
474 for(i=0; i<group->len; i++) {
475 tf = &g_array_index (group, LttTracefile, i);
476 if(tf->cpu_online)
477 args->func(tf, args->func_args);
478 }
479 }
480
481
482 static void ltt_tracefile_group_destroy(gpointer data)
483 {
484 GArray *group = (GArray *)data;
485 unsigned int i;
486 LttTracefile *tf;
487
488 if (group->len > 0)
489 destroy_marker_data(g_array_index (group, LttTracefile, 0).mdata);
490 for(i=0; i<group->len; i++) {
491 tf = &g_array_index (group, LttTracefile, i);
492 if(tf->cpu_online)
493 ltt_tracefile_close(tf);
494 }
495 g_array_free(group, TRUE);
496 }
497
498 static __attribute__ ((__unused__)) gboolean ltt_tracefile_group_has_cpu_online(gpointer data)
499 {
500 GArray *group = (GArray *)data;
501 unsigned int i;
502 LttTracefile *tf;
503
504 for(i=0; i<group->len; i++) {
505 tf = &g_array_index (group, LttTracefile, i);
506 if(tf->cpu_online)
507 return 1;
508 }
509 return 0;
510 }
511
512
513 /* Open each tracefile under a specific directory. Put them in a
514 * GData : permits to access them using their tracefile group pathname.
515 * i.e. access control/modules tracefile group by index :
516 * "control/module".
517 *
518 * relative path is the path relative to the trace root
519 * root path is the full path
520 *
521 * A tracefile group is simply an array where all the per cpu tracefiles sit.
522 */
523
524 static int open_tracefiles(LttTrace *trace, gchar *root_path, gchar *relative_path)
525 {
526 DIR *dir = opendir(root_path);
527 struct dirent *entry;
528 struct stat stat_buf;
529 int ret, i;
530 struct marker_data *mdata;
531
532 gchar path[PATH_MAX];
533 int path_len;
534 gchar *path_ptr;
535
536 int rel_path_len;
537 gchar rel_path[PATH_MAX];
538 gchar *rel_path_ptr;
539 LttTracefile tmp_tf;
540
541 if(dir == NULL) {
542 perror(root_path);
543 return ENOENT;
544 }
545
546 strncpy(path, root_path, PATH_MAX-1);
547 path_len = strlen(path);
548 path[path_len] = '/';
549 path_len++;
550 path_ptr = path + path_len;
551
552 strncpy(rel_path, relative_path, PATH_MAX-1);
553 rel_path_len = strlen(rel_path);
554 rel_path[rel_path_len] = '/';
555 rel_path_len++;
556 rel_path_ptr = rel_path + rel_path_len;
557
558 while((entry = readdir(dir)) != NULL) {
559
560 if(entry->d_name[0] == '.') continue;
561
562 strncpy(path_ptr, entry->d_name, PATH_MAX - path_len);
563 strncpy(rel_path_ptr, entry->d_name, PATH_MAX - rel_path_len);
564
565 ret = stat(path, &stat_buf);
566 if(ret == -1) {
567 perror(path);
568 continue;
569 }
570
571 g_debug("Tracefile file or directory : %s\n", path);
572
573 // if(strcmp(rel_path, "/eventdefs") == 0) continue;
574
575 if(S_ISDIR(stat_buf.st_mode)) {
576
577 g_debug("Entering subdirectory...\n");
578 ret = open_tracefiles(trace, path, rel_path);
579 if(ret < 0) continue;
580 } else if(S_ISREG(stat_buf.st_mode)) {
581 GQuark name;
582 guint num;
583 gulong tid, pgid;
584 guint64 creation;
585 GArray *group;
586 num = 0;
587 tid = pgid = 0;
588 creation = 0;
589 if(get_tracefile_name_number(rel_path, &name, &num, &tid, &pgid, &creation))
590 continue; /* invalid name */
591
592 g_debug("Opening file.\n");
593 if(ltt_tracefile_open(trace, path, &tmp_tf)) {
594 g_info("Error opening tracefile %s", path);
595
596 continue; /* error opening the tracefile : bad magic number ? */
597 }
598
599 g_debug("Tracefile name is %s and number is %u",
600 g_quark_to_string(name), num);
601
602 mdata = NULL;
603 tmp_tf.cpu_online = 1;
604 tmp_tf.cpu_num = num;
605 tmp_tf.name = name;
606 tmp_tf.tid = tid;
607 tmp_tf.pgid = pgid;
608 tmp_tf.creation = creation;
609 group = g_datalist_id_get_data(&trace->tracefiles, name);
610 if(group == NULL) {
 611 /* Elements are automatically cleared when the array is allocated,
 612 * so the cpu_online variable defaults to 0 : cpu offline.
613 */
614 group = g_array_sized_new (FALSE, TRUE, sizeof(LttTracefile), 10);
615 g_datalist_id_set_data_full(&trace->tracefiles, name,
616 group, ltt_tracefile_group_destroy);
617 mdata = allocate_marker_data();
618 if (!mdata)
619 g_error("Error in allocating marker data");
620 }
621
622 /* Add the per cpu tracefile to the named group */
623 unsigned int old_len = group->len;
624 if(num+1 > old_len)
625 group = g_array_set_size(group, num+1);
626
627 g_assert(group->len > 0);
628 if (!mdata)
629 mdata = g_array_index (group, LttTracefile, 0).mdata;
630
631 g_array_index (group, LttTracefile, num) = tmp_tf;
632 g_array_index (group, LttTracefile, num).event.tracefile =
633 &g_array_index (group, LttTracefile, num);
634 for (i = 0; i < group->len; i++)
635 g_array_index (group, LttTracefile, i).mdata = mdata;
636 }
637 }
638
639 closedir(dir);
640
641 return 0;
642 }
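/* Example helper (illustrative only, not used by LTTV) : count the
 * tracefiles currently marked online in a channel's tracefile group, using
 * the same GData/GArray access pattern as open_tracefiles() above. The
 * channel name passed in is whatever quark key the group was stored under. */
static __attribute__ ((__unused__)) guint
ltt_example_count_online(LttTrace *trace, const gchar *channel)
{
  GArray *group;
  guint i, count = 0;

  group = g_datalist_id_get_data(&trace->tracefiles,
                                 g_quark_from_string(channel));
  if (!group)
    return 0;
  for (i = 0; i < group->len; i++)
    if (g_array_index(group, LttTracefile, i).cpu_online)
      count++;
  return count;
}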
643
644
 645 /* Presumes the tracefile is already positioned at the beginning. This makes
 646 * sense, because it must be done just after opening the tracefile. */
647 static int ltt_process_metadata_tracefile(LttTracefile *tf)
648 {
649 int err;
650
651 while(1) {
652 err = ltt_tracefile_read_seek(tf);
653 if(err == EPERM) goto seek_error;
654 else if(err == ERANGE) break; /* End of tracefile */
655
656 err = ltt_tracefile_read_update_event(tf);
657 if(err) goto update_error;
658
659 /* The rules are :
660 * It contains only core events :
661 * 0 : set_marker_id
662 * 1 : set_marker_format
663 */
664 if(tf->event.event_id >= MARKER_CORE_IDS) {
665 /* Should only contain core events */
666 g_warning("Error in processing metadata file %s, "
667 "should not contain event id %u.", g_quark_to_string(tf->name),
668 tf->event.event_id);
669 err = EPERM;
670 goto event_id_error;
671 } else {
672 char *pos;
673 const char *channel_name, *marker_name, *format;
674 uint16_t id;
675 guint8 int_size, long_size, pointer_size, size_t_size, alignment;
676
677 switch((enum marker_id)tf->event.event_id) {
678 case MARKER_ID_SET_MARKER_ID:
679 channel_name = pos = tf->event.data;
680 pos += strlen(channel_name) + 1;
681 marker_name = pos;
682 g_debug("Doing MARKER_ID_SET_MARKER_ID of marker %s.%s",
683 channel_name, marker_name);
684 pos += strlen(marker_name) + 1;
685 pos += ltt_align((size_t)pos, sizeof(guint16), tf->alignment);
686 id = ltt_get_uint16(LTT_GET_BO(tf), pos);
687 g_debug("In MARKER_ID_SET_MARKER_ID of marker %s.%s id %hu",
688 channel_name, marker_name, id);
689 pos += sizeof(guint16);
690 int_size = *(guint8*)pos;
691 pos += sizeof(guint8);
692 long_size = *(guint8*)pos;
693 pos += sizeof(guint8);
694 pointer_size = *(guint8*)pos;
695 pos += sizeof(guint8);
696 size_t_size = *(guint8*)pos;
697 pos += sizeof(guint8);
698 alignment = *(guint8*)pos;
699 pos += sizeof(guint8);
700 marker_id_event(tf->trace,
701 g_quark_from_string(channel_name),
702 g_quark_from_string(marker_name),
703 id, int_size, long_size,
704 pointer_size, size_t_size, alignment);
705 break;
706 case MARKER_ID_SET_MARKER_FORMAT:
707 channel_name = pos = tf->event.data;
708 pos += strlen(channel_name) + 1;
709 marker_name = pos;
710 g_debug("Doing MARKER_ID_SET_MARKER_FORMAT of marker %s.%s",
711 channel_name, marker_name);
712 pos += strlen(marker_name) + 1;
713 format = pos;
714 pos += strlen(format) + 1;
715 marker_format_event(tf->trace,
716 g_quark_from_string(channel_name),
717 g_quark_from_string(marker_name),
718 format);
719 /* get information from dictionary TODO */
720 break;
721 default:
722 g_warning("Error in processing metadata file %s, "
723 "unknown event id %hhu.",
724 g_quark_to_string(tf->name),
725 tf->event.event_id);
726 err = EPERM;
727 goto event_id_error;
728 }
729 }
730 }
731 return 0;
732
733 /* Error handling */
734 event_id_error:
735 update_error:
736 seek_error:
737 g_warning("An error occured in metadata tracefile parsing");
738 return err;
739 }
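/*
 * Payload layout handled above (summary added for clarity) :
 *   set_marker_id     : channel\0 marker\0 [align to u16] id:u16 int_size:u8
 *                       long_size:u8 pointer_size:u8 size_t_size:u8
 *                       alignment:u8
 *   set_marker_format : channel\0 marker\0 format\0
 */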
740
741 /*
742 * Open a trace and return its LttTrace handle.
743 *
744 * pathname must be the directory of the trace
745 */
746
747 LttTrace *ltt_trace_open(const gchar *pathname)
748 {
749 gchar abs_path[PATH_MAX];
750 LttTrace * t;
751 LttTracefile *tf;
752 GArray *group;
753 unsigned int i;
754 int ret;
755 ltt_subbuffer_header_t *header;
756 DIR *dir;
757 struct dirent *entry;
758 struct stat stat_buf;
759 gchar path[PATH_MAX];
760
761 t = g_new(LttTrace, 1);
762 if(!t) goto alloc_error;
763
764 get_absolute_pathname(pathname, abs_path);
765 t->pathname = g_quark_from_string(abs_path);
766
767 t->start_tsc = 0;
768 t->freq_scale = 1;
769 t->start_freq = 1;
770 t->start_time_from_tsc = ltt_time_zero;
771
772 g_datalist_init(&t->tracefiles);
773
774 /* Test to see if it looks like a trace */
775 dir = opendir(abs_path);
776 if(dir == NULL) {
777 perror(abs_path);
778 goto open_error;
779 }
780 while((entry = readdir(dir)) != NULL) {
781 strcpy(path, abs_path);
782 strcat(path, "/");
783 strcat(path, entry->d_name);
784 ret = stat(path, &stat_buf);
785 if(ret == -1) {
786 perror(path);
787 continue;
788 }
789 }
790 closedir(dir);
791
792 /* Open all the tracefiles */
793 if(open_tracefiles(t, abs_path, "")) {
794 g_warning("Error opening tracefile %s", abs_path);
795 goto find_error;
796 }
797
 798 /* Parse each trace metadata_N file : get runtime facility info */
799 group = g_datalist_id_get_data(&t->tracefiles, LTT_TRACEFILE_NAME_METADATA);
800 if(group == NULL) {
801 g_warning("Trace %s has no metadata tracefile", abs_path);
802 goto find_error;
803 }
804
805 /*
806 * Get the trace information for the metadata_0 tracefile.
 807 * Getting a correct trace start_time and start_tsc is ensured by the fact
808 * that no subbuffers are supposed to be lost in the metadata channel.
809 * Therefore, the first subbuffer contains the start_tsc timestamp in its
810 * buffer header.
811 */
812 g_assert(group->len > 0);
813 tf = &g_array_index (group, LttTracefile, 0);
814 header = (ltt_subbuffer_header_t *)tf->buffer.head;
815 ret = parse_trace_header(header, tf, t);
816 g_assert(!ret);
817
818 t->num_cpu = group->len;
819
820 //ret = allocate_marker_data(t);
821 //if (ret)
822 // g_error("Error in allocating marker data");
823
824 for(i=0; i<group->len; i++) {
825 tf = &g_array_index (group, LttTracefile, i);
826 if (tf->cpu_online)
827 if(ltt_process_metadata_tracefile(tf))
828 goto find_error;
829 // goto metadata_error;
830 }
831
832 return t;
833
834 /* Error handling */
835 //metadata_error:
836 // destroy_marker_data(t);
837 find_error:
838 g_datalist_clear(&t->tracefiles);
839 open_error:
840 g_free(t);
841 alloc_error:
842 return NULL;
843
844 }
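/*
 * Usage sketch (illustrative only) : "pathname" must be the trace directory,
 * and the path below is a placeholder.
 *
 *   LttTrace *trace = ltt_trace_open("/path/to/trace");
 *   if (trace) {
 *     // walk channels via ltt_trace_get_tracefiles_groups(trace) if needed
 *     ltt_trace_close(trace);
 *   }
 */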
845
 846 /* Open another, completely independent, instance of a trace.
847 *
848 * A read on this new instance will read the first event of the trace.
849 *
850 * When we copy a trace, we want all the opening actions to happen again :
 851 * the trace will be reopened and totally independent from the original.
852 * That's why we call ltt_trace_open.
853 */
854 LttTrace *ltt_trace_copy(LttTrace *self)
855 {
856 return ltt_trace_open(g_quark_to_string(self->pathname));
857 }
858
859 /*
860 * Close a trace
861 */
862
863 void ltt_trace_close(LttTrace *t)
864 {
865 g_datalist_clear(&t->tracefiles);
866 g_free(t);
867 }
868
869
870 /*****************************************************************************
871 * Get the start time and end time of the trace
872 ****************************************************************************/
873
874 void ltt_tracefile_time_span_get(LttTracefile *tf,
875 LttTime *start, LttTime *end)
876 {
877 int err;
878
879 err = map_block(tf, 0);
880 if(unlikely(err)) {
881 g_error("Can not map block");
882 *start = ltt_time_infinite;
883 } else
884 *start = tf->buffer.begin.timestamp;
885
886 err = map_block(tf, tf->num_blocks - 1); /* Last block */
887 if(unlikely(err)) {
888 g_error("Can not map block");
889 *end = ltt_time_zero;
890 } else
891 *end = tf->buffer.end.timestamp;
892 }
893
894 struct tracefile_time_span_get_args {
895 LttTrace *t;
896 LttTime *start;
897 LttTime *end;
898 };
899
900 static void group_time_span_get(GQuark name, gpointer data, gpointer user_data)
901 {
902 struct tracefile_time_span_get_args *args =
903 (struct tracefile_time_span_get_args*)user_data;
904
905 GArray *group = (GArray *)data;
906 unsigned int i;
907 LttTracefile *tf;
908 LttTime tmp_start;
909 LttTime tmp_end;
910
911 for(i=0; i<group->len; i++) {
912 tf = &g_array_index (group, LttTracefile, i);
913 if(tf->cpu_online) {
914 ltt_tracefile_time_span_get(tf, &tmp_start, &tmp_end);
915 if(ltt_time_compare(*args->start, tmp_start)>0) *args->start = tmp_start;
916 if(ltt_time_compare(*args->end, tmp_end)<0) *args->end = tmp_end;
917 }
918 }
919 }
920
921 /* return the start and end time of a trace */
922
923 void ltt_trace_time_span_get(LttTrace *t, LttTime *start, LttTime *end)
924 {
925 LttTime min_start = ltt_time_infinite;
926 LttTime max_end = ltt_time_zero;
927 struct tracefile_time_span_get_args args = { t, &min_start, &max_end };
928
929 g_datalist_foreach(&t->tracefiles, &group_time_span_get, &args);
930
931 if(start != NULL) *start = min_start;
932 if(end != NULL) *end = max_end;
933
934 }
935
936
 937 /* Seek to the first event in a tracefile whose time is equal to or greater
 938 * than the time passed as parameter.
939 *
 940 * If the time parameter is before the tracefile time span, seek to the first
 941 * event; if it is after the span, return ERANGE.
 942 *
 943 * If the time parameter is before the first event, we have to seek specially
 944 * to it.
945 *
946 * If the time is after the end of the trace, return ERANGE.
947 *
948 * Do a binary search to find the right block, then a sequential search in the
949 * block to find the event.
950 *
951 * In the special case where the time requested fits inside a block that has no
952 * event corresponding to the requested time, the first event of the next block
 953 * will be seeked to instead.
954 *
955 * IMPORTANT NOTE : // FIXME everywhere...
956 *
957 * You MUST NOT do a ltt_tracefile_read right after a ltt_tracefile_seek_time :
958 * you will jump over an event if you do.
959 *
960 * Return value : 0 : no error, the tf->event can be used
 961 * ERANGE : time is after the last event of the trace
962 * otherwise : this is an error.
963 *
964 * */
965
966 int ltt_tracefile_seek_time(LttTracefile *tf, LttTime time)
967 {
968 int ret = 0;
969 int err;
970 unsigned int block_num, high, low;
971
972 /* seek at the beginning of trace */
973 err = map_block(tf, 0); /* First block */
974 if(unlikely(err)) {
975 g_error("Can not map block");
976 goto fail;
977 }
978
979 /* If the time is lower or equal the beginning of the trace,
980 * go to the first event. */
981 if(ltt_time_compare(time, tf->buffer.begin.timestamp) <= 0) {
982 ret = ltt_tracefile_read(tf);
983 if(ret == ERANGE) goto range;
984 else if (ret) goto fail;
985 goto found; /* There is either no event in the trace or the event points
986 to the first event in the trace */
987 }
988
989 err = map_block(tf, tf->num_blocks - 1); /* Last block */
990 if(unlikely(err)) {
991 g_error("Can not map block");
992 goto fail;
993 }
994
995 /* If the time is after the end of the trace, return ERANGE. */
996 if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
997 goto range;
998 }
999
1000 /* Binary search the block */
1001 high = tf->num_blocks - 1;
1002 low = 0;
1003
1004 while(1) {
1005 block_num = ((high-low) / 2) + low;
1006
1007 err = map_block(tf, block_num);
1008 if(unlikely(err)) {
1009 g_error("Can not map block");
1010 goto fail;
1011 }
1012 if(high == low) {
 1013 /* We cannot divide anymore : this is what would happen if the time
 1014 * requested was exactly between two consecutive buffers' end and start
 1015 * timestamps. This is also what would happen if we did not deal with the
 1016 * out-of-span cases earlier in this function. */
1017 /* The event is right in the buffer!
1018 * (or in the next buffer first event) */
1019 while(1) {
1020 ret = ltt_tracefile_read(tf);
1021 if(ret == ERANGE) goto range; /* ERANGE or EPERM */
1022 else if(ret) goto fail;
1023
1024 if(ltt_time_compare(time, tf->event.event_time) <= 0)
1025 goto found;
1026 }
1027
1028 } else if(ltt_time_compare(time, tf->buffer.begin.timestamp) < 0) {
1029 /* go to lower part */
1030 high = block_num - 1;
1031 } else if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
1032 /* go to higher part */
1033 low = block_num + 1;
1034 } else {/* The event is right in the buffer!
1035 (or in the next buffer first event) */
1036 while(1) {
1037 ret = ltt_tracefile_read(tf);
1038 if(ret == ERANGE) goto range; /* ERANGE or EPERM */
1039 else if(ret) goto fail;
1040
1041 if(ltt_time_compare(time, tf->event.event_time) <= 0)
1042 break;
1043 }
1044 goto found;
1045 }
1046 }
1047
1048 found:
1049 return 0;
1050 range:
1051 return ERANGE;
1052
1053 /* Error handling */
1054 fail:
1055 g_error("ltt_tracefile_seek_time failed on tracefile %s",
1056 g_quark_to_string(tf->name));
1057 return EPERM;
1058 }
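/*
 * Usage sketch (illustrative only) : after a successful seek, tf->event
 * already describes the first event at or after the requested time, so use
 * ltt_tracefile_get_event() directly ; calling ltt_tracefile_read() first
 * would skip that event (see the IMPORTANT NOTE above).
 *
 *   if (ltt_tracefile_seek_time(tf, t) == 0) {
 *     LttEvent *ev = ltt_tracefile_get_event(tf);
 *     // use ev here
 *   }
 */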
1059
1060 /* Seek to a position indicated by an LttEventPosition
1061 */
1062
1063 int ltt_tracefile_seek_position(LttTracefile *tf, const LttEventPosition *ep)
1064 {
1065 int err;
1066
1067 if(ep->tracefile != tf) {
1068 goto fail;
1069 }
1070
1071 err = map_block(tf, ep->block);
1072 if(unlikely(err)) {
1073 g_error("Can not map block");
1074 goto fail;
1075 }
1076
1077 tf->event.offset = ep->offset;
1078
1079 /* Put back the event real tsc */
1080 tf->event.tsc = ep->tsc;
1081 tf->buffer.tsc = ep->tsc;
1082
1083 err = ltt_tracefile_read_update_event(tf);
1084 if(err) goto fail;
1085
1086 /* deactivate this, as it does nothing for now
1087 err = ltt_tracefile_read_op(tf);
1088 if(err) goto fail;
1089 */
1090
1091 return 0;
1092
1093 fail:
1094 g_error("ltt_tracefile_seek_time failed on tracefile %s",
1095 g_quark_to_string(tf->name));
1096 return 1;
1097 }
1098
1099 /* Given a TSC value, return the LttTime (seconds,nanoseconds) it
1100 * corresponds to.
1101 */
1102
1103 LttTime ltt_interpolate_time_from_tsc(LttTracefile *tf, guint64 tsc)
1104 {
1105 LttTime time;
1106
1107 if(tsc > tf->trace->start_tsc) {
1108 time = ltt_time_from_uint64(
1109 (double)(tsc - tf->trace->start_tsc)
1110 * 1000000000.0 * tf->trace->freq_scale
1111 / (double)tf->trace->start_freq);
1112 time = ltt_time_add(tf->trace->start_time_from_tsc, time);
1113 } else {
1114 time = ltt_time_from_uint64(
1115 (double)(tf->trace->start_tsc - tsc)
1116 * 1000000000.0 * tf->trace->freq_scale
1117 / (double)tf->trace->start_freq);
1118 time = ltt_time_sub(tf->trace->start_time_from_tsc, time);
1119 }
1120 return time;
1121 }
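/*
 * Worked example (hypothetical numbers, added for clarity) : with
 * start_freq = 1 GHz, freq_scale = 1 and start_tsc = 0, a tsc of
 * 3 000 000 000 cycles gives
 *   3e9 * 1e9 * 1 / 1e9 = 3e9 ns = 3 s
 * added to start_time_from_tsc. TSC values smaller than start_tsc are
 * handled symmetrically by subtracting from start_time_from_tsc.
 */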
1122
1123 /* Calculate the real event time based on the buffer boundaries */
1124 LttTime ltt_interpolate_time(LttTracefile *tf, LttEvent *event)
1125 {
1126 return ltt_interpolate_time_from_tsc(tf, tf->buffer.tsc);
1127 }
1128
1129
1130 /* Get the current event of the tracefile : valid until the next read */
1131 LttEvent *ltt_tracefile_get_event(LttTracefile *tf)
1132 {
1133 return &tf->event;
1134 }
1135
1136
1137
1138 /*****************************************************************************
1139 *Function name
1140 * ltt_tracefile_read : Read the next event in the tracefile
1141 *Input params
1142 * t : tracefile
1143 *Return value
1144 *
1145 * Returns 0 if an event can be used in tf->event.
1146 * Returns ERANGE on end of trace. The event in tf->event still can be used
1147 * (if the last block was not empty).
1148 * Returns EPERM on error.
1149 *
 1150 * This function makes the tracefile event structure (tf->event) describe the
 1151 * event at the current read position.
 1152 *
 1153 * Note : you must seek back to the beginning of the trace to reinitialize the
 1154 * tracefile after an error if you want results to be coherent. Such an error
 1155 * would happen if the last buffer of the trace contains no event : the end of
 1156 * trace would not be returned, but an error instead.
 1157 * We make the assumption there is at least one event per buffer.
1158 ****************************************************************************/
1159
1160 int ltt_tracefile_read(LttTracefile *tf)
1161 {
1162 int err;
1163
1164 err = ltt_tracefile_read_seek(tf);
1165 if(err) return err;
1166 err = ltt_tracefile_read_update_event(tf);
1167 if(err) return err;
1168
1169 /* deactivate this, as it does nothing for now
1170 err = ltt_tracefile_read_op(tf);
1171 if(err) return err;
1172 */
1173
1174 return 0;
1175 }
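/*
 * Typical read loop (illustrative sketch only) :
 *
 *   int err;
 *   while ((err = ltt_tracefile_read(tf)) == 0) {
 *     LttEvent *ev = ltt_tracefile_get_event(tf);
 *     // process ev
 *   }
 *   // err == ERANGE : end of trace reached ; err == EPERM : read error.
 */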
1176
1177 int ltt_tracefile_read_seek(LttTracefile *tf)
1178 {
1179 int err;
1180
1181 /* Get next buffer until we finally have an event, or end of trace */
1182 while(1) {
1183 err = ltt_seek_next_event(tf);
1184 if(unlikely(err == ENOPROTOOPT)) {
1185 return EPERM;
1186 }
1187
1188 /* Are we at the end of the buffer ? */
1189 if(err == ERANGE) {
1190 if(unlikely(tf->buffer.index == tf->num_blocks-1)){ /* end of trace ? */
1191 return ERANGE;
1192 } else {
1193 /* get next block */
1194 err = map_block(tf, tf->buffer.index + 1);
1195 if(unlikely(err)) {
1196 g_error("Can not map block");
1197 return EPERM;
1198 }
1199 }
1200 } else break; /* We found an event ! */
1201 }
1202
1203 return 0;
1204 }
1205
1206 /* do an operation when reading a new event */
1207
1208 /* This function does nothing for now */
1209 #if 0
1210 int ltt_tracefile_read_op(LttTracefile *tf)
1211 {
1212 LttEvent *event;
1213
1214 event = &tf->event;
1215
1216 /* do event specific operation */
1217
1218 /* nothing */
1219
1220 return 0;
1221 }
1222 #endif
1223
1224 static void print_debug_event_header(LttEvent *ev, void *start_pos, void *end_pos)
1225 {
1226 unsigned int offset = 0;
1227 int i, j;
1228
1229 g_printf("Event header (tracefile %s offset %" PRIx64 "):\n",
1230 g_quark_to_string(ev->tracefile->long_name),
1231 ((uint64_t)ev->tracefile->buffer.index * ev->tracefile->buf_size)
1232 + (long)start_pos - (long)ev->tracefile->buffer.head);
1233
1234 while (offset < (long)end_pos - (long)start_pos) {
1235 g_printf("%8lx", (long)start_pos - (long)ev->tracefile->buffer.head + offset);
1236 g_printf(" ");
1237
1238 for (i = 0; i < 4 ; i++) {
1239 for (j = 0; j < 4; j++) {
1240 if (offset + ((i * 4) + j) <
1241 (long)end_pos - (long)start_pos)
1242 g_printf("%02hhX",
1243 ((char*)start_pos)[offset + ((i * 4) + j)]);
1244 else
1245 g_printf(" ");
1246 g_printf(" ");
1247 }
1248 if (i < 4)
1249 g_printf(" ");
1250 }
1251 offset+=16;
1252 g_printf("\n");
1253 }
1254 }
1255
1256
1257 /* same as ltt_tracefile_read, but does not seek to the next event nor call
1258 * event specific operation. */
1259 int ltt_tracefile_read_update_event(LttTracefile *tf)
1260 {
1261 void * pos;
1262 LttEvent *event;
1263 void *pos_aligned;
 1264 guint16 packed_evid; /* event id read from the 5 bits in the header */
1265
1266 event = &tf->event;
1267 pos = tf->buffer.head + event->offset;
1268
1269 /* Read event header */
1270
1271 /* Align the head */
1272 pos += ltt_align((size_t)pos, sizeof(guint32), tf->alignment);
1273 pos_aligned = pos;
1274
1275 event->timestamp = ltt_get_uint32(LTT_GET_BO(tf), pos);
1276 event->event_id = packed_evid = event->timestamp >> tf->tscbits;
1277 event->timestamp = event->timestamp & tf->tsc_mask;
1278 pos += sizeof(guint32);
1279
1280 switch (packed_evid) {
1281 case 29: /* LTT_RFLAG_ID_SIZE_TSC */
1282 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1283 pos += sizeof(guint16);
1284 event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
1285 pos += sizeof(guint16);
1286 if (event->event_size == 0xFFFF) {
1287 event->event_size = ltt_get_uint32(LTT_GET_BO(tf), pos);
1288 pos += sizeof(guint32);
1289 }
1290 pos += ltt_align((size_t)pos, sizeof(guint64), tf->alignment);
1291 tf->buffer.tsc = ltt_get_uint64(LTT_GET_BO(tf), pos);
1292 pos += sizeof(guint64);
1293 break;
1294 case 30: /* LTT_RFLAG_ID_SIZE */
1295 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1296 pos += sizeof(guint16);
1297 event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
1298 pos += sizeof(guint16);
1299 if (event->event_size == 0xFFFF) {
1300 event->event_size = ltt_get_uint32(LTT_GET_BO(tf), pos);
1301 pos += sizeof(guint32);
1302 }
1303 break;
1304 case 31: /* LTT_RFLAG_ID */
1305 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1306 pos += sizeof(guint16);
1307 event->event_size = G_MAXUINT;
1308 break;
1309 default:
1310 event->event_size = G_MAXUINT;
1311 break;
1312 }
1313
1314 if (likely(packed_evid != 29)) {
1315 /* No extended timestamp */
1316 if (event->timestamp < (tf->buffer.tsc & tf->tsc_mask))
1317 tf->buffer.tsc = ((tf->buffer.tsc & ~tf->tsc_mask) /* overflow */
1318 + tf->tsc_mask_next_bit)
1319 | (guint64)event->timestamp;
1320 else
1321 tf->buffer.tsc = (tf->buffer.tsc & ~tf->tsc_mask) /* no overflow */
1322 | (guint64)event->timestamp;
1323 }
1324 event->tsc = tf->buffer.tsc;
1325
1326 event->event_time = ltt_interpolate_time(tf, event);
1327
1328 if (a_event_debug)
1329 print_debug_event_header(event, pos_aligned, pos);
1330
1331 event->data = pos;
1332
1333 /*
1334 * Let ltt_update_event_size update event->data according to the largest
1335 * alignment within the payload.
1336 * Get the data size and update the event fields with the current
1337 * information. */
1338 ltt_update_event_size(tf);
1339
1340 return 0;
1341 }
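/*
 * Example of the TSC extension above (hypothetical values, for clarity) :
 * with tscbits = 27, only the low 27 bits of the timestamp travel in the
 * event header. If the previous buffer TSC is 0x07FFFFF0 and the new 27-bit
 * timestamp is 0x10, the new value is smaller than the previous low-order
 * bits, so one wrap of the 27-bit counter is assumed : tsc_mask_next_bit
 * (1 << 27) is added to the upper bits and the low bits are replaced,
 * giving an extended TSC of 0x08000010.
 */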
1342
1343
1344 /****************************************************************************
1345 *Function name
1346 * map_block : map a block from the file
1347 *Input Params
1348 * lttdes : ltt trace file
1349 * whichBlock : the block which will be read
1350 *return value
1351 * 0 : success
1352 * EINVAL : lseek fail
1353 * EIO : can not read from the file
1354 ****************************************************************************/
1355
1356 static gint map_block(LttTracefile * tf, guint block_num)
1357 {
1358 int page_size = getpagesize();
1359 ltt_subbuffer_header_t *header;
1360
1361 g_assert(block_num < tf->num_blocks);
1362
1363 if(tf->buffer.head != NULL) {
1364 if(munmap(tf->buffer.head, PAGE_ALIGN(tf->buf_size))) {
1365 g_warning("unmap size : %u\n",
1366 PAGE_ALIGN(tf->buf_size));
1367 perror("munmap error");
1368 g_assert(0);
1369 }
1370 }
1371
1372 /* Multiple of pages aligned head */
1373 tf->buffer.head = mmap(0,
1374 PAGE_ALIGN(tf->buf_size),
1375 PROT_READ, MAP_PRIVATE, tf->fd,
1376 PAGE_ALIGN((off_t)tf->buf_size * (off_t)block_num));
1377
1378 if(tf->buffer.head == MAP_FAILED) {
1379 perror("Error in allocating memory for buffer of tracefile");
1380 g_assert(0);
1381 goto map_error;
1382 }
1383 g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
1384
1385
1386 tf->buffer.index = block_num;
1387
1388 header = (ltt_subbuffer_header_t *)tf->buffer.head;
1389
1390 tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
1391 &header->cycle_count_begin);
1392 tf->buffer.begin.freq = tf->trace->start_freq;
1393
1394 tf->buffer.begin.timestamp = ltt_interpolate_time_from_tsc(tf,
1395 tf->buffer.begin.cycle_count);
1396 tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
1397 &header->cycle_count_end);
1398 tf->buffer.end.freq = tf->trace->start_freq;
1399
1400 tf->buffer.lost_size = ltt_get_uint32(LTT_GET_BO(tf),
1401 &header->lost_size);
1402 tf->buffer.end.timestamp = ltt_interpolate_time_from_tsc(tf,
1403 tf->buffer.end.cycle_count);
1404 tf->buffer.tsc = tf->buffer.begin.cycle_count;
1405 tf->event.tsc = tf->buffer.tsc;
1406 tf->buffer.freq = tf->buffer.begin.freq;
1407
1408 /* FIXME
1409 * eventually support variable buffer size : will need a partial pre-read of
1410 * the headers to create an index when we open the trace... eventually. */
1411 g_assert(tf->buf_size == ltt_get_uint32(LTT_GET_BO(tf),
1412 &header->buf_size));
1413
1414 /* Make the current event point to the beginning of the buffer :
1415 * it means that the event read must get the first event. */
1416 tf->event.tracefile = tf;
1417 tf->event.block = block_num;
1418 tf->event.offset = 0;
1419
1420 if (header->events_lost) {
1421 g_warning("%d events lost so far in tracefile %s at block %u",
1422 (guint)header->events_lost,
1423 g_quark_to_string(tf->long_name),
1424 block_num);
1425 tf->events_lost = header->events_lost;
1426 }
1427 if (header->subbuf_corrupt) {
1428 g_warning("%d subbuffer(s) corrupted so far in tracefile %s at block %u",
1429 (guint)header->subbuf_corrupt,
1430 g_quark_to_string(tf->long_name),
1431 block_num);
1432 tf->subbuf_corrupt = header->subbuf_corrupt;
1433 }
1434
1435 return 0;
1436
1437 map_error:
1438 return -errno;
1439 }
1440
1441 static void print_debug_event_data(LttEvent *ev)
1442 {
1443 unsigned int offset = 0;
1444 int i, j;
1445
1446 if (!max(ev->event_size, ev->data_size))
1447 return;
1448
1449 g_printf("Event data (tracefile %s offset %" PRIx64 "):\n",
1450 g_quark_to_string(ev->tracefile->long_name),
1451 ((uint64_t)ev->tracefile->buffer.index * ev->tracefile->buf_size)
1452 + (long)ev->data - (long)ev->tracefile->buffer.head);
1453
1454 while (offset < max(ev->event_size, ev->data_size)) {
1455 g_printf("%8lx", (long)ev->data + offset
1456 - (long)ev->tracefile->buffer.head);
1457 g_printf(" ");
1458
1459 for (i = 0; i < 4 ; i++) {
1460 for (j = 0; j < 4; j++) {
1461 if (offset + ((i * 4) + j) < max(ev->event_size, ev->data_size))
1462 g_printf("%02hhX", ((char*)ev->data)[offset + ((i * 4) + j)]);
1463 else
1464 g_printf(" ");
1465 g_printf(" ");
1466 }
1467 if (i < 4)
1468 g_printf(" ");
1469 }
1470
1471 g_printf(" ");
1472
1473 for (i = 0; i < 4; i++) {
1474 for (j = 0; j < 4; j++) {
1475 if (offset + ((i * 4) + j) < max(ev->event_size, ev->data_size)) {
1476 if (isprint(((char*)ev->data)[offset + ((i * 4) + j)]))
1477 g_printf("%c", ((char*)ev->data)[offset + ((i * 4) + j)]);
1478 else
1479 g_printf(".");
1480 } else
1481 g_printf(" ");
1482 }
1483 }
1484 offset+=16;
1485 g_printf("\n");
1486 }
1487 }
1488
1489 /* It will update the fields offsets too */
1490 void ltt_update_event_size(LttTracefile *tf)
1491 {
1492 off_t size = 0;
1493 struct marker_info *info;
1494
1495 if (tf->name == LTT_TRACEFILE_NAME_METADATA) {
1496 switch((enum marker_id)tf->event.event_id) {
1497 case MARKER_ID_SET_MARKER_ID:
1498 size = strlen((char*)tf->event.data) + 1;
1499 g_debug("marker %s id set", (char*)tf->event.data + size);
1500 size += strlen((char*)tf->event.data + size) + 1;
1501 size += ltt_align(size, sizeof(guint16), tf->alignment);
1502 size += sizeof(guint16);
1503 size += sizeof(guint8);
1504 size += sizeof(guint8);
1505 size += sizeof(guint8);
1506 size += sizeof(guint8);
1507 size += sizeof(guint8);
1508 break;
1509 case MARKER_ID_SET_MARKER_FORMAT:
1510 size = strlen((char*)tf->event.data) + 1;
1511 g_debug("marker %s format set", (char*)tf->event.data);
1512 size += strlen((char*)tf->event.data + size) + 1;
1513 size += strlen((char*)tf->event.data + size) + 1;
1514 break;
1515 }
1516 }
1517
1518 info = marker_get_info_from_id(tf->mdata, tf->event.event_id);
1519
1520 if (tf->event.event_id >= MARKER_CORE_IDS)
1521 g_assert(info != NULL);
1522
 1523 /* Do not update field offsets of core markers when initially reading the
 1524 * metadata tracefile, while the info about these markers does not exist yet.
1525 */
1526 if (likely(info && info->fields)) {
1527 /* alignment */
1528 tf->event.data += ltt_align((off_t)(unsigned long)tf->event.data,
1529 info->largest_align,
1530 info->alignment);
1531 /* size, dynamically computed */
1532 if (info->size != -1)
1533 size = info->size;
1534 else
1535 size = marker_update_fields_offsets(marker_get_info_from_id(tf->mdata,
1536 tf->event.event_id), tf->event.data);
1537 }
1538
1539 tf->event.data_size = size;
1540
1541 /* Check consistency between kernel and LTTV structure sizes */
1542 if(tf->event.event_size == G_MAXUINT) {
1543 /* Event size too big to fit in the event size field */
1544 tf->event.event_size = tf->event.data_size;
1545 }
1546
1547 if (a_event_debug)
1548 print_debug_event_data(&tf->event);
1549
1550 if (tf->event.data_size != tf->event.event_size) {
1551 struct marker_info *info = marker_get_info_from_id(tf->mdata,
1552 tf->event.event_id);
1553 if (!info)
1554 g_error("Undescribed event %hhu in channel %s", tf->event.event_id,
1555 g_quark_to_string(tf->name));
1556 g_error("Kernel/LTTV event size differs for event %s: kernel %u, LTTV %u",
1557 g_quark_to_string(info->name),
1558 tf->event.event_size, tf->event.data_size);
1559 exit(-1);
1560 }
1561 }
1562
1563
1564 /* Take the tf current event offset and use the event id to figure out where is
1565 * the next event offset.
1566 *
1567 * This is an internal function not aiming at being used elsewhere : it will
1568 * not jump over the current block limits. Please consider using
1569 * ltt_tracefile_read to do this.
1570 *
1571 * Returns 0 on success
1572 * ERANGE if we are at the end of the buffer.
1573 * ENOPROTOOPT if an error occured when getting the current event size.
1574 */
1575 static int ltt_seek_next_event(LttTracefile *tf)
1576 {
1577 int ret = 0;
1578 void *pos;
1579
1580 /* seek over the buffer header if we are at the buffer start */
1581 if(tf->event.offset == 0) {
1582 tf->event.offset += tf->buffer_header_size;
1583
1584 if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
1585 ret = ERANGE;
1586 }
1587 goto found;
1588 }
1589
1590 pos = tf->event.data;
1591
1592 if(tf->event.data_size < 0) goto error;
1593
1594 pos += (size_t)tf->event.data_size;
1595
1596 tf->event.offset = pos - tf->buffer.head;
1597
1598 if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
1599 ret = ERANGE;
1600 goto found;
1601 }
1602 g_assert(tf->event.offset < tf->buf_size - tf->buffer.lost_size);
1603
1604 found:
1605 return ret;
1606
1607 error:
1608 g_error("Error in ltt_seek_next_event for tracefile %s",
1609 g_quark_to_string(tf->name));
1610 return ENOPROTOOPT;
1611 }
1612
1613
1614 /*****************************************************************************
1615 *Function name
1616 * ltt_get_int : get an integer number
1617 *Input params
1618 * reverse_byte_order: must we reverse the byte order ?
1619 * size : the size of the integer
1620 * ptr : the data pointer
1621 *Return value
1622 * gint64 : a 64 bits integer
1623 ****************************************************************************/
1624
1625 gint64 ltt_get_int(gboolean reverse_byte_order, gint size, void *data)
1626 {
1627 gint64 val;
1628
1629 switch(size) {
1630 case 1: val = *((gint8*)data); break;
1631 case 2: val = ltt_get_int16(reverse_byte_order, data); break;
1632 case 4: val = ltt_get_int32(reverse_byte_order, data); break;
1633 case 8: val = ltt_get_int64(reverse_byte_order, data); break;
1634 default: val = ltt_get_int64(reverse_byte_order, data);
1635 g_critical("get_int : integer size %d unknown", size);
1636 break;
1637 }
1638
1639 return val;
1640 }
1641
1642 /*****************************************************************************
1643 *Function name
1644 * ltt_get_uint : get an unsigned integer number
1645 *Input params
1646 * reverse_byte_order: must we reverse the byte order ?
1647 * size : the size of the integer
1648 * ptr : the data pointer
1649 *Return value
1650 * guint64 : a 64 bits unsigned integer
1651 ****************************************************************************/
1652
1653 guint64 ltt_get_uint(gboolean reverse_byte_order, gint size, void *data)
1654 {
1655 guint64 val;
1656
1657 switch(size) {
 1658 case 1: val = *((guint8*)data); break; /* unsigned read avoids sign extension */
1659 case 2: val = ltt_get_uint16(reverse_byte_order, data); break;
1660 case 4: val = ltt_get_uint32(reverse_byte_order, data); break;
1661 case 8: val = ltt_get_uint64(reverse_byte_order, data); break;
1662 default: val = ltt_get_uint64(reverse_byte_order, data);
1663 g_critical("get_uint : unsigned integer size %d unknown",
1664 size);
1665 break;
1666 }
1667
1668 return val;
1669 }
1670
1671
1672 /* get the node name of the system */
1673
1674 char * ltt_trace_system_description_node_name (LttSystemDescription * s)
1675 {
1676 return s->node_name;
1677 }
1678
1679
1680 /* get the domain name of the system */
1681
1682 char * ltt_trace_system_description_domain_name (LttSystemDescription * s)
1683 {
1684 return s->domain_name;
1685 }
1686
1687
1688 /* get the description of the system */
1689
1690 char * ltt_trace_system_description_description (LttSystemDescription * s)
1691 {
1692 return s->description;
1693 }
1694
1695
1696 /* get the NTP corrected start time of the trace */
1697 LttTime ltt_trace_start_time(LttTrace *t)
1698 {
1699 return t->start_time;
1700 }
1701
1702 /* get the monotonic start time of the trace */
1703 LttTime ltt_trace_start_time_monotonic(LttTrace *t)
1704 {
1705 return t->start_time_from_tsc;
1706 }
1707
1708 static __attribute__ ((__unused__)) LttTracefile *ltt_tracefile_new()
1709 {
1710 LttTracefile *tf;
1711 tf = g_new(LttTracefile, 1);
1712 tf->event.tracefile = tf;
1713 return tf;
1714 }
1715
1716 static __attribute__ ((__unused__)) void ltt_tracefile_destroy(LttTracefile *tf)
1717 {
1718 g_free(tf);
1719 }
1720
1721 static __attribute__ ((__unused__)) void ltt_tracefile_copy(LttTracefile *dest, const LttTracefile *src)
1722 {
1723 *dest = *src;
1724 }
1725
1726 /* Before library loading... */
1727
1728 static __attribute__((constructor)) void init(void)
1729 {
1730 LTT_TRACEFILE_NAME_METADATA = g_quark_from_string("metadata");
1731 }