1 /* This file is part of the Linux Trace Toolkit viewer
2 * Copyright (C) 2005 Mathieu Desnoyers
3 *
4 * Complete rewrite from the original version made by XangXiu Yang.
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License Version 2.1 as published by the Free Software Foundation.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
18 * Boston, MA 02111-1307, USA.
19 */
20
21 #ifdef HAVE_CONFIG_H
22 #include <config.h>
23 #endif
24
25 #include <stdio.h>
26 #include <fcntl.h>
27 #include <string.h>
28 #include <dirent.h>
29 #include <sys/stat.h>
30 #include <sys/types.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <math.h>
34 #include <glib.h>
35 #include <glib/gprintf.h>
36 #include <malloc.h>
37 #include <sys/mman.h>
38 #include <string.h>
39 #include <ctype.h>
40 #include <inttypes.h>
41
42 // For realpath
43 #include <limits.h>
44 #include <stdlib.h>
45
46
47 #include <ltt/ltt.h>
48 #include "ltt-private.h"
49 #include <ltt/trace.h>
50 #include <ltt/event.h>
51 #include <ltt/ltt-types.h>
52 #include <ltt/marker.h>
53
54 /* from marker.c */
55 extern long marker_update_fields_offsets(struct marker_info *info, const char *data);
56
57 /* Tracefile names used in this file */
58
59 GQuark LTT_TRACEFILE_NAME_METADATA;
60
61 #ifndef g_open
62 #define g_open open
63 #endif
64
65
66 #define __UNUSED__ __attribute__((__unused__))
67
68 #define g_info(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_INFO, format)
69
70 #ifndef g_debug
71 #define g_debug(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, format)
72 #endif
73
74 #define g_close close
75
76 /* Those macros must be called from within a function where page_size is a known
77 * variable */
78 #define PAGE_MASK (~(page_size-1))
79 #define PAGE_ALIGN(addr) (((addr)+page_size-1)&PAGE_MASK)
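/* Example (illustration only, assuming 4096-byte pages) : PAGE_ALIGN(5000)
 * yields 8192 and PAGE_ALIGN(4096) yields 4096, i.e. the value is rounded up
 * to the next page boundary. */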
80
81 LttTrace *father_trace = NULL;
82
83 /* set the offsets of the fields belonging to the event;
84 needs information about the architecture */
85 //void set_fields_offsets(LttTracefile *tf, LttEventType *event_type);
86 //size_t get_fields_offsets(LttTracefile *tf, LttEventType *event_type, void *data);
87
88 #if 0
89 /* get the size of the field type according to
90 * The facility size information. */
91 static inline void preset_field_type_size(LttTracefile *tf,
92 LttEventType *event_type,
93 off_t offset_root, off_t offset_parent,
94 enum field_status *fixed_root, enum field_status *fixed_parent,
95 LttField *field);
96 #endif //0
97
98 /* map a fixed size or a block information from the file (fd) */
99 static gint map_block(LttTracefile * tf, guint block_num);
100
101 /* calculate nsec per cycle for the current block */
102 #if 0
103 static guint32 calc_nsecs_per_cycle(LttTracefile * t);
104 static guint64 cycles_2_ns(LttTracefile *tf, guint64 cycles);
105 #endif //0
106
107 /* go to the next event */
108 static int ltt_seek_next_event(LttTracefile *tf);
109
110 static int open_tracefiles(LttTrace *trace, gchar *root_path,
111 gchar *relative_path);
112 static int ltt_process_metadata_tracefile(LttTracefile *tf);
113 static void ltt_tracefile_time_span_get(LttTracefile *tf,
114 LttTime *start, LttTime *end);
115 static void group_time_span_get(GQuark name, gpointer data, gpointer user_data);
116 static gint map_block(LttTracefile * tf, guint block_num);
117 static void ltt_update_event_size(LttTracefile *tf);
118
119 /* Enable event debugging */
120 static int a_event_debug = 0;
121
122 void ltt_event_debug(int state)
123 {
124 a_event_debug = state;
125 }
126
127 /* trace can be NULL
128 *
129 * Return value : 0 success, 1 bad tracefile
130 */
131 static int parse_trace_header(ltt_subbuffer_header_t *header,
132 LttTracefile *tf, LttTrace *t)
133 {
134 if (header->magic_number == LTT_MAGIC_NUMBER)
135 tf->reverse_bo = 0;
136 else if(header->magic_number == LTT_REV_MAGIC_NUMBER)
137 tf->reverse_bo = 1;
138 else /* invalid magic number, bad tracefile ! */
139 return 1;
140
141 if(t) {
142 t->ltt_major_version = header->major_version;
143 t->ltt_minor_version = header->minor_version;
144 t->arch_size = header->arch_size;
145 }
146 tf->alignment = header->alignment;
147
148 /* Get float byte order : might be different from int byte order
149 * (or is set to 0 if the trace has no float (kernel trace)) */
150 tf->float_word_order = 0;
151
152 switch(header->major_version) {
153 case 0:
154 case 1:
155 g_warning("Unsupported trace version : %hhu.%hhu",
156 header->major_version, header->minor_version);
157 return 1;
158 break;
159 case 2:
160 switch(header->minor_version) {
161 case 3:
162 {
163 struct ltt_subbuffer_header_2_3 *vheader = header;
164 tf->buffer_header_size = ltt_subbuffer_header_size();
165 tf->tscbits = 27;
166 tf->eventbits = 5;
167 tf->tsc_mask = ((1ULL << tf->tscbits) - 1);
168 tf->tsc_mask_next_bit = (1ULL << tf->tscbits);
169
170 if(t) {
171 t->start_freq = ltt_get_uint64(LTT_GET_BO(tf),
172 &vheader->start_freq);
173 t->freq_scale = ltt_get_uint32(LTT_GET_BO(tf),
174 &vheader->freq_scale);
175 if(father_trace) {
176 t->start_freq = father_trace->start_freq;
177 t->freq_scale = father_trace->freq_scale;
178 } else {
179 father_trace = t;
180 }
181 t->start_tsc = ltt_get_uint64(LTT_GET_BO(tf),
182 &vheader->cycle_count_begin);
183 t->start_monotonic = 0;
184 t->start_time.tv_sec = ltt_get_uint64(LTT_GET_BO(tf),
185 &vheader->start_time_sec);
186 t->start_time.tv_nsec = ltt_get_uint64(LTT_GET_BO(tf),
187 &vheader->start_time_usec);
188 t->start_time.tv_nsec *= 1000; /* microsec to nanosec */
189
190 t->start_time_from_tsc = ltt_time_from_uint64(
191 (double)t->start_tsc
192 * 1000000000.0 * tf->trace->freq_scale
193 / (double)t->start_freq);
194 }
195 }
196 break;
197 default:
198 g_warning("Unsupported trace version : %hhu.%hhu",
199 header->major_version, header->minor_version);
200 return 1;
201 }
202 break;
203 default:
204 g_warning("Unsupported trace version : %hhu.%hhu",
205 header->major_version, header->minor_version);
206 return 1;
207 }
208 return 0;
209 }
210
211
212
213 /*****************************************************************************
214 *Function name
215 * ltt_tracefile_open : open a trace file, construct a LttTracefile
216 *Input params
217 * t : the trace containing the tracefile
218 * fileName : path name of the trace file
219 * tf : the tracefile structure
220 *Return value
221 * : 0 for success, -1 otherwise.
222 ****************************************************************************/
223
224 static gint ltt_tracefile_open(LttTrace *t, gchar * fileName, LttTracefile *tf)
225 {
226 struct stat lTDFStat; /* Trace data file status */
227 ltt_subbuffer_header_t *header;
228 int page_size = getpagesize();
229
230 //open the file
231 tf->long_name = g_quark_from_string(fileName);
232 tf->trace = t;
233 tf->fd = open(fileName, O_RDONLY);
234 if(tf->fd < 0){
235 g_warning("Unable to open input data file %s\n", fileName);
236 goto end;
237 }
238
239 // Get the file's status
240 if(fstat(tf->fd, &lTDFStat) < 0){
241 g_warning("Unable to get the status of the input data file %s\n", fileName);
242 goto close_file;
243 }
244
245 // Is the file large enough to contain a trace
246 if(lTDFStat.st_size <
247 (off_t)(ltt_subbuffer_header_size())){
248 g_print("The input data file %s does not contain a trace\n", fileName);
249 goto close_file;
250 }
251
252 /* Temporarily map the buffer start header to get trace information */
253 /* Multiple of pages aligned head */
254 tf->buffer.head = mmap(0,
255 PAGE_ALIGN(ltt_subbuffer_header_size()), PROT_READ,
256 MAP_PRIVATE, tf->fd, 0);
257 if(tf->buffer.head == MAP_FAILED) {
258 perror("Error in allocating memory for buffer of tracefile");
259 goto close_file;
260 }
261 g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
262
263 header = (ltt_subbuffer_header_t *)tf->buffer.head;
264
265 if(parse_trace_header(header, tf, NULL)) {
266 g_warning("parse_trace_header error");
267 goto unmap_file;
268 }
269
270 //store the size of the file
271 tf->file_size = lTDFStat.st_size;
272 tf->buf_size = ltt_get_uint32(LTT_GET_BO(tf), &header->buf_size);
273 tf->num_blocks = tf->file_size / tf->buf_size;
274 tf->events_lost = 0;
275 tf->subbuf_corrupt = 0;
276
277 if(munmap(tf->buffer.head,
278 PAGE_ALIGN(ltt_subbuffer_header_size()))) {
279 g_warning("unmap size : %zu\n",
280 PAGE_ALIGN(ltt_subbuffer_header_size()));
281 perror("munmap error");
282 g_assert(0);
283 }
284 tf->buffer.head = NULL;
285
286 //read the first block
287 if(map_block(tf,0)) {
288 perror("Cannot map block for tracefile");
289 goto close_file;
290 }
291
292 return 0;
293
294 /* Error */
295 unmap_file:
296 if(munmap(tf->buffer.head,
297 PAGE_ALIGN(ltt_subbuffer_header_size()))) {
298 g_warning("unmap size : %zu\n",
299 PAGE_ALIGN(ltt_subbuffer_header_size()));
300 perror("munmap error");
301 g_assert(0);
302 }
303 close_file:
304 close(tf->fd);
305 end:
306 return -1;
307 }
308
309
310 /*****************************************************************************
311 *Function name
312 * ltt_tracefile_close: close a trace file,
313 *Input params
314 * t : tracefile which will be closed
315 ****************************************************************************/
316
317 static void ltt_tracefile_close(LttTracefile *t)
318 {
319 int page_size = getpagesize();
320
321 if(t->buffer.head != NULL)
322 if(munmap(t->buffer.head, PAGE_ALIGN(t->buf_size))) {
323 g_warning("unmap size : %u\n",
324 PAGE_ALIGN(t->buf_size));
325 perror("munmap error");
326 g_assert(0);
327 }
328
329 close(t->fd);
330 }
331
332 /****************************************************************************
333 * get_absolute_pathname
334 *
335 * Return the canonical (unique) pathname of a file in the system.
336 *
337 * MD : Fixed this function so it uses realpath, properly handling
338 * previously overlooked cases (".." was not handled correctly before).
339 *
340 ****************************************************************************/
341 void get_absolute_pathname(const gchar *pathname, gchar * abs_pathname)
342 {
343 abs_pathname[0] = '\0';
344
345 if (realpath(pathname, abs_pathname) != NULL)
346 return;
347 else
348 {
349 /* error, return the original path unmodified */
350 strcpy(abs_pathname, pathname);
351 return;
352 }
353 return;
354 }
355
356 /* Parse a tracefile name of the form <name>_<number>, e.g. "metadata_0" :
357 * the left side is the channel name, the right side is the cpu number.
358 * Leading '/' characters are excluded, and a "flight-" prefix is stripped.
359 * Names without a trailing _<number> are treated as userspace tracefiles ;
360 * their tid, pgid and creation timestamp are extracted from the name instead.
361 */
362
363 static int get_tracefile_name_number(gchar *raw_name,
364 GQuark *name,
365 guint *num,
366 gulong *tid,
367 gulong *pgid,
368 guint64 *creation)
369 {
370 guint raw_name_len = strlen(raw_name);
371 gchar char_name[PATH_MAX];
372 int i;
373 int underscore_pos;
374 long int cpu_num;
375 gchar *endptr;
376 gchar *tmpptr;
377
378 /* skip leading / */
379 for(i = 0; i < raw_name_len-1;i++) {
380 if(raw_name[i] != '/')
381 break;
382 }
383 raw_name = &raw_name[i];
384 raw_name_len = strlen(raw_name);
385
386 for(i=raw_name_len-1;i>=0;i--) {
387 if(raw_name[i] == '_') break;
388 }
389 if(i==-1) { /* Either not found or name length is 0 */
390 /* This is a userspace tracefile */
391 strncpy(char_name, raw_name, raw_name_len);
392 char_name[raw_name_len] = '\0';
393 *name = g_quark_from_string(char_name);
394 *num = 0; /* unknown cpu */
395 for(i=0;i<raw_name_len;i++) {
396 if(raw_name[i] == '/') {
397 break;
398 }
399 }
400 i++;
401 for(;i<raw_name_len;i++) {
402 if(raw_name[i] == '/') {
403 break;
404 }
405 }
406 i++;
407 for(;i<raw_name_len;i++) {
408 if(raw_name[i] == '-') {
409 break;
410 }
411 }
412 if(i == raw_name_len) return -1;
413 i++;
414 tmpptr = &raw_name[i];
415 for(;i<raw_name_len;i++) {
416 if(raw_name[i] == '.') {
417 raw_name[i] = ' ';
418 break;
419 }
420 }
421 *tid = strtoul(tmpptr, &endptr, 10);
422 if(endptr == tmpptr)
423 return -1; /* No digit */
424 if(*tid == ULONG_MAX)
425 return -1; /* underflow / overflow */
426 i++;
427 tmpptr = &raw_name[i];
428 for(;i<raw_name_len;i++) {
429 if(raw_name[i] == '.') {
430 raw_name[i] = ' ';
431 break;
432 }
433 }
434 *pgid = strtoul(tmpptr, &endptr, 10);
435 if(endptr == tmpptr)
436 return -1; /* No digit */
437 if(*pgid == ULONG_MAX)
438 return -1; /* underflow / overflow */
439 i++;
440 tmpptr = &raw_name[i];
441 *creation = strtoull(tmpptr, &endptr, 10);
442 if(endptr == tmpptr)
443 return -1; /* No digit */
444 if(*creation == G_MAXUINT64)
445 return -1; /* underflow / overflow */
446 } else {
447 underscore_pos = i;
448
449 cpu_num = strtol(raw_name+underscore_pos+1, &endptr, 10);
450
451 if(endptr == raw_name+underscore_pos+1)
452 return -1; /* No digit */
453 if(cpu_num == LONG_MIN || cpu_num == LONG_MAX)
454 return -1; /* underflow / overflow */
455
456 if (!strncmp(raw_name, "flight-", sizeof("flight-") - 1)) {
457 raw_name += sizeof("flight-") - 1;
458 underscore_pos -= sizeof("flight-") - 1;
459 }
460 strncpy(char_name, raw_name, underscore_pos);
461 char_name[underscore_pos] = '\0';
462 *name = g_quark_from_string(char_name);
463 *num = cpu_num;
464 }
465
466
467 return 0;
468 }
469
470
471 GData **ltt_trace_get_tracefiles_groups(LttTrace *trace)
472 {
473 return &trace->tracefiles;
474 }
475
476
477 void compute_tracefile_group(GQuark key_id,
478 GArray *group,
479 struct compute_tracefile_group_args *args)
480 {
481 unsigned int i;
482 LttTracefile *tf;
483
484 for(i=0; i<group->len; i++) {
485 tf = &g_array_index (group, LttTracefile, i);
486 if(tf->cpu_online)
487 args->func(tf, args->func_args);
488 }
489 }
490
491
492 static void ltt_tracefile_group_destroy(gpointer data)
493 {
494 GArray *group = (GArray *)data;
495 unsigned int i;
496 LttTracefile *tf;
497
498 if (group->len > 0)
499 destroy_marker_data(g_array_index (group, LttTracefile, 0).mdata);
500 for(i=0; i<group->len; i++) {
501 tf = &g_array_index (group, LttTracefile, i);
502 if(tf->cpu_online)
503 ltt_tracefile_close(tf);
504 }
505 g_array_free(group, TRUE);
506 }
507
508 static __attribute__ ((__unused__)) gboolean ltt_tracefile_group_has_cpu_online(gpointer data)
509 {
510 GArray *group = (GArray *)data;
511 unsigned int i;
512 LttTracefile *tf;
513
514 for(i=0; i<group->len; i++) {
515 tf = &g_array_index (group, LttTracefile, i);
516 if(tf->cpu_online)
517 return 1;
518 }
519 return 0;
520 }
521
522
523 /* Open each tracefile under a specific directory. Put them in a
524 * GData : this allows accessing them by their tracefile group pathname,
525 * e.g. the control/module tracefile group is accessed with the key
526 * "control/module".
527 *
528 * relative path is the path relative to the trace root
529 * root path is the full path
530 *
531 * A tracefile group is simply an array where all the per cpu tracefiles sit.
532 */
533
534 static int open_tracefiles(LttTrace *trace, gchar *root_path, gchar *relative_path)
535 {
536 DIR *dir = opendir(root_path);
537 struct dirent *entry;
538 struct stat stat_buf;
539 int ret, i;
540 struct marker_data *mdata;
541
542 gchar path[PATH_MAX];
543 int path_len;
544 gchar *path_ptr;
545
546 int rel_path_len;
547 gchar rel_path[PATH_MAX];
548 gchar *rel_path_ptr;
549 LttTracefile tmp_tf;
550
551 if(dir == NULL) {
552 perror(root_path);
553 return ENOENT;
554 }
555
556 strncpy(path, root_path, PATH_MAX-1);
557 path_len = strlen(path);
558 path[path_len] = '/';
559 path_len++;
560 path_ptr = path + path_len;
561
562 strncpy(rel_path, relative_path, PATH_MAX-1);
563 rel_path_len = strlen(rel_path);
564 rel_path[rel_path_len] = '/';
565 rel_path_len++;
566 rel_path_ptr = rel_path + rel_path_len;
567
568 while((entry = readdir(dir)) != NULL) {
569
570 if(entry->d_name[0] == '.') continue;
571
572 strncpy(path_ptr, entry->d_name, PATH_MAX - path_len);
573 strncpy(rel_path_ptr, entry->d_name, PATH_MAX - rel_path_len);
574
575 ret = stat(path, &stat_buf);
576 if(ret == -1) {
577 perror(path);
578 continue;
579 }
580
581 g_debug("Tracefile file or directory : %s\n", path);
582
583 // if(strcmp(rel_path, "/eventdefs") == 0) continue;
584
585 if(S_ISDIR(stat_buf.st_mode)) {
586
587 g_debug("Entering subdirectory...\n");
588 ret = open_tracefiles(trace, path, rel_path);
589 if(ret < 0) continue;
590 } else if(S_ISREG(stat_buf.st_mode)) {
591 GQuark name;
592 guint num;
593 gulong tid, pgid;
594 guint64 creation;
595 GArray *group;
596 num = 0;
597 tid = pgid = 0;
598 creation = 0;
599 if(get_tracefile_name_number(rel_path, &name, &num, &tid, &pgid, &creation))
600 continue; /* invalid name */
601
602 g_debug("Opening file.\n");
603 if(ltt_tracefile_open(trace, path, &tmp_tf)) {
604 g_info("Error opening tracefile %s", path);
605
606 continue; /* error opening the tracefile : bad magic number ? */
607 }
608
609 g_debug("Tracefile name is %s and number is %u",
610 g_quark_to_string(name), num);
611
612 mdata = NULL;
613 tmp_tf.cpu_online = 1;
614 tmp_tf.cpu_num = num;
615 tmp_tf.name = name;
616 tmp_tf.tid = tid;
617 tmp_tf.pgid = pgid;
618 tmp_tf.creation = creation;
619 group = g_datalist_id_get_data(&trace->tracefiles, name);
620 if(group == NULL) {
621 /* Elements are automatically cleared when the array is allocated.
622 * This leaves the cpu_online variable at 0 : cpu offline by default.
623 */
624 group = g_array_sized_new (FALSE, TRUE, sizeof(LttTracefile), 10);
625 g_datalist_id_set_data_full(&trace->tracefiles, name,
626 group, ltt_tracefile_group_destroy);
627 mdata = allocate_marker_data();
628 if (!mdata)
629 g_error("Error in allocating marker data");
630 }
631
632 /* Add the per cpu tracefile to the named group */
633 unsigned int old_len = group->len;
634 if(num+1 > old_len)
635 group = g_array_set_size(group, num+1);
636
637 g_assert(group->len > 0);
638 if (!mdata)
639 mdata = g_array_index (group, LttTracefile, 0).mdata;
640
641 g_array_index (group, LttTracefile, num) = tmp_tf;
642 g_array_index (group, LttTracefile, num).event.tracefile =
643 &g_array_index (group, LttTracefile, num);
644 for (i = 0; i < group->len; i++)
645 g_array_index (group, LttTracefile, i).mdata = mdata;
646 }
647 }
648
649 closedir(dir);
650
651 return 0;
652 }
653
654
655 /* Presumes the tracefile is already positioned at the beginning. This makes
656 * sense because it must be called right after opening the tracefile. */
657 static int ltt_process_metadata_tracefile(LttTracefile *tf)
658 {
659 int err;
660
661 while(1) {
662 err = ltt_tracefile_read_seek(tf);
663 if(err == EPERM) goto seek_error;
664 else if(err == ERANGE) break; /* End of tracefile */
665
666 err = ltt_tracefile_read_update_event(tf);
667 if(err) goto update_error;
668
669 /* The rules are :
670 * It contains only core events :
671 * 0 : set_marker_id
672 * 1 : set_marker_format
673 */
674 if(tf->event.event_id >= MARKER_CORE_IDS) {
675 /* Should only contain core events */
676 g_warning("Error in processing metadata file %s, "
677 "should not contain event id %u.", g_quark_to_string(tf->name),
678 tf->event.event_id);
679 err = EPERM;
680 goto event_id_error;
681 } else {
682 char *pos;
683 const char *channel_name, *marker_name, *format;
684 uint16_t id;
685 guint8 int_size, long_size, pointer_size, size_t_size, alignment;
686
687 switch((enum marker_id)tf->event.event_id) {
688 case MARKER_ID_SET_MARKER_ID:
689 channel_name = pos = tf->event.data;
690 pos += strlen(channel_name) + 1;
691 marker_name = pos;
692 g_debug("Doing MARKER_ID_SET_MARKER_ID of marker %s.%s",
693 channel_name, marker_name);
694 pos += strlen(marker_name) + 1;
695 pos += ltt_align((size_t)pos, sizeof(guint16), tf->alignment);
696 id = ltt_get_uint16(LTT_GET_BO(tf), pos);
697 g_debug("In MARKER_ID_SET_MARKER_ID of marker %s.%s id %hu",
698 channel_name, marker_name, id);
699 pos += sizeof(guint16);
700 int_size = *(guint8*)pos;
701 pos += sizeof(guint8);
702 long_size = *(guint8*)pos;
703 pos += sizeof(guint8);
704 pointer_size = *(guint8*)pos;
705 pos += sizeof(guint8);
706 size_t_size = *(guint8*)pos;
707 pos += sizeof(guint8);
708 alignment = *(guint8*)pos;
709 pos += sizeof(guint8);
710 marker_id_event(tf->trace,
711 g_quark_from_string(channel_name),
712 g_quark_from_string(marker_name),
713 id, int_size, long_size,
714 pointer_size, size_t_size, alignment);
715 break;
716 case MARKER_ID_SET_MARKER_FORMAT:
717 channel_name = pos = tf->event.data;
718 pos += strlen(channel_name) + 1;
719 marker_name = pos;
720 g_debug("Doing MARKER_ID_SET_MARKER_FORMAT of marker %s.%s",
721 channel_name, marker_name);
722 pos += strlen(marker_name) + 1;
723 format = pos;
724 pos += strlen(format) + 1;
725 marker_format_event(tf->trace,
726 g_quark_from_string(channel_name),
727 g_quark_from_string(marker_name),
728 format);
729 /* get information from dictionary TODO */
730 break;
731 default:
732 g_warning("Error in processing metadata file %s, "
733 "unknown event id %hhu.",
734 g_quark_to_string(tf->name),
735 tf->event.event_id);
736 err = EPERM;
737 goto event_id_error;
738 }
739 }
740 }
741 return 0;
742
743 /* Error handling */
744 event_id_error:
745 update_error:
746 seek_error:
747 g_warning("An error occured in metadata tracefile parsing");
748 return err;
749 }
750
751 /*
752 * Open a trace and return its LttTrace handle.
753 *
754 * pathname must be the directory of the trace
755 */
756
757 LttTrace *ltt_trace_open(const gchar *pathname)
758 {
759 gchar abs_path[PATH_MAX];
760 LttTrace * t;
761 LttTracefile *tf;
762 GArray *group;
763 unsigned int i;
764 int ret;
765 ltt_subbuffer_header_t *header;
766 DIR *dir;
767 struct dirent *entry;
768 struct stat stat_buf;
769 gchar path[PATH_MAX];
770
771 t = g_new(LttTrace, 1);
772 if(!t) goto alloc_error;
773
774 get_absolute_pathname(pathname, abs_path);
775 t->pathname = g_quark_from_string(abs_path);
776
777 g_datalist_init(&t->tracefiles);
778
779 /* Test to see if it looks like a trace */
780 dir = opendir(abs_path);
781 if(dir == NULL) {
782 perror(abs_path);
783 goto open_error;
784 }
785 while((entry = readdir(dir)) != NULL) {
786 strcpy(path, abs_path);
787 strcat(path, "/");
788 strcat(path, entry->d_name);
789 ret = stat(path, &stat_buf);
790 if(ret == -1) {
791 perror(path);
792 continue;
793 }
794 }
795 closedir(dir);
796
797 /* Open all the tracefiles */
798 if(open_tracefiles(t, abs_path, "")) {
799 g_warning("Error opening tracefile %s", abs_path);
800 goto find_error;
801 }
802
803 /* Parse each trace metadata_N file : get runtime facility info */
804 group = g_datalist_id_get_data(&t->tracefiles, LTT_TRACEFILE_NAME_METADATA);
805 if(group == NULL) {
806 g_warning("Trace %s has no metadata tracefile", abs_path);
807 goto find_error;
808 }
809
810 /*
811 * Get the trace information for the metadata_0 tracefile.
812 * Getting a correct trace start_time and start_tsc is ensured by the fact
813 * that no subbuffers are supposed to be lost in the metadata channel.
814 * Therefore, the first subbuffer contains the start_tsc timestamp in its
815 * buffer header.
816 */
817 g_assert(group->len > 0);
818 tf = &g_array_index (group, LttTracefile, 0);
819 header = (ltt_subbuffer_header_t *)tf->buffer.head;
820 ret = parse_trace_header(header, tf, t);
821 g_assert(!ret);
822
823 t->num_cpu = group->len;
824
825 //ret = allocate_marker_data(t);
826 //if (ret)
827 // g_error("Error in allocating marker data");
828
829 for(i=0; i<group->len; i++) {
830 tf = &g_array_index (group, LttTracefile, i);
831 if (tf->cpu_online)
832 if(ltt_process_metadata_tracefile(tf))
833 goto find_error;
834 // goto metadata_error;
835 }
836
837 return t;
838
839 /* Error handling */
840 //metadata_error:
841 // destroy_marker_data(t);
842 find_error:
843 g_datalist_clear(&t->tracefiles);
844 open_error:
845 g_free(t);
846 alloc_error:
847 return NULL;
848
849 }
850
851 /* Open another, completely independent instance of a trace.
852 *
853 * A read on this new instance will read the first event of the trace.
854 *
855 * When we copy a trace, we want all the opening actions to happen again :
856 * the trace will be reopened and totally independent of the original.
857 * That's why we call ltt_trace_open.
858 */
859 LttTrace *ltt_trace_copy(LttTrace *self)
860 {
861 return ltt_trace_open(g_quark_to_string(self->pathname));
862 }
863
864 /*
865 * Close a trace
866 */
867
868 void ltt_trace_close(LttTrace *t)
869 {
870 g_datalist_clear(&t->tracefiles);
871 g_free(t);
872 }
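
/*
 * Minimal usage sketch of the API defined above : open a trace, get its time
 * span, walk the events of one tracefile, then close it.  Kept inside #if 0
 * because it is illustrative only; the choice of the metadata tracefile group
 * and the absence of error reporting are simplifications for the example.
 */
#if 0
static void example_walk_trace(const gchar *pathname)
{
  LttTrace *t;
  GArray *group;
  LttTracefile *tf;
  LttTime start, end;
  guint nb_events = 0;
  int err;

  t = ltt_trace_open(pathname);  /* pathname is the trace directory */
  if(t == NULL)
    return;

  ltt_trace_time_span_get(t, &start, &end);

  /* Tracefile groups are keyed by their pathname relative to the trace root */
  group = g_datalist_id_get_data(&t->tracefiles, LTT_TRACEFILE_NAME_METADATA);
  if(group != NULL && group->len > 0) {
    tf = &g_array_index (group, LttTracefile, 0);
    if(tf->cpu_online) {
      /* Seek to the trace start, then read until ERANGE (end of trace).
       * The event seeked to must be consumed before calling
       * ltt_tracefile_read, which advances to the next event. */
      err = ltt_tracefile_seek_time(tf, start);
      while(err == 0) {
        LttEvent *ev = ltt_tracefile_get_event(tf);
        nb_events++;
        (void)ev;  /* inspect ev->event_id, ev->event_time, ev->data here */
        err = ltt_tracefile_read(tf);
      }
    }
  }
  g_debug("walked %u events", nb_events);

  ltt_trace_close(t);
}
#endif //0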
873
874
875 /*****************************************************************************
876 * Get the start time and end time of the trace
877 ****************************************************************************/
878
879 void ltt_tracefile_time_span_get(LttTracefile *tf,
880 LttTime *start, LttTime *end)
881 {
882 int err;
883
884 err = map_block(tf, 0);
885 if(unlikely(err)) {
886 g_error("Can not map block");
887 *start = ltt_time_infinite;
888 } else
889 *start = tf->buffer.begin.timestamp;
890
891 err = map_block(tf, tf->num_blocks - 1); /* Last block */
892 if(unlikely(err)) {
893 g_error("Can not map block");
894 *end = ltt_time_zero;
895 } else
896 *end = tf->buffer.end.timestamp;
897 }
898
899 struct tracefile_time_span_get_args {
900 LttTrace *t;
901 LttTime *start;
902 LttTime *end;
903 };
904
905 static void group_time_span_get(GQuark name, gpointer data, gpointer user_data)
906 {
907 struct tracefile_time_span_get_args *args =
908 (struct tracefile_time_span_get_args*)user_data;
909
910 GArray *group = (GArray *)data;
911 unsigned int i;
912 LttTracefile *tf;
913 LttTime tmp_start;
914 LttTime tmp_end;
915
916 for(i=0; i<group->len; i++) {
917 tf = &g_array_index (group, LttTracefile, i);
918 if(tf->cpu_online) {
919 ltt_tracefile_time_span_get(tf, &tmp_start, &tmp_end);
920 if(ltt_time_compare(*args->start, tmp_start)>0) *args->start = tmp_start;
921 if(ltt_time_compare(*args->end, tmp_end)<0) *args->end = tmp_end;
922 }
923 }
924 }
925
926 /* return the start and end time of a trace */
927
928 void ltt_trace_time_span_get(LttTrace *t, LttTime *start, LttTime *end)
929 {
930 LttTime min_start = ltt_time_infinite;
931 LttTime max_end = ltt_time_zero;
932 struct tracefile_time_span_get_args args = { t, &min_start, &max_end };
933
934 g_datalist_foreach(&t->tracefiles, &group_time_span_get, &args);
935
936 if(start != NULL) *start = min_start;
937 if(end != NULL) *end = max_end;
938
939 }
940
941
942 /* Seek to the first event in a tracefile that has a time equal to or greater
943 * than the time passed as parameter.
944 *
945 * If the time parameter is outside the tracefile time span, either seek to the
946 * first event (time before the span) or return ERANGE (time after the span).
947 *
948 * If the time parameter is before the first event, we have to seek specially
949 * to it.
950 *
951 * If the time is after the end of the trace, return ERANGE.
952 *
953 * Do a binary search to find the right block, then a sequential search in the
954 * block to find the event.
955 *
956 * In the special case where the requested time falls inside a block that has
957 * no event corresponding to it, the first event of the next block will be
958 * selected.
959 *
960 * IMPORTANT NOTE : // FIXME everywhere...
961 *
962 * You MUST NOT do a ltt_tracefile_read right after a ltt_tracefile_seek_time :
963 * you will jump over an event if you do.
964 *
965 * Return value : 0 : no error, tf->event can be used
966 * ERANGE : the time is after the last event of the trace
967 * otherwise : this is an error.
968 *
969 * */
970
971 int ltt_tracefile_seek_time(LttTracefile *tf, LttTime time)
972 {
973 int ret = 0;
974 int err;
975 unsigned int block_num, high, low;
976
977 /* seek at the beginning of trace */
978 err = map_block(tf, 0); /* First block */
979 if(unlikely(err)) {
980 g_error("Can not map block");
981 goto fail;
982 }
983
984 /* If the time is lower or equal the beginning of the trace,
985 * go to the first event. */
986 if(ltt_time_compare(time, tf->buffer.begin.timestamp) <= 0) {
987 ret = ltt_tracefile_read(tf);
988 if(ret == ERANGE) goto range;
989 else if (ret) goto fail;
990 goto found; /* There is either no event in the trace or the event points
991 to the first event in the trace */
992 }
993
994 err = map_block(tf, tf->num_blocks - 1); /* Last block */
995 if(unlikely(err)) {
996 g_error("Can not map block");
997 goto fail;
998 }
999
1000 /* If the time is after the end of the trace, return ERANGE. */
1001 if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
1002 goto range;
1003 }
1004
1005 /* Binary search the block */
1006 high = tf->num_blocks - 1;
1007 low = 0;
1008
1009 while(1) {
1010 block_num = ((high-low) / 2) + low;
1011
1012 err = map_block(tf, block_num);
1013 if(unlikely(err)) {
1014 g_error("Can not map block");
1015 goto fail;
1016 }
1017 if(high == low) {
1018 /* We cannot divide anymore : this is what would happen if the time
1019 * requested was exactly between two consecutive buffers' end and start
1020 * timestamps. This is also what would happen if we didn't deal with
1021 * out-of-span cases earlier in this function. */
1022 /* The event is right in the buffer!
1023 * (or in the next buffer first event) */
1024 while(1) {
1025 ret = ltt_tracefile_read(tf);
1026 if(ret == ERANGE) goto range; /* ERANGE or EPERM */
1027 else if(ret) goto fail;
1028
1029 if(ltt_time_compare(time, tf->event.event_time) <= 0)
1030 goto found;
1031 }
1032
1033 } else if(ltt_time_compare(time, tf->buffer.begin.timestamp) < 0) {
1034 /* go to lower part */
1035 high = block_num - 1;
1036 } else if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
1037 /* go to higher part */
1038 low = block_num + 1;
1039 } else {/* The event is right in the buffer!
1040 (or in the next buffer first event) */
1041 while(1) {
1042 ret = ltt_tracefile_read(tf);
1043 if(ret == ERANGE) goto range; /* ERANGE or EPERM */
1044 else if(ret) goto fail;
1045
1046 if(ltt_time_compare(time, tf->event.event_time) <= 0)
1047 break;
1048 }
1049 goto found;
1050 }
1051 }
1052
1053 found:
1054 return 0;
1055 range:
1056 return ERANGE;
1057
1058 /* Error handling */
1059 fail:
1060 g_error("ltt_tracefile_seek_time failed on tracefile %s",
1061 g_quark_to_string(tf->name));
1062 return EPERM;
1063 }
1064
1065 /* Seek to a position indicated by an LttEventPosition
1066 */
1067
1068 int ltt_tracefile_seek_position(LttTracefile *tf, const LttEventPosition *ep)
1069 {
1070 int err;
1071
1072 if(ep->tracefile != tf) {
1073 goto fail;
1074 }
1075
1076 err = map_block(tf, ep->block);
1077 if(unlikely(err)) {
1078 g_error("Can not map block");
1079 goto fail;
1080 }
1081
1082 tf->event.offset = ep->offset;
1083
1084 /* Put back the event real tsc */
1085 tf->event.tsc = ep->tsc;
1086 tf->buffer.tsc = ep->tsc;
1087
1088 err = ltt_tracefile_read_update_event(tf);
1089 if(err) goto fail;
1090
1091 /* deactivate this, as it does nothing for now
1092 err = ltt_tracefile_read_op(tf);
1093 if(err) goto fail;
1094 */
1095
1096 return 0;
1097
1098 fail:
1099 g_error("ltt_tracefile_seek_time failed on tracefile %s",
1100 g_quark_to_string(tf->name));
1101 return 1;
1102 }
1103
1104 /* Given a TSC value, return the LttTime (seconds,nanoseconds) it
1105 * corresponds to.
1106 */
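/*
 * Worked example (illustration only, assuming a 1 GHz clock, i.e.
 * start_freq = 1000000000 and freq_scale = 1) : a tsc 2500000000 cycles past
 * trace->start_tsc converts to 2500000000 * 1e9 * 1 / 1e9 = 2.5e9 ns,
 * i.e. 2.5 s added to start_time_from_tsc.
 */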
1107
1108 LttTime ltt_interpolate_time_from_tsc(LttTracefile *tf, guint64 tsc)
1109 {
1110 LttTime time;
1111
1112 if(tsc > tf->trace->start_tsc) {
1113 time = ltt_time_from_uint64(
1114 (double)(tsc - tf->trace->start_tsc)
1115 * 1000000000.0 * tf->trace->freq_scale
1116 / (double)tf->trace->start_freq);
1117 time = ltt_time_add(tf->trace->start_time_from_tsc, time);
1118 } else {
1119 time = ltt_time_from_uint64(
1120 (double)(tf->trace->start_tsc - tsc)
1121 * 1000000000.0 * tf->trace->freq_scale
1122 / (double)tf->trace->start_freq);
1123 time = ltt_time_sub(tf->trace->start_time_from_tsc, time);
1124 }
1125 return time;
1126 }
1127
1128 /* Calculate the real event time based on the buffer boundaries */
1129 LttTime ltt_interpolate_time(LttTracefile *tf, LttEvent *event)
1130 {
1131 return ltt_interpolate_time_from_tsc(tf, tf->buffer.tsc);
1132 }
1133
1134
1135 /* Get the current event of the tracefile : valid until the next read */
1136 LttEvent *ltt_tracefile_get_event(LttTracefile *tf)
1137 {
1138 return &tf->event;
1139 }
1140
1141
1142
1143 /*****************************************************************************
1144 *Function name
1145 * ltt_tracefile_read : Read the next event in the tracefile
1146 *Input params
1147 * t : tracefile
1148 *Return value
1149 *
1150 * Returns 0 if an event can be used in tf->event.
1151 * Returns ERANGE on end of trace. The event in tf->event still can be used
1152 * (if the last block was not empty).
1153 * Returns EPERM on error.
1154 *
1155 * This function updates the tracefile event structure (tf->event) so that it
1156 * describes the event the tracefile is currently positioned on.
1157 *
1158 * Note : you must call ltt_tracefile_seek to the beginning of the trace to
1159 * reinitialize it after an error if you want results to be coherent.
1160 * Such an error happens when the last buffer of the trace has no event : the
1161 * end of trace would not be returned, but an error instead.
1162 * We make the assumption there is at least one event per buffer.
1163 ****************************************************************************/
1164
1165 int ltt_tracefile_read(LttTracefile *tf)
1166 {
1167 int err;
1168
1169 err = ltt_tracefile_read_seek(tf);
1170 if(err) return err;
1171 err = ltt_tracefile_read_update_event(tf);
1172 if(err) return err;
1173
1174 /* deactivate this, as it does nothing for now
1175 err = ltt_tracefile_read_op(tf);
1176 if(err) return err;
1177 */
1178
1179 return 0;
1180 }
1181
1182 int ltt_tracefile_read_seek(LttTracefile *tf)
1183 {
1184 int err;
1185
1186 /* Get next buffer until we finally have an event, or end of trace */
1187 while(1) {
1188 err = ltt_seek_next_event(tf);
1189 if(unlikely(err == ENOPROTOOPT)) {
1190 return EPERM;
1191 }
1192
1193 /* Are we at the end of the buffer ? */
1194 if(err == ERANGE) {
1195 if(unlikely(tf->buffer.index == tf->num_blocks-1)){ /* end of trace ? */
1196 return ERANGE;
1197 } else {
1198 /* get next block */
1199 err = map_block(tf, tf->buffer.index + 1);
1200 if(unlikely(err)) {
1201 g_error("Can not map block");
1202 return EPERM;
1203 }
1204 }
1205 } else break; /* We found an event ! */
1206 }
1207
1208 return 0;
1209 }
1210
1211 /* do an operation when reading a new event */
1212
1213 /* This function does nothing for now */
1214 #if 0
1215 int ltt_tracefile_read_op(LttTracefile *tf)
1216 {
1217 LttEvent *event;
1218
1219 event = &tf->event;
1220
1221 /* do event specific operation */
1222
1223 /* nothing */
1224
1225 return 0;
1226 }
1227 #endif
1228
1229 static void print_debug_event_header(LttEvent *ev, void *start_pos, void *end_pos)
1230 {
1231 unsigned int offset = 0;
1232 int i, j;
1233
1234 g_printf("Event header (tracefile %s offset %" PRIx64 "):\n",
1235 g_quark_to_string(ev->tracefile->long_name),
1236 ((uint64_t)ev->tracefile->buffer.index * ev->tracefile->buf_size)
1237 + (long)start_pos - (long)ev->tracefile->buffer.head);
1238
1239 while (offset < (long)end_pos - (long)start_pos) {
1240 g_printf("%8lx", (long)start_pos - (long)ev->tracefile->buffer.head + offset);
1241 g_printf(" ");
1242
1243 for (i = 0; i < 4 ; i++) {
1244 for (j = 0; j < 4; j++) {
1245 if (offset + ((i * 4) + j) <
1246 (long)end_pos - (long)start_pos)
1247 g_printf("%02hhX",
1248 ((char*)start_pos)[offset + ((i * 4) + j)]);
1249 else
1250 g_printf(" ");
1251 g_printf(" ");
1252 }
1253 if (i < 4)
1254 g_printf(" ");
1255 }
1256 offset+=16;
1257 g_printf("\n");
1258 }
1259 }
1260
1261
1262 /* Same as ltt_tracefile_read, but does not seek to the next event nor call
1263 * the event-specific operation. */
1264 int ltt_tracefile_read_update_event(LttTracefile *tf)
1265 {
1266 void * pos;
1267 LttEvent *event;
1268 void *pos_aligned;
1269
1270 event = &tf->event;
1271 pos = tf->buffer.head + event->offset;
1272
1273 /* Read event header */
1274
1275 /* Align the head */
1276 pos += ltt_align((size_t)pos, sizeof(guint32), tf->alignment);
1277 pos_aligned = pos;
1278
1279 event->timestamp = ltt_get_uint32(LTT_GET_BO(tf), pos);
1280 event->event_id = event->timestamp >> tf->tscbits;
1281 event->timestamp = event->timestamp & tf->tsc_mask;
1282 pos += sizeof(guint32);
1283
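/* The 32-bit word just read packs the compact event header : the top
 * (32 - tf->tscbits) bits hold the event id and the low tf->tscbits bits hold
 * the truncated timestamp. Reserved ids 29, 30 and 31 flag extended headers
 * that carry the real 16-bit event id and, depending on the flag, a 16/32-bit
 * event size and/or a full 64-bit tsc, as decoded below. */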
1284 switch (event->event_id) {
1285 case 29: /* LTT_RFLAG_ID_SIZE_TSC */
1286 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1287 pos += sizeof(guint16);
1288 event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
1289 pos += sizeof(guint16);
1290 if (event->event_size == 0xFFFF) {
1291 event->event_size = ltt_get_uint32(LTT_GET_BO(tf), pos);
1292 pos += sizeof(guint32);
1293 }
1294 pos += ltt_align((size_t)pos, sizeof(guint64), tf->alignment);
1295 tf->buffer.tsc = ltt_get_uint64(LTT_GET_BO(tf), pos);
1296 pos += sizeof(guint64);
1297 break;
1298 case 30: /* LTT_RFLAG_ID_SIZE */
1299 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1300 pos += sizeof(guint16);
1301 event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
1302 pos += sizeof(guint16);
1303 if (event->event_size == 0xFFFF) {
1304 event->event_size = ltt_get_uint32(LTT_GET_BO(tf), pos);
1305 pos += sizeof(guint32);
1306 }
1307 break;
1308 case 31: /* LTT_RFLAG_ID */
1309 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1310 pos += sizeof(guint16);
1311 event->event_size = G_MAXUINT;
1312 break;
1313 default:
1314 event->event_size = G_MAXUINT;
1315 break;
1316 }
1317
1318 if (likely(event->event_id != 29)) {
1319 /* No extended timestamp */
1320 if (event->timestamp < (tf->buffer.tsc & tf->tsc_mask))
1321 tf->buffer.tsc = ((tf->buffer.tsc & ~tf->tsc_mask) /* overflow */
1322 + tf->tsc_mask_next_bit)
1323 | (guint64)event->timestamp;
1324 else
1325 tf->buffer.tsc = (tf->buffer.tsc & ~tf->tsc_mask) /* no overflow */
1326 | (guint64)event->timestamp;
1327 }
1328 event->tsc = tf->buffer.tsc;
1329
1330 event->event_time = ltt_interpolate_time(tf, event);
1331
1332 if (a_event_debug)
1333 print_debug_event_header(event, pos_aligned, pos);
1334
1335 event->data = pos;
1336
1337 /*
1338 * Let ltt_update_event_size update event->data according to the largest
1339 * alignment within the payload.
1340 * Get the data size and update the event fields with the current
1341 * information. */
1342 ltt_update_event_size(tf);
1343
1344 return 0;
1345 }
1346
1347
1348 /****************************************************************************
1349 *Function name
1350 * map_block : map a block from the file
1351 *Input Params
1352 * lttdes : ltt trace file
1353 * whichBlock : the block which will be read
1354 *return value
1355 * 0 : success
1356 * EINVAL : lseek fail
1357 * EIO : can not read from the file
1358 ****************************************************************************/
1359
1360 static gint map_block(LttTracefile * tf, guint block_num)
1361 {
1362 int page_size = getpagesize();
1363 ltt_subbuffer_header_t *header;
1364
1365 g_assert(block_num < tf->num_blocks);
1366
1367 if(tf->buffer.head != NULL) {
1368 if(munmap(tf->buffer.head, PAGE_ALIGN(tf->buf_size))) {
1369 g_warning("unmap size : %u\n",
1370 PAGE_ALIGN(tf->buf_size));
1371 perror("munmap error");
1372 g_assert(0);
1373 }
1374 }
1375
1376 /* Multiple of pages aligned head */
1377 tf->buffer.head = mmap(0,
1378 PAGE_ALIGN(tf->buf_size),
1379 PROT_READ, MAP_PRIVATE, tf->fd,
1380 PAGE_ALIGN((off_t)tf->buf_size * (off_t)block_num));
1381
1382 if(tf->buffer.head == MAP_FAILED) {
1383 perror("Error in allocating memory for buffer of tracefile");
1384 g_assert(0);
1385 goto map_error;
1386 }
1387 g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
1388
1389
1390 tf->buffer.index = block_num;
1391
1392 header = (ltt_subbuffer_header_t *)tf->buffer.head;
1393
1394 tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
1395 &header->cycle_count_begin);
1396 tf->buffer.begin.freq = tf->trace->start_freq;
1397
1398 tf->buffer.begin.timestamp = ltt_interpolate_time_from_tsc(tf,
1399 tf->buffer.begin.cycle_count);
1400 tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
1401 &header->cycle_count_end);
1402 tf->buffer.end.freq = tf->trace->start_freq;
1403
1404 tf->buffer.lost_size = ltt_get_uint32(LTT_GET_BO(tf),
1405 &header->lost_size);
1406 tf->buffer.end.timestamp = ltt_interpolate_time_from_tsc(tf,
1407 tf->buffer.end.cycle_count);
1408 tf->buffer.tsc = tf->buffer.begin.cycle_count;
1409 tf->event.tsc = tf->buffer.tsc;
1410 tf->buffer.freq = tf->buffer.begin.freq;
1411
1412 /* FIXME
1413 * eventually support variable buffer size : will need a partial pre-read of
1414 * the headers to create an index when we open the trace... eventually. */
1415 g_assert(tf->buf_size == ltt_get_uint32(LTT_GET_BO(tf),
1416 &header->buf_size));
1417
1418 /* Make the current event point to the beginning of the buffer :
1419 * it means that the event read must get the first event. */
1420 tf->event.tracefile = tf;
1421 tf->event.block = block_num;
1422 tf->event.offset = 0;
1423
1424 if (header->events_lost) {
1425 g_warning("%d events lost so far in tracefile %s at block %u",
1426 (guint)header->events_lost,
1427 g_quark_to_string(tf->long_name),
1428 block_num);
1429 tf->events_lost = header->events_lost;
1430 }
1431 if (header->subbuf_corrupt) {
1432 g_warning("%d subbuffer(s) corrupted so far in tracefile %s at block %u",
1433 (guint)header->subbuf_corrupt,
1434 g_quark_to_string(tf->long_name),
1435 block_num);
1436 tf->subbuf_corrupt = header->subbuf_corrupt;
1437 }
1438
1439 return 0;
1440
1441 map_error:
1442 return -errno;
1443 }
1444
1445 static void print_debug_event_data(LttEvent *ev)
1446 {
1447 unsigned int offset = 0;
1448 int i, j;
1449
1450 if (!max(ev->event_size, ev->data_size))
1451 return;
1452
1453 g_printf("Event data (tracefile %s offset %" PRIx64 "):\n",
1454 g_quark_to_string(ev->tracefile->long_name),
1455 ((uint64_t)ev->tracefile->buffer.index * ev->tracefile->buf_size)
1456 + (long)ev->data - (long)ev->tracefile->buffer.head);
1457
1458 while (offset < max(ev->event_size, ev->data_size)) {
1459 g_printf("%8lx", (long)ev->data + offset
1460 - (long)ev->tracefile->buffer.head);
1461 g_printf(" ");
1462
1463 for (i = 0; i < 4 ; i++) {
1464 for (j = 0; j < 4; j++) {
1465 if (offset + ((i * 4) + j) < max(ev->event_size, ev->data_size))
1466 g_printf("%02hhX", ((char*)ev->data)[offset + ((i * 4) + j)]);
1467 else
1468 g_printf(" ");
1469 g_printf(" ");
1470 }
1471 if (i < 4)
1472 g_printf(" ");
1473 }
1474
1475 g_printf(" ");
1476
1477 for (i = 0; i < 4; i++) {
1478 for (j = 0; j < 4; j++) {
1479 if (offset + ((i * 4) + j) < max(ev->event_size, ev->data_size)) {
1480 if (isprint(((char*)ev->data)[offset + ((i * 4) + j)]))
1481 g_printf("%c", ((char*)ev->data)[offset + ((i * 4) + j)]);
1482 else
1483 g_printf(".");
1484 } else
1485 g_printf(" ");
1486 }
1487 }
1488 offset+=16;
1489 g_printf("\n");
1490 }
1491 }
1492
1493 /* It will update the fields offsets too */
1494 void ltt_update_event_size(LttTracefile *tf)
1495 {
1496 off_t size = 0;
1497 struct marker_info *info;
1498
1499 if (tf->name == LTT_TRACEFILE_NAME_METADATA) {
1500 switch((enum marker_id)tf->event.event_id) {
1501 case MARKER_ID_SET_MARKER_ID:
1502 size = strlen((char*)tf->event.data) + 1;
1503 g_debug("marker %s id set", (char*)tf->event.data + size);
1504 size += strlen((char*)tf->event.data + size) + 1;
1505 size += ltt_align(size, sizeof(guint16), tf->alignment);
1506 size += sizeof(guint16);
1507 size += sizeof(guint8);
1508 size += sizeof(guint8);
1509 size += sizeof(guint8);
1510 size += sizeof(guint8);
1511 size += sizeof(guint8);
1512 break;
1513 case MARKER_ID_SET_MARKER_FORMAT:
1514 size = strlen((char*)tf->event.data) + 1;
1515 g_debug("marker %s format set", (char*)tf->event.data);
1516 size += strlen((char*)tf->event.data + size) + 1;
1517 size += strlen((char*)tf->event.data + size) + 1;
1518 break;
1519 }
1520 }
1521
1522 info = marker_get_info_from_id(tf->mdata, tf->event.event_id);
1523
1524 if (tf->event.event_id >= MARKER_CORE_IDS)
1525 g_assert(info != NULL);
1526
1527 /* Do not update the field offsets of core markers during the initial read of
1528 * the metadata tracefile, when the information about these markers does not
1529 * exist yet. */
1530 if (likely(info && info->fields)) {
1531 /* alignment */
1532 tf->event.data += ltt_align((off_t)(unsigned long)tf->event.data,
1533 info->largest_align,
1534 info->alignment);
1535 /* size, dynamically computed */
1536 if (info->size != -1)
1537 size = info->size;
1538 else
1539 size = marker_update_fields_offsets(marker_get_info_from_id(tf->mdata,
1540 tf->event.event_id), tf->event.data);
1541 }
1542
1543 tf->event.data_size = size;
1544
1545 /* Check consistency between kernel and LTTV structure sizes */
1546 if(tf->event.event_size == G_MAXUINT) {
1547 /* Event size too big to fit in the event size field */
1548 tf->event.event_size = tf->event.data_size;
1549 }
1550
1551 if (a_event_debug)
1552 print_debug_event_data(&tf->event);
1553
1554 if (tf->event.data_size != tf->event.event_size) {
1555 struct marker_info *info = marker_get_info_from_id(tf->mdata,
1556 tf->event.event_id);
1557 if (!info)
1558 g_error("Undescribed event %hhu in channel %s", tf->event.event_id,
1559 g_quark_to_string(tf->name));
1560 g_error("Kernel/LTTV event size differs for event %s: kernel %u, LTTV %u",
1561 g_quark_to_string(info->name),
1562 tf->event.event_size, tf->event.data_size);
1563 exit(-1);
1564 }
1565 }
1566
1567
1568 /* Take the tf current event offset and use the event id to figure out where
1569 * the next event offset is.
1570 *
1571 * This is an internal function not meant to be used elsewhere : it will
1572 * not jump over the current block limits. Please consider using
1573 * ltt_tracefile_read to do this.
1574 *
1575 * Returns 0 on success
1576 * ERANGE if we are at the end of the buffer.
1577 * ENOPROTOOPT if an error occurred when getting the current event size.
1578 */
1579 static int ltt_seek_next_event(LttTracefile *tf)
1580 {
1581 int ret = 0;
1582 void *pos;
1583
1584 /* seek over the buffer header if we are at the buffer start */
1585 if(tf->event.offset == 0) {
1586 tf->event.offset += tf->buffer_header_size;
1587
1588 if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
1589 ret = ERANGE;
1590 }
1591 goto found;
1592 }
1593
1594 pos = tf->event.data;
1595
1596 if(tf->event.data_size < 0) goto error;
1597
1598 pos += (size_t)tf->event.data_size;
1599
1600 tf->event.offset = pos - tf->buffer.head;
1601
1602 if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
1603 ret = ERANGE;
1604 goto found;
1605 }
1606 g_assert(tf->event.offset < tf->buf_size - tf->buffer.lost_size);
1607
1608 found:
1609 return ret;
1610
1611 error:
1612 g_error("Error in ltt_seek_next_event for tracefile %s",
1613 g_quark_to_string(tf->name));
1614 return ENOPROTOOPT;
1615 }
1616
1617 #if 0
1618 /*****************************************************************************
1619 *Function name
1620 * set_fields_offsets : set the precomputable offset of the fields
1621 *Input params
1622 * tracefile : opened trace file
1623 * event_type : the event type
1624 ****************************************************************************/
1625
1626 void set_fields_offsets(LttTracefile *tf, LttEventType *event_type)
1627 {
1628 LttField *field = event_type->root_field;
1629 enum field_status fixed_root = FIELD_FIXED, fixed_parent = FIELD_FIXED;
1630
1631 if(likely(field))
1632 preset_field_type_size(tf, event_type, 0, 0,
1633 &fixed_root, &fixed_parent,
1634 field);
1635
1636 }
1637 #endif //0
1638
1639
1640 /*****************************************************************************
1641 *Function name
1642 * get_alignment : Get the alignment needed for a field.
1643 *Input params
1644 * field : field
1645 *
1646 * returns : The size on which it must be aligned.
1647 *
1648 ****************************************************************************/
1649 #if 0
1650 off_t get_alignment(LttField *field)
1651 {
1652 LttType *type = &field->field_type;
1653
1654 switch(type->type_class) {
1655 case LTT_INT_FIXED:
1656 case LTT_UINT_FIXED:
1657 case LTT_POINTER:
1658 case LTT_CHAR:
1659 case LTT_UCHAR:
1660 case LTT_SHORT:
1661 case LTT_USHORT:
1662 case LTT_INT:
1663 case LTT_UINT:
1664 case LTT_LONG:
1665 case LTT_ULONG:
1666 case LTT_SIZE_T:
1667 case LTT_SSIZE_T:
1668 case LTT_OFF_T:
1669 case LTT_FLOAT:
1670 case LTT_ENUM:
1671 /* Align offset on type size */
1672 g_assert(field->field_size != 0);
1673 return field->field_size;
1674 break;
1675 case LTT_STRING:
1676 return 1;
1677 break;
1678 case LTT_ARRAY:
1679 g_assert(type->fields->len == 1);
1680 {
1681 LttField *child = &g_array_index(type->fields, LttField, 0);
1682 return get_alignment(child);
1683 }
1684 break;
1685 case LTT_SEQUENCE:
1686 g_assert(type->fields->len == 2);
1687 {
1688 off_t localign = 1;
1689 LttField *child = &g_array_index(type->fields, LttField, 0);
1690
1691 localign = max(localign, get_alignment(child));
1692
1693 child = &g_array_index(type->fields, LttField, 1);
1694 localign = max(localign, get_alignment(child));
1695
1696 return localign;
1697 }
1698 break;
1699 case LTT_STRUCT:
1700 case LTT_UNION:
1701 {
1702 guint i;
1703 off_t localign = 1;
1704
1705 for(i=0; i<type->fields->len; i++) {
1706 LttField *child = &g_array_index(type->fields, LttField, i);
1707 localign = max(localign, get_alignment(child));
1708 }
1709 return localign;
1710 }
1711 break;
1712 case LTT_NONE:
1713 default:
1714 g_error("get_alignment : unknown type");
1715 return -1;
1716 }
1717 }
1718
1719 #endif //0
1720
1721 /*****************************************************************************
1722 *Function name
1723 * field_compute_static_size : Determine the size of fields known by their
1724 * sole definition. Unions, arrays and struct sizes might be known, but
1725 * the parser does not give that information.
1726 *Input params
1727 * tf : tracefile
1728 * field : field
1729 *
1730 ****************************************************************************/
1731 #if 0
1732 void field_compute_static_size(LttFacility *fac, LttField *field)
1733 {
1734 LttType *type = &field->field_type;
1735
1736 switch(type->type_class) {
1737 case LTT_INT_FIXED:
1738 case LTT_UINT_FIXED:
1739 case LTT_POINTER:
1740 case LTT_CHAR:
1741 case LTT_UCHAR:
1742 case LTT_SHORT:
1743 case LTT_USHORT:
1744 case LTT_INT:
1745 case LTT_UINT:
1746 case LTT_LONG:
1747 case LTT_ULONG:
1748 case LTT_SIZE_T:
1749 case LTT_SSIZE_T:
1750 case LTT_OFF_T:
1751 case LTT_FLOAT:
1752 case LTT_ENUM:
1753 case LTT_STRING:
1754 /* nothing to do */
1755 break;
1756 case LTT_ARRAY:
1757 /* note this : the array type size is the number of elements in the array,
1758 * while the array field size is the length of the array in bytes */
1759 g_assert(type->fields->len == 1);
1760 {
1761 LttField *child = &g_array_index(type->fields, LttField, 0);
1762 field_compute_static_size(fac, child);
1763
1764 if(child->field_size != 0) {
1765 field->field_size = type->size * child->field_size;
1766 field->dynamic_offsets = g_array_sized_new(FALSE, TRUE,
1767 sizeof(off_t), type->size);
1768 } else {
1769 field->field_size = 0;
1770 }
1771 }
1772 break;
1773 case LTT_SEQUENCE:
1774 g_assert(type->fields->len == 2);
1775 {
1776 off_t local_offset = 0;
1777 LttField *child = &g_array_index(type->fields, LttField, 1);
1778 field_compute_static_size(fac, child);
1779 field->field_size = 0;
1780 type->size = 0;
1781 if(child->field_size != 0) {
1782 field->dynamic_offsets = g_array_sized_new(FALSE, TRUE,
1783 sizeof(off_t), SEQUENCE_AVG_ELEMENTS);
1784 }
1785 }
1786 break;
1787 case LTT_STRUCT:
1788 case LTT_UNION:
1789 {
1790 guint i;
1791 for(i=0;i<type->fields->len;i++) {
1792 LttField *child = &g_array_index(type->fields, LttField, i);
1793 field_compute_static_size(fac, child);
1794 if(child->field_size != 0) {
1795 type->size += ltt_align(type->size, get_alignment(child),
1796 fac->alignment);
1797 type->size += child->field_size;
1798 } else {
1799 /* As soon as we find a child with variable size, we have
1800 * a variable size */
1801 type->size = 0;
1802 break;
1803 }
1804 }
1805 field->field_size = type->size;
1806 }
1807 break;
1808 default:
1809 g_error("field_static_size : unknown type");
1810 }
1811
1812 }
1813 #endif //0
1814
1815
1816 /*****************************************************************************
1817 *Function name
1818 * precompute_fields_offsets : set the precomputable offset of the fields
1819 *Input params
1820 * fac : facility
1821 * field : the field
1822 * offset : pointer to the current offset, must be incremented
1823 *
1824 * return : 1 : found a variable length field, stop the processing.
1825 * 0 otherwise.
1826 ****************************************************************************/
1827
1828 #if 0
1829 gint precompute_fields_offsets(LttFacility *fac, LttField *field, off_t *offset, gint is_compact)
1830 {
1831 LttType *type = &field->field_type;
1832
1833 if(unlikely(is_compact)) {
1834 g_assert(field->field_size != 0);
1835 /* FIXME THIS IS A HUUUUUGE hack :
1836 * offset is between the compact_data field in struct LttEvent
1837 * and the address of the field root in the memory map.
1838 * ark. Both will stay at the same addresses while the event
1839 * is readable, so it's ok.
1840 */
1841 field->offset_root = 0;
1842 field->fixed_root = FIELD_FIXED;
1843 return 0;
1844 }
1845
1846 switch(type->type_class) {
1847 case LTT_INT_FIXED:
1848 case LTT_UINT_FIXED:
1849 case LTT_POINTER:
1850 case LTT_CHAR:
1851 case LTT_UCHAR:
1852 case LTT_SHORT:
1853 case LTT_USHORT:
1854 case LTT_INT:
1855 case LTT_UINT:
1856 case LTT_LONG:
1857 case LTT_ULONG:
1858 case LTT_SIZE_T:
1859 case LTT_SSIZE_T:
1860 case LTT_OFF_T:
1861 case LTT_FLOAT:
1862 case LTT_ENUM:
1863 g_assert(field->field_size != 0);
1864 /* Align offset on type size */
1865 *offset += ltt_align(*offset, get_alignment(field),
1866 fac->alignment);
1867 /* remember offset */
1868 field->offset_root = *offset;
1869 field->fixed_root = FIELD_FIXED;
1870 /* Increment offset */
1871 *offset += field->field_size;
1872 return 0;
1873 break;
1874 case LTT_STRING:
1875 field->offset_root = *offset;
1876 field->fixed_root = FIELD_FIXED;
1877 return 1;
1878 break;
1879 case LTT_ARRAY:
1880 g_assert(type->fields->len == 1);
1881 {
1882 LttField *child = &g_array_index(type->fields, LttField, 0);
1883
1884 *offset += ltt_align(*offset, get_alignment(field),
1885 fac->alignment);
1886
1887 /* remember offset */
1888 field->offset_root = *offset;
1889 field->array_offset = *offset;
1890 field->fixed_root = FIELD_FIXED;
1891
1892 /* Let the child be variable */
1893 //precompute_fields_offsets(tf, child, offset);
1894
1895 if(field->field_size != 0) {
1896 /* Increment offset */
1897 /* field_size is the array size in bytes */
1898 *offset += field->field_size;
1899 return 0;
1900 } else {
1901 return 1;
1902 }
1903 }
1904 break;
1905 case LTT_SEQUENCE:
1906 g_assert(type->fields->len == 2);
1907 {
1908 LttField *child;
1909 guint ret;
1910
1911 *offset += ltt_align(*offset, get_alignment(field),
1912 fac->alignment);
1913
1914 /* remember offset */
1915 field->offset_root = *offset;
1916 field->fixed_root = FIELD_FIXED;
1917
1918 child = &g_array_index(type->fields, LttField, 0);
1919 ret = precompute_fields_offsets(fac, child, offset, is_compact);
1920 g_assert(ret == 0); /* Seq len cannot have variable len */
1921
1922 child = &g_array_index(type->fields, LttField, 1);
1923 *offset += ltt_align(*offset, get_alignment(child),
1924 fac->alignment);
1925 field->array_offset = *offset;
1926 /* Let the child be variable. */
1927 //ret = precompute_fields_offsets(fac, child, offset);
1928
1929 /* Cannot precompute fields offsets of sequence members, and has
1930 * variable length. */
1931 return 1;
1932 }
1933 break;
1934 case LTT_STRUCT:
1935 {
1936 LttField *child;
1937 guint i;
1938 gint ret=0;
1939
1940 *offset += ltt_align(*offset, get_alignment(field),
1941 fac->alignment);
1942 /* remember offset */
1943 field->offset_root = *offset;
1944 field->fixed_root = FIELD_FIXED;
1945
1946 for(i=0; i< type->fields->len; i++) {
1947 child = &g_array_index(type->fields, LttField, i);
1948 ret = precompute_fields_offsets(fac, child, offset, is_compact);
1949
1950 if(ret) break;
1951 }
1952 return ret;
1953 }
1954 break;
1955 case LTT_UNION:
1956 {
1957 LttField *child;
1958 guint i;
1959 gint ret=0;
1960
1961 *offset += ltt_align(*offset, get_alignment(field),
1962 fac->alignment);
1963 /* remember offset */
1964 field->offset_root = *offset;
1965 field->fixed_root = FIELD_FIXED;
1966
1967 for(i=0; i< type->fields->len; i++) {
1968 *offset = field->offset_root;
1969 child = &g_array_index(type->fields, LttField, i);
1970 ret = precompute_fields_offsets(fac, child, offset, is_compact);
1971
1972 if(ret) break;
1973 }
1974 *offset = field->offset_root + field->field_size;
1975 return ret;
1976 }
1977
1978 break;
1979 case LTT_NONE:
1980 default:
1981 g_error("precompute_fields_offsets : unknown type");
1982 return 1;
1983 }
1984
1985 }
1986
1987 #endif //0
1988
1989 #if 0
1990 /*****************************************************************************
1991 *Function name
1992 * precompute_offsets : set the precomputable offset of an event type
1993 *Input params
1994 * fac : facility
1995 * event : event type
1996 *
1997 ****************************************************************************/
1998 void precompute_offsets(LttFacility *fac, LttEventType *event)
1999 {
2000 guint i;
2001 off_t offset = 0;
2002 gint ret;
2003
2004 /* First, compute the size of the fixed-size fields. This determines the size
2005 * for arrays, structs and unions, which is not done by the parser. */
2006 for(i=0; i<event->fields->len; i++) {
2007 LttField *field = &g_array_index(event->fields, LttField, i);
2008 field_compute_static_size(fac, field);
2009 }
2010
2011 /* Precompute all known offsets */
2012 for(i=0; i<event->fields->len; i++) {
2013 LttField *field = &g_array_index(event->fields, LttField, i);
2014 if(event->has_compact_data && i == 0)
2015 ret = precompute_fields_offsets(fac, field, &offset, 1);
2016 else
2017 ret = precompute_fields_offsets(fac, field, &offset, 0);
2018 if(ret) break;
2019 }
2020 }
2021 #endif //0
2022
2023
2024
2025 /*****************************************************************************
2026 *Function name
2027 * preset_field_type_size : set the fixed sizes of the field type
2028 *Input params
2029 * tf : tracefile
2030 * event_type : event type
2031 * offset_root : offset from the root
2032 * offset_parent : offset from the parent
2033 * fixed_root : Do we know a fixed offset to the root ?
2034 * fixed_parent : Do we know a fixed offset to the parent ?
2035 * field : field
2036 ****************************************************************************/
2037
2038
2039
2040 // Preset the fixed-size offsets. Calculate them just as genevent-new does: by
2041 // incrementing a *to value that represents the offset from the start of the
2042 // event data.
2043 // The preset information covers the offsets up to (and including) the first
2044 // element of variable size. All subsequent fields must be flagged "VARIABLE OFFSET".
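/*
 * Illustration (hypothetical event layout, not taken from a real facility):
 * for an event whose payload is
 *
 *   { guint32 pid; string name; guint16 prio; }
 *
 * the precomputation can fix the offset of "pid" (0) and of "name" (4, after
 * alignment), because "name" is the first variable-size field. The offset of
 * "prio" depends on the length of "name", so it cannot be precomputed and
 * must be flagged "VARIABLE OFFSET".
 */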
2045 #if 0
2046 void preset_field_type_size(LttTracefile *tf, LttEventType *event_type,
2047 off_t offset_root, off_t offset_parent,
2048 enum field_status *fixed_root, enum field_status *fixed_parent,
2049 LttField *field)
2050 {
2051 enum field_status local_fixed_root, local_fixed_parent;
2052 guint i;
2053 LttType *type;
2054
2055 g_assert(field->fixed_root == FIELD_UNKNOWN);
2056 g_assert(field->fixed_parent == FIELD_UNKNOWN);
2057 g_assert(field->fixed_size == FIELD_UNKNOWN);
2058
2059 type = field->field_type;
2060
2061 field->fixed_root = *fixed_root;
2062 if(field->fixed_root == FIELD_FIXED)
2063 field->offset_root = offset_root;
2064 else
2065 field->offset_root = 0;
2066
2067 field->fixed_parent = *fixed_parent;
2068 if(field->fixed_parent == FIELD_FIXED)
2069 field->offset_parent = offset_parent;
2070 else
2071 field->offset_parent = 0;
2072
2073 size_t current_root_offset;
2074 size_t current_offset;
2075 enum field_status current_child_status, final_child_status;
2076 size_t max_size;
2077
2078 switch(type->type_class) {
2079 case LTT_INT_FIXED:
2080 case LTT_UINT_FIXED:
2081 case LTT_CHAR:
2082 case LTT_UCHAR:
2083 case LTT_SHORT:
2084 case LTT_USHORT:
2085 case LTT_INT:
2086 case LTT_UINT:
2087 case LTT_FLOAT:
2088 case LTT_ENUM:
2089 field->field_size = ltt_type_size(tf->trace, type);
2090 field->fixed_size = FIELD_FIXED;
2091 break;
2092 case LTT_POINTER:
2093 field->field_size = (off_t)event_type->facility->pointer_size;
2094 field->fixed_size = FIELD_FIXED;
2095 break;
2096 case LTT_LONG:
2097 case LTT_ULONG:
2098 field->field_size = (off_t)event_type->facility->long_size;
2099 field->fixed_size = FIELD_FIXED;
2100 break;
2101 case LTT_SIZE_T:
2102 case LTT_SSIZE_T:
2103 case LTT_OFF_T:
2104 field->field_size = (off_t)event_type->facility->size_t_size;
2105 field->fixed_size = FIELD_FIXED;
2106 break;
2107 case LTT_SEQUENCE:
2108 local_fixed_root = FIELD_VARIABLE;
2109 local_fixed_parent = FIELD_VARIABLE;
2110 preset_field_type_size(tf, event_type,
2111 0, 0,
2112 &local_fixed_root, &local_fixed_parent,
2113 field->child[0]);
2114 field->fixed_size = FIELD_VARIABLE;
2115 field->field_size = 0;
2116 *fixed_root = FIELD_VARIABLE;
2117 *fixed_parent = FIELD_VARIABLE;
2118 break;
2119 case LTT_STRING:
2120 field->fixed_size = FIELD_VARIABLE;
2121 field->field_size = 0;
2122 *fixed_root = FIELD_VARIABLE;
2123 *fixed_parent = FIELD_VARIABLE;
2124 break;
2125 case LTT_ARRAY:
2126 local_fixed_root = FIELD_VARIABLE;
2127 local_fixed_parent = FIELD_VARIABLE;
2128 preset_field_type_size(tf, event_type,
2129 0, 0,
2130 &local_fixed_root, &local_fixed_parent,
2131 field->child[0]);
2132 field->fixed_size = field->child[0]->fixed_size;
2133 if(field->fixed_size == FIELD_FIXED) {
2134 field->field_size = type->element_number * field->child[0]->field_size;
2135 } else {
2136 field->field_size = 0;
2137 *fixed_root = FIELD_VARIABLE;
2138 *fixed_parent = FIELD_VARIABLE;
2139 }
2140 break;
2141 case LTT_STRUCT:
2142 current_root_offset = field->offset_root;
2143 current_offset = 0;
2144 current_child_status = FIELD_FIXED;
2145 for(i=0;i<type->element_number;i++) {
2146 preset_field_type_size(tf, event_type,
2147 current_root_offset, current_offset,
2148 fixed_root, &current_child_status,
2149 field->child[i]);
2150 if(current_child_status == FIELD_FIXED) {
2151 current_root_offset += field->child[i]->field_size;
2152 current_offset += field->child[i]->field_size;
2153 } else {
2154 current_root_offset = 0;
2155 current_offset = 0;
2156 }
2157 }
2158 if(current_child_status != FIELD_FIXED) {
2159 *fixed_parent = current_child_status;
2160 field->field_size = 0;
2161 field->fixed_size = current_child_status;
2162 } else {
2163 field->field_size = current_offset;
2164 field->fixed_size = FIELD_FIXED;
2165 }
2166 break;
2167 case LTT_UNION:
2168 current_root_offset = field->offset_root;
2169 current_offset = 0;
2170 max_size = 0;
2171 final_child_status = FIELD_FIXED;
2172 for(i=0;i<type->element_number;i++) {
2173 enum field_status current_root_child_status = FIELD_FIXED;
2174 enum field_status current_child_status = FIELD_FIXED;
2175 preset_field_type_size(tf, event_type,
2176 current_root_offset, current_offset,
2177 &current_root_child_status, &current_child_status,
2178 field->child[i]);
2179 if(current_child_status != FIELD_FIXED)
2180 final_child_status = current_child_status;
2181 else
2182 max_size = max(max_size, field->child[i]->field_size);
2183 }
2184 if(final_child_status != FIELD_FIXED) {
2185 g_error("LTTV does not support variable size fields in unions.");
2186 /* This will stop the application. */
2187 *fixed_root = final_child_status;
2188 *fixed_parent = final_child_status;
2189 field->field_size = 0;
2190 field->fixed_size = current_child_status;
2191 } else {
2192 field->field_size = max_size;
2193 field->fixed_size = FIELD_FIXED;
2194 }
2195 break;
2196 case LTT_NONE:
2197 g_error("unexpected type NONE");
2198 break;
2199 }
2200
2201 }
2202 #endif //0
2203
2204 /*****************************************************************************
2205 *Function name
2206 * check_fields_compatibility : Check for compatibility between two fields :
2207 * do they use the same inner structure ?
2208 *Input params
2209 * event_type1 : event type
2210 * event_type2 : event type
2211 * field1 : field
2212 * field2 : field
2213 *Returns : 0 if identical
2214 * 1 if not.
2215 ****************************************************************************/
2216 // This function checks for equality of field types; it does not compare
2217 // offsets per se. For instance, an aligned version of a structure is
2218 // compatible with an unaligned version of the same structure.
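/*
 * Illustration (hypothetical event types): two events that both declare the
 * payload { guint32 count; string name; } are considered compatible even if
 * one trace was recorded with aligned payloads and the other without,
 * because only the type trees are compared, never the computed offsets.
 */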
2219 #if 0
2220 gint check_fields_compatibility(LttEventType *event_type1,
2221 LttEventType *event_type2,
2222 LttField *field1, LttField *field2)
2223 {
2224 guint different = 0;
2225 LttType *type1;
2226 LttType *type2;
2227
2228 if(field1 == NULL) {
2229 if(field2 == NULL) goto end;
2230 else {
2231 different = 1;
2232 goto end;
2233 }
2234 } else if(field2 == NULL) {
2235 different = 1;
2236 goto end;
2237 }
2238
2239 type1 = &field1->field_type;
2240 type2 = &field2->field_type;
2241
2242 if(type1->type_class != type2->type_class) {
2243 different = 1;
2244 goto end;
2245 }
2246 if(type1->network != type2->network) {
2247 different = 1;
2248 goto end;
2249 }
2250
2251 switch(type1->type_class) {
2252 case LTT_INT_FIXED:
2253 case LTT_UINT_FIXED:
2254 case LTT_POINTER:
2255 case LTT_CHAR:
2256 case LTT_UCHAR:
2257 case LTT_SHORT:
2258 case LTT_USHORT:
2259 case LTT_INT:
2260 case LTT_UINT:
2261 case LTT_LONG:
2262 case LTT_ULONG:
2263 case LTT_SIZE_T:
2264 case LTT_SSIZE_T:
2265 case LTT_OFF_T:
2266 case LTT_FLOAT:
2267 case LTT_ENUM:
2268 if(field1->field_size != field2->field_size)
2269 different = 1;
2270 break;
2271 case LTT_STRING:
2272 break;
2273 case LTT_ARRAY:
2274 {
2275 LttField *child1 = &g_array_index(type1->fields, LttField, 0);
2276 LttField *child2 = &g_array_index(type2->fields, LttField, 0);
2277
2278 if(type1->size != type2->size)
2279 different = 1;
2280 if(check_fields_compatibility(event_type1, event_type2, child1, child2))
2281 different = 1;
2282 }
2283 break;
2284 case LTT_SEQUENCE:
2285 {
2286 LttField *child1 = &g_array_index(type1->fields, LttField, 1);
2287 LttField *child2 = &g_array_index(type2->fields, LttField, 1);
2288
2289 if(check_fields_compatibility(event_type1, event_type2, child1, child2))
2290 different = 1;
2291 }
2292 break;
2293 case LTT_STRUCT:
2294 case LTT_UNION:
2295 {
2296 LttField *child;
2297 guint i;
2298
2299 if(type1->fields->len != type2->fields->len) {
2300 different = 1;
2301 goto end;
2302 }
2303
2304 for(i=0; i< type1->fields->len; i++) {
2305 LttField *child1;
2306 LttField *child2;
2307 child1 = &g_array_index(type1->fields, LttField, i);
2308 child2 = &g_array_index(type2->fields, LttField, i);
2309 different = check_fields_compatibility(event_type1,
2310 event_type2, child1, child2);
2311
2312 if(different) break;
2313 }
2314 }
2315 break;
2316 case LTT_NONE:
2317 default:
2318 g_error("check_fields_compatibility : unknown type");
2319 }
2320
2321 end:
2322 return different;
2323 }
2324 #endif //0
2325
2326 #if 0
2327 gint check_fields_compatibility(LttEventType *event_type1,
2328 LttEventType *event_type2,
2329 LttField *field1, LttField *field2)
2330 {
2331 guint different = 0;
2332 guint i;
2333 LttType *type1;
2334 LttType *type2;
2335
2336 if(field1 == NULL) {
2337 if(field2 == NULL) goto end;
2338 else {
2339 different = 1;
2340 goto end;
2341 }
2342 } else if(field2 == NULL) {
2343 different = 1;
2344 goto end;
2345 }
2346
2347 g_assert(field1->fixed_root != FIELD_UNKNOWN);
2348 g_assert(field2->fixed_root != FIELD_UNKNOWN);
2349 g_assert(field1->fixed_parent != FIELD_UNKNOWN);
2350 g_assert(field2->fixed_parent != FIELD_UNKNOWN);
2351 g_assert(field1->fixed_size != FIELD_UNKNOWN);
2352 g_assert(field2->fixed_size != FIELD_UNKNOWN);
2353
2354 type1 = field1->field_type;
2355 type2 = field2->field_type;
2356
2357 if(type1->type_class != type2->type_class) {
2358 different = 1;
2359 goto end;
2360 }
2361 if(type1->element_name != type2->element_name) {
2362 different = 1;
2363 goto end;
2364 }
2365
2366 switch(type1->type_class) {
2367 case LTT_INT_FIXED:
2368 case LTT_UINT_FIXED:
2369 case LTT_POINTER:
2370 case LTT_CHAR:
2371 case LTT_UCHAR:
2372 case LTT_SHORT:
2373 case LTT_USHORT:
2374 case LTT_INT:
2375 case LTT_UINT:
2376 case LTT_FLOAT:
2378 case LTT_LONG:
2379 case LTT_ULONG:
2380 case LTT_SIZE_T:
2381 case LTT_SSIZE_T:
2382 case LTT_OFF_T:
2383 if(field1->field_size != field2->field_size) {
2384 different = 1;
2385 goto end;
2386 }
2387 break;
2388 case LTT_ENUM:
2389 if(type1->element_number != type2->element_number) {
2390 different = 1;
2391 goto end;
2392 }
2393 for(i=0;i<type1->element_number;i++) {
2394 if(type1->enum_strings[i] != type2->enum_strings[i]) {
2395 different = 1;
2396 goto end;
2397 }
2398 }
2399 break;
2400 case LTT_SEQUENCE:
2401 /* Two elements : size and child */
2402   g_assert(type1->element_number == type2->element_number);
2403 for(i=0;i<type1->element_number;i++) {
2404 if(check_fields_compatibility(event_type1, event_type2,
2405 field1->child[0], field2->child[0])) {
2406 different = 1;
2407 goto end;
2408 }
2409 }
2410 break;
2411 case LTT_STRING:
2412 break;
2413 case LTT_ARRAY:
2414 if(field1->field_size != field2->field_size) {
2415 different = 1;
2416 goto end;
2417 }
2418 /* Two elements : size and child */
2419   g_assert(type1->element_number == type2->element_number);
2420 for(i=0;i<type1->element_number;i++) {
2421 if(check_fields_compatibility(event_type1, event_type2,
2422 field1->child[0], field2->child[0])) {
2423 different = 1;
2424 goto end;
2425 }
2426 }
2427 break;
2428 case LTT_STRUCT:
2429 case LTT_UNION:
2430 if(type1->element_number != type2->element_number) {
2431 different = 1;
2432 break;
2433 }
2434 for(i=0;i<type1->element_number;i++) {
2435 if(check_fields_compatibility(event_type1, event_type2,
2436 field1->child[0], field2->child[0])) {
2437 different = 1;
2438 goto end;
2439 }
2440 }
2441 break;
2442 }
2443 end:
2444 return different;
2445 }
2446 #endif //0
2447
2448
2449 /*****************************************************************************
2450 *Function name
2451 * ltt_get_int : get an integer number
2452 *Input params
2453 * reverse_byte_order: must we reverse the byte order ?
2454 * size : the size of the integer
2455 * data : the data pointer
2456 *Return value
2457 * gint64 : a 64-bit integer
2458 ****************************************************************************/
2459
2460 gint64 ltt_get_int(gboolean reverse_byte_order, gint size, void *data)
2461 {
2462 gint64 val;
2463
2464 switch(size) {
2465 case 1: val = *((gint8*)data); break;
2466 case 2: val = ltt_get_int16(reverse_byte_order, data); break;
2467 case 4: val = ltt_get_int32(reverse_byte_order, data); break;
2468 case 8: val = ltt_get_int64(reverse_byte_order, data); break;
2469 default: val = ltt_get_int64(reverse_byte_order, data);
2470 g_critical("get_int : integer size %d unknown", size);
2471 break;
2472 }
2473
2474 return val;
2475 }
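/*
 * Usage sketch (illustrative only; "raw" is a hypothetical local value and
 * FALSE means the trace and the host share the same byte order):
 *
 *   gint32 raw = -42;
 *   gint64 v = ltt_get_int(FALSE, sizeof(raw), &raw);
 *   // v == -42 : the value is sign-extended to 64 bits.
 */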
2476
2477 /*****************************************************************************
2478 *Function name
2479 * ltt_get_uint : get an unsigned integer number
2480 *Input params
2481 * reverse_byte_order: must we reverse the byte order ?
2482 * size : the size of the integer
2483 * data : the data pointer
2484 *Return value
2485 * guint64 : a 64-bit unsigned integer
2486 ****************************************************************************/
2487
2488 guint64 ltt_get_uint(gboolean reverse_byte_order, gint size, void *data)
2489 {
2490 guint64 val;
2491
2492 switch(size) {
2493   case 1: val = *((guint8*)data); break;
2494 case 2: val = ltt_get_uint16(reverse_byte_order, data); break;
2495 case 4: val = ltt_get_uint32(reverse_byte_order, data); break;
2496 case 8: val = ltt_get_uint64(reverse_byte_order, data); break;
2497 default: val = ltt_get_uint64(reverse_byte_order, data);
2498 g_critical("get_uint : unsigned integer size %d unknown",
2499 size);
2500 break;
2501 }
2502
2503 return val;
2504 }
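/*
 * Usage sketch (illustrative only; "b" is a hypothetical byte): with the
 * guint8 cast above, a single 0xff byte is read as 255 rather than being
 * sign-extended:
 *
 *   guint8 b = 0xff;
 *   guint64 v = ltt_get_uint(FALSE, 1, &b);   // v == 255
 */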
2505
2506
2507 /* get the node name of the system */
2508
2509 char * ltt_trace_system_description_node_name (LttSystemDescription * s)
2510 {
2511 return s->node_name;
2512 }
2513
2514
2515 /* get the domain name of the system */
2516
2517 char * ltt_trace_system_description_domain_name (LttSystemDescription * s)
2518 {
2519 return s->domain_name;
2520 }
2521
2522
2523 /* get the description of the system */
2524
2525 char * ltt_trace_system_description_description (LttSystemDescription * s)
2526 {
2527 return s->description;
2528 }
2529
2530
2531 /* get the NTP corrected start time of the trace */
2532 LttTime ltt_trace_start_time(LttTrace *t)
2533 {
2534 return t->start_time;
2535 }
2536
2537 /* get the monotonic start time of the trace */
2538 LttTime ltt_trace_start_time_monotonic(LttTrace *t)
2539 {
2540 return t->start_time_from_tsc;
2541 }
2542
2543 static __attribute__ ((__unused__)) LttTracefile *ltt_tracefile_new()
2544 {
2545 LttTracefile *tf;
2546 tf = g_new(LttTracefile, 1);
2547 tf->event.tracefile = tf;
2548 return tf;
2549 }
2550
2551 static __attribute__ ((__unused__)) void ltt_tracefile_destroy(LttTracefile *tf)
2552 {
2553 g_free(tf);
2554 }
2555
2556 static __attribute__ ((__unused__)) void ltt_tracefile_copy(LttTracefile *dest, const LttTracefile *src)
2557 {
2558 *dest = *src;
2559 }
2560
2561 /* Before library loading... */
2562
2563 static __attribute__((constructor)) void init(void)
2564 {
2565 LTT_TRACEFILE_NAME_METADATA = g_quark_from_string("metadata");
2566 }