1 /* This file is part of the Linux Trace Toolkit viewer
2 * Copyright (C) 2005 Mathieu Desnoyers
3 *
4 * Complete rewrite from the original version made by XangXiu Yang.
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License Version 2.1 as published by the Free Software Foundation.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
18 * Boston, MA 02111-1307, USA.
19 */
20
21 #ifdef HAVE_CONFIG_H
22 #include <config.h>
23 #endif
24
25 #include <stdio.h>
26 #include <fcntl.h>
27 #include <string.h>
28 #include <dirent.h>
29 #include <sys/stat.h>
30 #include <sys/types.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <math.h>
34 #include <glib.h>
35 #include <glib/gprintf.h>
36 #include <malloc.h>
37 #include <sys/mman.h>
38 #include <string.h>
39 #include <ctype.h>
40 #include <inttypes.h>
41
42 // For realpath
43 #include <limits.h>
44 #include <stdlib.h>
45
46
47 #include <ltt/ltt.h>
48 #include "ltt-private.h"
49 #include <ltt/trace.h>
50 #include <ltt/event.h>
51 #include <ltt/ltt-types.h>
52 #include <ltt/marker.h>
53
54 /* from marker.c */
55 extern long marker_update_fields_offsets(struct marker_info *info, const char *data);
56
57 /* Tracefile names used in this file */
58
59 GQuark LTT_TRACEFILE_NAME_METADATA;
60
61 #ifndef g_open
62 #define g_open open
63 #endif
64
65
66 #define __UNUSED__ __attribute__((__unused__))
67
68 #define g_info(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_INFO, format)
69
70 #ifndef g_debug
71 #define g_debug(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, format)
72 #endif
73
74 #define g_close close
75
76 /* These macros must be used from within a function where page_size is a known
77 * variable */
78 #define PAGE_MASK (~(page_size-1))
79 #define PAGE_ALIGN(addr) (((addr)+page_size-1)&PAGE_MASK)
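/* Example (illustrative) : with page_size = 4096, PAGE_ALIGN(1) = 4096,
 * PAGE_ALIGN(4096) = 4096 and PAGE_ALIGN(4097) = 8192 : lengths are rounded
 * up to the next multiple of the page size, so mmap/munmap always deal in
 * whole pages. */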
80
81 LttTrace *father_trace = NULL;
82
83 /* set the offset of the fields belonging to the event,
84    need the architecture information */
85 //void set_fields_offsets(LttTracefile *tf, LttEventType *event_type);
86 //size_t get_fields_offsets(LttTracefile *tf, LttEventType *event_type, void *data);
87
88 #if 0
89 /* get the size of the field type according to
90 * the facility size information. */
91 static inline void preset_field_type_size(LttTracefile *tf,
92 LttEventType *event_type,
93 off_t offset_root, off_t offset_parent,
94 enum field_status *fixed_root, enum field_status *fixed_parent,
95 LttField *field);
96 #endif //0
97
98 /* map a fixed-size block of information from the file (fd) */
99 static gint map_block(LttTracefile * tf, guint block_num);
100
101 /* calculate nsecs per cycle for the current block */
102 #if 0
103 static guint32 calc_nsecs_per_cycle(LttTracefile * t);
104 static guint64 cycles_2_ns(LttTracefile *tf, guint64 cycles);
105 #endif //0
106
107 /* go to the next event */
108 static int ltt_seek_next_event(LttTracefile *tf);
109
110 static int open_tracefiles(LttTrace *trace, gchar *root_path,
111 gchar *relative_path);
112 static int ltt_process_metadata_tracefile(LttTracefile *tf);
113 static void ltt_tracefile_time_span_get(LttTracefile *tf,
114 LttTime *start, LttTime *end);
115 static void group_time_span_get(GQuark name, gpointer data, gpointer user_data);
116 static gint map_block(LttTracefile * tf, guint block_num);
117 static void ltt_update_event_size(LttTracefile *tf);
118
119 /* Enable event debugging */
120 static int a_event_debug = 0;
121
122 void ltt_event_debug(int state)
123 {
124 a_event_debug = state;
125 }
126
127 /* trace can be NULL
128 *
129 * Return value : 0 success, 1 bad tracefile
130 */
131 static int parse_trace_header(ltt_subbuffer_header_t *header,
132 LttTracefile *tf, LttTrace *t)
133 {
134 if (header->magic_number == LTT_MAGIC_NUMBER)
135 tf->reverse_bo = 0;
136 else if(header->magic_number == LTT_REV_MAGIC_NUMBER)
137 tf->reverse_bo = 1;
138 else /* invalid magic number, bad tracefile ! */
139 return 1;
140
141 if(t) {
142 t->ltt_major_version = header->major_version;
143 t->ltt_minor_version = header->minor_version;
144 t->arch_size = header->arch_size;
145 }
146 tf->alignment = header->alignment;
147
148 /* Get float byte order : might be different from int byte order
149 * (or is set to 0 if the trace has no float (kernel trace)) */
150 tf->float_word_order = 0;
151
152 switch(header->major_version) {
153 case 0:
154 case 1:
155 g_warning("Unsupported trace version : %hhu.%hhu",
156 header->major_version, header->minor_version);
157 return 1;
158 break;
159 case 2:
160 switch(header->minor_version) {
161 case 3:
162 {
163 struct ltt_subbuffer_header_2_3 *vheader = header;
164 tf->buffer_header_size = ltt_subbuffer_header_size();
165 tf->tscbits = 27;
166 tf->eventbits = 5;
167 tf->tsc_mask = ((1ULL << tf->tscbits) - 1);
168 tf->tsc_mask_next_bit = (1ULL << tf->tscbits);
169
170 if(t) {
171 t->start_freq = ltt_get_uint64(LTT_GET_BO(tf),
172 &vheader->start_freq);
173 t->freq_scale = ltt_get_uint32(LTT_GET_BO(tf),
174 &vheader->freq_scale);
175 if(father_trace) {
176 t->start_freq = father_trace->start_freq;
177 t->freq_scale = father_trace->freq_scale;
178 } else {
179 father_trace = t;
180 }
181 t->start_tsc = ltt_get_uint64(LTT_GET_BO(tf),
182 &vheader->cycle_count_begin);
183 t->start_monotonic = 0;
184 t->start_time.tv_sec = ltt_get_uint64(LTT_GET_BO(tf),
185 &vheader->start_time_sec);
186 t->start_time.tv_nsec = ltt_get_uint64(LTT_GET_BO(tf),
187 &vheader->start_time_usec);
188 t->start_time.tv_nsec *= 1000; /* microsec to nanosec */
189
190 t->start_time_from_tsc = ltt_time_from_uint64(
191 (double)t->start_tsc
192 * 1000000000.0 * tf->trace->freq_scale
193 / (double)t->start_freq);
194 }
195 }
196 break;
197 default:
198 g_warning("Unsupported trace version : %hhu.%hhu",
199 header->major_version, header->minor_version);
200 return 1;
201 }
202 break;
203 default:
204 g_warning("Unsupported trace version : %hhu.%hhu",
205 header->major_version, header->minor_version);
206 return 1;
207 }
208 return 0;
209 }
210
211
212
213 /*****************************************************************************
214 *Function name
215 * ltt_tracefile_open : open a trace file, construct a LttTracefile
216 *Input params
217 * t : the trace containing the tracefile
218 * fileName : path name of the trace file
219 * tf : the tracefile structure
220 *Return value
221 * : 0 for success, -1 otherwise.
222 ****************************************************************************/
223
224 static gint ltt_tracefile_open(LttTrace *t, gchar * fileName, LttTracefile *tf)
225 {
226 struct stat lTDFStat; /* Trace data file status */
227 ltt_subbuffer_header_t *header;
228 int page_size = getpagesize();
229
230 //open the file
231 tf->long_name = g_quark_from_string(fileName);
232 tf->trace = t;
233 tf->fd = open(fileName, O_RDONLY);
234 if(tf->fd < 0){
235 g_warning("Unable to open input data file %s\n", fileName);
236 goto end;
237 }
238
239 // Get the file's status
240 if(fstat(tf->fd, &lTDFStat) < 0){
241 g_warning("Unable to get the status of the input data file %s\n", fileName);
242 goto close_file;
243 }
244
245 // Is the file large enough to contain a trace
246 if(lTDFStat.st_size <
247 (off_t)(ltt_subbuffer_header_size())){
248 g_print("The input data file %s does not contain a trace\n", fileName);
249 goto close_file;
250 }
251
252 /* Temporarily map the buffer start header to get trace information */
253 /* Multiple of pages aligned head */
254 tf->buffer.head = mmap(0,
255 PAGE_ALIGN(ltt_subbuffer_header_size()), PROT_READ,
256 MAP_PRIVATE, tf->fd, 0);
257 if(tf->buffer.head == MAP_FAILED) {
258 perror("Error in allocating memory for buffer of tracefile");
259 goto close_file;
260 }
261 g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
262
263 header = (ltt_subbuffer_header_t *)tf->buffer.head;
264
265 if(parse_trace_header(header, tf, NULL)) {
266 g_warning("parse_trace_header error");
267 goto unmap_file;
268 }
269
270 //store the size of the file
271 tf->file_size = lTDFStat.st_size;
272 tf->buf_size = ltt_get_uint32(LTT_GET_BO(tf), &header->buf_size);
273 tf->num_blocks = tf->file_size / tf->buf_size;
274 tf->events_lost = 0;
275 tf->subbuf_corrupt = 0;
276
277 if(munmap(tf->buffer.head,
278 PAGE_ALIGN(ltt_subbuffer_header_size()))) {
279 g_warning("unmap size : %zu\n",
280 PAGE_ALIGN(ltt_subbuffer_header_size()));
281 perror("munmap error");
282 g_assert(0);
283 }
284 tf->buffer.head = NULL;
285
286 //read the first block
287 if(map_block(tf,0)) {
288 perror("Cannot map block for tracefile");
289 goto close_file;
290 }
291
292 return 0;
293
294 /* Error */
295 unmap_file:
296 if(munmap(tf->buffer.head,
297 PAGE_ALIGN(ltt_subbuffer_header_size()))) {
298 g_warning("unmap size : %zu\n",
299 PAGE_ALIGN(ltt_subbuffer_header_size()));
300 perror("munmap error");
301 g_assert(0);
302 }
303 close_file:
304 close(tf->fd);
305 end:
306 return -1;
307 }
308
309
310 /*****************************************************************************
311 *Function name
312 * ltt_tracefile_close: close a trace file,
313 *Input params
314 * t : tracefile which will be closed
315 ****************************************************************************/
316
317 static void ltt_tracefile_close(LttTracefile *t)
318 {
319 int page_size = getpagesize();
320
321 if(t->buffer.head != NULL)
322 if(munmap(t->buffer.head, PAGE_ALIGN(t->buf_size))) {
323 g_warning("unmap size : %u\n",
324 PAGE_ALIGN(t->buf_size));
325 perror("munmap error");
326 g_assert(0);
327 }
328
329 close(t->fd);
330 }
331
332 /****************************************************************************
333 * get_absolute_pathname
334 *
335 * return the unique pathname in the system
336 *
337 * MD : Fixed this function so it uses realpath, handling previously
338 * overlooked cases ('..' was not handled correctly before).
339 *
340 ****************************************************************************/
341 void get_absolute_pathname(const gchar *pathname, gchar * abs_pathname)
342 {
343 abs_pathname[0] = '\0';
344
345 if (realpath(pathname, abs_pathname) != NULL)
346 return;
347 else
348 {
349 /* error, return the original path unmodified */
350 strcpy(abs_pathname, pathname);
351 return;
352 }
353 return;
354 }
355
356 /* Search for something like : .*_.*
357 *
358 * The left side is the name, the right side is the number.
359 * Exclude leading /.
360 * Exclude flight- prefix.
361 */
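/* For instance (illustrative names) : "kernel_0" gives name "kernel" and cpu
 * number 0, and "flight-kernel_3" gives name "kernel" and cpu number 3 once
 * the flight- prefix is stripped. Names without an underscore are treated as
 * userspace tracefiles (see below). */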
362
363 static int get_tracefile_name_number(gchar *raw_name,
364 GQuark *name,
365 guint *num,
366 gulong *tid,
367 gulong *pgid,
368 guint64 *creation)
369 {
370 guint raw_name_len = strlen(raw_name);
371 gchar char_name[PATH_MAX];
372 gint i; /* must be signed : the reverse scan below counts down to -1 */
373 int underscore_pos;
374 long int cpu_num;
375 gchar *endptr;
376 gchar *tmpptr;
377
378 /* skip leading / */
379 for(i = 0; i < raw_name_len-1;i++) {
380 if(raw_name[i] != '/')
381 break;
382 }
383 raw_name = &raw_name[i];
384 raw_name_len = strlen(raw_name);
385
386 for(i=raw_name_len-1;i>=0;i--) {
387 if(raw_name[i] == '_') break;
388 }
389 if(i==-1) { /* Either not found or name length is 0 */
390 /* This is a userspace tracefile */
391 strncpy(char_name, raw_name, raw_name_len);
392 char_name[raw_name_len] = '\0';
393 *name = g_quark_from_string(char_name);
394 *num = 0; /* unknown cpu */
395 for(i=0;i<raw_name_len;i++) {
396 if(raw_name[i] == '/') {
397 break;
398 }
399 }
400 i++;
401 for(;i<raw_name_len;i++) {
402 if(raw_name[i] == '/') {
403 break;
404 }
405 }
406 i++;
407 for(;i<raw_name_len;i++) {
408 if(raw_name[i] == '-') {
409 break;
410 }
411 }
412 if(i == raw_name_len) return -1;
413 i++;
414 tmpptr = &raw_name[i];
415 for(;i<raw_name_len;i++) {
416 if(raw_name[i] == '.') {
417 raw_name[i] = ' ';
418 break;
419 }
420 }
421 *tid = strtoul(tmpptr, &endptr, 10);
422 if(endptr == tmpptr)
423 return -1; /* No digit */
424 if(*tid == ULONG_MAX)
425 return -1; /* underflow / overflow */
426 i++;
427 tmpptr = &raw_name[i];
428 for(;i<raw_name_len;i++) {
429 if(raw_name[i] == '.') {
430 raw_name[i] = ' ';
431 break;
432 }
433 }
434 *pgid = strtoul(tmpptr, &endptr, 10);
435 if(endptr == tmpptr)
436 return -1; /* No digit */
437 if(*pgid == ULONG_MAX)
438 return -1; /* underflow / overflow */
439 i++;
440 tmpptr = &raw_name[i];
441 *creation = strtoull(tmpptr, &endptr, 10);
442 if(endptr == tmpptr)
443 return -1; /* No digit */
444 if(*creation == G_MAXUINT64)
445 return -1; /* underflow / overflow */
446 } else {
447 underscore_pos = i;
448
449 cpu_num = strtol(raw_name+underscore_pos+1, &endptr, 10);
450
451 if(endptr == raw_name+underscore_pos+1)
452 return -1; /* No digit */
453 if(cpu_num == LONG_MIN || cpu_num == LONG_MAX)
454 return -1; /* underflow / overflow */
455
456 if (!strncmp(raw_name, "flight-", sizeof("flight-") - 1)) {
457 raw_name += sizeof("flight-") - 1;
458 underscore_pos -= sizeof("flight-") - 1;
459 }
460 strncpy(char_name, raw_name, underscore_pos);
461 char_name[underscore_pos] = '\0';
462 *name = g_quark_from_string(char_name);
463 *num = cpu_num;
464 }
465
466
467 return 0;
468 }
469
470
471 GData **ltt_trace_get_tracefiles_groups(LttTrace *trace)
472 {
473 return &trace->tracefiles;
474 }
475
476
477 void compute_tracefile_group(GQuark key_id,
478 GArray *group,
479 struct compute_tracefile_group_args *args)
480 {
481 unsigned int i;
482 LttTracefile *tf;
483
484 for(i=0; i<group->len; i++) {
485 tf = &g_array_index (group, LttTracefile, i);
486 if(tf->cpu_online)
487 args->func(tf, args->func_args);
488 }
489 }
490
491
492 static void ltt_tracefile_group_destroy(gpointer data)
493 {
494 GArray *group = (GArray *)data;
495 unsigned int i;
496 LttTracefile *tf;
497
498 if (group->len > 0)
499 destroy_marker_data(g_array_index (group, LttTracefile, 0).mdata);
500 for(i=0; i<group->len; i++) {
501 tf = &g_array_index (group, LttTracefile, i);
502 if(tf->cpu_online)
503 ltt_tracefile_close(tf);
504 }
505 g_array_free(group, TRUE);
506 }
507
508 static __attribute__ ((__unused__)) gboolean ltt_tracefile_group_has_cpu_online(gpointer data)
509 {
510 GArray *group = (GArray *)data;
511 unsigned int i;
512 LttTracefile *tf;
513
514 for(i=0; i<group->len; i++) {
515 tf = &g_array_index (group, LttTracefile, i);
516 if(tf->cpu_online)
517 return 1;
518 }
519 return 0;
520 }
521
522
523 /* Open each tracefile under a specific directory. Put them in a
524 * GData container : this permits accessing them using their tracefile group
525 * pathname, i.e. the control/modules tracefile group is accessed with the
526 * key "control/modules".
527 *
528 * relative path is the path relative to the trace root
529 * root path is the full path
530 *
531 * A tracefile group is simply an array where all the per cpu tracefiles sit.
532 */
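/* Illustrative sketch (not compiled) of how a tracefile group is looked up
 * and iterated once open_tracefiles() has filled the GData container ; the
 * "kernel" channel name is only an example. */
#if 0
static void example_iterate_group(LttTrace *trace)
{
  GArray *group;
  guint i;

  group = g_datalist_id_get_data(&trace->tracefiles,
                                 g_quark_from_string("kernel"));
  if (group == NULL)
    return; /* no such tracefile group in this trace */

  for (i = 0; i < group->len; i++) {
    LttTracefile *tf = &g_array_index(group, LttTracefile, i);
    if (tf->cpu_online)
      g_debug("per-cpu tracefile %s, cpu %u",
              g_quark_to_string(tf->name), tf->cpu_num);
  }
}
#endif //0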
533
534 static int open_tracefiles(LttTrace *trace, gchar *root_path, gchar *relative_path)
535 {
536 DIR *dir = opendir(root_path);
537 struct dirent *entry;
538 struct stat stat_buf;
539 int ret, i;
540 struct marker_data *mdata;
541
542 gchar path[PATH_MAX];
543 int path_len;
544 gchar *path_ptr;
545
546 int rel_path_len;
547 gchar rel_path[PATH_MAX];
548 gchar *rel_path_ptr;
549 LttTracefile tmp_tf;
550
551 if(dir == NULL) {
552 perror(root_path);
553 return ENOENT;
554 }
555
556 strncpy(path, root_path, PATH_MAX-1);
557 path_len = strlen(path);
558 path[path_len] = '/';
559 path_len++;
560 path_ptr = path + path_len;
561
562 strncpy(rel_path, relative_path, PATH_MAX-1);
563 rel_path_len = strlen(rel_path);
564 rel_path[rel_path_len] = '/';
565 rel_path_len++;
566 rel_path_ptr = rel_path + rel_path_len;
567
568 while((entry = readdir(dir)) != NULL) {
569
570 if(entry->d_name[0] == '.') continue;
571
572 strncpy(path_ptr, entry->d_name, PATH_MAX - path_len);
573 strncpy(rel_path_ptr, entry->d_name, PATH_MAX - rel_path_len);
574
575 ret = stat(path, &stat_buf);
576 if(ret == -1) {
577 perror(path);
578 continue;
579 }
580
581 g_debug("Tracefile file or directory : %s\n", path);
582
583 // if(strcmp(rel_path, "/eventdefs") == 0) continue;
584
585 if(S_ISDIR(stat_buf.st_mode)) {
586
587 g_debug("Entering subdirectory...\n");
588 ret = open_tracefiles(trace, path, rel_path);
589 if(ret < 0) continue;
590 } else if(S_ISREG(stat_buf.st_mode)) {
591 GQuark name;
592 guint num;
593 gulong tid, pgid;
594 guint64 creation;
595 GArray *group;
596 num = 0;
597 tid = pgid = 0;
598 creation = 0;
599 if(get_tracefile_name_number(rel_path, &name, &num, &tid, &pgid, &creation))
600 continue; /* invalid name */
601
602 g_debug("Opening file.\n");
603 if(ltt_tracefile_open(trace, path, &tmp_tf)) {
604 g_info("Error opening tracefile %s", path);
605
606 continue; /* error opening the tracefile : bad magic number ? */
607 }
608
609 g_debug("Tracefile name is %s and number is %u",
610 g_quark_to_string(name), num);
611
612 mdata = NULL;
613 tmp_tf.cpu_online = 1;
614 tmp_tf.cpu_num = num;
615 tmp_tf.name = name;
616 tmp_tf.tid = tid;
617 tmp_tf.pgid = pgid;
618 tmp_tf.creation = creation;
619 group = g_datalist_id_get_data(&trace->tracefiles, name);
620 if(group == NULL) {
621 /* Elements are automatically cleared when the array is allocated.
622 * It makes the cpu_online variable set to 0 : cpu offline, by default.
623 */
624 group = g_array_sized_new (FALSE, TRUE, sizeof(LttTracefile), 10);
625 g_datalist_id_set_data_full(&trace->tracefiles, name,
626 group, ltt_tracefile_group_destroy);
627 mdata = allocate_marker_data();
628 if (!mdata)
629 g_error("Error in allocating marker data");
630 }
631
632 /* Add the per cpu tracefile to the named group */
633 unsigned int old_len = group->len;
634 if(num+1 > old_len)
635 group = g_array_set_size(group, num+1);
636
637 g_assert(group->len > 0);
638 if (!mdata)
639 mdata = g_array_index (group, LttTracefile, 0).mdata;
640
641 g_array_index (group, LttTracefile, num) = tmp_tf;
642 g_array_index (group, LttTracefile, num).event.tracefile =
643 &g_array_index (group, LttTracefile, num);
644 for (i = 0; i < group->len; i++)
645 g_array_index (group, LttTracefile, i).mdata = mdata;
646 }
647 }
648
649 closedir(dir);
650
651 return 0;
652 }
653
654
655 /* Presumes the tracefile is already positioned at the beginning. This makes
656 * sense, because it must be done just after opening the tracefile. */
657 static int ltt_process_metadata_tracefile(LttTracefile *tf)
658 {
659 int err;
660
661 while(1) {
662 err = ltt_tracefile_read_seek(tf);
663 if(err == EPERM) goto seek_error;
664 else if(err == ERANGE) break; /* End of tracefile */
665
666 err = ltt_tracefile_read_update_event(tf);
667 if(err) goto update_error;
668
669 /* The rules are :
670 * It contains only core events :
671 * 0 : set_marker_id
672 * 1 : set_marker_format
673 */
674 if(tf->event.event_id >= MARKER_CORE_IDS) {
675 /* Should only contain core events */
676 g_warning("Error in processing metadata file %s, "
677 "should not contain event id %u.", g_quark_to_string(tf->name),
678 tf->event.event_id);
679 err = EPERM;
680 goto event_id_error;
681 } else {
682 char *pos;
683 const char *channel_name, *marker_name, *format;
684 uint16_t id;
685 guint8 int_size, long_size, pointer_size, size_t_size, alignment;
686
687 switch((enum marker_id)tf->event.event_id) {
688 case MARKER_ID_SET_MARKER_ID:
689 channel_name = pos = tf->event.data;
690 pos += strlen(channel_name) + 1;
691 marker_name = pos;
692 g_debug("Doing MARKER_ID_SET_MARKER_ID of marker %s.%s",
693 channel_name, marker_name);
694 pos += strlen(marker_name) + 1;
695 pos += ltt_align((size_t)pos, sizeof(guint16), tf->alignment);
696 id = ltt_get_uint16(LTT_GET_BO(tf), pos);
697 g_debug("In MARKER_ID_SET_MARKER_ID of marker %s.%s id %hu",
698 channel_name, marker_name, id);
699 pos += sizeof(guint16);
700 int_size = *(guint8*)pos;
701 pos += sizeof(guint8);
702 long_size = *(guint8*)pos;
703 pos += sizeof(guint8);
704 pointer_size = *(guint8*)pos;
705 pos += sizeof(guint8);
706 size_t_size = *(guint8*)pos;
707 pos += sizeof(guint8);
708 alignment = *(guint8*)pos;
709 pos += sizeof(guint8);
710 marker_id_event(tf->trace,
711 g_quark_from_string(channel_name),
712 g_quark_from_string(marker_name),
713 id, int_size, long_size,
714 pointer_size, size_t_size, alignment);
715 break;
716 case MARKER_ID_SET_MARKER_FORMAT:
717 channel_name = pos = tf->event.data;
718 pos += strlen(channel_name) + 1;
719 marker_name = pos;
720 g_debug("Doing MARKER_ID_SET_MARKER_FORMAT of marker %s.%s",
721 channel_name, marker_name);
722 pos += strlen(marker_name) + 1;
723 format = pos;
724 pos += strlen(format) + 1;
725 marker_format_event(tf->trace,
726 g_quark_from_string(channel_name),
727 g_quark_from_string(marker_name),
728 format);
729 /* get information from dictionary TODO */
730 break;
731 default:
732 g_warning("Error in processing metadata file %s, "
733 "unknown event id %hhu.",
734 g_quark_to_string(tf->name),
735 tf->event.event_id);
736 err = EPERM;
737 goto event_id_error;
738 }
739 }
740 }
741 return 0;
742
743 /* Error handling */
744 event_id_error:
745 update_error:
746 seek_error:
747 g_warning("An error occured in metadata tracefile parsing");
748 return err;
749 }
750
751 /*
752 * Open a trace and return its LttTrace handle.
753 *
754 * pathname must be the directory of the trace
755 */
756
757 LttTrace *ltt_trace_open(const gchar *pathname)
758 {
759 gchar abs_path[PATH_MAX];
760 LttTrace * t;
761 LttTracefile *tf;
762 GArray *group;
763 unsigned int i;
764 int ret;
765 ltt_subbuffer_header_t *header;
766 DIR *dir;
767 struct dirent *entry;
768 struct stat stat_buf;
769 gchar path[PATH_MAX];
770
771 t = g_new(LttTrace, 1);
772 if(!t) goto alloc_error;
773
774 get_absolute_pathname(pathname, abs_path);
775 t->pathname = g_quark_from_string(abs_path);
776
777 g_datalist_init(&t->tracefiles);
778
779 /* Test to see if it looks like a trace */
780 dir = opendir(abs_path);
781 if(dir == NULL) {
782 perror(abs_path);
783 goto open_error;
784 }
785 while((entry = readdir(dir)) != NULL) {
786 strcpy(path, abs_path);
787 strcat(path, "/");
788 strcat(path, entry->d_name);
789 ret = stat(path, &stat_buf);
790 if(ret == -1) {
791 perror(path);
792 continue;
793 }
794 }
795 closedir(dir);
796
797 /* Open all the tracefiles */
798 if(open_tracefiles(t, abs_path, "")) {
799 g_warning("Error opening tracefile %s", abs_path);
800 goto find_error;
801 }
802
803 /* Parse each trace metadata_N files : get runtime fac. info */
804 group = g_datalist_id_get_data(&t->tracefiles, LTT_TRACEFILE_NAME_METADATA);
805 if(group == NULL) {
806 g_error("Trace %s has no metadata tracefile", abs_path);
807 g_assert(0);
808 goto find_error;
809 }
810
811 /*
812 * Get the trace information for the metadata_0 tracefile.
813 * Getting a correct trace start_time and start_tsc is ensured by the fact
814 * that no subbuffers are supposed to be lost in the metadata channel.
815 * Therefore, the first subbuffer contains the start_tsc timestamp in its
816 * buffer header.
817 */
818 g_assert(group->len > 0);
819 tf = &g_array_index (group, LttTracefile, 0);
820 header = (ltt_subbuffer_header_t *)tf->buffer.head;
821 ret = parse_trace_header(header, tf, t);
822 g_assert(!ret);
823
824 t->num_cpu = group->len;
825
826 //ret = allocate_marker_data(t);
827 //if (ret)
828 // g_error("Error in allocating marker data");
829
830 for(i=0; i<group->len; i++) {
831 tf = &g_array_index (group, LttTracefile, i);
832 if (tf->cpu_online)
833 if(ltt_process_metadata_tracefile(tf))
834 goto find_error;
835 // goto metadata_error;
836 }
837
838 return t;
839
840 /* Error handling */
841 //metadata_error:
842 // destroy_marker_data(t);
843 find_error:
844 g_datalist_clear(&t->tracefiles);
845 open_error:
846 g_free(t);
847 alloc_error:
848 return NULL;
849
850 }
851
852 /* Open another, completely independent, instance of a trace.
853 *
854 * A read on this new instance will read the first event of the trace.
855 *
856 * When we copy a trace, we want all the opening actions to happen again :
857 * the trace will be reopened and totally independent from the original.
858 * That's why we call ltt_trace_open.
859 */
860 LttTrace *ltt_trace_copy(LttTrace *self)
861 {
862 return ltt_trace_open(g_quark_to_string(self->pathname));
863 }
864
865 /*
866 * Close a trace
867 */
868
869 void ltt_trace_close(LttTrace *t)
870 {
871 g_datalist_clear(&t->tracefiles);
872 g_free(t);
873 }
874
875
876 /*****************************************************************************
877 * Get the start time and end time of the trace
878 ****************************************************************************/
879
880 void ltt_tracefile_time_span_get(LttTracefile *tf,
881 LttTime *start, LttTime *end)
882 {
883 int err;
884
885 err = map_block(tf, 0);
886 if(unlikely(err)) {
887 g_error("Can not map block");
888 *start = ltt_time_infinite;
889 } else
890 *start = tf->buffer.begin.timestamp;
891
892 err = map_block(tf, tf->num_blocks - 1); /* Last block */
893 if(unlikely(err)) {
894 g_error("Can not map block");
895 *end = ltt_time_zero;
896 } else
897 *end = tf->buffer.end.timestamp;
898 }
899
900 struct tracefile_time_span_get_args {
901 LttTrace *t;
902 LttTime *start;
903 LttTime *end;
904 };
905
906 static void group_time_span_get(GQuark name, gpointer data, gpointer user_data)
907 {
908 struct tracefile_time_span_get_args *args =
909 (struct tracefile_time_span_get_args*)user_data;
910
911 GArray *group = (GArray *)data;
912 unsigned int i;
913 LttTracefile *tf;
914 LttTime tmp_start;
915 LttTime tmp_end;
916
917 for(i=0; i<group->len; i++) {
918 tf = &g_array_index (group, LttTracefile, i);
919 if(tf->cpu_online) {
920 ltt_tracefile_time_span_get(tf, &tmp_start, &tmp_end);
921 if(ltt_time_compare(*args->start, tmp_start)>0) *args->start = tmp_start;
922 if(ltt_time_compare(*args->end, tmp_end)<0) *args->end = tmp_end;
923 }
924 }
925 }
926
927 /* return the start and end time of a trace */
928
929 void ltt_trace_time_span_get(LttTrace *t, LttTime *start, LttTime *end)
930 {
931 LttTime min_start = ltt_time_infinite;
932 LttTime max_end = ltt_time_zero;
933 struct tracefile_time_span_get_args args = { t, &min_start, &max_end };
934
935 g_datalist_foreach(&t->tracefiles, &group_time_span_get, &args);
936
937 if(start != NULL) *start = min_start;
938 if(end != NULL) *end = max_end;
939
940 }
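/* Illustrative usage sketch (not compiled) : open a trace directory, query
 * its time span and close it.  The path below is only an example. */
#if 0
static void example_trace_span(void)
{
  LttTrace *trace;
  LttTime start, end;

  trace = ltt_trace_open("/tmp/trace1");  /* hypothetical trace directory */
  if (trace == NULL)
    return;  /* not a valid trace */

  ltt_trace_time_span_get(trace, &start, &end);
  g_info("trace spans %lu.%09lu to %lu.%09lu",
         (unsigned long)start.tv_sec, (unsigned long)start.tv_nsec,
         (unsigned long)end.tv_sec, (unsigned long)end.tv_nsec);

  ltt_trace_close(trace);
}
#endif //0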
941
942
943 /* Seek to the first event in a tracefile that has a time equal to or greater
944 * than the time passed in parameter.
945 *
946 * If the time parameter is outside the tracefile time span, seek to the first
947 * event, or, if the time is after the span, return ERANGE.
948 *
949 * If the time parameter is before the first event, we have to seek specially
950 * to it.
951 *
952 * If the time is after the end of the trace, return ERANGE.
953 *
954 * Do a binary search to find the right block, then a sequential search in the
955 * block to find the event.
956 *
957 * In the special case where the requested time falls inside a block that has
958 * no event corresponding to it, the first event of the next block is
959 * selected.
960 *
961 * IMPORTANT NOTE : // FIXME everywhere...
962 *
963 * You MUST NOT do a ltt_tracefile_read right after a ltt_tracefile_seek_time :
964 * you will jump over an event if you do.
965 *
966 * Return value : 0 : no error, the tf->event can be used
967 *                ERANGE : time is after the last event of the trace
968 *                otherwise : this is an error.
969 *
970 * */
971
972 int ltt_tracefile_seek_time(LttTracefile *tf, LttTime time)
973 {
974 int ret = 0;
975 int err;
976 unsigned int block_num, high, low;
977
978 /* seek at the beginning of trace */
979 err = map_block(tf, 0); /* First block */
980 if(unlikely(err)) {
981 g_error("Can not map block");
982 goto fail;
983 }
984
985 /* If the time is lower or equal the beginning of the trace,
986 * go to the first event. */
987 if(ltt_time_compare(time, tf->buffer.begin.timestamp) <= 0) {
988 ret = ltt_tracefile_read(tf);
989 if(ret == ERANGE) goto range;
990 else if (ret) goto fail;
991 goto found; /* There is either no event in the trace or the event points
992 to the first event in the trace */
993 }
994
995 err = map_block(tf, tf->num_blocks - 1); /* Last block */
996 if(unlikely(err)) {
997 g_error("Can not map block");
998 goto fail;
999 }
1000
1001 /* If the time is after the end of the trace, return ERANGE. */
1002 if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
1003 goto range;
1004 }
1005
1006 /* Binary search the block */
1007 high = tf->num_blocks - 1;
1008 low = 0;
1009
1010 while(1) {
1011 block_num = ((high-low) / 2) + low;
1012
1013 err = map_block(tf, block_num);
1014 if(unlikely(err)) {
1015 g_error("Can not map block");
1016 goto fail;
1017 }
1018 if(high == low) {
1019 /* We cannot divide anymore : this is what would happen if the time
1020 * requested was exactly between two consecutive buffers' end and start
1021 * timestamps. This is also what would happen if we hadn't dealt with the out
1022 * of span cases earlier in this function. */
1023 /* The event is right in the buffer!
1024 * (or in the next buffer first event) */
1025 while(1) {
1026 ret = ltt_tracefile_read(tf);
1027 if(ret == ERANGE) goto range; /* ERANGE or EPERM */
1028 else if(ret) goto fail;
1029
1030 if(ltt_time_compare(time, tf->event.event_time) <= 0)
1031 goto found;
1032 }
1033
1034 } else if(ltt_time_compare(time, tf->buffer.begin.timestamp) < 0) {
1035 /* go to lower part */
1036 high = block_num - 1;
1037 } else if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
1038 /* go to higher part */
1039 low = block_num + 1;
1040 } else {/* The event is right in the buffer!
1041 (or in the next buffer first event) */
1042 while(1) {
1043 ret = ltt_tracefile_read(tf);
1044 if(ret == ERANGE) goto range; /* ERANGE or EPERM */
1045 else if(ret) goto fail;
1046
1047 if(ltt_time_compare(time, tf->event.event_time) <= 0)
1048 break;
1049 }
1050 goto found;
1051 }
1052 }
1053
1054 found:
1055 return 0;
1056 range:
1057 return ERANGE;
1058
1059 /* Error handling */
1060 fail:
1061 g_error("ltt_tracefile_seek_time failed on tracefile %s",
1062 g_quark_to_string(tf->name));
1063 return EPERM;
1064 }
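/* Illustrative usage sketch (not compiled) : position a tracefile at a given
 * time and walk the following events.  After a successful
 * ltt_tracefile_seek_time(), tf->event already describes the first event at
 * or after the requested time, so ltt_tracefile_read() is only used to move
 * to the subsequent events. */
#if 0
static void example_walk_from(LttTracefile *tf, LttTime from)
{
  LttEvent *ev;
  int err;

  if (ltt_tracefile_seek_time(tf, from) != 0)
    return;  /* requested time is after the trace, or an error occurred */

  ev = ltt_tracefile_get_event(tf);
  do {
    g_debug("event id %u at %lu.%09lu", (guint)ev->event_id,
            (unsigned long)ev->event_time.tv_sec,
            (unsigned long)ev->event_time.tv_nsec);
    err = ltt_tracefile_read(tf);
  } while (err == 0);
}
#endif //0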
1065
1066 /* Seek to a position indicated by an LttEventPosition
1067 */
1068
1069 int ltt_tracefile_seek_position(LttTracefile *tf, const LttEventPosition *ep)
1070 {
1071 int err;
1072
1073 if(ep->tracefile != tf) {
1074 goto fail;
1075 }
1076
1077 err = map_block(tf, ep->block);
1078 if(unlikely(err)) {
1079 g_error("Can not map block");
1080 goto fail;
1081 }
1082
1083 tf->event.offset = ep->offset;
1084
1085 /* Put back the event real tsc */
1086 tf->event.tsc = ep->tsc;
1087 tf->buffer.tsc = ep->tsc;
1088
1089 err = ltt_tracefile_read_update_event(tf);
1090 if(err) goto fail;
1091
1092 /* deactivate this, as it does nothing for now
1093 err = ltt_tracefile_read_op(tf);
1094 if(err) goto fail;
1095 */
1096
1097 return 0;
1098
1099 fail:
1100 g_error("ltt_tracefile_seek_time failed on tracefile %s",
1101 g_quark_to_string(tf->name));
1102 return 1;
1103 }
1104
1105 /* Given a TSC value, return the LttTime (seconds,nanoseconds) it
1106 * corresponds to.
1107 */
1108
1109 LttTime ltt_interpolate_time_from_tsc(LttTracefile *tf, guint64 tsc)
1110 {
1111 LttTime time;
1112
1113 if(tsc > tf->trace->start_tsc) {
1114 time = ltt_time_from_uint64(
1115 (double)(tsc - tf->trace->start_tsc)
1116 * 1000000000.0 * tf->trace->freq_scale
1117 / (double)tf->trace->start_freq);
1118 time = ltt_time_add(tf->trace->start_time_from_tsc, time);
1119 } else {
1120 time = ltt_time_from_uint64(
1121 (double)(tf->trace->start_tsc - tsc)
1122 * 1000000000.0 * tf->trace->freq_scale
1123 / (double)tf->trace->start_freq);
1124 time = ltt_time_sub(tf->trace->start_time_from_tsc, time);
1125 }
1126 return time;
1127 }
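/* Example (illustrative numbers) : with start_freq = 1000000000 (1 GHz) and
 * freq_scale = 1, an event 2500000 cycles after start_tsc is interpolated at
 * 2500000 * 1000000000 * 1 / 1000000000 = 2500000 ns, i.e. 2.5 ms after
 * start_time_from_tsc. */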
1128
1129 /* Calculate the real event time based on the buffer boundaries */
1130 LttTime ltt_interpolate_time(LttTracefile *tf, LttEvent *event)
1131 {
1132 return ltt_interpolate_time_from_tsc(tf, tf->buffer.tsc);
1133 }
1134
1135
1136 /* Get the current event of the tracefile : valid until the next read */
1137 LttEvent *ltt_tracefile_get_event(LttTracefile *tf)
1138 {
1139 return &tf->event;
1140 }
1141
1142
1143
1144 /*****************************************************************************
1145 *Function name
1146 * ltt_tracefile_read : Read the next event in the tracefile
1147 *Input params
1148 * t : tracefile
1149 *Return value
1150 *
1151 * Returns 0 if an event can be used in tf->event.
1152 * Returns ERANGE on end of trace. The event in tf->event still can be used
1153 * (if the last block was not empty).
1154 * Returns EPERM on error.
1155 *
1156 * This function updates the tracefile event structure (tf->event) so that it
1157 * describes the event the tracefile is currently positioned on.
1158 *
1159 * Note : you must call a ltt_tracefile_seek to the beginning of the trace to
1160 * reinitialize it after an error if you want results to be coherent.
1161 * This can happen if the last buffer of the trace has no event : an error
1162 * would be returned instead of the end of trace.
1163 * We make the assumption there is at least one event per buffer.
1164 ****************************************************************************/
1165
1166 int ltt_tracefile_read(LttTracefile *tf)
1167 {
1168 int err;
1169
1170 err = ltt_tracefile_read_seek(tf);
1171 if(err) return err;
1172 err = ltt_tracefile_read_update_event(tf);
1173 if(err) return err;
1174
1175 /* deactivate this, as it does nothing for now
1176 err = ltt_tracefile_read_op(tf);
1177 if(err) return err;
1178 */
1179
1180 return 0;
1181 }
1182
1183 int ltt_tracefile_read_seek(LttTracefile *tf)
1184 {
1185 int err;
1186
1187 /* Get next buffer until we finally have an event, or end of trace */
1188 while(1) {
1189 err = ltt_seek_next_event(tf);
1190 if(unlikely(err == ENOPROTOOPT)) {
1191 return EPERM;
1192 }
1193
1194 /* Are we at the end of the buffer ? */
1195 if(err == ERANGE) {
1196 if(unlikely(tf->buffer.index == tf->num_blocks-1)){ /* end of trace ? */
1197 return ERANGE;
1198 } else {
1199 /* get next block */
1200 err = map_block(tf, tf->buffer.index + 1);
1201 if(unlikely(err)) {
1202 g_error("Can not map block");
1203 return EPERM;
1204 }
1205 }
1206 } else break; /* We found an event ! */
1207 }
1208
1209 return 0;
1210 }
1211
1212 /* do an operation when reading a new event */
1213
1214 /* This function does nothing for now */
1215 #if 0
1216 int ltt_tracefile_read_op(LttTracefile *tf)
1217 {
1218 LttEvent *event;
1219
1220 event = &tf->event;
1221
1222 /* do event specific operation */
1223
1224 /* nothing */
1225
1226 return 0;
1227 }
1228 #endif
1229
1230 static void print_debug_event_header(LttEvent *ev, void *start_pos, void *end_pos)
1231 {
1232 unsigned int offset = 0;
1233 int i, j;
1234
1235 g_printf("Event header (tracefile %s offset %" PRIx64 "):\n",
1236 g_quark_to_string(ev->tracefile->long_name),
1237 ((uint64_t)ev->tracefile->buffer.index * ev->tracefile->buf_size)
1238 + (long)start_pos - (long)ev->tracefile->buffer.head);
1239
1240 while (offset < (long)end_pos - (long)start_pos) {
1241 g_printf("%8lx", (long)start_pos - (long)ev->tracefile->buffer.head + offset);
1242 g_printf(" ");
1243
1244 for (i = 0; i < 4 ; i++) {
1245 for (j = 0; j < 4; j++) {
1246 if (offset + ((i * 4) + j) <
1247 (long)end_pos - (long)start_pos)
1248 g_printf("%02hhX",
1249 ((char*)start_pos)[offset + ((i * 4) + j)]);
1250 else
1251 g_printf(" ");
1252 g_printf(" ");
1253 }
1254 if (i < 4)
1255 g_printf(" ");
1256 }
1257 offset+=16;
1258 g_printf("\n");
1259 }
1260 }
1261
1262
1263 /* same as ltt_tracefile_read, but does not seek to the next event nor call
1264 * event specific operation. */
1265 int ltt_tracefile_read_update_event(LttTracefile *tf)
1266 {
1267 void * pos;
1268 LttEvent *event;
1269 void *pos_aligned;
1270
1271 event = &tf->event;
1272 pos = tf->buffer.head + event->offset;
1273
1274 /* Read event header */
1275
1276 /* Align the head */
1277 pos += ltt_align((size_t)pos, sizeof(guint32), tf->alignment);
1278 pos_aligned = pos;
1279
1280 event->timestamp = ltt_get_uint32(LTT_GET_BO(tf), pos);
1281 event->event_id = event->timestamp >> tf->tscbits;
1282 event->timestamp = event->timestamp & tf->tsc_mask;
1283 pos += sizeof(guint32);
1284
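  /* The 5 id bits of the header word can hold the reserved escape values
   * 29 (LTT_RFLAG_ID_SIZE_TSC), 30 (LTT_RFLAG_ID_SIZE) and 31 (LTT_RFLAG_ID),
   * in which case the real id (and possibly size and full TSC) follow.
   * Illustrative decode : with tscbits = 27, a header word 0xE8000010 yields
   * event_id 0xE8000010 >> 27 = 29 and timestamp 0xE8000010 & 0x7ffffff =
   * 0x10. */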
1285 switch (event->event_id) {
1286 case 29: /* LTT_RFLAG_ID_SIZE_TSC */
1287 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1288 pos += sizeof(guint16);
1289 event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
1290 pos += sizeof(guint16);
1291 if (event->event_size == 0xFFFF) {
1292 event->event_size = ltt_get_uint32(LTT_GET_BO(tf), pos);
1293 pos += sizeof(guint32);
1294 }
1295 pos += ltt_align((size_t)pos, sizeof(guint64), tf->alignment);
1296 tf->buffer.tsc = ltt_get_uint64(LTT_GET_BO(tf), pos);
1297 pos += sizeof(guint64);
1298 break;
1299 case 30: /* LTT_RFLAG_ID_SIZE */
1300 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1301 pos += sizeof(guint16);
1302 event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
1303 pos += sizeof(guint16);
1304 if (event->event_size == 0xFFFF) {
1305 event->event_size = ltt_get_uint32(LTT_GET_BO(tf), pos);
1306 pos += sizeof(guint32);
1307 }
1308 break;
1309 case 31: /* LTT_RFLAG_ID */
1310 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1311 pos += sizeof(guint16);
1312 event->event_size = G_MAXUINT;
1313 break;
1314 default:
1315 event->event_size = G_MAXUINT;
1316 break;
1317 }
1318
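  /* TSC overflow handling (illustrative) : with tscbits = 27, tsc_mask is
   * 0x7ffffff.  If the low 27 bits of buffer.tsc were 0x7fffff0 and the new
   * event carries timestamp 0x0000010, the value went down, so the 27-bit
   * counter wrapped and tsc_mask_next_bit (1 << 27) is added to the upper
   * bits when reconstructing the full 64-bit TSC below. */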
1319 if (likely(event->event_id != 29)) {
1320 /* No extended timestamp */
1321 if (event->timestamp < (tf->buffer.tsc & tf->tsc_mask))
1322 tf->buffer.tsc = ((tf->buffer.tsc & ~tf->tsc_mask) /* overflow */
1323 + tf->tsc_mask_next_bit)
1324 | (guint64)event->timestamp;
1325 else
1326 tf->buffer.tsc = (tf->buffer.tsc & ~tf->tsc_mask) /* no overflow */
1327 | (guint64)event->timestamp;
1328 }
1329 event->tsc = tf->buffer.tsc;
1330
1331 event->event_time = ltt_interpolate_time(tf, event);
1332
1333 if (a_event_debug)
1334 print_debug_event_header(event, pos_aligned, pos);
1335
1336 event->data = pos;
1337
1338 /*
1339 * Let ltt_update_event_size update event->data according to the largest
1340 * alignment within the payload.
1341 * Get the data size and update the event fields with the current
1342 * information. */
1343 ltt_update_event_size(tf);
1344
1345 return 0;
1346 }
1347
1348
1349 /****************************************************************************
1350 *Function name
1351 * map_block : map a block from the file
1352 *Input Params
1353 * tf : ltt tracefile
1354 * block_num : the block which will be read
1355 *return value
1356 * 0 : success
1357 * EINVAL : lseek fail
1358 * EIO : can not read from the file
1359 ****************************************************************************/
1360
1361 static gint map_block(LttTracefile * tf, guint block_num)
1362 {
1363 int page_size = getpagesize();
1364 ltt_subbuffer_header_t *header;
1365
1366 g_assert(block_num < tf->num_blocks);
1367
1368 if(tf->buffer.head != NULL) {
1369 if(munmap(tf->buffer.head, PAGE_ALIGN(tf->buf_size))) {
1370 g_warning("unmap size : %u\n",
1371 PAGE_ALIGN(tf->buf_size));
1372 perror("munmap error");
1373 g_assert(0);
1374 }
1375 }
1376
1377 /* Multiple of pages aligned head */
1378 tf->buffer.head = mmap(0,
1379 PAGE_ALIGN(tf->buf_size),
1380 PROT_READ, MAP_PRIVATE, tf->fd,
1381 PAGE_ALIGN((off_t)tf->buf_size * (off_t)block_num));
1382
1383 if(tf->buffer.head == MAP_FAILED) {
1384 perror("Error in allocating memory for buffer of tracefile");
1385 g_assert(0);
1386 goto map_error;
1387 }
1388 g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
1389
1390
1391 tf->buffer.index = block_num;
1392
1393 header = (ltt_subbuffer_header_t *)tf->buffer.head;
1394
1395 tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
1396 &header->cycle_count_begin);
1397 tf->buffer.begin.freq = tf->trace->start_freq;
1398
1399 tf->buffer.begin.timestamp = ltt_interpolate_time_from_tsc(tf,
1400 tf->buffer.begin.cycle_count);
1401 tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
1402 &header->cycle_count_end);
1403 tf->buffer.end.freq = tf->trace->start_freq;
1404
1405 tf->buffer.lost_size = ltt_get_uint32(LTT_GET_BO(tf),
1406 &header->lost_size);
1407 tf->buffer.end.timestamp = ltt_interpolate_time_from_tsc(tf,
1408 tf->buffer.end.cycle_count);
1409 tf->buffer.tsc = tf->buffer.begin.cycle_count;
1410 tf->event.tsc = tf->buffer.tsc;
1411 tf->buffer.freq = tf->buffer.begin.freq;
1412
1413 /* FIXME
1414 * eventually support variable buffer size : will need a partial pre-read of
1415 * the headers to create an index when we open the trace... eventually. */
1416 g_assert(tf->buf_size == ltt_get_uint32(LTT_GET_BO(tf),
1417 &header->buf_size));
1418
1419 /* Make the current event point to the beginning of the buffer :
1420 * it means that the event read must get the first event. */
1421 tf->event.tracefile = tf;
1422 tf->event.block = block_num;
1423 tf->event.offset = 0;
1424
1425 if (header->events_lost) {
1426 g_warning("%d events lost so far in tracefile %s at block %u",
1427 (guint)header->events_lost,
1428 g_quark_to_string(tf->long_name),
1429 block_num);
1430 tf->events_lost = header->events_lost;
1431 }
1432 if (header->subbuf_corrupt) {
1433 g_warning("%d subbuffer(s) corrupted so far in tracefile %s at block %u",
1434 (guint)header->subbuf_corrupt,
1435 g_quark_to_string(tf->long_name),
1436 block_num);
1437 tf->subbuf_corrupt = header->subbuf_corrupt;
1438 }
1439
1440 return 0;
1441
1442 map_error:
1443 return -errno;
1444 }
1445
1446 static void print_debug_event_data(LttEvent *ev)
1447 {
1448 unsigned int offset = 0;
1449 int i, j;
1450
1451 if (!max(ev->event_size, ev->data_size))
1452 return;
1453
1454 g_printf("Event data (tracefile %s offset %" PRIx64 "):\n",
1455 g_quark_to_string(ev->tracefile->long_name),
1456 ((uint64_t)ev->tracefile->buffer.index * ev->tracefile->buf_size)
1457 + (long)ev->data - (long)ev->tracefile->buffer.head);
1458
1459 while (offset < max(ev->event_size, ev->data_size)) {
1460 g_printf("%8lx", (long)ev->data + offset
1461 - (long)ev->tracefile->buffer.head);
1462 g_printf(" ");
1463
1464 for (i = 0; i < 4 ; i++) {
1465 for (j = 0; j < 4; j++) {
1466 if (offset + ((i * 4) + j) < max(ev->event_size, ev->data_size))
1467 g_printf("%02hhX", ((char*)ev->data)[offset + ((i * 4) + j)]);
1468 else
1469 g_printf(" ");
1470 g_printf(" ");
1471 }
1472 if (i < 4)
1473 g_printf(" ");
1474 }
1475
1476 g_printf(" ");
1477
1478 for (i = 0; i < 4; i++) {
1479 for (j = 0; j < 4; j++) {
1480 if (offset + ((i * 4) + j) < max(ev->event_size, ev->data_size)) {
1481 if (isprint(((char*)ev->data)[offset + ((i * 4) + j)]))
1482 g_printf("%c", ((char*)ev->data)[offset + ((i * 4) + j)]);
1483 else
1484 g_printf(".");
1485 } else
1486 g_printf(" ");
1487 }
1488 }
1489 offset+=16;
1490 g_printf("\n");
1491 }
1492 }
1493
1494 /* It will update the fields offsets too */
1495 void ltt_update_event_size(LttTracefile *tf)
1496 {
1497 off_t size = 0;
1498 struct marker_info *info;
1499
1500 if (tf->name == LTT_TRACEFILE_NAME_METADATA) {
1501 switch((enum marker_id)tf->event.event_id) {
1502 case MARKER_ID_SET_MARKER_ID:
1503 size = strlen((char*)tf->event.data) + 1;
1504 g_debug("marker %s id set", (char*)tf->event.data + size);
1505 size += strlen((char*)tf->event.data + size) + 1;
1506 size += ltt_align(size, sizeof(guint16), tf->alignment);
1507 size += sizeof(guint16);
1508 size += sizeof(guint8);
1509 size += sizeof(guint8);
1510 size += sizeof(guint8);
1511 size += sizeof(guint8);
1512 size += sizeof(guint8);
1513 break;
1514 case MARKER_ID_SET_MARKER_FORMAT:
1515 size = strlen((char*)tf->event.data) + 1;
1516 g_debug("marker %s format set", (char*)tf->event.data);
1517 size += strlen((char*)tf->event.data + size) + 1;
1518 size += strlen((char*)tf->event.data + size) + 1;
1519 break;
1520 }
1521 }
1522
1523 info = marker_get_info_from_id(tf->mdata, tf->event.event_id);
1524
1525 if (tf->event.event_id >= MARKER_CORE_IDS)
1526 g_assert(info != NULL);
1527
1528 /* Do not update field offsets of core markers when initially reading the
1529 * metadata tracefile when the infos about these markers do not exist yet.
1530 */
1531 if (likely(info && info->fields)) {
1532 /* alignment */
1533 tf->event.data += ltt_align((off_t)(unsigned long)tf->event.data,
1534 info->largest_align,
1535 info->alignment);
1536 /* size, dynamically computed */
1537 if (info->size != -1)
1538 size = info->size;
1539 else
1540 size = marker_update_fields_offsets(marker_get_info_from_id(tf->mdata,
1541 tf->event.event_id), tf->event.data);
1542 }
1543
1544 tf->event.data_size = size;
1545
1546 /* Check consistency between kernel and LTTV structure sizes */
1547 if(tf->event.event_size == G_MAXUINT) {
1548 /* Event size too big to fit in the event size field */
1549 tf->event.event_size = tf->event.data_size;
1550 }
1551
1552 if (a_event_debug)
1553 print_debug_event_data(&tf->event);
1554
1555 if (tf->event.data_size != tf->event.event_size) {
1556 struct marker_info *info = marker_get_info_from_id(tf->mdata,
1557 tf->event.event_id);
1558 if (!info)
1559 g_error("Undescribed event %hhu in channel %s", tf->event.event_id,
1560 g_quark_to_string(tf->name));
1561 g_error("Kernel/LTTV event size differs for event %s: kernel %u, LTTV %u",
1562 g_quark_to_string(info->name),
1563 tf->event.event_size, tf->event.data_size);
1564 exit(-1);
1565 }
1566 }
1567
1568
1569 /* Take the tf current event offset and use the event id to figure out where
1570 * the next event offset is.
1571 *
1572 * This is an internal function not meant to be used elsewhere : it will not
1573 * jump over the current block limits. Please consider using
1574 * ltt_tracefile_read to do this.
1575 *
1576 * Returns 0 on success
1577 *         ERANGE if we are at the end of the buffer.
1578 *         ENOPROTOOPT if an error occurred when getting the current event size.
1579 */
1580 static int ltt_seek_next_event(LttTracefile *tf)
1581 {
1582 int ret = 0;
1583 void *pos;
1584
1585 /* seek over the buffer header if we are at the buffer start */
1586 if(tf->event.offset == 0) {
1587 tf->event.offset += tf->buffer_header_size;
1588
1589 if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
1590 ret = ERANGE;
1591 }
1592 goto found;
1593 }
1594
1595 pos = tf->event.data;
1596
1597 if(tf->event.data_size < 0) goto error;
1598
1599 pos += (size_t)tf->event.data_size;
1600
1601 tf->event.offset = pos - tf->buffer.head;
1602
1603 if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
1604 ret = ERANGE;
1605 goto found;
1606 }
1607 g_assert(tf->event.offset < tf->buf_size - tf->buffer.lost_size);
1608
1609 found:
1610 return ret;
1611
1612 error:
1613 g_error("Error in ltt_seek_next_event for tracefile %s",
1614 g_quark_to_string(tf->name));
1615 return ENOPROTOOPT;
1616 }
1617
1618 #if 0
1619 /*****************************************************************************
1620 *Function name
1621 * set_fields_offsets : set the precomputable offset of the fields
1622 *Input params
1623 * tracefile : opened trace file
1624 * event_type : the event type
1625 ****************************************************************************/
1626
1627 void set_fields_offsets(LttTracefile *tf, LttEventType *event_type)
1628 {
1629 LttField *field = event_type->root_field;
1630 enum field_status fixed_root = FIELD_FIXED, fixed_parent = FIELD_FIXED;
1631
1632 if(likely(field))
1633 preset_field_type_size(tf, event_type, 0, 0,
1634 &fixed_root, &fixed_parent,
1635 field);
1636
1637 }
1638 #endif //0
1639
1640
1641 /*****************************************************************************
1642 *Function name
1643 * get_alignment : Get the alignment needed for a field.
1644 *Input params
1645 * field : field
1646 *
1647 * returns : The size on which it must be aligned.
1648 *
1649 ****************************************************************************/
1650 #if 0
1651 off_t get_alignment(LttField *field)
1652 {
1653 LttType *type = &field->field_type;
1654
1655 switch(type->type_class) {
1656 case LTT_INT_FIXED:
1657 case LTT_UINT_FIXED:
1658 case LTT_POINTER:
1659 case LTT_CHAR:
1660 case LTT_UCHAR:
1661 case LTT_SHORT:
1662 case LTT_USHORT:
1663 case LTT_INT:
1664 case LTT_UINT:
1665 case LTT_LONG:
1666 case LTT_ULONG:
1667 case LTT_SIZE_T:
1668 case LTT_SSIZE_T:
1669 case LTT_OFF_T:
1670 case LTT_FLOAT:
1671 case LTT_ENUM:
1672 /* Align offset on type size */
1673 g_assert(field->field_size != 0);
1674 return field->field_size;
1675 break;
1676 case LTT_STRING:
1677 return 1;
1678 break;
1679 case LTT_ARRAY:
1680 g_assert(type->fields->len == 1);
1681 {
1682 LttField *child = &g_array_index(type->fields, LttField, 0);
1683 return get_alignment(child);
1684 }
1685 break;
1686 case LTT_SEQUENCE:
1687 g_assert(type->fields->len == 2);
1688 {
1689 off_t localign = 1;
1690 LttField *child = &g_array_index(type->fields, LttField, 0);
1691
1692 localign = max(localign, get_alignment(child));
1693
1694 child = &g_array_index(type->fields, LttField, 1);
1695 localign = max(localign, get_alignment(child));
1696
1697 return localign;
1698 }
1699 break;
1700 case LTT_STRUCT:
1701 case LTT_UNION:
1702 {
1703 guint i;
1704 off_t localign = 1;
1705
1706 for(i=0; i<type->fields->len; i++) {
1707 LttField *child = &g_array_index(type->fields, LttField, i);
1708 localign = max(localign, get_alignment(child));
1709 }
1710 return localign;
1711 }
1712 break;
1713 case LTT_NONE:
1714 default:
1715 g_error("get_alignment : unknown type");
1716 return -1;
1717 }
1718 }
1719
1720 #endif //0
1721
1722 /*****************************************************************************
1723 *Function name
1724 * field_compute_static_size : Determine the size of fields known by their
1725 * sole definition. Unions, arrays and struct sizes might be known, but
1726 * the parser does not give that information.
1727 *Input params
1728 * tf : tracefile
1729 * field : field
1730 *
1731 ****************************************************************************/
1732 #if 0
1733 void field_compute_static_size(LttFacility *fac, LttField *field)
1734 {
1735 LttType *type = &field->field_type;
1736
1737 switch(type->type_class) {
1738 case LTT_INT_FIXED:
1739 case LTT_UINT_FIXED:
1740 case LTT_POINTER:
1741 case LTT_CHAR:
1742 case LTT_UCHAR:
1743 case LTT_SHORT:
1744 case LTT_USHORT:
1745 case LTT_INT:
1746 case LTT_UINT:
1747 case LTT_LONG:
1748 case LTT_ULONG:
1749 case LTT_SIZE_T:
1750 case LTT_SSIZE_T:
1751 case LTT_OFF_T:
1752 case LTT_FLOAT:
1753 case LTT_ENUM:
1754 case LTT_STRING:
1755 /* nothing to do */
1756 break;
1757 case LTT_ARRAY:
1758 /* note this : the array type size is the number of elements in the array,
1759 * while the array field size is the length of the array in bytes */
1760 g_assert(type->fields->len == 1);
1761 {
1762 LttField *child = &g_array_index(type->fields, LttField, 0);
1763 field_compute_static_size(fac, child);
1764
1765 if(child->field_size != 0) {
1766 field->field_size = type->size * child->field_size;
1767 field->dynamic_offsets = g_array_sized_new(FALSE, TRUE,
1768 sizeof(off_t), type->size);
1769 } else {
1770 field->field_size = 0;
1771 }
1772 }
1773 break;
1774 case LTT_SEQUENCE:
1775 g_assert(type->fields->len == 2);
1776 {
1777 off_t local_offset = 0;
1778 LttField *child = &g_array_index(type->fields, LttField, 1);
1779 field_compute_static_size(fac, child);
1780 field->field_size = 0;
1781 type->size = 0;
1782 if(child->field_size != 0) {
1783 field->dynamic_offsets = g_array_sized_new(FALSE, TRUE,
1784 sizeof(off_t), SEQUENCE_AVG_ELEMENTS);
1785 }
1786 }
1787 break;
1788 case LTT_STRUCT:
1789 case LTT_UNION:
1790 {
1791 guint i;
1792 for(i=0;i<type->fields->len;i++) {
1793 LttField *child = &g_array_index(type->fields, LttField, i);
1794 field_compute_static_size(fac, child);
1795 if(child->field_size != 0) {
1796 type->size += ltt_align(type->size, get_alignment(child),
1797 fac->alignment);
1798 type->size += child->field_size;
1799 } else {
1800 /* As soon as we find a child with variable size, we have
1801 * a variable size */
1802 type->size = 0;
1803 break;
1804 }
1805 }
1806 field->field_size = type->size;
1807 }
1808 break;
1809 default:
1810 g_error("field_static_size : unknown type");
1811 }
1812
1813 }
1814 #endif //0
1815
1816
1817 /*****************************************************************************
1818 *Function name
1819 * precompute_fields_offsets : set the precomputable offset of the fields
1820 *Input params
1821 * fac : facility
1822 * field : the field
1823 * offset : pointer to the current offset, must be incremented
1824 *
1825 * return : 1 : found a variable length field, stop the processing.
1826 * 0 otherwise.
1827 ****************************************************************************/
1828
1829 #if 0
1830 gint precompute_fields_offsets(LttFacility *fac, LttField *field, off_t *offset, gint is_compact)
1831 {
1832 LttType *type = &field->field_type;
1833
1834 if(unlikely(is_compact)) {
1835 g_assert(field->field_size != 0);
1836 /* FIXME THIS IS A HUUUUUGE hack :
1837 * offset is between the compact_data field in struct LttEvent
1838 * and the address of the field root in the memory map.
1839 * ark. Both will stay at the same addresses while the event
1840 * is readable, so it's ok.
1841 */
1842 field->offset_root = 0;
1843 field->fixed_root = FIELD_FIXED;
1844 return 0;
1845 }
1846
1847 switch(type->type_class) {
1848 case LTT_INT_FIXED:
1849 case LTT_UINT_FIXED:
1850 case LTT_POINTER:
1851 case LTT_CHAR:
1852 case LTT_UCHAR:
1853 case LTT_SHORT:
1854 case LTT_USHORT:
1855 case LTT_INT:
1856 case LTT_UINT:
1857 case LTT_LONG:
1858 case LTT_ULONG:
1859 case LTT_SIZE_T:
1860 case LTT_SSIZE_T:
1861 case LTT_OFF_T:
1862 case LTT_FLOAT:
1863 case LTT_ENUM:
1864 g_assert(field->field_size != 0);
1865 /* Align offset on type size */
1866 *offset += ltt_align(*offset, get_alignment(field),
1867 fac->alignment);
1868 /* remember offset */
1869 field->offset_root = *offset;
1870 field->fixed_root = FIELD_FIXED;
1871 /* Increment offset */
1872 *offset += field->field_size;
1873 return 0;
1874 break;
1875 case LTT_STRING:
1876 field->offset_root = *offset;
1877 field->fixed_root = FIELD_FIXED;
1878 return 1;
1879 break;
1880 case LTT_ARRAY:
1881 g_assert(type->fields->len == 1);
1882 {
1883 LttField *child = &g_array_index(type->fields, LttField, 0);
1884
1885 *offset += ltt_align(*offset, get_alignment(field),
1886 fac->alignment);
1887
1888 /* remember offset */
1889 field->offset_root = *offset;
1890 field->array_offset = *offset;
1891 field->fixed_root = FIELD_FIXED;
1892
1893 /* Let the child be variable */
1894 //precompute_fields_offsets(tf, child, offset);
1895
1896 if(field->field_size != 0) {
1897 /* Increment offset */
1898 /* field_size is the array size in bytes */
1899 *offset += field->field_size;
1900 return 0;
1901 } else {
1902 return 1;
1903 }
1904 }
1905 break;
1906 case LTT_SEQUENCE:
1907 g_assert(type->fields->len == 2);
1908 {
1909 LttField *child;
1910 guint ret;
1911
1912 *offset += ltt_align(*offset, get_alignment(field),
1913 fac->alignment);
1914
1915 /* remember offset */
1916 field->offset_root = *offset;
1917 field->fixed_root = FIELD_FIXED;
1918
1919 child = &g_array_index(type->fields, LttField, 0);
1920 ret = precompute_fields_offsets(fac, child, offset, is_compact);
1921 g_assert(ret == 0); /* Seq len cannot have variable len */
1922
1923 child = &g_array_index(type->fields, LttField, 1);
1924 *offset += ltt_align(*offset, get_alignment(child),
1925 fac->alignment);
1926 field->array_offset = *offset;
1927 /* Let the child be variable. */
1928 //ret = precompute_fields_offsets(fac, child, offset);
1929
1930 /* Cannot precompute fields offsets of sequence members, and has
1931 * variable length. */
1932 return 1;
1933 }
1934 break;
1935 case LTT_STRUCT:
1936 {
1937 LttField *child;
1938 guint i;
1939 gint ret=0;
1940
1941 *offset += ltt_align(*offset, get_alignment(field),
1942 fac->alignment);
1943 /* remember offset */
1944 field->offset_root = *offset;
1945 field->fixed_root = FIELD_FIXED;
1946
1947 for(i=0; i< type->fields->len; i++) {
1948 child = &g_array_index(type->fields, LttField, i);
1949 ret = precompute_fields_offsets(fac, child, offset, is_compact);
1950
1951 if(ret) break;
1952 }
1953 return ret;
1954 }
1955 break;
1956 case LTT_UNION:
1957 {
1958 LttField *child;
1959 guint i;
1960 gint ret=0;
1961
1962 *offset += ltt_align(*offset, get_alignment(field),
1963 fac->alignment);
1964 /* remember offset */
1965 field->offset_root = *offset;
1966 field->fixed_root = FIELD_FIXED;
1967
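      /* Descriptive note (added comment): all union members share the same
       * start offset, so *offset is reset to offset_root before each member
       * and the whole union size is skipped once every member is done. */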
1968 for(i=0; i< type->fields->len; i++) {
1969 *offset = field->offset_root;
1970 child = &g_array_index(type->fields, LttField, i);
1971 ret = precompute_fields_offsets(fac, child, offset, is_compact);
1972
1973 if(ret) break;
1974 }
1975 *offset = field->offset_root + field->field_size;
1976 return ret;
1977 }
1978
1979 break;
1980 case LTT_NONE:
1981 default:
1982 g_error("precompute_fields_offsets : unknown type");
1983 return 1;
1984 }
1985
1986 }
1987
1988 #endif //0
1989
1990 #if 0
1991 /*****************************************************************************
1992 *Function name
1993 * precompute_offsets : set the precomputable offset of an event type
1994 *Input params
1995 * tf : tracefile
1996 * event : event type
1997 *
1998 ****************************************************************************/
1999 void precompute_offsets(LttFacility *fac, LttEventType *event)
2000 {
2001 guint i;
2002 off_t offset = 0;
2003 gint ret;
2004
2005   /* First, compute the size of fixed-size fields. This determines the size
2006    * of arrays, structs and unions, which is not done by the parser. */
2007 for(i=0; i<event->fields->len; i++) {
2008 LttField *field = &g_array_index(event->fields, LttField, i);
2009 field_compute_static_size(fac, field);
2010 }
2011
2012 /* Precompute all known offsets */
2013 for(i=0; i<event->fields->len; i++) {
2014 LttField *field = &g_array_index(event->fields, LttField, i);
2015 if(event->has_compact_data && i == 0)
2016 ret = precompute_fields_offsets(fac, field, &offset, 1);
2017 else
2018 ret = precompute_fields_offsets(fac, field, &offset, 0);
2019 if(ret) break;
2020 }
2021 }
2022 #endif //0
2023
2024
2025
2026 /*****************************************************************************
2027 *Function name
2028 * preset_field_type_size : set the fixed sizes of the field type
2029 *Input params
2030 * tf : tracefile
2031 * event_type : event type
2032 * offset_root : offset from the root
2033 * offset_parent : offset from the parent
2034 * fixed_root : Do we know a fixed offset to the root ?
2035 * fixed_parent : Do we know a fixed offset to the parent ?
2036 * field : field
2037 ****************************************************************************/
2038
2039
2040
2041 // Preset the fixed-size offsets. Calculate them just like genevent-new: an
2042 // increment of a *to value that represents the offset from the start of the
2043 // event data.
2044 // The preset information is: offsets up to (and including) the first element
2045 // of variable size. All subsequent fields must be flagged "VARIABLE OFFSET".
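// Illustrative sketch (added comment, not original code): assuming an event
// with fields { guint8 flag; guint32 count; string name } and hypothetical
// alignments of 1 and 4 bytes, the preset pass accumulates offsets from the
// start of the event data roughly like this:
//
//   off_t offset = 0;
//   offset += ltt_align(offset, 1, fac->alignment);  /* flag:  offset_root = 0 */
//   offset += 1;
//   offset += ltt_align(offset, 4, fac->alignment);  /* count: offset_root = 4 */
//   offset += 4;
//   /* name is a string, hence variable-sized: it keeps the current offset as
//    * its offset_root and every subsequent field is flagged VARIABLE OFFSET. */
//
// Here ltt_align() returns the padding needed to reach the requested
// alignment, matching how it is used throughout this file.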
2046 #if 0
2047 void preset_field_type_size(LttTracefile *tf, LttEventType *event_type,
2048 off_t offset_root, off_t offset_parent,
2049 enum field_status *fixed_root, enum field_status *fixed_parent,
2050 LttField *field)
2051 {
2052 enum field_status local_fixed_root, local_fixed_parent;
2053 guint i;
2054 LttType *type;
2055
2056 g_assert(field->fixed_root == FIELD_UNKNOWN);
2057 g_assert(field->fixed_parent == FIELD_UNKNOWN);
2058 g_assert(field->fixed_size == FIELD_UNKNOWN);
2059
2060 type = field->field_type;
2061
2062 field->fixed_root = *fixed_root;
2063 if(field->fixed_root == FIELD_FIXED)
2064 field->offset_root = offset_root;
2065 else
2066 field->offset_root = 0;
2067
2068 field->fixed_parent = *fixed_parent;
2069 if(field->fixed_parent == FIELD_FIXED)
2070 field->offset_parent = offset_parent;
2071 else
2072 field->offset_parent = 0;
2073
2074 size_t current_root_offset;
2075 size_t current_offset;
2076 enum field_status current_child_status, final_child_status;
2077 size_t max_size;
2078
2079 switch(type->type_class) {
2080 case LTT_INT_FIXED:
2081 case LTT_UINT_FIXED:
2082 case LTT_CHAR:
2083 case LTT_UCHAR:
2084 case LTT_SHORT:
2085 case LTT_USHORT:
2086 case LTT_INT:
2087 case LTT_UINT:
2088 case LTT_FLOAT:
2089 case LTT_ENUM:
2090 field->field_size = ltt_type_size(tf->trace, type);
2091 field->fixed_size = FIELD_FIXED;
2092 break;
2093 case LTT_POINTER:
2094 field->field_size = (off_t)event_type->facility->pointer_size;
2095 field->fixed_size = FIELD_FIXED;
2096 break;
2097 case LTT_LONG:
2098 case LTT_ULONG:
2099 field->field_size = (off_t)event_type->facility->long_size;
2100 field->fixed_size = FIELD_FIXED;
2101 break;
2102 case LTT_SIZE_T:
2103 case LTT_SSIZE_T:
2104 case LTT_OFF_T:
2105 field->field_size = (off_t)event_type->facility->size_t_size;
2106 field->fixed_size = FIELD_FIXED;
2107 break;
2108 case LTT_SEQUENCE:
2109 local_fixed_root = FIELD_VARIABLE;
2110 local_fixed_parent = FIELD_VARIABLE;
2111 preset_field_type_size(tf, event_type,
2112 0, 0,
2113 &local_fixed_root, &local_fixed_parent,
2114 field->child[0]);
2115 field->fixed_size = FIELD_VARIABLE;
2116 field->field_size = 0;
2117 *fixed_root = FIELD_VARIABLE;
2118 *fixed_parent = FIELD_VARIABLE;
2119 break;
2120 case LTT_STRING:
2121 field->fixed_size = FIELD_VARIABLE;
2122 field->field_size = 0;
2123 *fixed_root = FIELD_VARIABLE;
2124 *fixed_parent = FIELD_VARIABLE;
2125 break;
2126 case LTT_ARRAY:
2127 local_fixed_root = FIELD_VARIABLE;
2128 local_fixed_parent = FIELD_VARIABLE;
2129 preset_field_type_size(tf, event_type,
2130 0, 0,
2131 &local_fixed_root, &local_fixed_parent,
2132 field->child[0]);
2133 field->fixed_size = field->child[0]->fixed_size;
2134 if(field->fixed_size == FIELD_FIXED) {
2135 field->field_size = type->element_number * field->child[0]->field_size;
2136 } else {
2137 field->field_size = 0;
2138 *fixed_root = FIELD_VARIABLE;
2139 *fixed_parent = FIELD_VARIABLE;
2140 }
2141 break;
2142 case LTT_STRUCT:
2143 current_root_offset = field->offset_root;
2144 current_offset = 0;
2145 current_child_status = FIELD_FIXED;
2146 for(i=0;i<type->element_number;i++) {
2147 preset_field_type_size(tf, event_type,
2148 current_root_offset, current_offset,
2149 fixed_root, &current_child_status,
2150 field->child[i]);
2151 if(current_child_status == FIELD_FIXED) {
2152 current_root_offset += field->child[i]->field_size;
2153 current_offset += field->child[i]->field_size;
2154 } else {
2155 current_root_offset = 0;
2156 current_offset = 0;
2157 }
2158 }
2159 if(current_child_status != FIELD_FIXED) {
2160 *fixed_parent = current_child_status;
2161 field->field_size = 0;
2162 field->fixed_size = current_child_status;
2163 } else {
2164 field->field_size = current_offset;
2165 field->fixed_size = FIELD_FIXED;
2166 }
2167 break;
2168 case LTT_UNION:
2169 current_root_offset = field->offset_root;
2170 current_offset = 0;
2171 max_size = 0;
2172 final_child_status = FIELD_FIXED;
2173 for(i=0;i<type->element_number;i++) {
2174 enum field_status current_root_child_status = FIELD_FIXED;
2175 enum field_status current_child_status = FIELD_FIXED;
2176 preset_field_type_size(tf, event_type,
2177 current_root_offset, current_offset,
2178 &current_root_child_status, &current_child_status,
2179 field->child[i]);
2180 if(current_child_status != FIELD_FIXED)
2181 final_child_status = current_child_status;
2182 else
2183         max_size = MAX(max_size, field->child[i]->field_size);
2184 }
2185 if(final_child_status != FIELD_FIXED) {
2186 g_error("LTTV does not support variable size fields in unions.");
2187 /* This will stop the application. */
2188 *fixed_root = final_child_status;
2189 *fixed_parent = final_child_status;
2190 field->field_size = 0;
2191       field->fixed_size = final_child_status;
2192 } else {
2193 field->field_size = max_size;
2194 field->fixed_size = FIELD_FIXED;
2195 }
2196 break;
2197 case LTT_NONE:
2198 g_error("unexpected type NONE");
2199 break;
2200 }
2201
2202 }
2203 #endif //0
2204
2205 /*****************************************************************************
2206 *Function name
2207 * check_fields_compatibility : Check for compatibility between two fields :
2208 * do they use the same inner structure ?
2209 *Input params
2210 * event_type1 : event type
2211 * event_type2 : event type
2212 * field1 : field
2213 * field2 : field
2214 *Returns : 0 if identical
2215 * 1 if not.
2216 ****************************************************************************/
2217 // This function checks for equality of field types. Therefore, it does not use
2218 // offsets per se. For instance, an aligned version of a structure is
2219 // compatible with an unaligned version of the same structure.
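// Concrete illustration (added comment): a 'struct { guint8 a; guint32 b; }'
// recorded by a facility using natural alignment and by another one using
// packed alignment compares as identical here, because only type classes and
// field sizes are examined, never the precomputed offsets.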
2220 #if 0
2221 gint check_fields_compatibility(LttEventType *event_type1,
2222 LttEventType *event_type2,
2223 LttField *field1, LttField *field2)
2224 {
2225 guint different = 0;
2226 LttType *type1;
2227 LttType *type2;
2228
2229 if(field1 == NULL) {
2230 if(field2 == NULL) goto end;
2231 else {
2232 different = 1;
2233 goto end;
2234 }
2235 } else if(field2 == NULL) {
2236 different = 1;
2237 goto end;
2238 }
2239
2240 type1 = &field1->field_type;
2241 type2 = &field2->field_type;
2242
2243 if(type1->type_class != type2->type_class) {
2244 different = 1;
2245 goto end;
2246 }
2247 if(type1->network != type2->network) {
2248 different = 1;
2249 goto end;
2250 }
2251
2252 switch(type1->type_class) {
2253 case LTT_INT_FIXED:
2254 case LTT_UINT_FIXED:
2255 case LTT_POINTER:
2256 case LTT_CHAR:
2257 case LTT_UCHAR:
2258 case LTT_SHORT:
2259 case LTT_USHORT:
2260 case LTT_INT:
2261 case LTT_UINT:
2262 case LTT_LONG:
2263 case LTT_ULONG:
2264 case LTT_SIZE_T:
2265 case LTT_SSIZE_T:
2266 case LTT_OFF_T:
2267 case LTT_FLOAT:
2268 case LTT_ENUM:
2269 if(field1->field_size != field2->field_size)
2270 different = 1;
2271 break;
2272 case LTT_STRING:
2273 break;
2274 case LTT_ARRAY:
2275 {
2276 LttField *child1 = &g_array_index(type1->fields, LttField, 0);
2277 LttField *child2 = &g_array_index(type2->fields, LttField, 0);
2278
2279 if(type1->size != type2->size)
2280 different = 1;
2281 if(check_fields_compatibility(event_type1, event_type2, child1, child2))
2282 different = 1;
2283 }
2284 break;
2285 case LTT_SEQUENCE:
2286 {
2287 LttField *child1 = &g_array_index(type1->fields, LttField, 1);
2288 LttField *child2 = &g_array_index(type2->fields, LttField, 1);
2289
2290 if(check_fields_compatibility(event_type1, event_type2, child1, child2))
2291 different = 1;
2292 }
2293 break;
2294 case LTT_STRUCT:
2295 case LTT_UNION:
2296 {
2297 LttField *child;
2298 guint i;
2299
2300 if(type1->fields->len != type2->fields->len) {
2301 different = 1;
2302 goto end;
2303 }
2304
2305 for(i=0; i< type1->fields->len; i++) {
2306 LttField *child1;
2307 LttField *child2;
2308 child1 = &g_array_index(type1->fields, LttField, i);
2309 child2 = &g_array_index(type2->fields, LttField, i);
2310 different = check_fields_compatibility(event_type1,
2311 event_type2, child1, child2);
2312
2313 if(different) break;
2314 }
2315 }
2316 break;
2317 case LTT_NONE:
2318 default:
2319 g_error("check_fields_compatibility : unknown type");
2320 }
2321
2322 end:
2323 return different;
2324 }
2325 #endif //0
2326
2327 #if 0
2328 gint check_fields_compatibility(LttEventType *event_type1,
2329 LttEventType *event_type2,
2330 LttField *field1, LttField *field2)
2331 {
2332 guint different = 0;
2333 guint i;
2334 LttType *type1;
2335 LttType *type2;
2336
2337 if(field1 == NULL) {
2338 if(field2 == NULL) goto end;
2339 else {
2340 different = 1;
2341 goto end;
2342 }
2343 } else if(field2 == NULL) {
2344 different = 1;
2345 goto end;
2346 }
2347
2348 g_assert(field1->fixed_root != FIELD_UNKNOWN);
2349 g_assert(field2->fixed_root != FIELD_UNKNOWN);
2350 g_assert(field1->fixed_parent != FIELD_UNKNOWN);
2351 g_assert(field2->fixed_parent != FIELD_UNKNOWN);
2352 g_assert(field1->fixed_size != FIELD_UNKNOWN);
2353 g_assert(field2->fixed_size != FIELD_UNKNOWN);
2354
2355 type1 = field1->field_type;
2356 type2 = field2->field_type;
2357
2358 if(type1->type_class != type2->type_class) {
2359 different = 1;
2360 goto end;
2361 }
2362 if(type1->element_name != type2->element_name) {
2363 different = 1;
2364 goto end;
2365 }
2366
2367 switch(type1->type_class) {
2368 case LTT_INT_FIXED:
2369 case LTT_UINT_FIXED:
2370 case LTT_POINTER:
2371 case LTT_CHAR:
2372 case LTT_UCHAR:
2373 case LTT_SHORT:
2374 case LTT_USHORT:
2375 case LTT_INT:
2376 case LTT_UINT:
2377 case LTT_FLOAT:
2379 case LTT_LONG:
2380 case LTT_ULONG:
2381 case LTT_SIZE_T:
2382 case LTT_SSIZE_T:
2383 case LTT_OFF_T:
2384 if(field1->field_size != field2->field_size) {
2385 different = 1;
2386 goto end;
2387 }
2388 break;
2389 case LTT_ENUM:
2390 if(type1->element_number != type2->element_number) {
2391 different = 1;
2392 goto end;
2393 }
2394 for(i=0;i<type1->element_number;i++) {
2395 if(type1->enum_strings[i] != type2->enum_strings[i]) {
2396 different = 1;
2397 goto end;
2398 }
2399 }
2400 break;
2401 case LTT_SEQUENCE:
2402 /* Two elements : size and child */
2403     g_assert(type1->element_number == type2->element_number);
2404 for(i=0;i<type1->element_number;i++) {
2405 if(check_fields_compatibility(event_type1, event_type2,
2406 field1->child[0], field2->child[0])) {
2407 different = 1;
2408 goto end;
2409 }
2410 }
2411 break;
2412 case LTT_STRING:
2413 break;
2414 case LTT_ARRAY:
2415 if(field1->field_size != field2->field_size) {
2416 different = 1;
2417 goto end;
2418 }
2419 /* Two elements : size and child */
2420     g_assert(type1->element_number == type2->element_number);
2421 for(i=0;i<type1->element_number;i++) {
2422 if(check_fields_compatibility(event_type1, event_type2,
2423 field1->child[0], field2->child[0])) {
2424 different = 1;
2425 goto end;
2426 }
2427 }
2428 break;
2429 case LTT_STRUCT:
2430 case LTT_UNION:
2431 if(type1->element_number != type2->element_number) {
2432 different = 1;
2433 break;
2434 }
2435 for(i=0;i<type1->element_number;i++) {
2436 if(check_fields_compatibility(event_type1, event_type2,
2437         field1->child[i], field2->child[i])) {
2438 different = 1;
2439 goto end;
2440 }
2441 }
2442 break;
2443 }
2444 end:
2445 return different;
2446 }
2447 #endif //0
2448
2449
2450 /*****************************************************************************
2451 *Function name
2452 * ltt_get_int : get an integer number
2453 *Input params
2454 * reverse_byte_order: must we reverse the byte order ?
2455 * size : the size of the integer
2456  * data : the data pointer
2457 *Return value
2458  * gint64 : a 64-bit integer
2459 ****************************************************************************/
2460
2461 gint64 ltt_get_int(gboolean reverse_byte_order, gint size, void *data)
2462 {
2463 gint64 val;
2464
2465 switch(size) {
2466 case 1: val = *((gint8*)data); break;
2467 case 2: val = ltt_get_int16(reverse_byte_order, data); break;
2468 case 4: val = ltt_get_int32(reverse_byte_order, data); break;
2469 case 8: val = ltt_get_int64(reverse_byte_order, data); break;
2470 default: val = ltt_get_int64(reverse_byte_order, data);
2471 g_critical("get_int : integer size %d unknown", size);
2472 break;
2473 }
2474
2475 return val;
2476 }
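/* Note (added comment): the 1-byte case reads the value through a gint8, so it
 * is sign-extended into the returned gint64, consistent with the signed
 * accessors used for the larger sizes. */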
2477
2478 /*****************************************************************************
2479 *Function name
2480 * ltt_get_uint : get an unsigned integer number
2481 *Input params
2482 * reverse_byte_order: must we reverse the byte order ?
2483 * size : the size of the integer
2484  * data : the data pointer
2485 *Return value
2486  * guint64 : a 64-bit unsigned integer
2487 ****************************************************************************/
2488
2489 guint64 ltt_get_uint(gboolean reverse_byte_order, gint size, void *data)
2490 {
2491 guint64 val;
2492
2493 switch(size) {
2494   case 1: val = *((guint8*)data); break;
2495 case 2: val = ltt_get_uint16(reverse_byte_order, data); break;
2496 case 4: val = ltt_get_uint32(reverse_byte_order, data); break;
2497 case 8: val = ltt_get_uint64(reverse_byte_order, data); break;
2498 default: val = ltt_get_uint64(reverse_byte_order, data);
2499 g_critical("get_uint : unsigned integer size %d unknown",
2500 size);
2501 break;
2502 }
2503
2504 return val;
2505 }
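/*
 * Usage sketch (added comment, not original code): reading a 32-bit unsigned
 * field out of an event payload, assuming the caller already resolved the
 * field offset and has the tracefile's byte-order flag at hand (the names
 * below are hypothetical):
 *
 *   void *base = event_payload + field_offset;
 *   guint64 value = ltt_get_uint(reverse_byte_order, 4, base);
 *
 * The size argument selects the fixed-width accessor (1, 2, 4 or 8 bytes);
 * any other size falls back to the 64-bit read and logs a critical warning.
 */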
2506
2507
2508 /* get the node name of the system */
2509
2510 char * ltt_trace_system_description_node_name (LttSystemDescription * s)
2511 {
2512 return s->node_name;
2513 }
2514
2515
2516 /* get the domain name of the system */
2517
2518 char * ltt_trace_system_description_domain_name (LttSystemDescription * s)
2519 {
2520 return s->domain_name;
2521 }
2522
2523
2524 /* get the description of the system */
2525
2526 char * ltt_trace_system_description_description (LttSystemDescription * s)
2527 {
2528 return s->description;
2529 }
2530
2531
2532 /* get the NTP corrected start time of the trace */
2533 LttTime ltt_trace_start_time(LttTrace *t)
2534 {
2535 return t->start_time;
2536 }
2537
2538 /* get the monotonic start time of the trace */
2539 LttTime ltt_trace_start_time_monotonic(LttTrace *t)
2540 {
2541 return t->start_time_from_tsc;
2542 }
2543
2544 static __attribute__ ((__unused__)) LttTracefile *ltt_tracefile_new()
2545 {
2546 LttTracefile *tf;
2547 tf = g_new(LttTracefile, 1);
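  /* Keep a back-pointer from the embedded event to its owning tracefile. */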
2548 tf->event.tracefile = tf;
2549 return tf;
2550 }
2551
2552 static __attribute__ ((__unused__)) void ltt_tracefile_destroy(LttTracefile *tf)
2553 {
2554 g_free(tf);
2555 }
2556
2557 static __attribute__ ((__unused__)) void ltt_tracefile_copy(LttTracefile *dest, const LttTracefile *src)
2558 {
2559 *dest = *src;
2560 }
2561
2562 /* Runs at library load time, before main(). */
2563
2564 static __attribute__((constructor)) void init(void)
2565 {
2566 LTT_TRACEFILE_NAME_METADATA = g_quark_from_string("metadata");
2567 }