new ltt-usertrace
[lttv.git] / ltt-usertrace / ltt / ltt-facility-user_generic.h
1 #ifndef _LTT_FACILITY_USER_GENERIC_H_
2 #define _LTT_FACILITY_USER_GENERIC_H_
3
4 #include <sys/types.h>
5 #include <ltt/ltt-facility-id-user_generic.h>
6 #include <ltt/ltt-usertrace.h>
7
8 /* Named types */
9
10 /* Event string structures */
/* Serialize the NUL-terminated string `obj` into `buffer`, or — when
 * buffer == NULL — only account for its size (dry-run pass).
 * Serializer state carried across field writers:
 *   - *to_base : base offset, realigned on sizeof(void *) after each
 *                variable-sized field;
 *   - *to      : offset of data already written relative to *to_base;
 *   - *len     : byte count of a deferred (pending) memcpy from *from.
 * On return, *from has been advanced past the string so a following
 * field can continue copying from just after it.
 * NOTE(review): ltt_align() is assumed to return the number of padding
 * bytes needed to align its first argument — confirm in ltt-usertrace.h. */
static inline void lttng_write_string_user_generic_string_data(
		void *buffer,
		size_t *to_base,
		size_t *to,
		const void **from,
		size_t *len,
		const char * obj)
{
	size_t size;
	size_t align;

	/* Flush pending memcpy */
	if(*len != 0) {
		if(buffer != NULL)
			memcpy(buffer+*to_base+*to, *from, *len);
	}
	*to += *len;
	*len = 0;

	align = sizeof(char);

	/* *len was just reset above, so only the first branch is ever taken
	 * here; the else branch is dead code kept from the generator template. */
	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	/* Contains variable sized fields : must explode the structure */

	size = strlen(obj) + 1; /* Include final NULL char. */
	if(buffer != NULL)
		memcpy(buffer+*to_base+*to, obj, size);
	*to += size;

	/* Realign the *to_base on arch size, set *to to 0 */
	*to += ltt_align(*to, sizeof(void *));
	*to_base = *to_base+*to;
	*to = 0;

	/* Put source *from just after the C string */
	*from += size;
}
53
54
55 /* Event string logging function */
#ifndef LTT_TRACE_FAST
/* Log event "string" (one C string) through the generic, blocking
 * syscall tracing path.  A first dry run with buffer == NULL computes
 * the serialized size, then the payload is built in a stack VLA and
 * handed to ltt_trace_generic().
 * Returns 0 on success, or the ltt_trace_generic() result. */
static inline int trace_user_generic_string(
		const char * lttng_param_data)
#ifndef LTT_TRACE
{
	return 0; /* Tracing compiled out: report success (was a missing return). */
}
#else
{
	int ret = 0;
	void *buffer = NULL;
	size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
	size_t *to_base = &real_to_base;
	size_t real_to = 0;
	size_t *to = &real_to;
	size_t real_len = 0;
	size_t *len = &real_len;
	size_t reserve_size;
	const void *real_from;
	const void **from = &real_from;
	/* For each field, calculate the field size. */
	/* size = *to_base + *to + *len */
	/* Assume that the padding for alignment starts at a
	 * sizeof(void *) address. */

	/* Pass 1: buffer == NULL, size accounting only. */
	*from = lttng_param_data;
	lttng_write_string_user_generic_string_data(buffer, to_base, to, from, len, lttng_param_data);

	reserve_size = *to_base + *to + *len;
	{
		/* VLA sized by the dry run above. */
		char stack_buffer[reserve_size];
		buffer = stack_buffer;

		*to_base = *to = *len = 0;

		/* Pass 2: actually serialize the string into the buffer. */
		*from = lttng_param_data;
		lttng_write_string_user_generic_string_data(buffer, to_base, to, from, len, lttng_param_data);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		ret = ltt_trace_generic(ltt_facility_user_generic_F583779E, event_user_generic_string, buffer, reserve_size, LTT_BLOCKING);
	}

	return ret;
}
#endif //LTT_TRACE
#endif //!LTT_TRACE_FAST
109
#ifdef LTT_TRACE_FAST
/* Fast-path logging of event "string": reserve a slot directly in the
 * per-thread channel buffer, write the event header and serialized
 * payload, then commit the slot.  Initializes the thread trace state on
 * first use.  Returns 0; the event is silently dropped when the buffer
 * is full. */
static inline int trace_user_generic_string(
		const char * lttng_param_data)
#ifndef LTT_TRACE
{
	return 0; /* Tracing compiled out: report success (was a missing return). */
}
#else
{
	unsigned int index;
	struct ltt_trace_info *trace = thread_trace_info;
	struct ltt_buf *ltt_buf;
	void *buffer = NULL;
	size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
	size_t *to_base = &real_to_base;
	size_t real_to = 0;
	size_t *to = &real_to;
	size_t real_len = 0;
	size_t *len = &real_len;
	size_t reserve_size;
	size_t slot_size;
	const void *real_from;
	const void **from = &real_from;
	uint64_t tsc;
	size_t before_hdr_pad, after_hdr_pad, header_size;

	/* Lazily initialize the per-thread tracing state. */
	if(!trace) {
		ltt_thread_init();
		trace = thread_trace_info;
	}

	/* For each field, calculate the field size. */
	/* size = *to_base + *to + *len */
	/* Assume that the padding for alignment starts at a
	 * sizeof(void *) address. */

	/* Pass 1: buffer == NULL, size accounting only. */
	*from = lttng_param_data;
	lttng_write_string_user_generic_string_data(buffer, to_base, to, from, len, lttng_param_data);

	reserve_size = *to_base + *to + *len;
	trace->nesting++; /* guard against recursive tracing from within tracing */
	index = ltt_get_index_from_facility(ltt_facility_user_generic_F583779E,
						event_user_generic_string);

	{
		ltt_buf = ltt_get_channel_from_index(trace, index);
		slot_size = 0;
		buffer = ltt_reserve_slot(trace, ltt_buf,
			reserve_size, &slot_size, &tsc,
			&before_hdr_pad, &after_hdr_pad, &header_size);
		if(!buffer) goto end; /* buffer full */

		*to_base = *to = *len = 0;

		ltt_write_event_header(trace, ltt_buf, buffer,
			ltt_facility_user_generic_F583779E, event_user_generic_string,
			reserve_size, before_hdr_pad, tsc);
		*to_base += before_hdr_pad + after_hdr_pad + header_size;

		/* Pass 2: serialize the payload just after the event header. */
		*from = lttng_param_data;
		lttng_write_string_user_generic_string_data(buffer, to_base, to, from, len, lttng_param_data);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		ltt_commit_slot(ltt_buf, buffer, slot_size);
	}

end:
	trace->nesting--;
	return 0; /* was missing: int function must return a value */
}
#endif //LTT_TRACE
#endif //LTT_TRACE_FAST
188
189 /* Event string_pointer structures */
/* Serialize the NUL-terminated string `obj` (the "string" field of
 * event string_pointer) into `buffer`, or — when buffer == NULL — only
 * account for its size (dry-run pass).
 * State: *to_base is the arch-aligned base offset, *to the offset of
 * written data relative to it, *len the byte count of a pending
 * (deferred) memcpy from *from.  On return, *from is advanced past the
 * string so the next field copies from just after it.
 * NOTE(review): ltt_align() is assumed to return the padding byte count
 * needed for alignment — confirm in ltt-usertrace.h. */
static inline void lttng_write_string_user_generic_string_pointer_string(
		void *buffer,
		size_t *to_base,
		size_t *to,
		const void **from,
		size_t *len,
		const char * obj)
{
	size_t size;
	size_t align;

	/* Flush pending memcpy */
	if(*len != 0) {
		if(buffer != NULL)
			memcpy(buffer+*to_base+*to, *from, *len);
	}
	*to += *len;
	*len = 0;

	align = sizeof(char);

	/* *len was just reset above, so only the first branch is ever taken
	 * here; the else branch is dead code kept from the generator template. */
	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	/* Contains variable sized fields : must explode the structure */

	size = strlen(obj) + 1; /* Include final NULL char. */
	if(buffer != NULL)
		memcpy(buffer+*to_base+*to, obj, size);
	*to += size;

	/* Realign the *to_base on arch size, set *to to 0 */
	*to += ltt_align(*to, sizeof(void *));
	*to_base = *to_base+*to;
	*to = 0;

	/* Put source *from just after the C string */
	*from += size;
}
232
233
234 /* Event string_pointer logging function */
#ifndef LTT_TRACE_FAST
/* Log event "string_pointer" (one C string + one pointer) through the
 * generic, blocking syscall tracing path.  A first dry run with
 * buffer == NULL computes the serialized size, then the payload is
 * built in a stack VLA and handed to ltt_trace_generic().
 * Returns 0 on success, or the ltt_trace_generic() result. */
static inline int trace_user_generic_string_pointer(
		const char * lttng_param_string,
		const void * lttng_param_pointer)
#ifndef LTT_TRACE
{
	return 0; /* Tracing compiled out: report success (was a missing return). */
}
#else
{
	int ret = 0;
	void *buffer = NULL;
	size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
	size_t *to_base = &real_to_base;
	size_t real_to = 0;
	size_t *to = &real_to;
	size_t real_len = 0;
	size_t *len = &real_len;
	size_t reserve_size;
	size_t align;
	const void *real_from;
	const void **from = &real_from;
	/* For each field, calculate the field size. */
	/* size = *to_base + *to + *len */
	/* Assume that the padding for alignment starts at a
	 * sizeof(void *) address. */

	/* Pass 1: buffer == NULL, size accounting only. */
	*from = lttng_param_string;
	lttng_write_string_user_generic_string_pointer_string(buffer, to_base, to, from, len, lttng_param_string);

	/* Fixed-size "pointer" field. */
	*from = &lttng_param_pointer;
	align = sizeof(const void *);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	*len += sizeof(const void *);

	reserve_size = *to_base + *to + *len;
	{
		/* VLA sized by the dry run above. */
		char stack_buffer[reserve_size];
		buffer = stack_buffer;

		*to_base = *to = *len = 0;

		/* Pass 2: serialize both fields into the stack buffer. */
		*from = lttng_param_string;
		lttng_write_string_user_generic_string_pointer_string(buffer, to_base, to, from, len, lttng_param_string);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		*from = &lttng_param_pointer;
		align = sizeof(const void *);

		if(*len == 0) {
			*to += ltt_align(*to, align); /* align output */
		} else {
			*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
		}

		*len += sizeof(const void *);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		ret = ltt_trace_generic(ltt_facility_user_generic_F583779E, event_user_generic_string_pointer, buffer, reserve_size, LTT_BLOCKING);
	}

	return ret;
}
#endif //LTT_TRACE
#endif //!LTT_TRACE_FAST
319
#ifdef LTT_TRACE_FAST
/* Fast-path logging of event "string_pointer": reserve a slot directly
 * in the per-thread channel buffer, write the event header and the two
 * serialized fields, then commit the slot.  Returns 0; the event is
 * silently dropped when the buffer is full. */
static inline int trace_user_generic_string_pointer(
		const char * lttng_param_string,
		const void * lttng_param_pointer)
#ifndef LTT_TRACE
{
	return 0; /* Tracing compiled out: report success (was a missing return). */
}
#else
{
	unsigned int index;
	struct ltt_trace_info *trace = thread_trace_info;
	struct ltt_buf *ltt_buf;
	void *buffer = NULL;
	size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
	size_t *to_base = &real_to_base;
	size_t real_to = 0;
	size_t *to = &real_to;
	size_t real_len = 0;
	size_t *len = &real_len;
	size_t reserve_size;
	size_t slot_size;
	size_t align;
	const void *real_from;
	const void **from = &real_from;
	uint64_t tsc;
	size_t before_hdr_pad, after_hdr_pad, header_size;

	/* Lazily initialize the per-thread tracing state. */
	if(!trace) {
		ltt_thread_init();
		trace = thread_trace_info;
	}

	/* For each field, calculate the field size. */
	/* size = *to_base + *to + *len */
	/* Assume that the padding for alignment starts at a
	 * sizeof(void *) address. */

	/* Pass 1: buffer == NULL, size accounting only. */
	*from = lttng_param_string;
	lttng_write_string_user_generic_string_pointer_string(buffer, to_base, to, from, len, lttng_param_string);

	/* Fixed-size "pointer" field. */
	*from = &lttng_param_pointer;
	align = sizeof(const void *);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	*len += sizeof(const void *);

	reserve_size = *to_base + *to + *len;
	trace->nesting++; /* guard against recursive tracing from within tracing */
	index = ltt_get_index_from_facility(ltt_facility_user_generic_F583779E,
						event_user_generic_string_pointer);

	{
		ltt_buf = ltt_get_channel_from_index(trace, index);
		slot_size = 0;
		buffer = ltt_reserve_slot(trace, ltt_buf,
			reserve_size, &slot_size, &tsc,
			&before_hdr_pad, &after_hdr_pad, &header_size);
		if(!buffer) goto end; /* buffer full */

		*to_base = *to = *len = 0;

		ltt_write_event_header(trace, ltt_buf, buffer,
			ltt_facility_user_generic_F583779E, event_user_generic_string_pointer,
			reserve_size, before_hdr_pad, tsc);
		*to_base += before_hdr_pad + after_hdr_pad + header_size;

		/* Pass 2: serialize both fields just after the event header. */
		*from = lttng_param_string;
		lttng_write_string_user_generic_string_pointer_string(buffer, to_base, to, from, len, lttng_param_string);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		*from = &lttng_param_pointer;
		align = sizeof(const void *);

		if(*len == 0) {
			*to += ltt_align(*to, align); /* align output */
		} else {
			*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
		}

		*len += sizeof(const void *);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		ltt_commit_slot(ltt_buf, buffer, slot_size);
	}

end:
	trace->nesting--;
	return 0; /* was missing: int function must return a value */
}
#endif //LTT_TRACE
#endif //LTT_TRACE_FAST
429
430 /* Event slow_printf structures */
/* Serialize the NUL-terminated string `obj` (the "string" field of
 * event slow_printf) into `buffer`, or — when buffer == NULL — only
 * account for its size (dry-run pass).
 * State: *to_base is the arch-aligned base offset, *to the offset of
 * written data relative to it, *len the byte count of a pending
 * (deferred) memcpy from *from.  On return, *from is advanced past the
 * string so the next field copies from just after it.
 * NOTE(review): ltt_align() is assumed to return the padding byte count
 * needed for alignment — confirm in ltt-usertrace.h. */
static inline void lttng_write_string_user_generic_slow_printf_string(
		void *buffer,
		size_t *to_base,
		size_t *to,
		const void **from,
		size_t *len,
		const char * obj)
{
	size_t size;
	size_t align;

	/* Flush pending memcpy */
	if(*len != 0) {
		if(buffer != NULL)
			memcpy(buffer+*to_base+*to, *from, *len);
	}
	*to += *len;
	*len = 0;

	align = sizeof(char);

	/* *len was just reset above, so only the first branch is ever taken
	 * here; the else branch is dead code kept from the generator template. */
	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	/* Contains variable sized fields : must explode the structure */

	size = strlen(obj) + 1; /* Include final NULL char. */
	if(buffer != NULL)
		memcpy(buffer+*to_base+*to, obj, size);
	*to += size;

	/* Realign the *to_base on arch size, set *to to 0 */
	*to += ltt_align(*to, sizeof(void *));
	*to_base = *to_base+*to;
	*to = 0;

	/* Put source *from just after the C string */
	*from += size;
}
473
474
475 /* Event slow_printf logging function */
#ifndef LTT_TRACE_FAST
/* Log event "slow_printf" from an already-serialized payload buffer via
 * the generic, blocking syscall tracing path.
 * @buffer: pre-built payload (ownership stays with the caller).
 * @reserve_size: payload size in bytes, padded here to arch size.
 * Returns 0 on success, or the ltt_trace_generic() result. */
static inline int trace_user_generic_slow_printf_param_buffer(
		void *buffer,
		size_t reserve_size)
#ifndef LTT_TRACE
{
	return 0; /* Tracing compiled out: report success (was a missing return). */
}
#else
{
	int ret = 0;
	/* NOTE(review): every other call site uses `x += ltt_align(x, ...)`,
	 * which suggests ltt_align() returns only the padding; a plain
	 * assignment here looks suspicious — verify against ltt-usertrace.h. */
	reserve_size = ltt_align(reserve_size, sizeof(void *));

	ret = ltt_trace_generic(ltt_facility_user_generic_F583779E, event_user_generic_slow_printf, buffer, reserve_size, LTT_BLOCKING);

	return ret;
}
#endif //LTT_TRACE
#endif //!LTT_TRACE_FAST
496
#ifdef LTT_TRACE_FAST
/* Fast-path logging of event "slow_printf": reserve a slot directly in
 * the per-thread channel buffer, write the event header and the
 * serialized string payload, then commit the slot.  Returns 0; the
 * event is silently dropped when the buffer is full. */
static inline int trace_user_generic_slow_printf(
		const char * lttng_param_string)
#ifndef LTT_TRACE
{
	return 0; /* Tracing compiled out: report success (was a missing return). */
}
#else
{
	unsigned int index;
	struct ltt_trace_info *trace = thread_trace_info;
	struct ltt_buf *ltt_buf;
	void *buffer = NULL;
	size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
	size_t *to_base = &real_to_base;
	size_t real_to = 0;
	size_t *to = &real_to;
	size_t real_len = 0;
	size_t *len = &real_len;
	size_t reserve_size;
	size_t slot_size;
	const void *real_from;
	const void **from = &real_from;
	uint64_t tsc;
	size_t before_hdr_pad, after_hdr_pad, header_size;

	/* Lazily initialize the per-thread tracing state. */
	if(!trace) {
		ltt_thread_init();
		trace = thread_trace_info;
	}

	/* For each field, calculate the field size. */
	/* size = *to_base + *to + *len */
	/* Assume that the padding for alignment starts at a
	 * sizeof(void *) address. */

	/* Pass 1: buffer == NULL, size accounting only. */
	*from = lttng_param_string;
	lttng_write_string_user_generic_slow_printf_string(buffer, to_base, to, from, len, lttng_param_string);

	reserve_size = *to_base + *to + *len;
	trace->nesting++; /* guard against recursive tracing from within tracing */
	index = ltt_get_index_from_facility(ltt_facility_user_generic_F583779E,
						event_user_generic_slow_printf);

	{
		ltt_buf = ltt_get_channel_from_index(trace, index);
		slot_size = 0;
		buffer = ltt_reserve_slot(trace, ltt_buf,
			reserve_size, &slot_size, &tsc,
			&before_hdr_pad, &after_hdr_pad, &header_size);
		if(!buffer) goto end; /* buffer full */

		*to_base = *to = *len = 0;

		ltt_write_event_header(trace, ltt_buf, buffer,
			ltt_facility_user_generic_F583779E, event_user_generic_slow_printf,
			reserve_size, before_hdr_pad, tsc);
		*to_base += before_hdr_pad + after_hdr_pad + header_size;

		/* Pass 2: serialize the payload just after the event header. */
		*from = lttng_param_string;
		lttng_write_string_user_generic_slow_printf_string(buffer, to_base, to, from, len, lttng_param_string);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		ltt_commit_slot(ltt_buf, buffer, slot_size);
	}

end:
	trace->nesting--;
	return 0; /* was missing: int function must return a value */
}
#endif //LTT_TRACE
#endif //LTT_TRACE_FAST
575
576 /* Event function_entry structures */
577
578 /* Event function_entry logging function */
#ifndef LTT_TRACE_FAST
/* Log event "function_entry" (two pointers: entered function and call
 * site) via the generic, blocking syscall tracing path.  Marked
 * no_instrument_function so -finstrument-functions cannot recurse into
 * the tracer.  Returns 0 on success, or the ltt_trace_generic() result. */
static inline __attribute__((no_instrument_function)) int trace_user_generic_function_entry(
		const void * lttng_param_this_fn,
		const void * lttng_param_call_site)
#ifndef LTT_TRACE
{
	return 0; /* Tracing compiled out: report success (was a missing return). */
}
#else
{
	int ret = 0;
	void *buffer = NULL;
	size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
	size_t *to_base = &real_to_base;
	size_t real_to = 0;
	size_t *to = &real_to;
	size_t real_len = 0;
	size_t *len = &real_len;
	size_t reserve_size;
	size_t align;
	const void *real_from;
	const void **from = &real_from;
	/* For each field, calculate the field size. */
	/* size = *to_base + *to + *len */
	/* Assume that the padding for alignment starts at a
	 * sizeof(void *) address. */

	/* Pass 1: size accounting only — field "this_fn". */
	*from = &lttng_param_this_fn;
	align = sizeof(const void *);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	*len += sizeof(const void *);

	/* Field "call_site". */
	*from = &lttng_param_call_site;
	align = sizeof(const void *);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	*len += sizeof(const void *);

	reserve_size = *to_base + *to + *len;
	{
		/* VLA sized by the dry run above. */
		char stack_buffer[reserve_size];
		buffer = stack_buffer;

		*to_base = *to = *len = 0;

		/* Pass 2: serialize both pointer fields. */
		*from = &lttng_param_this_fn;
		align = sizeof(const void *);

		if(*len == 0) {
			*to += ltt_align(*to, align); /* align output */
		} else {
			*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
		}

		*len += sizeof(const void *);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		*from = &lttng_param_call_site;
		align = sizeof(const void *);

		if(*len == 0) {
			*to += ltt_align(*to, align); /* align output */
		} else {
			*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
		}

		*len += sizeof(const void *);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		ret = ltt_trace_generic(ltt_facility_user_generic_F583779E, event_user_generic_function_entry, buffer, reserve_size, LTT_BLOCKING);
	}

	return ret;
}
#endif //LTT_TRACE
#endif //!LTT_TRACE_FAST
679
#ifdef LTT_TRACE_FAST
/* Fast-path logging of event "function_entry": reserve a slot directly
 * in the per-thread channel buffer, write the header and both pointer
 * fields, then commit.  Marked no_instrument_function to avoid
 * recursion under -finstrument-functions.  Returns 0; the event is
 * silently dropped when the buffer is full. */
static inline __attribute__((no_instrument_function)) int trace_user_generic_function_entry(
		const void * lttng_param_this_fn,
		const void * lttng_param_call_site)
#ifndef LTT_TRACE
{
	return 0; /* Tracing compiled out: report success (was a missing return). */
}
#else
{
	unsigned int index;
	struct ltt_trace_info *trace = thread_trace_info;
	struct ltt_buf *ltt_buf;
	void *buffer = NULL;
	size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
	size_t *to_base = &real_to_base;
	size_t real_to = 0;
	size_t *to = &real_to;
	size_t real_len = 0;
	size_t *len = &real_len;
	size_t reserve_size;
	size_t slot_size;
	size_t align;
	const void *real_from;
	const void **from = &real_from;
	uint64_t tsc;
	size_t before_hdr_pad, after_hdr_pad, header_size;

	/* Lazily initialize the per-thread tracing state. */
	if(!trace) {
		ltt_thread_init();
		trace = thread_trace_info;
	}

	/* For each field, calculate the field size. */
	/* size = *to_base + *to + *len */
	/* Assume that the padding for alignment starts at a
	 * sizeof(void *) address. */

	/* Pass 1: size accounting only — field "this_fn". */
	*from = &lttng_param_this_fn;
	align = sizeof(const void *);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	*len += sizeof(const void *);

	/* Field "call_site". */
	*from = &lttng_param_call_site;
	align = sizeof(const void *);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	*len += sizeof(const void *);

	reserve_size = *to_base + *to + *len;
	trace->nesting++; /* guard against recursive tracing from within tracing */
	index = ltt_get_index_from_facility(ltt_facility_user_generic_F583779E,
						event_user_generic_function_entry);

	{
		ltt_buf = ltt_get_channel_from_index(trace, index);
		slot_size = 0;
		buffer = ltt_reserve_slot(trace, ltt_buf,
			reserve_size, &slot_size, &tsc,
			&before_hdr_pad, &after_hdr_pad, &header_size);
		if(!buffer) goto end; /* buffer full */

		*to_base = *to = *len = 0;

		ltt_write_event_header(trace, ltt_buf, buffer,
			ltt_facility_user_generic_F583779E, event_user_generic_function_entry,
			reserve_size, before_hdr_pad, tsc);
		*to_base += before_hdr_pad + after_hdr_pad + header_size;

		/* Pass 2: serialize both pointer fields after the header. */
		*from = &lttng_param_this_fn;
		align = sizeof(const void *);

		if(*len == 0) {
			*to += ltt_align(*to, align); /* align output */
		} else {
			*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
		}

		*len += sizeof(const void *);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		*from = &lttng_param_call_site;
		align = sizeof(const void *);

		if(*len == 0) {
			*to += ltt_align(*to, align); /* align output */
		} else {
			*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
		}

		*len += sizeof(const void *);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		ltt_commit_slot(ltt_buf, buffer, slot_size);
	}

end:
	trace->nesting--;
	return 0; /* was missing: int function must return a value */
}
#endif //LTT_TRACE
#endif //LTT_TRACE_FAST
805
806 /* Event function_exit structures */
807
808 /* Event function_exit logging function */
#ifndef LTT_TRACE_FAST
/* Log event "function_exit" (two pointers: exited function and call
 * site) via the generic, blocking syscall tracing path.  Marked
 * no_instrument_function so -finstrument-functions cannot recurse into
 * the tracer.  Returns 0 on success, or the ltt_trace_generic() result. */
static inline __attribute__((no_instrument_function)) int trace_user_generic_function_exit(
		const void * lttng_param_this_fn,
		const void * lttng_param_call_site)
#ifndef LTT_TRACE
{
	return 0; /* Tracing compiled out: report success (was a missing return). */
}
#else
{
	int ret = 0;
	void *buffer = NULL;
	size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
	size_t *to_base = &real_to_base;
	size_t real_to = 0;
	size_t *to = &real_to;
	size_t real_len = 0;
	size_t *len = &real_len;
	size_t reserve_size;
	size_t align;
	const void *real_from;
	const void **from = &real_from;
	/* For each field, calculate the field size. */
	/* size = *to_base + *to + *len */
	/* Assume that the padding for alignment starts at a
	 * sizeof(void *) address. */

	/* Pass 1: size accounting only — field "this_fn". */
	*from = &lttng_param_this_fn;
	align = sizeof(const void *);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	*len += sizeof(const void *);

	/* Field "call_site". */
	*from = &lttng_param_call_site;
	align = sizeof(const void *);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	*len += sizeof(const void *);

	reserve_size = *to_base + *to + *len;
	{
		/* VLA sized by the dry run above. */
		char stack_buffer[reserve_size];
		buffer = stack_buffer;

		*to_base = *to = *len = 0;

		/* Pass 2: serialize both pointer fields. */
		*from = &lttng_param_this_fn;
		align = sizeof(const void *);

		if(*len == 0) {
			*to += ltt_align(*to, align); /* align output */
		} else {
			*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
		}

		*len += sizeof(const void *);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		*from = &lttng_param_call_site;
		align = sizeof(const void *);

		if(*len == 0) {
			*to += ltt_align(*to, align); /* align output */
		} else {
			*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
		}

		*len += sizeof(const void *);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		ret = ltt_trace_generic(ltt_facility_user_generic_F583779E, event_user_generic_function_exit, buffer, reserve_size, LTT_BLOCKING);
	}

	return ret;
}
#endif //LTT_TRACE
#endif //!LTT_TRACE_FAST
909
#ifdef LTT_TRACE_FAST
/* Fast-path logging of event "function_exit": reserve a slot directly
 * in the per-thread channel buffer, write the header and both pointer
 * fields, then commit.  Marked no_instrument_function to avoid
 * recursion under -finstrument-functions.  Returns 0; the event is
 * silently dropped when the buffer is full. */
static inline __attribute__((no_instrument_function)) int trace_user_generic_function_exit(
		const void * lttng_param_this_fn,
		const void * lttng_param_call_site)
#ifndef LTT_TRACE
{
	return 0; /* Tracing compiled out: report success (was a missing return). */
}
#else
{
	unsigned int index;
	struct ltt_trace_info *trace = thread_trace_info;
	struct ltt_buf *ltt_buf;
	void *buffer = NULL;
	size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
	size_t *to_base = &real_to_base;
	size_t real_to = 0;
	size_t *to = &real_to;
	size_t real_len = 0;
	size_t *len = &real_len;
	size_t reserve_size;
	size_t slot_size;
	size_t align;
	const void *real_from;
	const void **from = &real_from;
	uint64_t tsc;
	size_t before_hdr_pad, after_hdr_pad, header_size;

	/* Lazily initialize the per-thread tracing state. */
	if(!trace) {
		ltt_thread_init();
		trace = thread_trace_info;
	}

	/* For each field, calculate the field size. */
	/* size = *to_base + *to + *len */
	/* Assume that the padding for alignment starts at a
	 * sizeof(void *) address. */

	/* Pass 1: size accounting only — field "this_fn". */
	*from = &lttng_param_this_fn;
	align = sizeof(const void *);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	*len += sizeof(const void *);

	/* Field "call_site". */
	*from = &lttng_param_call_site;
	align = sizeof(const void *);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	*len += sizeof(const void *);

	reserve_size = *to_base + *to + *len;
	trace->nesting++; /* guard against recursive tracing from within tracing */
	index = ltt_get_index_from_facility(ltt_facility_user_generic_F583779E,
						event_user_generic_function_exit);

	{
		ltt_buf = ltt_get_channel_from_index(trace, index);
		slot_size = 0;
		buffer = ltt_reserve_slot(trace, ltt_buf,
			reserve_size, &slot_size, &tsc,
			&before_hdr_pad, &after_hdr_pad, &header_size);
		if(!buffer) goto end; /* buffer full */

		*to_base = *to = *len = 0;

		ltt_write_event_header(trace, ltt_buf, buffer,
			ltt_facility_user_generic_F583779E, event_user_generic_function_exit,
			reserve_size, before_hdr_pad, tsc);
		*to_base += before_hdr_pad + after_hdr_pad + header_size;

		/* Pass 2: serialize both pointer fields after the header. */
		*from = &lttng_param_this_fn;
		align = sizeof(const void *);

		if(*len == 0) {
			*to += ltt_align(*to, align); /* align output */
		} else {
			*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
		}

		*len += sizeof(const void *);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		*from = &lttng_param_call_site;
		align = sizeof(const void *);

		if(*len == 0) {
			*to += ltt_align(*to, align); /* align output */
		} else {
			*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
		}

		*len += sizeof(const void *);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		ltt_commit_slot(ltt_buf, buffer, slot_size);
	}

end:
	trace->nesting--;
	return 0; /* was missing: int function must return a value */
}
#endif //LTT_TRACE
#endif //LTT_TRACE_FAST
1035
1036 #endif //_LTT_FACILITY_USER_GENERIC_H_
This page took 0.055432 seconds and 5 git commands to generate.