update for threads
[lttv.git] / usertrace-generic/ltt/ltt-facility-user_generic.h
#ifndef _LTT_FACILITY_USER_GENERIC_H_
#define _LTT_FACILITY_USER_GENERIC_H_

#include <sys/types.h>
#include <string.h> /* memcpy(), strlen() used by the serialization helpers */
#include <ltt/ltt-facility-id-user_generic.h>
#include <ltt/ltt-generic.h>

/* Named types */

/* Event string structures */
static inline void lttng_write_string_user_generic_string_data(
		void *buffer,
		size_t *to_base,
		size_t *to,
		const void **from,
		size_t *len,
		const char * obj)
{
	size_t size;
	size_t align;

	/* Flush pending memcpy */
	if(*len != 0) {
		if(buffer != NULL)
			memcpy(buffer+*to_base+*to, *from, *len);
	}
	*to += *len;
	*len = 0;

	align = sizeof(char);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	/* Contains variable sized fields : must explode the structure */

	size = strlen(obj) + 1; /* Include final NULL char. */
	if(buffer != NULL)
		memcpy(buffer+*to_base+*to, obj, size);
	*to += size;

	/* Realign the *to_base on arch size, set *to to 0 */
	*to += ltt_align(*to, sizeof(void *));
	*to_base = *to_base+*to;
	*to = 0;

	/* Put source *from just after the C string */
	*from += size;
}
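/*
 * Note on the serialization helpers in this header: each logging function
 * below calls its lttng_write_*() helper twice.  A minimal sketch of the
 * pattern (illustrative only, the real callers follow):
 *
 *	first pass:  buffer == NULL, so the helper only advances the offsets
 *	             (*to_base, *to, *len) and reserve_size can be computed;
 *	second pass: buffer points at reserved storage and the same calls
 *	             perform the memcpy()s at the already-aligned offsets.
 */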


/* Event string logging function */
#ifndef LTT_TRACE_FAST
static inline int trace_user_generic_string(
		const char * lttng_param_data)
#ifndef LTT_TRACE
{
}
#else
{
	int ret = 0;
	void *buffer = NULL;
	size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
	size_t *to_base = &real_to_base;
	size_t real_to = 0;
	size_t *to = &real_to;
	size_t real_len = 0;
	size_t *len = &real_len;
	size_t reserve_size;
	size_t slot_size;
	const void *real_from;
	const void **from = &real_from;
	/* For each field, calculate the field size. */
	/* size = *to_base + *to + *len */
	/* Assume that the padding for alignment starts at a
	 * sizeof(void *) address. */

	*from = lttng_param_data;
	lttng_write_string_user_generic_string_data(buffer, to_base, to, from, len, lttng_param_data);

	reserve_size = *to_base + *to + *len;
	{
		char stack_buffer[reserve_size];
		buffer = stack_buffer;

		*to_base = *to = *len = 0;

		*from = lttng_param_data;
		lttng_write_string_user_generic_string_data(buffer, to_base, to, from, len, lttng_param_data);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		ret = ltt_trace_generic(ltt_facility_user_generic_F583779E, event_user_generic_string, buffer, reserve_size, LTT_BLOCKING);
	}

	return ret;

}
#endif //LTT_TRACE
#endif //!LTT_TRACE_FAST

#ifdef LTT_TRACE_FAST
static inline int trace_user_generic_string(
		const char * lttng_param_data)
#ifndef LTT_TRACE
{
}
#else
{
	unsigned int index;
	struct ltt_trace_info *trace = thread_trace_info;
	struct ltt_buf *ltt_buf;
	void *buffer = NULL;
	size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
	size_t *to_base = &real_to_base;
	size_t real_to = 0;
	size_t *to = &real_to;
	size_t real_len = 0;
	size_t *len = &real_len;
	size_t reserve_size;
	size_t slot_size;
	const void *real_from;
	const void **from = &real_from;
	uint64_t tsc;
	size_t before_hdr_pad, after_hdr_pad, header_size;

	if(!trace) ltt_thread_init();

	/* For each field, calculate the field size. */
	/* size = *to_base + *to + *len */
	/* Assume that the padding for alignment starts at a
	 * sizeof(void *) address. */

	*from = lttng_param_data;
	lttng_write_string_user_generic_string_data(buffer, to_base, to, from, len, lttng_param_data);

	reserve_size = *to_base + *to + *len;
	trace->nesting++;
	index = ltt_get_index_from_facility(ltt_facility_user_generic_F583779E,
			event_user_generic_string);

	{
		ltt_buf = ltt_get_channel_from_index(trace, index);
		slot_size = 0;
		buffer = ltt_reserve_slot(trace, ltt_buf,
				reserve_size, &slot_size, &tsc,
				&before_hdr_pad, &after_hdr_pad, &header_size);
		if(!buffer) goto end; /* buffer full */

		*to_base = *to = *len = 0;

		ltt_write_event_header(trace, ltt_buf, buffer,
				ltt_facility_user_generic_F583779E, event_user_generic_string,
				reserve_size, before_hdr_pad, tsc);
		*to_base += before_hdr_pad + after_hdr_pad + header_size;

		*from = lttng_param_data;
		lttng_write_string_user_generic_string_data(buffer, to_base, to, from, len, lttng_param_data);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		ltt_commit_slot(ltt_buf, buffer, slot_size);

	}

end:
	trace->nesting--;
}
#endif //LTT_TRACE
#endif //LTT_TRACE_FAST
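/*
 * Usage example (illustrative sketch, not part of the generated facility):
 * an application compiled with -DLTT_TRACE and linked against the usertrace
 * library can record a free-form string event directly:
 *
 *	#include <ltt/ltt-facility-user_generic.h>
 *
 *	void report_startup(const char *version)
 *	{
 *		trace_user_generic_string(version);
 *	}
 */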

/* Event string_pointer structures */
static inline void lttng_write_string_user_generic_string_pointer_string(
		void *buffer,
		size_t *to_base,
		size_t *to,
		const void **from,
		size_t *len,
		const char * obj)
{
	size_t size;
	size_t align;

	/* Flush pending memcpy */
	if(*len != 0) {
		if(buffer != NULL)
			memcpy(buffer+*to_base+*to, *from, *len);
	}
	*to += *len;
	*len = 0;

	align = sizeof(char);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	/* Contains variable sized fields : must explode the structure */

	size = strlen(obj) + 1; /* Include final NULL char. */
	if(buffer != NULL)
		memcpy(buffer+*to_base+*to, obj, size);
	*to += size;

	/* Realign the *to_base on arch size, set *to to 0 */
	*to += ltt_align(*to, sizeof(void *));
	*to_base = *to_base+*to;
	*to = 0;

	/* Put source *from just after the C string */
	*from += size;
}


/* Event string_pointer logging function */
#ifndef LTT_TRACE_FAST
static inline int trace_user_generic_string_pointer(
		const char * lttng_param_string,
		const void * lttng_param_pointer)
#ifndef LTT_TRACE
{
}
#else
{
	int ret = 0;
	void *buffer = NULL;
	size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
	size_t *to_base = &real_to_base;
	size_t real_to = 0;
	size_t *to = &real_to;
	size_t real_len = 0;
	size_t *len = &real_len;
	size_t reserve_size;
	size_t slot_size;
	size_t align;
	const void *real_from;
	const void **from = &real_from;
	/* For each field, calculate the field size. */
	/* size = *to_base + *to + *len */
	/* Assume that the padding for alignment starts at a
	 * sizeof(void *) address. */

	*from = lttng_param_string;
	lttng_write_string_user_generic_string_pointer_string(buffer, to_base, to, from, len, lttng_param_string);

	*from = &lttng_param_pointer;
	align = sizeof(const void *);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	*len += sizeof(const void *);

	reserve_size = *to_base + *to + *len;
	{
		char stack_buffer[reserve_size];
		buffer = stack_buffer;

		*to_base = *to = *len = 0;

		*from = lttng_param_string;
		lttng_write_string_user_generic_string_pointer_string(buffer, to_base, to, from, len, lttng_param_string);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		*from = &lttng_param_pointer;
		align = sizeof(const void *);

		if(*len == 0) {
			*to += ltt_align(*to, align); /* align output */
		} else {
			*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
		}

		*len += sizeof(const void *);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		ret = ltt_trace_generic(ltt_facility_user_generic_F583779E, event_user_generic_string_pointer, buffer, reserve_size, LTT_BLOCKING);
	}

	return ret;

}
#endif //LTT_TRACE
#endif //!LTT_TRACE_FAST

#ifdef LTT_TRACE_FAST
static inline int trace_user_generic_string_pointer(
		const char * lttng_param_string,
		const void * lttng_param_pointer)
#ifndef LTT_TRACE
{
}
#else
{
	unsigned int index;
	struct ltt_trace_info *trace = thread_trace_info;
	struct ltt_buf *ltt_buf;
	void *buffer = NULL;
	size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
	size_t *to_base = &real_to_base;
	size_t real_to = 0;
	size_t *to = &real_to;
	size_t real_len = 0;
	size_t *len = &real_len;
	size_t reserve_size;
	size_t slot_size;
	size_t align;
	const void *real_from;
	const void **from = &real_from;
	uint64_t tsc;
	size_t before_hdr_pad, after_hdr_pad, header_size;

	if(!trace) ltt_thread_init();

	/* For each field, calculate the field size. */
	/* size = *to_base + *to + *len */
	/* Assume that the padding for alignment starts at a
	 * sizeof(void *) address. */

	*from = lttng_param_string;
	lttng_write_string_user_generic_string_pointer_string(buffer, to_base, to, from, len, lttng_param_string);

	*from = &lttng_param_pointer;
	align = sizeof(const void *);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	*len += sizeof(const void *);

	reserve_size = *to_base + *to + *len;
	trace->nesting++;
	index = ltt_get_index_from_facility(ltt_facility_user_generic_F583779E,
			event_user_generic_string_pointer);

	{
		ltt_buf = ltt_get_channel_from_index(trace, index);
		slot_size = 0;
		buffer = ltt_reserve_slot(trace, ltt_buf,
				reserve_size, &slot_size, &tsc,
				&before_hdr_pad, &after_hdr_pad, &header_size);
		if(!buffer) goto end; /* buffer full */

		*to_base = *to = *len = 0;

		ltt_write_event_header(trace, ltt_buf, buffer,
				ltt_facility_user_generic_F583779E, event_user_generic_string_pointer,
				reserve_size, before_hdr_pad, tsc);
		*to_base += before_hdr_pad + after_hdr_pad + header_size;

		*from = lttng_param_string;
		lttng_write_string_user_generic_string_pointer_string(buffer, to_base, to, from, len, lttng_param_string);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		*from = &lttng_param_pointer;
		align = sizeof(const void *);

		if(*len == 0) {
			*to += ltt_align(*to, align); /* align output */
		} else {
			*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
		}

		*len += sizeof(const void *);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		ltt_commit_slot(ltt_buf, buffer, slot_size);

	}

end:
	trace->nesting--;
}
#endif //LTT_TRACE
#endif //LTT_TRACE_FAST
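/*
 * Usage example (illustrative sketch): string_pointer pairs a label with an
 * address, e.g. to correlate later events with a particular object:
 *
 *	void note_allocation(void *obj)
 *	{
 *		trace_user_generic_string_pointer("allocated", obj);
 *	}
 */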

/* Event slow_printf structures */
static inline void lttng_write_string_user_generic_slow_printf_string(
		void *buffer,
		size_t *to_base,
		size_t *to,
		const void **from,
		size_t *len,
		const char * obj)
{
	size_t size;
	size_t align;

	/* Flush pending memcpy */
	if(*len != 0) {
		if(buffer != NULL)
			memcpy(buffer+*to_base+*to, *from, *len);
	}
	*to += *len;
	*len = 0;

	align = sizeof(char);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	/* Contains variable sized fields : must explode the structure */

	size = strlen(obj) + 1; /* Include final NULL char. */
	if(buffer != NULL)
		memcpy(buffer+*to_base+*to, obj, size);
	*to += size;

	/* Realign the *to_base on arch size, set *to to 0 */
	*to += ltt_align(*to, sizeof(void *));
	*to_base = *to_base+*to;
	*to = 0;

	/* Put source *from just after the C string */
	*from += size;
}


/* Event slow_printf logging function */
#ifndef LTT_TRACE_FAST
static inline int trace_user_generic_slow_printf_param_buffer(
		void *buffer,
		size_t reserve_size)
#ifndef LTT_TRACE
{
}
#else
{
	int ret = 0;
	reserve_size = ltt_align(reserve_size, sizeof(void *));
	{
		ret = ltt_trace_generic(ltt_facility_user_generic_F583779E, event_user_generic_slow_printf, buffer, reserve_size, LTT_BLOCKING);
	}

	return ret;

}
#endif //LTT_TRACE
#endif //!LTT_TRACE_FAST

#ifdef LTT_TRACE_FAST
static inline int trace_user_generic_slow_printf(
		const char * lttng_param_string)
#ifndef LTT_TRACE
{
}
#else
{
	unsigned int index;
	struct ltt_trace_info *trace = thread_trace_info;
	struct ltt_buf *ltt_buf;
	void *buffer = NULL;
	size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
	size_t *to_base = &real_to_base;
	size_t real_to = 0;
	size_t *to = &real_to;
	size_t real_len = 0;
	size_t *len = &real_len;
	size_t reserve_size;
	size_t slot_size;
	const void *real_from;
	const void **from = &real_from;
	uint64_t tsc;
	size_t before_hdr_pad, after_hdr_pad, header_size;

	if(!trace) ltt_thread_init();

	/* For each field, calculate the field size. */
	/* size = *to_base + *to + *len */
	/* Assume that the padding for alignment starts at a
	 * sizeof(void *) address. */

	*from = lttng_param_string;
	lttng_write_string_user_generic_slow_printf_string(buffer, to_base, to, from, len, lttng_param_string);

	reserve_size = *to_base + *to + *len;
	trace->nesting++;
	index = ltt_get_index_from_facility(ltt_facility_user_generic_F583779E,
			event_user_generic_slow_printf);

	{
		ltt_buf = ltt_get_channel_from_index(trace, index);
		slot_size = 0;
		buffer = ltt_reserve_slot(trace, ltt_buf,
				reserve_size, &slot_size, &tsc,
				&before_hdr_pad, &after_hdr_pad, &header_size);
		if(!buffer) goto end; /* buffer full */

		*to_base = *to = *len = 0;

		ltt_write_event_header(trace, ltt_buf, buffer,
				ltt_facility_user_generic_F583779E, event_user_generic_slow_printf,
				reserve_size, before_hdr_pad, tsc);
		*to_base += before_hdr_pad + after_hdr_pad + header_size;

		*from = lttng_param_string;
		lttng_write_string_user_generic_slow_printf_string(buffer, to_base, to, from, len, lttng_param_string);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		ltt_commit_slot(ltt_buf, buffer, slot_size);

	}

end:
	trace->nesting--;
}
#endif //LTT_TRACE
#endif //LTT_TRACE_FAST
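/*
 * Usage example (illustrative sketch, assuming the regular non-LTT_TRACE_FAST
 * build): the slow_printf event carries one pre-formatted string, so a caller
 * formats into a local buffer and passes the buffer together with its size
 * including the terminating NUL; the size is realigned on sizeof(void *)
 * inside trace_user_generic_slow_printf_param_buffer():
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	void report_temperature(int celsius)
 *	{
 *		char msg[64];
 *
 *		snprintf(msg, sizeof(msg), "temperature: %d C", celsius);
 *		trace_user_generic_slow_printf_param_buffer(msg, strlen(msg) + 1);
 *	}
 */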

/* Event function_entry structures */

/* Event function_entry logging function */
#ifndef LTT_TRACE_FAST
static inline __attribute__((no_instrument_function)) int trace_user_generic_function_entry(
		const void * lttng_param_this_fn,
		const void * lttng_param_call_site)
#ifndef LTT_TRACE
{
}
#else
{
	int ret = 0;
	void *buffer = NULL;
	size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
	size_t *to_base = &real_to_base;
	size_t real_to = 0;
	size_t *to = &real_to;
	size_t real_len = 0;
	size_t *len = &real_len;
	size_t reserve_size;
	size_t slot_size;
	size_t align;
	const void *real_from;
	const void **from = &real_from;
	/* For each field, calculate the field size. */
	/* size = *to_base + *to + *len */
	/* Assume that the padding for alignment starts at a
	 * sizeof(void *) address. */

	*from = &lttng_param_this_fn;
	align = sizeof(const void *);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	*len += sizeof(const void *);

	*from = &lttng_param_call_site;
	align = sizeof(const void *);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	*len += sizeof(const void *);

	reserve_size = *to_base + *to + *len;
	{
		char stack_buffer[reserve_size];
		buffer = stack_buffer;

		*to_base = *to = *len = 0;

		*from = &lttng_param_this_fn;
		align = sizeof(const void *);

		if(*len == 0) {
			*to += ltt_align(*to, align); /* align output */
		} else {
			*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
		}

		*len += sizeof(const void *);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		*from = &lttng_param_call_site;
		align = sizeof(const void *);

		if(*len == 0) {
			*to += ltt_align(*to, align); /* align output */
		} else {
			*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
		}

		*len += sizeof(const void *);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		ret = ltt_trace_generic(ltt_facility_user_generic_F583779E, event_user_generic_function_entry, buffer, reserve_size, LTT_BLOCKING);
	}

	return ret;

}
#endif //LTT_TRACE
#endif //!LTT_TRACE_FAST

#ifdef LTT_TRACE_FAST
static inline __attribute__((no_instrument_function)) int trace_user_generic_function_entry(
		const void * lttng_param_this_fn,
		const void * lttng_param_call_site)
#ifndef LTT_TRACE
{
}
#else
{
	unsigned int index;
	struct ltt_trace_info *trace = thread_trace_info;
	struct ltt_buf *ltt_buf;
	void *buffer = NULL;
	size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
	size_t *to_base = &real_to_base;
	size_t real_to = 0;
	size_t *to = &real_to;
	size_t real_len = 0;
	size_t *len = &real_len;
	size_t reserve_size;
	size_t slot_size;
	size_t align;
	const void *real_from;
	const void **from = &real_from;
	uint64_t tsc;
	size_t before_hdr_pad, after_hdr_pad, header_size;

	if(!trace) ltt_thread_init();

	/* For each field, calculate the field size. */
	/* size = *to_base + *to + *len */
	/* Assume that the padding for alignment starts at a
	 * sizeof(void *) address. */

	*from = &lttng_param_this_fn;
	align = sizeof(const void *);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	*len += sizeof(const void *);

	*from = &lttng_param_call_site;
	align = sizeof(const void *);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	*len += sizeof(const void *);

	reserve_size = *to_base + *to + *len;
	trace->nesting++;
	index = ltt_get_index_from_facility(ltt_facility_user_generic_F583779E,
			event_user_generic_function_entry);

	{
		ltt_buf = ltt_get_channel_from_index(trace, index);
		slot_size = 0;
		buffer = ltt_reserve_slot(trace, ltt_buf,
				reserve_size, &slot_size, &tsc,
				&before_hdr_pad, &after_hdr_pad, &header_size);
		if(!buffer) goto end; /* buffer full */

		*to_base = *to = *len = 0;

		ltt_write_event_header(trace, ltt_buf, buffer,
				ltt_facility_user_generic_F583779E, event_user_generic_function_entry,
				reserve_size, before_hdr_pad, tsc);
		*to_base += before_hdr_pad + after_hdr_pad + header_size;

		*from = &lttng_param_this_fn;
		align = sizeof(const void *);

		if(*len == 0) {
			*to += ltt_align(*to, align); /* align output */
		} else {
			*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
		}

		*len += sizeof(const void *);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		*from = &lttng_param_call_site;
		align = sizeof(const void *);

		if(*len == 0) {
			*to += ltt_align(*to, align); /* align output */
		} else {
			*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
		}

		*len += sizeof(const void *);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		ltt_commit_slot(ltt_buf, buffer, slot_size);

	}

end:
	trace->nesting--;
}
#endif //LTT_TRACE
#endif //LTT_TRACE_FAST
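/*
 * Usage example (illustrative sketch): function_entry matches the entry hook
 * emitted by gcc -finstrument-functions, so an instrumented application can
 * forward it to the tracer; the no_instrument_function attribute keeps the
 * hook itself from being instrumented in turn:
 *
 *	void __attribute__((no_instrument_function))
 *	__cyg_profile_func_enter(void *this_fn, void *call_site)
 *	{
 *		trace_user_generic_function_entry(this_fn, call_site);
 *	}
 */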

/* Event function_exit structures */

/* Event function_exit logging function */
#ifndef LTT_TRACE_FAST
static inline __attribute__((no_instrument_function)) int trace_user_generic_function_exit(
		const void * lttng_param_this_fn,
		const void * lttng_param_call_site)
#ifndef LTT_TRACE
{
}
#else
{
	int ret = 0;
	void *buffer = NULL;
	size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
	size_t *to_base = &real_to_base;
	size_t real_to = 0;
	size_t *to = &real_to;
	size_t real_len = 0;
	size_t *len = &real_len;
	size_t reserve_size;
	size_t slot_size;
	size_t align;
	const void *real_from;
	const void **from = &real_from;
	/* For each field, calculate the field size. */
	/* size = *to_base + *to + *len */
	/* Assume that the padding for alignment starts at a
	 * sizeof(void *) address. */

	*from = &lttng_param_this_fn;
	align = sizeof(const void *);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	*len += sizeof(const void *);

	*from = &lttng_param_call_site;
	align = sizeof(const void *);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	*len += sizeof(const void *);

	reserve_size = *to_base + *to + *len;
	{
		char stack_buffer[reserve_size];
		buffer = stack_buffer;

		*to_base = *to = *len = 0;

		*from = &lttng_param_this_fn;
		align = sizeof(const void *);

		if(*len == 0) {
			*to += ltt_align(*to, align); /* align output */
		} else {
			*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
		}

		*len += sizeof(const void *);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		*from = &lttng_param_call_site;
		align = sizeof(const void *);

		if(*len == 0) {
			*to += ltt_align(*to, align); /* align output */
		} else {
			*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
		}

		*len += sizeof(const void *);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		ret = ltt_trace_generic(ltt_facility_user_generic_F583779E, event_user_generic_function_exit, buffer, reserve_size, LTT_BLOCKING);
	}

	return ret;

}
#endif //LTT_TRACE
#endif //!LTT_TRACE_FAST

#ifdef LTT_TRACE_FAST
static inline __attribute__((no_instrument_function)) int trace_user_generic_function_exit(
		const void * lttng_param_this_fn,
		const void * lttng_param_call_site)
#ifndef LTT_TRACE
{
}
#else
{
	unsigned int index;
	struct ltt_trace_info *trace = thread_trace_info;
	struct ltt_buf *ltt_buf;
	void *buffer = NULL;
	size_t real_to_base = 0; /* The buffer is allocated on arch_size alignment */
	size_t *to_base = &real_to_base;
	size_t real_to = 0;
	size_t *to = &real_to;
	size_t real_len = 0;
	size_t *len = &real_len;
	size_t reserve_size;
	size_t slot_size;
	size_t align;
	const void *real_from;
	const void **from = &real_from;
	uint64_t tsc;
	size_t before_hdr_pad, after_hdr_pad, header_size;

	if(!trace) ltt_thread_init();

	/* For each field, calculate the field size. */
	/* size = *to_base + *to + *len */
	/* Assume that the padding for alignment starts at a
	 * sizeof(void *) address. */

	*from = &lttng_param_this_fn;
	align = sizeof(const void *);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	*len += sizeof(const void *);

	*from = &lttng_param_call_site;
	align = sizeof(const void *);

	if(*len == 0) {
		*to += ltt_align(*to, align); /* align output */
	} else {
		*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
	}

	*len += sizeof(const void *);

	reserve_size = *to_base + *to + *len;
	trace->nesting++;
	index = ltt_get_index_from_facility(ltt_facility_user_generic_F583779E,
			event_user_generic_function_exit);

	{
		ltt_buf = ltt_get_channel_from_index(trace, index);
		slot_size = 0;
		buffer = ltt_reserve_slot(trace, ltt_buf,
				reserve_size, &slot_size, &tsc,
				&before_hdr_pad, &after_hdr_pad, &header_size);
		if(!buffer) goto end; /* buffer full */

		*to_base = *to = *len = 0;

		ltt_write_event_header(trace, ltt_buf, buffer,
				ltt_facility_user_generic_F583779E, event_user_generic_function_exit,
				reserve_size, before_hdr_pad, tsc);
		*to_base += before_hdr_pad + after_hdr_pad + header_size;

		*from = &lttng_param_this_fn;
		align = sizeof(const void *);

		if(*len == 0) {
			*to += ltt_align(*to, align); /* align output */
		} else {
			*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
		}

		*len += sizeof(const void *);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		*from = &lttng_param_call_site;
		align = sizeof(const void *);

		if(*len == 0) {
			*to += ltt_align(*to, align); /* align output */
		} else {
			*len += ltt_align(*to+*len, align); /* alignment, ok to do a memcpy of it */
		}

		*len += sizeof(const void *);

		/* Flush pending memcpy */
		if(*len != 0) {
			memcpy(buffer+*to_base+*to, *from, *len);
			*to += *len;
			*len = 0;
		}

		ltt_commit_slot(ltt_buf, buffer, slot_size);

	}

end:
	trace->nesting--;
}
#endif //LTT_TRACE
#endif //LTT_TRACE_FAST
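/*
 * Usage example (illustrative sketch): the matching exit hook for
 * gcc -finstrument-functions builds, completing the pair started with
 * __cyg_profile_func_enter above:
 *
 *	void __attribute__((no_instrument_function))
 *	__cyg_profile_func_exit(void *this_fn, void *call_site)
 *	{
 *		trace_user_generic_function_exit(this_fn, call_site);
 *	}
 */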

#endif //_LTT_FACILITY_USER_GENERIC_H_