/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-event-notifier-notification.c
 *
 * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
 */

#include <linux/bug.h>

#include <lttng/lttng-bytecode.h>
#include <lttng/events.h>
#include <lttng/msgpack.h>
#include <lttng/event-notifier-notification.h>
#include <lttng/events-internal.h>
#include <wrapper/barrier.h>

/*
 * The capture buffer size needs to be below 1024 bytes to keep the
 * stack frame below the 1024-byte limit enforced by the kernel. If we
 * ever need to increase it, we will need to use a memory allocation
 * scheme which allows allocating temporary memory chunks from the
 * instrumentation sites. This could be done by adapting the lttng
 * tp-mempool to become nmi-safe and lock-free.
 */
#define CAPTURE_BUFFER_SIZE 512

struct lttng_event_notifier_notification {
	int notification_fd;
	uint64_t event_notifier_token;
	uint8_t capture_buf[CAPTURE_BUFFER_SIZE];
	struct lttng_msgpack_writer writer;
	bool has_captures;
};

static
int capture_enum(struct lttng_msgpack_writer *writer,
		struct lttng_interpreter_output *output)
{
	int ret;

	/*
	 * Enums are captured as a map containing 2 key-value pairs, e.g.:
	 * - type: enum
	 *   value: 177
	 */
	ret = lttng_msgpack_begin_map(writer, 2);
	if (ret) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = lttng_msgpack_write_str(writer, "type");
	if (ret) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = lttng_msgpack_write_str(writer, "enum");
	if (ret) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = lttng_msgpack_write_str(writer, "value");
	if (ret) {
		WARN_ON_ONCE(1);
		goto end;
	}

	switch (output->type) {
	case LTTNG_INTERPRETER_TYPE_SIGNED_ENUM:
		ret = lttng_msgpack_write_signed_integer(writer, output->u.s);
		if (ret) {
			WARN_ON_ONCE(1);
			goto end;
		}
		break;
	case LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM:
		ret = lttng_msgpack_write_unsigned_integer(writer, output->u.u);
		if (ret) {
			WARN_ON_ONCE(1);
			goto end;
		}
		break;
	default:
		WARN_ON(1);
	}

	ret = lttng_msgpack_end_map(writer);
	if (ret)
		WARN_ON_ONCE(1);

end:
	return ret;
}

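/*
 * Read one signed integer element of a captured sequence at `ptr`,
 * byte-swapping it if its byte order is reversed, and return it
 * sign-extended to 64 bits.
 */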
static
int64_t capture_sequence_element_signed(uint8_t *ptr,
		const struct lttng_kernel_type_integer *type)
{
	int64_t value = 0;
	unsigned int size = type->size;
	bool byte_order_reversed = type->reverse_byte_order;

	switch (size) {
	case 8:
		/* Cast so the 8-bit element is sign-extended. */
		value = *(int8_t *) ptr;
		break;
	case 16:
	{
		int16_t tmp;
		tmp = *(int16_t *) ptr;
		if (byte_order_reversed)
			__swab16s(&tmp);

		value = tmp;
		break;
	}
	case 32:
	{
		int32_t tmp;
		tmp = *(int32_t *) ptr;
		if (byte_order_reversed)
			__swab32s(&tmp);

		value = tmp;
		break;
	}
	case 64:
	{
		int64_t tmp;
		tmp = *(int64_t *) ptr;
		if (byte_order_reversed)
			__swab64s(&tmp);

		value = tmp;
		break;
	}
	default:
		WARN_ON(1);
	}

	return value;
}

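/*
 * Read one unsigned integer element of a captured sequence at `ptr`,
 * byte-swapping it if its byte order is reversed, and return it
 * zero-extended to 64 bits.
 */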
static
uint64_t capture_sequence_element_unsigned(uint8_t *ptr,
		const struct lttng_kernel_type_integer *type)
{
	uint64_t value = 0;
	unsigned int size = type->size;
	bool byte_order_reversed = type->reverse_byte_order;

	switch (size) {
	case 8:
		value = *ptr;
		break;
	case 16:
	{
		uint16_t tmp;
		tmp = *(uint16_t *) ptr;
		if (byte_order_reversed)
			__swab16s(&tmp);

		value = tmp;
		break;
	}
	case 32:
	{
		uint32_t tmp;
		tmp = *(uint32_t *) ptr;
		if (byte_order_reversed)
			__swab32s(&tmp);

		value = tmp;
		break;
	}
	case 64:
	{
		uint64_t tmp;
		tmp = *(uint64_t *) ptr;
		if (byte_order_reversed)
			__swab64s(&tmp);

		value = tmp;
		break;
	}
	default:
		WARN_ON(1);
	}

	return value;
}

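/*
 * Serialize a captured sequence (or array) of integers or enumerations
 * as a msgpack array, one entry per element.
 */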
static
int capture_sequence(struct lttng_msgpack_writer *writer,
		struct lttng_interpreter_output *output)
{
	const struct lttng_kernel_type_integer *integer_type = NULL;
	const struct lttng_kernel_type_common *nested_type;
	uint8_t *ptr;
	bool signedness;
	int ret, i;

	ret = lttng_msgpack_begin_array(writer, output->u.sequence.nr_elem);
	if (ret) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ptr = (uint8_t *) output->u.sequence.ptr;
	nested_type = output->u.sequence.nested_type;
	switch (nested_type->type) {
	case lttng_kernel_type_integer:
		integer_type = lttng_kernel_get_type_integer(nested_type);
		break;
	case lttng_kernel_type_enum:
		/* Treat enumeration as an integer. */
		integer_type = lttng_kernel_get_type_integer(lttng_kernel_get_type_enum(nested_type)->container_type);
		break;
	default:
		/* Captures of arrays of non-integer elements are not supported. */
		WARN_ON(1);
	}
	signedness = integer_type->signedness;
	for (i = 0; i < output->u.sequence.nr_elem; i++) {
		if (signedness) {
			ret = lttng_msgpack_write_signed_integer(writer,
					capture_sequence_element_signed(ptr, integer_type));
			if (ret) {
				WARN_ON_ONCE(1);
				goto end;
			}
		} else {
			ret = lttng_msgpack_write_unsigned_integer(writer,
					capture_sequence_element_unsigned(ptr, integer_type));
			if (ret) {
				WARN_ON_ONCE(1);
				goto end;
			}
		}

		/*
		 * We assume that alignment is smaller than or equal to the
		 * size. This currently holds true but if it changes in the
		 * future, we will want to change the pointer arithmetic below
		 * to take into account that the next element might be further
		 * away.
		 */
		WARN_ON(integer_type->alignment > integer_type->size);

		/* Size is in number of bits. */
		ptr += (integer_type->size / CHAR_BIT);
	}

	ret = lttng_msgpack_end_array(writer);
	if (ret)
		WARN_ON_ONCE(1);
end:
	return ret;
}

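/*
 * Append the value produced by a capture bytecode interpreter run to
 * the notification's msgpack capture buffer, dispatching on the output
 * type.
 */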
static
int notification_append_capture(
		struct lttng_event_notifier_notification *notif,
		struct lttng_interpreter_output *output)
{
	struct lttng_msgpack_writer *writer = &notif->writer;
	int ret = 0;

	switch (output->type) {
	case LTTNG_INTERPRETER_TYPE_S64:
		ret = lttng_msgpack_write_signed_integer(writer, output->u.s);
		if (ret) {
			WARN_ON_ONCE(1);
			goto end;
		}
		break;
	case LTTNG_INTERPRETER_TYPE_U64:
		ret = lttng_msgpack_write_unsigned_integer(writer, output->u.u);
		if (ret) {
			WARN_ON_ONCE(1);
			goto end;
		}
		break;
	case LTTNG_INTERPRETER_TYPE_STRING:
		ret = lttng_msgpack_write_str(writer, output->u.str.str);
		if (ret) {
			WARN_ON_ONCE(1);
			goto end;
		}
		break;
	case LTTNG_INTERPRETER_TYPE_SEQUENCE:
		ret = capture_sequence(writer, output);
		if (ret) {
			WARN_ON_ONCE(1);
			goto end;
		}
		break;
	case LTTNG_INTERPRETER_TYPE_SIGNED_ENUM:
	case LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM:
		ret = capture_enum(writer, output);
		if (ret) {
			WARN_ON_ONCE(1);
			goto end;
		}
		break;
	default:
		ret = -1;
		WARN_ON(1);
	}
end:
	return ret;
}

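/*
 * Append a msgpack nil to the capture buffer to mark a capture whose
 * bytecode evaluation failed.
 */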
static
int notification_append_empty_capture(
		struct lttng_event_notifier_notification *notif)
{
	int ret = lttng_msgpack_write_nil(&notif->writer);
	if (ret)
		WARN_ON_ONCE(1);

	return ret;
}

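/*
 * Prepare an on-stack notification: if the event notifier has capture
 * bytecodes, initialize the msgpack writer over the capture buffer and
 * open the top-level array holding one entry per capture.
 */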
static
int notification_init(struct lttng_event_notifier_notification *notif,
		struct lttng_kernel_event_notifier *event_notifier)
{
	struct lttng_msgpack_writer *writer = &notif->writer;
	int ret = 0;

	notif->has_captures = false;

	if (event_notifier->priv->num_captures > 0) {
		lttng_msgpack_writer_init(writer, notif->capture_buf,
				CAPTURE_BUFFER_SIZE);

		ret = lttng_msgpack_begin_array(writer, event_notifier->priv->num_captures);
		if (ret) {
			WARN_ON_ONCE(1);
			goto end;
		}

		notif->has_captures = true;
	}

end:
	return ret;
}

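/*
 * Account a dropped notification by incrementing this event notifier's
 * entry in the group's error counter, if such a counter is attached.
 */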
static
void record_error(struct lttng_kernel_event_notifier *event_notifier)
{
	struct lttng_event_notifier_group *event_notifier_group = event_notifier->priv->group;
	struct lttng_counter *error_counter;
	size_t dimension_index[1];
	int ret;

	/*
	 * lttng_smp_load_acquire paired with lttng_smp_store_release orders
	 * creation of the error counter and setting error_counter_len
	 * before the error_counter is used.
	 */
	error_counter = lttng_smp_load_acquire(&event_notifier_group->error_counter);
	/* This group may not have an error counter attached to it. */
	if (!error_counter)
		return;

	dimension_index[0] = event_notifier->priv->error_counter_index;

	ret = error_counter->ops->counter_add(error_counter->counter,
			dimension_index, 1);
	if (ret)
		WARN_ON_ONCE(1);
}

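/*
 * Reserve space in the event notifier group's channel, write the
 * fixed-size notification followed by the capture buffer payload, then
 * commit the record and wake up readers. On reservation failure,
 * record the error in the group's error counter instead.
 */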
static
void notification_send(struct lttng_event_notifier_notification *notif,
		struct lttng_kernel_event_notifier *event_notifier)
{
	struct lttng_event_notifier_group *event_notifier_group = event_notifier->priv->group;
	struct lttng_kernel_ring_buffer_ctx ctx;
	struct lttng_kernel_abi_event_notifier_notification kernel_notif;
	size_t capture_buffer_content_len, reserve_size;
	int ret;

	reserve_size = sizeof(kernel_notif);
	kernel_notif.token = event_notifier->priv->parent.user_token;

	if (notif->has_captures) {
		capture_buffer_content_len = notif->writer.write_pos - notif->writer.buffer;
	} else {
		capture_buffer_content_len = 0;
	}

	WARN_ON_ONCE(capture_buffer_content_len > CAPTURE_BUFFER_SIZE);

	reserve_size += capture_buffer_content_len;
	kernel_notif.capture_buf_size = capture_buffer_content_len;

	lib_ring_buffer_ctx_init(&ctx, event_notifier_group->chan, reserve_size,
			lttng_alignof(kernel_notif), NULL);
	ret = event_notifier_group->ops->event_reserve(&ctx);
	if (ret < 0) {
		record_error(event_notifier);
		return;
	}

	lib_ring_buffer_align_ctx(&ctx, lttng_alignof(kernel_notif));

	/* Write the notif structure. */
	event_notifier_group->ops->event_write(&ctx, &kernel_notif,
			sizeof(kernel_notif));

	/*
	 * Write the capture buffer. No need to realign, as it is a raw
	 * char * buffer.
	 */
	event_notifier_group->ops->event_write(&ctx, &notif->capture_buf,
			capture_buffer_content_len);

	event_notifier_group->ops->event_commit(&ctx);
	irq_work_queue(&event_notifier_group->wakeup_pending);
}

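/*
 * Build and send the notification for a firing event notifier: run
 * each attached capture bytecode, appending its result (or nil on
 * interpreter failure) to the capture buffer, then send the resulting
 * notification to the sessiond.
 */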
void lttng_event_notifier_notification_send(struct lttng_kernel_event_notifier *event_notifier,
		const char *stack_data,
		struct lttng_kernel_probe_ctx *probe_ctx,
		struct lttng_kernel_notification_ctx *notif_ctx)
{
	struct lttng_event_notifier_notification notif = { 0 };
	int ret;

	if (unlikely(!READ_ONCE(event_notifier->parent.enabled)))
		return;

	ret = notification_init(&notif, event_notifier);
	if (ret) {
		WARN_ON_ONCE(1);
		goto end;
	}

	if (unlikely(notif_ctx->eval_capture)) {
		struct lttng_kernel_bytecode_runtime *capture_bc_runtime;

		/*
		 * Iterate over all the capture bytecodes. If the interpreter
		 * function returns successfully, append the value of the
		 * `output` parameter to the capture buffer. If the
		 * interpreter fails, append an empty capture to the buffer.
		 */
		list_for_each_entry_rcu(capture_bc_runtime,
				&event_notifier->priv->capture_bytecode_runtime_head, node) {
			struct lttng_interpreter_output output;

			if (capture_bc_runtime->interpreter_func(capture_bc_runtime,
					stack_data, probe_ctx, &output) == LTTNG_KERNEL_BYTECODE_INTERPRETER_OK)
				ret = notification_append_capture(&notif, &output);
			else
				ret = notification_append_empty_capture(&notif);

			if (ret)
				printk(KERN_WARNING "Error appending capture to notification\n");
		}
	}

	/*
	 * Send the notification (including the capture buffer) to the
	 * sessiond.
	 */
	notification_send(&notif, event_notifier);
end:
	return;
}