Fix: event notifier: add missing parameters validation
[lttng-modules.git] / src / lttng-abi.c
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-abi.c
4 *
5 * LTTng ABI
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Mimic system calls for:
10 * - session creation, returns a file descriptor or failure.
11 * - channel creation, returns a file descriptor or failure.
12 * - Operates on a session file descriptor
13 * - Takes all channel options as parameters.
14 * - stream get, returns a file descriptor or failure.
15 * - Operates on a channel file descriptor.
16 * - stream notifier get, returns a file descriptor or failure.
17 * - Operates on a channel file descriptor.
18 * - event creation, returns a file descriptor or failure.
19 * - Operates on a channel file descriptor
20 * - Takes an event name as parameter
21 * - Takes an instrumentation source as parameter
22 * - e.g. tracepoints, dynamic_probes...
23 * - Takes instrumentation source specific arguments.
24 */
25
26 #include <linux/module.h>
27 #include <linux/proc_fs.h>
28 #include <linux/anon_inodes.h>
29 #include <linux/file.h>
30 #include <linux/uaccess.h>
31 #include <linux/slab.h>
32 #include <linux/err.h>
33 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
34 #include <ringbuffer/vfs.h>
35 #include <ringbuffer/backend.h>
36 #include <ringbuffer/frontend.h>
37 #include <wrapper/poll.h>
38 #include <wrapper/file.h>
39 #include <wrapper/kref.h>
40 #include <wrapper/barrier.h>
41 #include <lttng/string-utils.h>
42 #include <lttng/abi.h>
43 #include <lttng/abi-old.h>
44 #include <lttng/events.h>
45 #include <lttng/tracer.h>
46 #include <lttng/tp-mempool.h>
47 #include <ringbuffer/frontend_types.h>
48 #include <ringbuffer/iterator.h>
49
50 /*
51 * This is LTTng's own personal way to create a system call as an external
52 * module. We use ioctl() on /proc/lttng.
53 */
54
55 static struct proc_dir_entry *lttng_proc_dentry;
56
57 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0))
58 static const struct proc_ops lttng_proc_ops;
59 #else
60 static const struct file_operations lttng_proc_ops;
61 #endif
62
63 static const struct file_operations lttng_session_fops;
64 static const struct file_operations lttng_event_notifier_group_fops;
65 static const struct file_operations lttng_channel_fops;
66 static const struct file_operations lttng_metadata_fops;
67 static const struct file_operations lttng_event_fops;
68 static struct file_operations lttng_stream_ring_buffer_file_operations;
69
70 static int put_u64(uint64_t val, unsigned long arg);
71 static int put_u32(uint32_t val, unsigned long arg);
72
/*
 * Return 0 when all @len bytes at @p are zero, -1 as soon as a non-zero
 * byte is found. Used to reject ABI structures whose reserved padding
 * was not cleared by user-space.
 */
static int validate_zeroed_padding(char *p, size_t len)
{
	const char *end = p + len;

	while (p < end) {
		if (*p++)
			return -1;
	}
	return 0;
}
83
84 /*
85 * Teardown management: opened file descriptors keep a refcount on the module,
86 * so it can only exit when all file descriptors are closed.
87 */
88
/*
 * Create a tracing session and return a new file descriptor wrapping it.
 *
 * Returns the installed fd on success, a negative errno on failure.
 * Teardown on error mirrors construction order: the unused fd is
 * released before the session object is destroyed.
 */
static
int lttng_abi_create_session(void)
{
	struct lttng_session *session;
	struct file *session_file;
	int session_fd, ret;

	session = lttng_session_create();
	if (!session)
		return -ENOMEM;
	session_fd = lttng_get_unused_fd();
	if (session_fd < 0) {
		ret = session_fd;
		goto fd_error;
	}
	session_file = anon_inode_getfile("[lttng_session]",
					&lttng_session_fops,
					session, O_RDWR);
	if (IS_ERR(session_file)) {
		ret = PTR_ERR(session_file);
		goto file_error;
	}
	session->file = session_file;
	/* Publish the fd only once the session/file linkage is complete. */
	fd_install(session_fd, session_file);
	return session_fd;

file_error:
	put_unused_fd(session_fd);
fd_error:
	lttng_session_destroy(session);
	return ret;
}
121
122 void event_notifier_send_notification_work_wakeup(struct irq_work *entry)
123 {
124 struct lttng_event_notifier_group *event_notifier_group =
125 container_of(entry, struct lttng_event_notifier_group,
126 wakeup_pending);
127 wake_up_interruptible(&event_notifier_group->read_wait);
128 }
129
/*
 * Create an event notifier group and return a new file descriptor
 * wrapping it.
 *
 * Returns the installed fd on success, a negative errno on failure.
 * The read waitqueue and the wakeup irq_work are initialized before
 * the fd is published to user-space.
 */
static
int lttng_abi_create_event_notifier_group(void)
{
	struct lttng_event_notifier_group *event_notifier_group;
	struct file *event_notifier_group_file;
	int event_notifier_group_fd, ret;

	event_notifier_group = lttng_event_notifier_group_create();
	if (!event_notifier_group)
		return -ENOMEM;

	event_notifier_group_fd = lttng_get_unused_fd();
	if (event_notifier_group_fd < 0) {
		ret = event_notifier_group_fd;
		goto fd_error;
	}
	event_notifier_group_file = anon_inode_getfile("[lttng_event_notifier_group]",
					&lttng_event_notifier_group_fops,
					event_notifier_group, O_RDWR);
	if (IS_ERR(event_notifier_group_file)) {
		ret = PTR_ERR(event_notifier_group_file);
		goto file_error;
	}

	event_notifier_group->file = event_notifier_group_file;
	init_waitqueue_head(&event_notifier_group->read_wait);
	init_irq_work(&event_notifier_group->wakeup_pending,
		      event_notifier_send_notification_work_wakeup);
	/* Publish the fd only once the group is fully initialized. */
	fd_install(event_notifier_group_fd, event_notifier_group_file);
	return event_notifier_group_fd;

file_error:
	put_unused_fd(event_notifier_group_fd);
fd_error:
	lttng_event_notifier_group_destroy(event_notifier_group);
	return ret;
}
167
/*
 * Create an anonymous file listing the tracepoints known to the tracer
 * and return a file descriptor for it.
 *
 * The fops ->open() callback is invoked by hand because
 * anon_inode_getfile() does not call it; a NULL inode is passed since
 * the open implementation does not use it.
 */
static
int lttng_abi_tracepoint_list(void)
{
	struct file *tracepoint_list_file;
	int file_fd, ret;

	file_fd = lttng_get_unused_fd();
	if (file_fd < 0) {
		ret = file_fd;
		goto fd_error;
	}

	tracepoint_list_file = anon_inode_getfile("[lttng_tracepoint_list]",
					&lttng_tracepoint_list_fops,
					NULL, O_RDWR);
	if (IS_ERR(tracepoint_list_file)) {
		ret = PTR_ERR(tracepoint_list_file);
		goto file_error;
	}
	ret = lttng_tracepoint_list_fops.open(NULL, tracepoint_list_file);
	if (ret < 0)
		goto open_error;
	fd_install(file_fd, tracepoint_list_file);
	return file_fd;

open_error:
	fput(tracepoint_list_file);
file_error:
	put_unused_fd(file_fd);
fd_error:
	return ret;
}
200
#ifndef CONFIG_HAVE_SYSCALL_TRACEPOINTS
/* Syscall tracing is not available on this kernel configuration. */
static inline
int lttng_abi_syscall_list(void)
{
	return -ENOSYS;
}
#else
/*
 * Create an anonymous file listing the system calls known to the
 * tracer and return a file descriptor for it. The fops ->open() is
 * called by hand for the same reason as in lttng_abi_tracepoint_list().
 */
static
int lttng_abi_syscall_list(void)
{
	struct file *syscall_list_file;
	int file_fd, ret;

	file_fd = lttng_get_unused_fd();
	if (file_fd < 0) {
		ret = file_fd;
		goto fd_error;
	}

	syscall_list_file = anon_inode_getfile("[lttng_syscall_list]",
					&lttng_syscall_list_fops,
					NULL, O_RDWR);
	if (IS_ERR(syscall_list_file)) {
		ret = PTR_ERR(syscall_list_file);
		goto file_error;
	}
	ret = lttng_syscall_list_fops.open(NULL, syscall_list_file);
	if (ret < 0)
		goto open_error;
	fd_install(file_fd, syscall_list_file);
	return file_fd;

open_error:
	fput(syscall_list_file);
file_error:
	put_unused_fd(file_fd);
fd_error:
	return ret;
}
#endif
241
242 static
243 void lttng_abi_tracer_version(struct lttng_kernel_tracer_version *v)
244 {
245 v->major = LTTNG_MODULES_MAJOR_VERSION;
246 v->minor = LTTNG_MODULES_MINOR_VERSION;
247 v->patchlevel = LTTNG_MODULES_PATCHLEVEL_VERSION;
248 }
249
250 static
251 void lttng_abi_tracer_abi_version(struct lttng_kernel_tracer_abi_version *v)
252 {
253 v->major = LTTNG_MODULES_ABI_MAJOR_VERSION;
254 v->minor = LTTNG_MODULES_ABI_MINOR_VERSION;
255 }
256
/*
 * Add a context field to @ctx based on the ABI context type requested
 * in @context_param.
 *
 * Contexts may only be added before the session has ever been started
 * (been_active): -EPERM otherwise. Returns 0 on success, a negative
 * errno from the specific context constructor, or -EINVAL for unknown
 * context types.
 */
static
long lttng_abi_add_context(struct file *file,
	struct lttng_kernel_context *context_param,
	struct lttng_ctx **ctx, struct lttng_session *session)
{

	if (session->been_active)
		return -EPERM;

	switch (context_param->ctx) {
	case LTTNG_KERNEL_CONTEXT_PID:
		return lttng_add_pid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_PRIO:
		return lttng_add_prio_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_NICE:
		return lttng_add_nice_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_VPID:
		return lttng_add_vpid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_TID:
		return lttng_add_tid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_VTID:
		return lttng_add_vtid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_PPID:
		return lttng_add_ppid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_VPPID:
		return lttng_add_vppid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_PERF_COUNTER:
		/* Force NUL-termination of the user-supplied counter name. */
		context_param->u.perf_counter.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
		return lttng_add_perf_counter_to_ctx(context_param->u.perf_counter.type,
				context_param->u.perf_counter.config,
				context_param->u.perf_counter.name,
				ctx);
	case LTTNG_KERNEL_CONTEXT_PROCNAME:
		return lttng_add_procname_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_HOSTNAME:
		return lttng_add_hostname_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_CPU_ID:
		return lttng_add_cpu_id_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_INTERRUPTIBLE:
		return lttng_add_interruptible_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_NEED_RESCHEDULE:
		return lttng_add_need_reschedule_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_PREEMPTIBLE:
		return lttng_add_preemptible_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_MIGRATABLE:
		return lttng_add_migratable_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL:
	case LTTNG_KERNEL_CONTEXT_CALLSTACK_USER:
		/* Both callstack modes share one constructor; pass the mode. */
		return lttng_add_callstack_to_ctx(ctx, context_param->ctx);
	case LTTNG_KERNEL_CONTEXT_CGROUP_NS:
		return lttng_add_cgroup_ns_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_IPC_NS:
		return lttng_add_ipc_ns_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_MNT_NS:
		return lttng_add_mnt_ns_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_NET_NS:
		return lttng_add_net_ns_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_PID_NS:
		return lttng_add_pid_ns_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_USER_NS:
		return lttng_add_user_ns_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_UTS_NS:
		return lttng_add_uts_ns_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_UID:
		return lttng_add_uid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_EUID:
		return lttng_add_euid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_SUID:
		return lttng_add_suid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_GID:
		return lttng_add_gid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_EGID:
		return lttng_add_egid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_SGID:
		return lttng_add_sgid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_VUID:
		return lttng_add_vuid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_VEUID:
		return lttng_add_veuid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_VSUID:
		return lttng_add_vsuid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_VGID:
		return lttng_add_vgid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_VEGID:
		return lttng_add_vegid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_VSGID:
		return lttng_add_vsgid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_TIME_NS:
		return lttng_add_time_ns_to_ctx(ctx);
	default:
		return -EINVAL;
	}
}
350
351 /**
352 * lttng_ioctl - lttng syscall through ioctl
353 *
354 * @file: the file
355 * @cmd: the command
356 * @arg: command arg
357 *
358 * This ioctl implements lttng commands:
359 * LTTNG_KERNEL_SESSION
360 * Returns a LTTng trace session file descriptor
361 * LTTNG_KERNEL_TRACER_VERSION
362 * Returns the LTTng kernel tracer version
363 * LTTNG_KERNEL_TRACEPOINT_LIST
364 * Returns a file descriptor listing available tracepoints
365 * LTTNG_KERNEL_WAIT_QUIESCENT
366 * Returns after all previously running probes have completed
367 * LTTNG_KERNEL_TRACER_ABI_VERSION
368 * Returns the LTTng kernel tracer ABI version
369 * LTTNG_KERNEL_EVENT_NOTIFIER_GROUP_CREATE
370 * Returns a LTTng event notifier group file descriptor
371 *
372 * The returned session will be deleted when its file descriptor is closed.
373 */
374 static
375 long lttng_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
376 {
377 switch (cmd) {
378 case LTTNG_KERNEL_OLD_SESSION:
379 case LTTNG_KERNEL_SESSION:
380 return lttng_abi_create_session();
381 case LTTNG_KERNEL_EVENT_NOTIFIER_GROUP_CREATE:
382 return lttng_abi_create_event_notifier_group();
383 case LTTNG_KERNEL_OLD_TRACER_VERSION:
384 {
385 struct lttng_kernel_tracer_version v;
386 struct lttng_kernel_old_tracer_version oldv;
387 struct lttng_kernel_old_tracer_version *uversion =
388 (struct lttng_kernel_old_tracer_version __user *) arg;
389
390 lttng_abi_tracer_version(&v);
391 oldv.major = v.major;
392 oldv.minor = v.minor;
393 oldv.patchlevel = v.patchlevel;
394
395 if (copy_to_user(uversion, &oldv, sizeof(oldv)))
396 return -EFAULT;
397 return 0;
398 }
399 case LTTNG_KERNEL_TRACER_VERSION:
400 {
401 struct lttng_kernel_tracer_version version;
402 struct lttng_kernel_tracer_version *uversion =
403 (struct lttng_kernel_tracer_version __user *) arg;
404
405 lttng_abi_tracer_version(&version);
406
407 if (copy_to_user(uversion, &version, sizeof(version)))
408 return -EFAULT;
409 return 0;
410 }
411 case LTTNG_KERNEL_TRACER_ABI_VERSION:
412 {
413 struct lttng_kernel_tracer_abi_version version;
414 struct lttng_kernel_tracer_abi_version *uversion =
415 (struct lttng_kernel_tracer_abi_version __user *) arg;
416
417 lttng_abi_tracer_abi_version(&version);
418
419 if (copy_to_user(uversion, &version, sizeof(version)))
420 return -EFAULT;
421 return 0;
422 }
423 case LTTNG_KERNEL_OLD_TRACEPOINT_LIST:
424 case LTTNG_KERNEL_TRACEPOINT_LIST:
425 return lttng_abi_tracepoint_list();
426 case LTTNG_KERNEL_SYSCALL_LIST:
427 return lttng_abi_syscall_list();
428 case LTTNG_KERNEL_OLD_WAIT_QUIESCENT:
429 case LTTNG_KERNEL_WAIT_QUIESCENT:
430 synchronize_trace();
431 return 0;
432 case LTTNG_KERNEL_OLD_CALIBRATE:
433 {
434 struct lttng_kernel_old_calibrate __user *ucalibrate =
435 (struct lttng_kernel_old_calibrate __user *) arg;
436 struct lttng_kernel_old_calibrate old_calibrate;
437 struct lttng_kernel_calibrate calibrate;
438 int ret;
439
440 if (copy_from_user(&old_calibrate, ucalibrate, sizeof(old_calibrate)))
441 return -EFAULT;
442 calibrate.type = old_calibrate.type;
443 ret = lttng_calibrate(&calibrate);
444 if (copy_to_user(ucalibrate, &old_calibrate, sizeof(old_calibrate)))
445 return -EFAULT;
446 return ret;
447 }
448 case LTTNG_KERNEL_CALIBRATE:
449 {
450 struct lttng_kernel_calibrate __user *ucalibrate =
451 (struct lttng_kernel_calibrate __user *) arg;
452 struct lttng_kernel_calibrate calibrate;
453 int ret;
454
455 if (copy_from_user(&calibrate, ucalibrate, sizeof(calibrate)))
456 return -EFAULT;
457 ret = lttng_calibrate(&calibrate);
458 if (copy_to_user(ucalibrate, &calibrate, sizeof(calibrate)))
459 return -EFAULT;
460 return ret;
461 }
462 default:
463 return -ENOIOCTLCMD;
464 }
465 }
466
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0))
/* /proc/lttng entry points (proc_ops API, kernels >= 5.6). */
static const struct proc_ops lttng_proc_ops = {
	.proc_ioctl = lttng_ioctl,
#ifdef CONFIG_COMPAT
	.proc_compat_ioctl = lttng_ioctl,
#endif /* CONFIG_COMPAT */
};
#else
/* /proc/lttng entry points (file_operations API, kernels < 5.6). */
static const struct file_operations lttng_proc_ops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = lttng_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = lttng_ioctl,
#endif /* CONFIG_COMPAT */
};
#endif
483
484 static
485 int lttng_abi_create_channel(struct file *session_file,
486 struct lttng_kernel_channel *chan_param,
487 enum channel_type channel_type)
488 {
489 struct lttng_session *session = session_file->private_data;
490 const struct file_operations *fops = NULL;
491 const char *transport_name;
492 struct lttng_channel *chan;
493 struct file *chan_file;
494 int chan_fd;
495 int ret = 0;
496
497 chan_fd = lttng_get_unused_fd();
498 if (chan_fd < 0) {
499 ret = chan_fd;
500 goto fd_error;
501 }
502 switch (channel_type) {
503 case PER_CPU_CHANNEL:
504 fops = &lttng_channel_fops;
505 break;
506 case METADATA_CHANNEL:
507 fops = &lttng_metadata_fops;
508 break;
509 }
510
511 chan_file = anon_inode_getfile("[lttng_channel]",
512 fops,
513 NULL, O_RDWR);
514 if (IS_ERR(chan_file)) {
515 ret = PTR_ERR(chan_file);
516 goto file_error;
517 }
518 switch (channel_type) {
519 case PER_CPU_CHANNEL:
520 if (chan_param->output == LTTNG_KERNEL_SPLICE) {
521 transport_name = chan_param->overwrite ?
522 "relay-overwrite" : "relay-discard";
523 } else if (chan_param->output == LTTNG_KERNEL_MMAP) {
524 transport_name = chan_param->overwrite ?
525 "relay-overwrite-mmap" : "relay-discard-mmap";
526 } else {
527 return -EINVAL;
528 }
529 break;
530 case METADATA_CHANNEL:
531 if (chan_param->output == LTTNG_KERNEL_SPLICE)
532 transport_name = "relay-metadata";
533 else if (chan_param->output == LTTNG_KERNEL_MMAP)
534 transport_name = "relay-metadata-mmap";
535 else
536 return -EINVAL;
537 break;
538 default:
539 transport_name = "<unknown>";
540 break;
541 }
542 if (!atomic_long_add_unless(&session_file->f_count, 1, LONG_MAX)) {
543 ret = -EOVERFLOW;
544 goto refcount_error;
545 }
546 /*
547 * We tolerate no failure path after channel creation. It will stay
548 * invariant for the rest of the session.
549 */
550 chan = lttng_channel_create(session, transport_name, NULL,
551 chan_param->subbuf_size,
552 chan_param->num_subbuf,
553 chan_param->switch_timer_interval,
554 chan_param->read_timer_interval,
555 channel_type);
556 if (!chan) {
557 ret = -EINVAL;
558 goto chan_error;
559 }
560 chan->file = chan_file;
561 chan_file->private_data = chan;
562 fd_install(chan_fd, chan_file);
563
564 return chan_fd;
565
566 chan_error:
567 atomic_long_dec(&session_file->f_count);
568 refcount_error:
569 fput(chan_file);
570 file_error:
571 put_unused_fd(chan_fd);
572 fd_error:
573 return ret;
574 }
575
576 static
577 int lttng_abi_session_set_name(struct lttng_session *session,
578 struct lttng_kernel_session_name *name)
579 {
580 size_t len;
581
582 len = strnlen(name->name, LTTNG_KERNEL_SESSION_NAME_LEN);
583
584 if (len == LTTNG_KERNEL_SESSION_NAME_LEN) {
585 /* Name is too long/malformed */
586 return -EINVAL;
587 }
588
589 strcpy(session->name, name->name);
590 return 0;
591 }
592
593 static
594 int lttng_abi_session_set_creation_time(struct lttng_session *session,
595 struct lttng_kernel_session_creation_time *time)
596 {
597 size_t len;
598
599 len = strnlen(time->iso8601, LTTNG_KERNEL_SESSION_CREATION_TIME_ISO8601_LEN);
600
601 if (len == LTTNG_KERNEL_SESSION_CREATION_TIME_ISO8601_LEN) {
602 /* Time is too long/malformed */
603 return -EINVAL;
604 }
605
606 strcpy(session->creation_time, time->iso8601);
607 return 0;
608 }
609
610 static
611 int lttng_counter_release(struct inode *inode, struct file *file)
612 {
613 struct lttng_counter *counter = file->private_data;
614
615 if (counter) {
616 /*
617 * Do not destroy the counter itself. Wait of the owner
618 * (event_notifier group) to be destroyed.
619 */
620 fput(counter->owner);
621 }
622
623 return 0;
624 }
625
626 static
627 long lttng_counter_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
628 {
629 struct lttng_counter *counter = file->private_data;
630 size_t indexes[LTTNG_KERNEL_COUNTER_DIMENSION_MAX] = { 0 };
631 int i;
632
633 switch (cmd) {
634 case LTTNG_KERNEL_COUNTER_READ:
635 {
636 struct lttng_kernel_counter_read local_counter_read;
637 struct lttng_kernel_counter_read __user *ucounter_read =
638 (struct lttng_kernel_counter_read __user *) arg;
639 bool overflow, underflow;
640 int64_t value;
641 int32_t cpu;
642 int ret;
643
644 if (copy_from_user(&local_counter_read, ucounter_read,
645 sizeof(local_counter_read)))
646 return -EFAULT;
647 if (validate_zeroed_padding(local_counter_read.padding,
648 sizeof(local_counter_read.padding)))
649 return -EINVAL;
650
651 /* Cast all indexes into size_t. */
652 for (i = 0; i < local_counter_read.index.number_dimensions; i++)
653 indexes[i] = (size_t) local_counter_read.index.dimension_indexes[i];
654 cpu = local_counter_read.cpu;
655
656 ret = lttng_kernel_counter_read(counter, indexes, cpu, &value,
657 &overflow, &underflow);
658 if (ret)
659 return ret;
660 local_counter_read.value.value = value;
661 local_counter_read.value.overflow = overflow;
662 local_counter_read.value.underflow = underflow;
663
664 if (copy_to_user(&ucounter_read->value, &local_counter_read.value,
665 sizeof(local_counter_read.value)))
666 return -EFAULT;
667
668 return 0;
669 }
670 case LTTNG_KERNEL_COUNTER_AGGREGATE:
671 {
672 struct lttng_kernel_counter_aggregate local_counter_aggregate;
673 struct lttng_kernel_counter_aggregate __user *ucounter_aggregate =
674 (struct lttng_kernel_counter_aggregate __user *) arg;
675 bool overflow, underflow;
676 int64_t value;
677 int ret;
678
679 if (copy_from_user(&local_counter_aggregate, ucounter_aggregate,
680 sizeof(local_counter_aggregate)))
681 return -EFAULT;
682 if (validate_zeroed_padding(local_counter_aggregate.padding,
683 sizeof(local_counter_aggregate.padding)))
684 return -EINVAL;
685
686 /* Cast all indexes into size_t. */
687 for (i = 0; i < local_counter_aggregate.index.number_dimensions; i++)
688 indexes[i] = (size_t) local_counter_aggregate.index.dimension_indexes[i];
689
690 ret = lttng_kernel_counter_aggregate(counter, indexes, &value,
691 &overflow, &underflow);
692 if (ret)
693 return ret;
694 local_counter_aggregate.value.value = value;
695 local_counter_aggregate.value.overflow = overflow;
696 local_counter_aggregate.value.underflow = underflow;
697
698 if (copy_to_user(&ucounter_aggregate->value, &local_counter_aggregate.value,
699 sizeof(local_counter_aggregate.value)))
700 return -EFAULT;
701
702 return 0;
703 }
704 case LTTNG_KERNEL_COUNTER_CLEAR:
705 {
706 struct lttng_kernel_counter_clear local_counter_clear;
707 struct lttng_kernel_counter_clear __user *ucounter_clear =
708 (struct lttng_kernel_counter_clear __user *) arg;
709
710 if (copy_from_user(&local_counter_clear, ucounter_clear,
711 sizeof(local_counter_clear)))
712 return -EFAULT;
713 if (validate_zeroed_padding(local_counter_clear.padding,
714 sizeof(local_counter_clear.padding)))
715 return -EINVAL;
716
717 /* Cast all indexes into size_t. */
718 for (i = 0; i < local_counter_clear.index.number_dimensions; i++)
719 indexes[i] = (size_t) local_counter_clear.index.dimension_indexes[i];
720
721 return lttng_kernel_counter_clear(counter, indexes);
722 }
723 default:
724 WARN_ON_ONCE(1);
725 return -ENOSYS;
726 }
727 }
728
/* File operations for counter file descriptors (read/aggregate/clear). */
static const struct file_operations lttng_counter_fops = {
	.owner = THIS_MODULE,
	.release = lttng_counter_release,
	.unlocked_ioctl = lttng_counter_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = lttng_counter_ioctl,
#endif
};
737
738
739 static
740 enum tracker_type get_tracker_type(struct lttng_kernel_tracker_args *tracker)
741 {
742 switch (tracker->type) {
743 case LTTNG_KERNEL_TRACKER_PID:
744 return TRACKER_PID;
745 case LTTNG_KERNEL_TRACKER_VPID:
746 return TRACKER_VPID;
747 case LTTNG_KERNEL_TRACKER_UID:
748 return TRACKER_UID;
749 case LTTNG_KERNEL_TRACKER_VUID:
750 return TRACKER_VUID;
751 case LTTNG_KERNEL_TRACKER_GID:
752 return TRACKER_GID;
753 case LTTNG_KERNEL_TRACKER_VGID:
754 return TRACKER_VGID;
755 default:
756 return TRACKER_UNKNOWN;
757 }
758 }
759
760 /**
761 * lttng_session_ioctl - lttng session fd ioctl
762 *
763 * @file: the file
764 * @cmd: the command
765 * @arg: command arg
766 *
767 * This ioctl implements lttng commands:
768 * LTTNG_KERNEL_CHANNEL
769 * Returns a LTTng channel file descriptor
770 * LTTNG_KERNEL_ENABLE
771 * Enables tracing for a session (weak enable)
772 * LTTNG_KERNEL_DISABLE
773 * Disables tracing for a session (strong disable)
774 * LTTNG_KERNEL_METADATA
775 * Returns a LTTng metadata file descriptor
776 * LTTNG_KERNEL_SESSION_TRACK_PID
777 * Add PID to session PID tracker
778 * LTTNG_KERNEL_SESSION_UNTRACK_PID
779 * Remove PID from session PID tracker
780 * LTTNG_KERNEL_SESSION_TRACK_ID
781 * Add ID to tracker
782 * LTTNG_KERNEL_SESSION_UNTRACK_ID
783 * Remove ID from tracker
784 *
785 * The returned channel will be deleted when its file descriptor is closed.
786 */
static
long lttng_session_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct lttng_session *session = file->private_data;
	struct lttng_kernel_channel chan_param;
	struct lttng_kernel_old_channel old_chan_param;

	switch (cmd) {
	case LTTNG_KERNEL_OLD_CHANNEL:
	{
		/* Legacy ABI: translate the old channel layout field by field. */
		if (copy_from_user(&old_chan_param,
				(struct lttng_kernel_old_channel __user *) arg,
				sizeof(struct lttng_kernel_old_channel)))
			return -EFAULT;
		chan_param.overwrite = old_chan_param.overwrite;
		chan_param.subbuf_size = old_chan_param.subbuf_size;
		chan_param.num_subbuf = old_chan_param.num_subbuf;
		chan_param.switch_timer_interval = old_chan_param.switch_timer_interval;
		chan_param.read_timer_interval = old_chan_param.read_timer_interval;
		chan_param.output = old_chan_param.output;

		return lttng_abi_create_channel(file, &chan_param,
				PER_CPU_CHANNEL);
	}
	case LTTNG_KERNEL_CHANNEL:
	{
		if (copy_from_user(&chan_param,
				(struct lttng_kernel_channel __user *) arg,
				sizeof(struct lttng_kernel_channel)))
			return -EFAULT;
		return lttng_abi_create_channel(file, &chan_param,
				PER_CPU_CHANNEL);
	}
	case LTTNG_KERNEL_OLD_SESSION_START:
	case LTTNG_KERNEL_OLD_ENABLE:
	case LTTNG_KERNEL_SESSION_START:
	case LTTNG_KERNEL_ENABLE:
		return lttng_session_enable(session);
	case LTTNG_KERNEL_OLD_SESSION_STOP:
	case LTTNG_KERNEL_OLD_DISABLE:
	case LTTNG_KERNEL_SESSION_STOP:
	case LTTNG_KERNEL_DISABLE:
		return lttng_session_disable(session);
	case LTTNG_KERNEL_OLD_METADATA:
	{
		/* Legacy ABI: same translation as LTTNG_KERNEL_OLD_CHANNEL. */
		if (copy_from_user(&old_chan_param,
				(struct lttng_kernel_old_channel __user *) arg,
				sizeof(struct lttng_kernel_old_channel)))
			return -EFAULT;
		chan_param.overwrite = old_chan_param.overwrite;
		chan_param.subbuf_size = old_chan_param.subbuf_size;
		chan_param.num_subbuf = old_chan_param.num_subbuf;
		chan_param.switch_timer_interval = old_chan_param.switch_timer_interval;
		chan_param.read_timer_interval = old_chan_param.read_timer_interval;
		chan_param.output = old_chan_param.output;

		return lttng_abi_create_channel(file, &chan_param,
				METADATA_CHANNEL);
	}
	case LTTNG_KERNEL_METADATA:
	{
		if (copy_from_user(&chan_param,
					(struct lttng_kernel_channel __user *) arg,
					sizeof(struct lttng_kernel_channel)))
			return -EFAULT;
		return lttng_abi_create_channel(file, &chan_param,
				METADATA_CHANNEL);
	}
	/* PID tracker shortcuts pass the id directly in @arg. */
	case LTTNG_KERNEL_SESSION_TRACK_PID:
		return lttng_session_track_id(session, TRACKER_PID, (int) arg);
	case LTTNG_KERNEL_SESSION_UNTRACK_PID:
		return lttng_session_untrack_id(session, TRACKER_PID, (int) arg);
	case LTTNG_KERNEL_SESSION_TRACK_ID:
	{
		struct lttng_kernel_tracker_args tracker;
		enum tracker_type tracker_type;

		if (copy_from_user(&tracker,
				(struct lttng_kernel_tracker_args __user *) arg,
				sizeof(struct lttng_kernel_tracker_args)))
			return -EFAULT;
		tracker_type = get_tracker_type(&tracker);
		if (tracker_type == TRACKER_UNKNOWN)
			return -EINVAL;
		return lttng_session_track_id(session, tracker_type, tracker.id);
	}
	case LTTNG_KERNEL_SESSION_UNTRACK_ID:
	{
		struct lttng_kernel_tracker_args tracker;
		enum tracker_type tracker_type;

		if (copy_from_user(&tracker,
				(struct lttng_kernel_tracker_args __user *) arg,
				sizeof(struct lttng_kernel_tracker_args)))
			return -EFAULT;
		tracker_type = get_tracker_type(&tracker);
		if (tracker_type == TRACKER_UNKNOWN)
			return -EINVAL;
		return lttng_session_untrack_id(session, tracker_type,
				tracker.id);
	}
	case LTTNG_KERNEL_SESSION_LIST_TRACKER_PIDS:
		return lttng_session_list_tracker_ids(session, TRACKER_PID);
	case LTTNG_KERNEL_SESSION_LIST_TRACKER_IDS:
	{
		struct lttng_kernel_tracker_args tracker;
		enum tracker_type tracker_type;

		if (copy_from_user(&tracker,
				(struct lttng_kernel_tracker_args __user *) arg,
				sizeof(struct lttng_kernel_tracker_args)))
			return -EFAULT;
		tracker_type = get_tracker_type(&tracker);
		if (tracker_type == TRACKER_UNKNOWN)
			return -EINVAL;
		return lttng_session_list_tracker_ids(session, tracker_type);
	}
	case LTTNG_KERNEL_SESSION_METADATA_REGEN:
		return lttng_session_metadata_regenerate(session);
	case LTTNG_KERNEL_SESSION_STATEDUMP:
		return lttng_session_statedump(session);
	case LTTNG_KERNEL_SESSION_SET_NAME:
	{
		struct lttng_kernel_session_name name;

		if (copy_from_user(&name,
				(struct lttng_kernel_session_name __user *) arg,
				sizeof(struct lttng_kernel_session_name)))
			return -EFAULT;
		return lttng_abi_session_set_name(session, &name);
	}
	case LTTNG_KERNEL_SESSION_SET_CREATION_TIME:
	{
		struct lttng_kernel_session_creation_time time;

		if (copy_from_user(&time,
				(struct lttng_kernel_session_creation_time __user *) arg,
				sizeof(struct lttng_kernel_session_creation_time)))
			return -EFAULT;
		return lttng_abi_session_set_creation_time(session, &time);
	}
	default:
		return -ENOIOCTLCMD;
	}
}
932
933 /*
934 * Called when the last file reference is dropped.
935 *
936 * Big fat note: channels and events are invariant for the whole session after
937 * their creation. So this session destruction also destroys all channel and
938 * event structures specific to this session (they are not destroyed when their
939 * individual file is released).
940 */
941 static
942 int lttng_session_release(struct inode *inode, struct file *file)
943 {
944 struct lttng_session *session = file->private_data;
945
946 if (session)
947 lttng_session_destroy(session);
948 return 0;
949 }
950
/* File operations for session file descriptors. */
static const struct file_operations lttng_session_fops = {
	.owner = THIS_MODULE,
	.release = lttng_session_release,
	.unlocked_ioctl = lttng_session_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = lttng_session_ioctl,
#endif
};
959
/*
 * Read event notifier notifications from the group's ring buffer
 * channel into @user_buf.
 *
 * A record partially delivered by a previous read is resumed first:
 * a non-zero *ppos carries the in-buffer offset of the remainder and
 * chan->iter.len_left its length. When encountering an empty buffer,
 * block (unless O_NONBLOCK) until new data is available or the buffer
 * is finalized (end of file, reported as a 0 read count).
 */
static
ssize_t lttng_event_notifier_group_notif_read(struct file *filp, char __user *user_buf,
		size_t count, loff_t *ppos)
{
	struct lttng_event_notifier_group *event_notifier_group = filp->private_data;
	struct channel *chan = event_notifier_group->chan;
	struct lib_ring_buffer *buf = event_notifier_group->buf;
	ssize_t read_count = 0, len;
	size_t read_offset;

	might_sleep();
	if (!lttng_access_ok(VERIFY_WRITE, user_buf, count))
		return -EFAULT;

	/* Finish copy of previous record */
	if (*ppos != 0) {
		if (read_count < count) {
			len = chan->iter.len_left;
			read_offset = *ppos;
			goto skip_get_next;
		}
	}

	while (read_count < count) {
		size_t copy_len, space_left;

		len = lib_ring_buffer_get_next_record(chan, buf);
len_test:
		if (len < 0) {
			/*
			 * Check if buffer is finalized (end of file).
			 */
			if (len == -ENODATA) {
				/* A 0 read_count will tell about end of file */
				goto nodata;
			}
			if (filp->f_flags & O_NONBLOCK) {
				if (!read_count)
					read_count = -EAGAIN;
				goto nodata;
			} else {
				int error;

				/*
				 * No data available at the moment, return what
				 * we got.
				 */
				if (read_count)
					goto nodata;

				/*
				 * Wait for returned len to be >= 0 or -ENODATA.
				 */
				error = wait_event_interruptible(
					event_notifier_group->read_wait,
					((len = lib_ring_buffer_get_next_record(
						chan, buf)), len != -EAGAIN));
				CHAN_WARN_ON(chan, len == -EBUSY);
				if (error) {
					/* Interrupted by a signal. */
					read_count = error;
					goto nodata;
				}
				CHAN_WARN_ON(chan, len < 0 && len != -ENODATA);
				goto len_test;
			}
		}
		read_offset = buf->iter.read_offset;
skip_get_next:
		space_left = count - read_count;
		if (len <= space_left) {
			/* Whole record fits: reset the resume state. */
			copy_len = len;
			chan->iter.len_left = 0;
			*ppos = 0;
		} else {
			/* Partial copy: remember where to resume next read. */
			copy_len = space_left;
			chan->iter.len_left = len - copy_len;
			*ppos = read_offset + copy_len;
		}
		if (__lib_ring_buffer_copy_to_user(&buf->backend, read_offset,
					&user_buf[read_count],
					copy_len)) {
			/*
			 * Leave the len_left and ppos values at their current
			 * state, as we currently have a valid event to read.
			 */
			return -EFAULT;
		}
		read_count += copy_len;
	}
	goto put_record;

nodata:
	*ppos = 0;
	chan->iter.len_left = 0;

put_record:
	lib_ring_buffer_put_current_record(buf);
	return read_count;
}
1063
1064 /*
1065 * If the ring buffer is non empty (even just a partial subbuffer), return that
1066 * there is data available. Perform a ring buffer flush if we encounter a
1067 * non-empty ring buffer which does not have any consumeable subbuffer available.
1068 */
1069 static
1070 unsigned int lttng_event_notifier_group_notif_poll(struct file *filp,
1071 poll_table *wait)
1072 {
1073 unsigned int mask = 0;
1074 struct lttng_event_notifier_group *event_notifier_group = filp->private_data;
1075 struct channel *chan = event_notifier_group->chan;
1076 struct lib_ring_buffer *buf = event_notifier_group->buf;
1077 const struct lib_ring_buffer_config *config = &chan->backend.config;
1078 int finalized, disabled;
1079 unsigned long consumed, offset;
1080 size_t subbuffer_header_size = config->cb.subbuffer_header_size();
1081
1082 if (filp->f_mode & FMODE_READ) {
1083 poll_wait_set_exclusive(wait);
1084 poll_wait(filp, &event_notifier_group->read_wait, wait);
1085
1086 finalized = lib_ring_buffer_is_finalized(config, buf);
1087 disabled = lib_ring_buffer_channel_is_disabled(chan);
1088
1089 /*
1090 * lib_ring_buffer_is_finalized() contains a smp_rmb() ordering
1091 * finalized load before offsets loads.
1092 */
1093 WARN_ON(atomic_long_read(&buf->active_readers) != 1);
1094 retry:
1095 if (disabled)
1096 return POLLERR;
1097
1098 offset = lib_ring_buffer_get_offset(config, buf);
1099 consumed = lib_ring_buffer_get_consumed(config, buf);
1100
1101 /*
1102 * If there is no buffer available to consume.
1103 */
1104 if (subbuf_trunc(offset, chan) - subbuf_trunc(consumed, chan) == 0) {
1105 /*
1106 * If there is a non-empty subbuffer, flush and try again.
1107 */
1108 if (subbuf_offset(offset, chan) > subbuffer_header_size) {
1109 lib_ring_buffer_switch_remote(buf);
1110 goto retry;
1111 }
1112
1113 if (finalized)
1114 return POLLHUP;
1115 else {
1116 /*
1117 * The memory barriers
1118 * __wait_event()/wake_up_interruptible() take
1119 * care of "raw_spin_is_locked" memory ordering.
1120 */
1121 if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
1122 goto retry;
1123 else
1124 return 0;
1125 }
1126 } else {
1127 if (subbuf_trunc(offset, chan) - subbuf_trunc(consumed, chan)
1128 >= chan->backend.buf_size)
1129 return POLLPRI | POLLRDBAND;
1130 else
1131 return POLLIN | POLLRDNORM;
1132 }
1133 }
1134
1135 return mask;
1136 }
1137
1138 /**
1139 * lttng_event_notifier_group_notif_open - event_notifier ring buffer open file operation
1140 * @inode: opened inode
1141 * @file: opened file
1142 *
1143 * Open implementation. Makes sure only one open instance of a buffer is
1144 * done at a given moment.
1145 */
1146 static int lttng_event_notifier_group_notif_open(struct inode *inode, struct file *file)
1147 {
1148 struct lttng_event_notifier_group *event_notifier_group = inode->i_private;
1149 struct lib_ring_buffer *buf = event_notifier_group->buf;
1150
1151 file->private_data = event_notifier_group;
1152 return lib_ring_buffer_open(inode, file, buf);
1153 }
1154
1155 /**
1156 * lttng_event_notifier_group_notif_release - event_notifier ring buffer release file operation
1157 * @inode: opened inode
1158 * @file: opened file
1159 *
1160 * Release implementation.
1161 */
1162 static int lttng_event_notifier_group_notif_release(struct inode *inode, struct file *file)
1163 {
1164 struct lttng_event_notifier_group *event_notifier_group = file->private_data;
1165 struct lib_ring_buffer *buf = event_notifier_group->buf;
1166 int ret;
1167
1168 ret = lib_ring_buffer_release(inode, file, buf);
1169 if (ret)
1170 return ret;
1171 fput(event_notifier_group->file);
1172 return 0;
1173 }
1174
/* File operations backing the event notifier group notification stream fd. */
static const struct file_operations lttng_event_notifier_group_notif_fops = {
	.owner = THIS_MODULE,
	.open = lttng_event_notifier_group_notif_open,
	.release = lttng_event_notifier_group_notif_release,
	.read = lttng_event_notifier_group_notif_read,
	.poll = lttng_event_notifier_group_notif_poll,
};
1182
1183 /**
1184 * lttng_metadata_ring_buffer_poll - LTTng ring buffer poll file operation
1185 * @filp: the file
1186 * @wait: poll table
1187 *
1188 * Handles the poll operations for the metadata channels.
1189 */
1190 static
1191 unsigned int lttng_metadata_ring_buffer_poll(struct file *filp,
1192 poll_table *wait)
1193 {
1194 struct lttng_metadata_stream *stream = filp->private_data;
1195 struct lib_ring_buffer *buf = stream->priv;
1196 int finalized;
1197 unsigned int mask = 0;
1198
1199 if (filp->f_mode & FMODE_READ) {
1200 poll_wait_set_exclusive(wait);
1201 poll_wait(filp, &stream->read_wait, wait);
1202
1203 finalized = stream->finalized;
1204
1205 /*
1206 * lib_ring_buffer_is_finalized() contains a smp_rmb()
1207 * ordering finalized load before offsets loads.
1208 */
1209 WARN_ON(atomic_long_read(&buf->active_readers) != 1);
1210
1211 if (finalized)
1212 mask |= POLLHUP;
1213
1214 mutex_lock(&stream->metadata_cache->lock);
1215 if (stream->metadata_cache->metadata_written >
1216 stream->metadata_out)
1217 mask |= POLLIN;
1218 mutex_unlock(&stream->metadata_cache->lock);
1219 }
1220
1221 return mask;
1222 }
1223
1224 static
1225 void lttng_metadata_ring_buffer_ioctl_put_next_subbuf(struct file *filp,
1226 unsigned int cmd, unsigned long arg)
1227 {
1228 struct lttng_metadata_stream *stream = filp->private_data;
1229
1230 stream->metadata_out = stream->metadata_in;
1231 }
1232
1233 /*
1234 * Reset the counter of how much metadata has been consumed to 0. That way,
1235 * the consumer receives the content of the metadata cache unchanged. This is
1236 * different from the metadata_regenerate where the offset from epoch is
1237 * resampled, here we want the exact same content as the last time the metadata
1238 * was generated. This command is only possible if all the metadata written
1239 * in the cache has been output to the metadata stream to avoid corrupting the
1240 * metadata file.
1241 *
1242 * Return 0 on success, a negative value on error.
1243 */
1244 static
1245 int lttng_metadata_cache_dump(struct lttng_metadata_stream *stream)
1246 {
1247 int ret;
1248 struct lttng_metadata_cache *cache = stream->metadata_cache;
1249
1250 mutex_lock(&cache->lock);
1251 if (stream->metadata_out != cache->metadata_written) {
1252 ret = -EBUSY;
1253 goto end;
1254 }
1255 stream->metadata_out = 0;
1256 stream->metadata_in = 0;
1257 wake_up_interruptible(&stream->read_wait);
1258 ret = 0;
1259
1260 end:
1261 mutex_unlock(&cache->lock);
1262 return ret;
1263 }
1264
/*
 * lttng_metadata_ring_buffer_ioctl - ioctl handler for metadata stream fds
 *
 * Intercepts a few commands to generate metadata into the ring buffer
 * from the metadata cache (or answer directly from stream state) before
 * delegating the command to the generic lib ring buffer ioctl handler.
 */
static
long lttng_metadata_ring_buffer_ioctl(struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	int ret;
	struct lttng_metadata_stream *stream = filp->private_data;
	struct lib_ring_buffer *buf = stream->priv;
	unsigned int rb_cmd;
	bool coherent;

	/*
	 * The METADATA_CHECK variant behaves like GET_NEXT_SUBBUF at the
	 * lib ring buffer level; only the coherency reporting differs.
	 */
	if (cmd == RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK)
		rb_cmd = RING_BUFFER_GET_NEXT_SUBBUF;
	else
		rb_cmd = cmd;

	switch (cmd) {
	case RING_BUFFER_GET_NEXT_SUBBUF:
	{
		struct lttng_metadata_stream *stream = filp->private_data;
		struct lib_ring_buffer *buf = stream->priv;
		struct channel *chan = buf->backend.chan;

		/*
		 * Positive return means new metadata was produced; flush
		 * it into a consumable subbuffer before the generic ioctl.
		 */
		ret = lttng_metadata_output_channel(stream, chan, NULL);
		if (ret > 0) {
			lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
			ret = 0;
		} else if (ret < 0)
			goto err;
		break;
	}
	case RING_BUFFER_GET_SUBBUF:
	{
		/*
		 * Random access is not allowed for metadata channel.
		 */
		return -ENOSYS;
	}
	case RING_BUFFER_FLUSH_EMPTY:	/* Fall-through. */
	case RING_BUFFER_FLUSH:
	{
		struct lttng_metadata_stream *stream = filp->private_data;
		struct lib_ring_buffer *buf = stream->priv;
		struct channel *chan = buf->backend.chan;

		/*
		 * Before doing the actual ring buffer flush, write up to one
		 * packet of metadata in the ring buffer.
		 */
		ret = lttng_metadata_output_channel(stream, chan, NULL);
		if (ret < 0)
			goto err;
		break;
	}
	case RING_BUFFER_GET_METADATA_VERSION:
	{
		struct lttng_metadata_stream *stream = filp->private_data;

		/* Answered from stream state; no generic ioctl needed. */
		return put_u64(stream->version, arg);
	}
	case RING_BUFFER_METADATA_CACHE_DUMP:
	{
		struct lttng_metadata_stream *stream = filp->private_data;

		return lttng_metadata_cache_dump(stream);
	}
	case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
	{
		struct lttng_metadata_stream *stream = filp->private_data;
		struct lib_ring_buffer *buf = stream->priv;
		struct channel *chan = buf->backend.chan;

		/* Same as GET_NEXT_SUBBUF, but also track cache coherency. */
		ret = lttng_metadata_output_channel(stream, chan, &coherent);
		if (ret > 0) {
			lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
			ret = 0;
		} else if (ret < 0) {
			goto err;
		}
		break;
	}
	default:
		break;
	}
	/* PUT_SUBBUF is the one from lib ring buffer, unmodified. */

	/* Performing lib ring buffer ioctl after our own. */
	ret = lib_ring_buffer_ioctl(filp, rb_cmd, arg, buf);
	if (ret < 0)
		goto err;

	switch (cmd) {
	case RING_BUFFER_PUT_NEXT_SUBBUF:
	{
		lttng_metadata_ring_buffer_ioctl_put_next_subbuf(filp,
				cmd, arg);
		break;
	}
	case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
	{
		/* Report the coherency state sampled above to userspace. */
		return put_u32(coherent, arg);
	}
	default:
		break;
	}
err:
	return ret;
}
1372
1373 #ifdef CONFIG_COMPAT
/*
 * lttng_metadata_ring_buffer_compat_ioctl - 32-bit compat ioctl handler
 * for metadata stream fds.
 *
 * Mirrors lttng_metadata_ring_buffer_ioctl(), delegating to the compat
 * variant of the generic lib ring buffer ioctl. Keep both in sync.
 */
static
long lttng_metadata_ring_buffer_compat_ioctl(struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	int ret;
	struct lttng_metadata_stream *stream = filp->private_data;
	struct lib_ring_buffer *buf = stream->priv;
	unsigned int rb_cmd;
	bool coherent;

	/* METADATA_CHECK maps to GET_NEXT_SUBBUF at the rb level. */
	if (cmd == RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK)
		rb_cmd = RING_BUFFER_GET_NEXT_SUBBUF;
	else
		rb_cmd = cmd;

	switch (cmd) {
	case RING_BUFFER_GET_NEXT_SUBBUF:
	{
		struct lttng_metadata_stream *stream = filp->private_data;
		struct lib_ring_buffer *buf = stream->priv;
		struct channel *chan = buf->backend.chan;

		/* Produce metadata from the cache, then flush it if any. */
		ret = lttng_metadata_output_channel(stream, chan, NULL);
		if (ret > 0) {
			lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
			ret = 0;
		} else if (ret < 0)
			goto err;
		break;
	}
	case RING_BUFFER_GET_SUBBUF:
	{
		/*
		 * Random access is not allowed for metadata channel.
		 */
		return -ENOSYS;
	}
	case RING_BUFFER_FLUSH_EMPTY:	/* Fall-through. */
	case RING_BUFFER_FLUSH:
	{
		struct lttng_metadata_stream *stream = filp->private_data;
		struct lib_ring_buffer *buf = stream->priv;
		struct channel *chan = buf->backend.chan;

		/*
		 * Before doing the actual ring buffer flush, write up to one
		 * packet of metadata in the ring buffer.
		 */
		ret = lttng_metadata_output_channel(stream, chan, NULL);
		if (ret < 0)
			goto err;
		break;
	}
	case RING_BUFFER_GET_METADATA_VERSION:
	{
		struct lttng_metadata_stream *stream = filp->private_data;

		/* Answered from stream state; no generic ioctl needed. */
		return put_u64(stream->version, arg);
	}
	case RING_BUFFER_METADATA_CACHE_DUMP:
	{
		struct lttng_metadata_stream *stream = filp->private_data;

		return lttng_metadata_cache_dump(stream);
	}
	case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
	{
		struct lttng_metadata_stream *stream = filp->private_data;
		struct lib_ring_buffer *buf = stream->priv;
		struct channel *chan = buf->backend.chan;

		/* Same as GET_NEXT_SUBBUF, but also track cache coherency. */
		ret = lttng_metadata_output_channel(stream, chan, &coherent);
		if (ret > 0) {
			lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
			ret = 0;
		} else if (ret < 0) {
			goto err;
		}
		break;
	}
	default:
		break;
	}
	/* PUT_SUBBUF is the one from lib ring buffer, unmodified. */

	/* Performing lib ring buffer ioctl after our own. */
	ret = lib_ring_buffer_compat_ioctl(filp, rb_cmd, arg, buf);
	if (ret < 0)
		goto err;

	switch (cmd) {
	case RING_BUFFER_PUT_NEXT_SUBBUF:
	{
		lttng_metadata_ring_buffer_ioctl_put_next_subbuf(filp,
				cmd, arg);
		break;
	}
	case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
	{
		/* Report the coherency state sampled above to userspace. */
		return put_u32(coherent, arg);
	}
	default:
		break;
	}
err:
	return ret;
}
1481 #endif
1482
1483 /*
1484 * This is not used by anonymous file descriptors. This code is left
1485 * there if we ever want to implement an inode with open() operation.
1486 */
1487 static
1488 int lttng_metadata_ring_buffer_open(struct inode *inode, struct file *file)
1489 {
1490 struct lttng_metadata_stream *stream = inode->i_private;
1491 struct lib_ring_buffer *buf = stream->priv;
1492
1493 file->private_data = buf;
1494 /*
1495 * Since life-time of metadata cache differs from that of
1496 * session, we need to keep our own reference on the transport.
1497 */
1498 if (!try_module_get(stream->transport->owner)) {
1499 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
1500 return -EBUSY;
1501 }
1502 return lib_ring_buffer_open(inode, file, buf);
1503 }
1504
1505 static
1506 int lttng_metadata_ring_buffer_release(struct inode *inode, struct file *file)
1507 {
1508 struct lttng_metadata_stream *stream = file->private_data;
1509 struct lib_ring_buffer *buf = stream->priv;
1510
1511 mutex_lock(&stream->metadata_cache->lock);
1512 list_del(&stream->list);
1513 mutex_unlock(&stream->metadata_cache->lock);
1514 kref_put(&stream->metadata_cache->refcount, metadata_cache_destroy);
1515 module_put(stream->transport->owner);
1516 kfree(stream);
1517 return lib_ring_buffer_release(inode, file, buf);
1518 }
1519
1520 static
1521 ssize_t lttng_metadata_ring_buffer_splice_read(struct file *in, loff_t *ppos,
1522 struct pipe_inode_info *pipe, size_t len,
1523 unsigned int flags)
1524 {
1525 struct lttng_metadata_stream *stream = in->private_data;
1526 struct lib_ring_buffer *buf = stream->priv;
1527
1528 return lib_ring_buffer_splice_read(in, ppos, pipe, len,
1529 flags, buf);
1530 }
1531
1532 static
1533 int lttng_metadata_ring_buffer_mmap(struct file *filp,
1534 struct vm_area_struct *vma)
1535 {
1536 struct lttng_metadata_stream *stream = filp->private_data;
1537 struct lib_ring_buffer *buf = stream->priv;
1538
1539 return lib_ring_buffer_mmap(filp, vma, buf);
1540 }
1541
/* File operations backing a metadata stream fd. */
static
const struct file_operations lttng_metadata_ring_buffer_file_operations = {
	.owner = THIS_MODULE,
	.open = lttng_metadata_ring_buffer_open,
	.release = lttng_metadata_ring_buffer_release,
	.poll = lttng_metadata_ring_buffer_poll,
	.splice_read = lttng_metadata_ring_buffer_splice_read,
	.mmap = lttng_metadata_ring_buffer_mmap,
	.unlocked_ioctl = lttng_metadata_ring_buffer_ioctl,
	.llseek = vfs_lib_ring_buffer_no_llseek,
#ifdef CONFIG_COMPAT
	.compat_ioctl = lttng_metadata_ring_buffer_compat_ioctl,
#endif
};
1556
/*
 * lttng_abi_create_stream_fd - allocate an fd bound to an anonymous
 * inode file for a stream.
 * @channel_file: channel (or group) file the stream is created from; unused
 *                here, callers manage its refcount.
 * @stream_priv: private data attached to the new file
 * @fops: file operations for the new file
 * @name: anonymous inode name
 *
 * Returns the installed fd on success, a negative error code otherwise.
 */
static
int lttng_abi_create_stream_fd(struct file *channel_file, void *stream_priv,
		const struct file_operations *fops, const char *name)
{
	int stream_fd, ret;
	struct file *stream_file;

	stream_fd = lttng_get_unused_fd();
	if (stream_fd < 0) {
		ret = stream_fd;
		goto fd_error;
	}
	stream_file = anon_inode_getfile(name, fops, stream_priv, O_RDWR);
	if (IS_ERR(stream_file)) {
		ret = PTR_ERR(stream_file);
		goto file_error;
	}
	/*
	 * OPEN_FMODE, called within anon_inode_getfile/alloc_file, don't honor
	 * FMODE_LSEEK, FMODE_PREAD nor FMODE_PWRITE. We need to read from this
	 * file descriptor, so we set FMODE_PREAD here.
	 */
	stream_file->f_mode |= FMODE_PREAD;
	fd_install(stream_fd, stream_file);
	/*
	 * The stream holds a reference to the channel within the generic ring
	 * buffer library, so no need to hold a refcount on the channel and
	 * session files here.
	 */
	return stream_fd;

file_error:
	put_unused_fd(stream_fd);
fd_error:
	return ret;
}
1593
1594 static
1595 int lttng_abi_open_stream(struct file *channel_file)
1596 {
1597 struct lttng_channel *channel = channel_file->private_data;
1598 struct lib_ring_buffer *buf;
1599 int ret;
1600 void *stream_priv;
1601
1602 buf = channel->ops->buffer_read_open(channel->chan);
1603 if (!buf)
1604 return -ENOENT;
1605
1606 stream_priv = buf;
1607 ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
1608 &lttng_stream_ring_buffer_file_operations,
1609 "[lttng_stream]");
1610 if (ret < 0)
1611 goto fd_error;
1612
1613 return ret;
1614
1615 fd_error:
1616 channel->ops->buffer_read_close(buf);
1617 return ret;
1618 }
1619
/*
 * lttng_abi_open_metadata_stream - create a metadata stream fd
 * @channel_file: metadata channel file
 *
 * Allocates a lttng_metadata_stream tied to the session's metadata
 * cache, pins the transport module and the cache refcount, then wraps
 * the ring buffer reader into a new fd. On success the stream is linked
 * into the cache's stream list. Returns the fd or a negative error
 * code; all partial state is unwound on failure.
 */
static
int lttng_abi_open_metadata_stream(struct file *channel_file)
{
	struct lttng_channel *channel = channel_file->private_data;
	struct lttng_session *session = channel->session;
	struct lib_ring_buffer *buf;
	int ret;
	struct lttng_metadata_stream *metadata_stream;
	void *stream_priv;

	buf = channel->ops->buffer_read_open(channel->chan);
	if (!buf)
		return -ENOENT;

	metadata_stream = kzalloc(sizeof(struct lttng_metadata_stream),
			GFP_KERNEL);
	if (!metadata_stream) {
		ret = -ENOMEM;
		goto nomem;
	}
	metadata_stream->metadata_cache = session->metadata_cache;
	init_waitqueue_head(&metadata_stream->read_wait);
	metadata_stream->priv = buf;
	stream_priv = metadata_stream;
	metadata_stream->transport = channel->transport;
	/* Initial state is an empty metadata, considered as incoherent. */
	metadata_stream->coherent = false;

	/*
	 * Since life-time of metadata cache differs from that of
	 * session, we need to keep our own reference on the transport.
	 */
	if (!try_module_get(metadata_stream->transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
		ret = -EINVAL;
		goto notransport;
	}

	if (!lttng_kref_get(&session->metadata_cache->refcount)) {
		ret = -EOVERFLOW;
		goto kref_error;
	}

	ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
			&lttng_metadata_ring_buffer_file_operations,
			"[lttng_metadata_stream]");
	if (ret < 0)
		goto fd_error;

	/* Publish the stream on the cache's list under the cache lock. */
	mutex_lock(&session->metadata_cache->lock);
	list_add(&metadata_stream->list,
		&session->metadata_cache->metadata_stream);
	mutex_unlock(&session->metadata_cache->lock);
	return ret;

fd_error:
	kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
kref_error:
	module_put(metadata_stream->transport->owner);
notransport:
	kfree(metadata_stream);
nomem:
	channel->ops->buffer_read_close(buf);
	return ret;
}
1685
/*
 * lttng_abi_open_event_notifier_group_stream - create the notification
 * stream fd of an event notifier group.
 * @notif_file: event notifier group file
 *
 * Opens the group's ring buffer for reading and wraps it into a new fd.
 * The new fd keeps a reference on the group file (f_count bump).
 * Returns the fd or a negative error code.
 */
static
int lttng_abi_open_event_notifier_group_stream(struct file *notif_file)
{
	struct lttng_event_notifier_group *event_notifier_group = notif_file->private_data;
	struct channel *chan = event_notifier_group->chan;
	struct lib_ring_buffer *buf;
	int ret;
	void *stream_priv;

	buf = event_notifier_group->ops->buffer_read_open(chan);
	if (!buf)
		return -ENOENT;

	/* The event_notifier notification fd holds a reference on the event_notifier group */
	if (!atomic_long_add_unless(&notif_file->f_count, 1, LONG_MAX)) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}
	event_notifier_group->buf = buf;
	stream_priv = event_notifier_group;
	ret = lttng_abi_create_stream_fd(notif_file, stream_priv,
			&lttng_event_notifier_group_notif_fops,
			"[lttng_event_notifier_stream]");
	if (ret < 0)
		goto fd_error;

	return ret;

fd_error:
	atomic_long_dec(&notif_file->f_count);
refcount_error:
	event_notifier_group->ops->buffer_read_close(buf);
	return ret;
}
1720
/*
 * Validate the event parameters received from userspace.
 *
 * Limit the ABI to the implemented feature set: only the enum values
 * explicitly accepted below are allowed; anything else (including
 * values introduced by a newer userspace ABI) is rejected with -EINVAL.
 */
static
int lttng_abi_validate_event_param(struct lttng_kernel_event *event_param)
{
	/* Limit ABI to implemented features. */
	switch (event_param->instrumentation) {
	case LTTNG_KERNEL_SYSCALL:
		/* Syscalls: any entry/exit mode, ALL abi, match by name only. */
		switch (event_param->u.syscall.entryexit) {
		case LTTNG_KERNEL_SYSCALL_ENTRY:	/* Fall-through */
		case LTTNG_KERNEL_SYSCALL_EXIT:		/* Fall-through */
		case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
			break;
		default:
			return -EINVAL;
		}
		switch (event_param->u.syscall.abi) {
		case LTTNG_KERNEL_SYSCALL_ABI_ALL:
			break;
		default:
			return -EINVAL;
		}
		switch (event_param->u.syscall.match) {
		case LTTNG_KERNEL_SYSCALL_MATCH_NAME:
			break;
		default:
			return -EINVAL;
		}
		break;

	case LTTNG_KERNEL_KRETPROBE:
		/* Kretprobes only support the combined entry+exit mode. */
		switch (event_param->u.kretprobe.entryexit) {
		case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
			break;
		case LTTNG_KERNEL_SYSCALL_ENTRY:	/* Fall-through */
		case LTTNG_KERNEL_SYSCALL_EXIT:		/* Fall-through */
		default:
			return -EINVAL;
		}
		break;

	case LTTNG_KERNEL_TRACEPOINT:	/* Fall-through */
	case LTTNG_KERNEL_KPROBE:	/* Fall-through */
	case LTTNG_KERNEL_UPROBE:
		break;

	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	case LTTNG_KERNEL_NOOP:		/* Fall-through */
	default:
		return -EINVAL;
	}
	return 0;
}
1772
/*
 * lttng_abi_create_event - create an event (or event enabler) fd
 * @channel_file: channel file that will own the new event
 * @event_param: userspace-provided event description (already copied in)
 *
 * Creates either an event enabler (tracepoint/syscall, with star-glob
 * support) or a single event (kprobe/kretprobe/uprobe) attached to the
 * channel. The returned fd holds a reference on the channel file.
 * Returns the new fd on success, a negative error code otherwise.
 */
static
int lttng_abi_create_event(struct file *channel_file,
		struct lttng_kernel_event *event_param)
{
	struct lttng_channel *channel = channel_file->private_data;
	int event_fd, ret;
	struct file *event_file;
	void *priv;

	/* Enforce NUL-termination of userspace-provided names. */
	event_param->name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
	switch (event_param->instrumentation) {
	case LTTNG_KERNEL_KRETPROBE:
		event_param->u.kretprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
		break;
	case LTTNG_KERNEL_KPROBE:
		event_param->u.kprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
		break;
	case LTTNG_KERNEL_FUNCTION:
		WARN_ON_ONCE(1);
		/* Not implemented. */
		break;
	default:
		break;
	}
	event_fd = lttng_get_unused_fd();
	if (event_fd < 0) {
		ret = event_fd;
		goto fd_error;
	}
	event_file = anon_inode_getfile("[lttng_event]",
					&lttng_event_fops,
					NULL, O_RDWR);
	if (IS_ERR(event_file)) {
		ret = PTR_ERR(event_file);
		goto file_error;
	}
	/* The event holds a reference on the channel */
	if (!atomic_long_add_unless(&channel_file->f_count, 1, LONG_MAX)) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}
	/* Reject parameters outside the implemented ABI. */
	ret = lttng_abi_validate_event_param(event_param);
	if (ret)
		goto event_error;

	switch (event_param->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:	/* Fall-through */
	case LTTNG_KERNEL_SYSCALL:
	{
		struct lttng_event_enabler *event_enabler;

		if (strutils_is_star_glob_pattern(event_param->name)) {
			/*
			 * If the event name is a star globbing pattern,
			 * we create the special star globbing enabler.
			 */
			event_enabler = lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_STAR_GLOB,
				event_param, channel);
		} else {
			event_enabler = lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_NAME,
				event_param, channel);
		}
		priv = event_enabler;
		break;
	}

	case LTTNG_KERNEL_KPROBE:	/* Fall-through */
	case LTTNG_KERNEL_KRETPROBE:	/* Fall-through */
	case LTTNG_KERNEL_UPROBE:
	{
		struct lttng_event *event;

		/*
		 * We tolerate no failure path after event creation. It
		 * will stay invariant for the rest of the session.
		 */
		event = lttng_event_create(channel, event_param,
				NULL, NULL,
				event_param->instrumentation);
		WARN_ON_ONCE(!event);
		if (IS_ERR(event)) {
			ret = PTR_ERR(event);
			goto event_error;
		}
		priv = event;
		break;
	}

	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	case LTTNG_KERNEL_NOOP:	/* Fall-through */
	default:
		ret = -EINVAL;
		goto event_error;
	}
	event_file->private_data = priv;
	fd_install(event_fd, event_file);
	return event_fd;

event_error:
	atomic_long_dec(&channel_file->f_count);
refcount_error:
	fput(event_file);
file_error:
	put_unused_fd(event_fd);
fd_error:
	return ret;
}
1880
/*
 * lttng_event_notifier_ioctl - ioctl handler for event notifier fds
 *
 * The fd's private data starts with an enum lttng_event_type
 * discriminator: it refers either to a single event notifier
 * (LTTNG_TYPE_EVENT) or to an event notifier enabler
 * (LTTNG_TYPE_ENABLER). Each command is dispatched accordingly;
 * commands valid for only one of the two types return -EINVAL on the
 * other.
 */
static
long lttng_event_notifier_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct lttng_event_notifier *event_notifier;
	struct lttng_event_notifier_enabler *event_notifier_enabler;
	enum lttng_event_type *evtype = file->private_data;

	switch (cmd) {
	case LTTNG_KERNEL_ENABLE:
		switch (*evtype) {
		case LTTNG_TYPE_EVENT:
			event_notifier = file->private_data;
			return lttng_event_notifier_enable(event_notifier);
		case LTTNG_TYPE_ENABLER:
			event_notifier_enabler = file->private_data;
			return lttng_event_notifier_enabler_enable(event_notifier_enabler);
		default:
			WARN_ON_ONCE(1);
			return -ENOSYS;
		}
	case LTTNG_KERNEL_DISABLE:
		switch (*evtype) {
		case LTTNG_TYPE_EVENT:
			event_notifier = file->private_data;
			return lttng_event_notifier_disable(event_notifier);
		case LTTNG_TYPE_ENABLER:
			event_notifier_enabler = file->private_data;
			return lttng_event_notifier_enabler_disable(event_notifier_enabler);
		default:
			WARN_ON_ONCE(1);
			return -ENOSYS;
		}
	case LTTNG_KERNEL_FILTER:
		/* Filter bytecode can only be attached to an enabler. */
		switch (*evtype) {
		case LTTNG_TYPE_EVENT:
			return -EINVAL;
		case LTTNG_TYPE_ENABLER:
			event_notifier_enabler = file->private_data;
			return lttng_event_notifier_enabler_attach_filter_bytecode(
				event_notifier_enabler,
				(struct lttng_kernel_filter_bytecode __user *) arg);
		default:
			WARN_ON_ONCE(1);
			return -ENOSYS;
		}

	case LTTNG_KERNEL_CAPTURE:
		/* Capture bytecode can only be attached to an enabler. */
		switch (*evtype) {
		case LTTNG_TYPE_EVENT:
			return -EINVAL;
		case LTTNG_TYPE_ENABLER:
			event_notifier_enabler = file->private_data;
			return lttng_event_notifier_enabler_attach_capture_bytecode(
				event_notifier_enabler,
				(struct lttng_kernel_capture_bytecode __user *) arg);
		default:
			WARN_ON_ONCE(1);
			return -ENOSYS;
		}
	case LTTNG_KERNEL_ADD_CALLSITE:
		/* Callsites can only be added to an instantiated notifier. */
		switch (*evtype) {
		case LTTNG_TYPE_EVENT:
			event_notifier = file->private_data;
			return lttng_event_notifier_add_callsite(event_notifier,
				(struct lttng_kernel_event_callsite __user *) arg);
		case LTTNG_TYPE_ENABLER:
			return -EINVAL;
		default:
			WARN_ON_ONCE(1);
			return -ENOSYS;
		}
	default:
		return -ENOIOCTLCMD;
	}
}
1956
1957 static
1958 int lttng_event_notifier_release(struct inode *inode, struct file *file)
1959 {
1960 struct lttng_event_notifier *event_notifier;
1961 struct lttng_event_notifier_enabler *event_notifier_enabler;
1962 enum lttng_event_type *evtype = file->private_data;
1963
1964 if (!evtype)
1965 return 0;
1966
1967 switch (*evtype) {
1968 case LTTNG_TYPE_EVENT:
1969 event_notifier = file->private_data;
1970 if (event_notifier)
1971 fput(event_notifier->group->file);
1972 break;
1973 case LTTNG_TYPE_ENABLER:
1974 event_notifier_enabler = file->private_data;
1975 if (event_notifier_enabler)
1976 fput(event_notifier_enabler->group->file);
1977 break;
1978 default:
1979 WARN_ON_ONCE(1);
1980 break;
1981 }
1982
1983 return 0;
1984 }
1985
/* File operations backing an event notifier (or enabler) fd. */
static const struct file_operations lttng_event_notifier_fops = {
	.owner = THIS_MODULE,
	.release = lttng_event_notifier_release,
	.unlocked_ioctl = lttng_event_notifier_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = lttng_event_notifier_ioctl,
#endif
};
1994
/*
 * lttng_abi_create_event_notifier - create an event notifier fd
 * @event_notifier_group_file: event notifier group owning the notifier
 * @event_notifier_param: userspace-provided description (already copied in)
 *
 * Validates the requested instrumentation and the event parameters
 * before creating either an event notifier enabler (tracepoint/syscall,
 * with star-glob support) or a single event notifier (kprobe/uprobe).
 * The new fd holds a reference on the event notifier group file.
 * Returns the fd or a negative error code.
 */
static
int lttng_abi_create_event_notifier(struct file *event_notifier_group_file,
		struct lttng_kernel_event_notifier *event_notifier_param)
{
	struct lttng_event_notifier_group *event_notifier_group =
			event_notifier_group_file->private_data;
	int event_notifier_fd, ret;
	struct file *event_notifier_file;
	void *priv;

	/*
	 * Reject unsupported instrumentation types up front, and enforce
	 * NUL-termination of userspace-provided symbol names.
	 */
	switch (event_notifier_param->event.instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_UPROBE:
		break;
	case LTTNG_KERNEL_KPROBE:
		event_notifier_param->event.u.kprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
		break;
	case LTTNG_KERNEL_SYSCALL:
		break;
	case LTTNG_KERNEL_KRETPROBE:
		/* Placing an event notifier on kretprobe is not supported. */
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	default:
		ret = -EINVAL;
		goto inval_instr;
	}

	event_notifier_param->event.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';

	event_notifier_fd = lttng_get_unused_fd();
	if (event_notifier_fd < 0) {
		ret = event_notifier_fd;
		goto fd_error;
	}

	event_notifier_file = anon_inode_getfile("[lttng_event_notifier]",
					&lttng_event_notifier_fops,
					NULL, O_RDWR);
	if (IS_ERR(event_notifier_file)) {
		ret = PTR_ERR(event_notifier_file);
		goto file_error;
	}

	/* The event notifier holds a reference on the event notifier group. */
	if (!atomic_long_add_unless(&event_notifier_group_file->f_count, 1, LONG_MAX)) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}

	/* Reject event parameters outside the implemented ABI. */
	ret = lttng_abi_validate_event_param(&event_notifier_param->event);
	if (ret)
		goto event_notifier_error;

	switch (event_notifier_param->event.instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:	/* Fall-through */
	case LTTNG_KERNEL_SYSCALL:
	{
		struct lttng_event_notifier_enabler *enabler;

		if (strutils_is_star_glob_pattern(event_notifier_param->event.name)) {
			/*
			 * If the event name is a star globbing pattern,
			 * we create the special star globbing enabler.
			 */
			enabler = lttng_event_notifier_enabler_create(
					event_notifier_group,
					LTTNG_ENABLER_FORMAT_STAR_GLOB,
					event_notifier_param);
		} else {
			enabler = lttng_event_notifier_enabler_create(
					event_notifier_group,
					LTTNG_ENABLER_FORMAT_NAME,
					event_notifier_param);
		}
		priv = enabler;
		break;
	}

	case LTTNG_KERNEL_KPROBE:	/* Fall-through */
	case LTTNG_KERNEL_KRETPROBE:	/* Fall-through */
	case LTTNG_KERNEL_UPROBE:
	{
		struct lttng_event_notifier *event_notifier;

		/*
		 * We tolerate no failure path after event notifier creation.
		 * It will stay invariant for the rest of the session.
		 */
		event_notifier = lttng_event_notifier_create(NULL,
				event_notifier_param->event.token,
				event_notifier_param->error_counter_index,
				event_notifier_group,
				event_notifier_param, NULL,
				event_notifier_param->event.instrumentation);
		WARN_ON_ONCE(!event_notifier);
		if (IS_ERR(event_notifier)) {
			ret = PTR_ERR(event_notifier);
			goto event_notifier_error;
		}
		priv = event_notifier;
		break;
	}

	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	case LTTNG_KERNEL_NOOP:	/* Fall-through */
	default:
		ret = -EINVAL;
		goto event_notifier_error;
	}
	event_notifier_file->private_data = priv;
	fd_install(event_notifier_fd, event_notifier_file);
	return event_notifier_fd;

event_notifier_error:
	atomic_long_dec(&event_notifier_group_file->f_count);
refcount_error:
	fput(event_notifier_file);
file_error:
	put_unused_fd(event_notifier_fd);
fd_error:
inval_instr:
	return ret;
}
2119
/*
 * Create the error counter of an event notifier group and return a new
 * file descriptor referring to it, or a negative error code on failure.
 *
 * Only one-dimensional, modular-arithmetic counters are supported; the
 * counter bitness (32/64) selects the per-cpu counter transport. At most
 * one error counter may exist per event notifier group.
 */
static
long lttng_abi_event_notifier_group_create_error_counter(
		struct file *event_notifier_group_file,
		const struct lttng_kernel_counter_conf *error_counter_conf)
{
	int counter_fd, ret;
	char *counter_transport_name;
	size_t counter_len;
	struct lttng_counter *counter = NULL;
	struct file *counter_file;
	struct lttng_event_notifier_group *event_notifier_group =
		(struct lttng_event_notifier_group *) event_notifier_group_file->private_data;

	/* Validate user-supplied counter configuration. */
	if (error_counter_conf->arithmetic != LTTNG_KERNEL_COUNTER_ARITHMETIC_MODULAR) {
		printk(KERN_ERR "LTTng: event_notifier: Error counter of the wrong arithmetic type.\n");
		return -EINVAL;
	}

	if (error_counter_conf->number_dimensions != 1) {
		printk(KERN_ERR "LTTng: event_notifier: Error counter has more than one dimension.\n");
		return -EINVAL;
	}

	/* Map the requested bitness to a counter transport. */
	switch (error_counter_conf->bitness) {
	case LTTNG_KERNEL_COUNTER_BITNESS_64:
		counter_transport_name = "counter-per-cpu-64-modular";
		break;
	case LTTNG_KERNEL_COUNTER_BITNESS_32:
		counter_transport_name = "counter-per-cpu-32-modular";
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Lock sessions to provide mutual exclusion against concurrent
	 * modification of event_notifier group, which would result in
	 * overwriting the error counter if set concurrently.
	 */
	lttng_lock_sessions();

	/* Only a single error counter is allowed per group. */
	if (event_notifier_group->error_counter) {
		printk(KERN_ERR "Error counter already created in event_notifier group\n");
		ret = -EBUSY;
		goto fd_error;
	}

	counter_fd = lttng_get_unused_fd();
	if (counter_fd < 0) {
		ret = counter_fd;
		goto fd_error;
	}

	counter_file = anon_inode_getfile("[lttng_counter]",
				  &lttng_counter_fops,
				  NULL, O_RDONLY);
	if (IS_ERR(counter_file)) {
		ret = PTR_ERR(counter_file);
		goto file_error;
	}

	/* Single dimension: its size is the counter length. */
	counter_len = error_counter_conf->dimensions[0].size;

	/*
	 * The counter file keeps the event notifier group file alive;
	 * take a reference on it, guarding against refcount overflow.
	 */
	if (!atomic_long_add_unless(&event_notifier_group_file->f_count, 1, LONG_MAX)) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}

	counter = lttng_kernel_counter_create(counter_transport_name,
			1, &counter_len);
	if (!counter) {
		ret = -EINVAL;
		goto counter_error;
	}

	event_notifier_group->error_counter_len = counter_len;
	/*
	 * store-release to publish error counter matches load-acquire
	 * in record_error. Ensures the counter is created and the
	 * error_counter_len is set before they are used.
	 */
	lttng_smp_store_release(&event_notifier_group->error_counter, counter);

	counter->file = counter_file;
	counter->owner = event_notifier_group->file;
	counter_file->private_data = counter;
	/* Ownership transferred. */
	counter = NULL;

	fd_install(counter_fd, counter_file);
	lttng_unlock_sessions();

	return counter_fd;

	/* Error paths unwind in reverse order of acquisition. */
counter_error:
	atomic_long_dec(&event_notifier_group_file->f_count);
refcount_error:
	fput(counter_file);
file_error:
	put_unused_fd(counter_fd);
fd_error:
	lttng_unlock_sessions();
	return ret;
}
2224
2225 static
2226 long lttng_event_notifier_group_ioctl(struct file *file, unsigned int cmd,
2227 unsigned long arg)
2228 {
2229 switch (cmd) {
2230 case LTTNG_KERNEL_EVENT_NOTIFIER_GROUP_NOTIFICATION_FD:
2231 {
2232 return lttng_abi_open_event_notifier_group_stream(file);
2233 }
2234 case LTTNG_KERNEL_EVENT_NOTIFIER_CREATE:
2235 {
2236 struct lttng_kernel_event_notifier uevent_notifier_param;
2237
2238 if (copy_from_user(&uevent_notifier_param,
2239 (struct lttng_kernel_event_notifier __user *) arg,
2240 sizeof(uevent_notifier_param)))
2241 return -EFAULT;
2242 return lttng_abi_create_event_notifier(file, &uevent_notifier_param);
2243 }
2244 case LTTNG_KERNEL_COUNTER:
2245 {
2246 struct lttng_kernel_counter_conf uerror_counter_conf;
2247
2248 if (copy_from_user(&uerror_counter_conf,
2249 (struct lttng_kernel_counter_conf __user *) arg,
2250 sizeof(uerror_counter_conf)))
2251 return -EFAULT;
2252 return lttng_abi_event_notifier_group_create_error_counter(file,
2253 &uerror_counter_conf);
2254 }
2255 default:
2256 return -ENOIOCTLCMD;
2257 }
2258 return 0;
2259 }
2260
2261 static
2262 int lttng_event_notifier_group_release(struct inode *inode, struct file *file)
2263 {
2264 struct lttng_event_notifier_group *event_notifier_group =
2265 file->private_data;
2266
2267 if (event_notifier_group)
2268 lttng_event_notifier_group_destroy(event_notifier_group);
2269 return 0;
2270 }
2271
/* File operations for event notifier group file descriptors. */
static const struct file_operations lttng_event_notifier_group_fops = {
	.owner = THIS_MODULE,
	.release = lttng_event_notifier_group_release,
	.unlocked_ioctl = lttng_event_notifier_group_ioctl,
#ifdef CONFIG_COMPAT
	/* The native handler copies fixed-layout structs; reused for compat. */
	.compat_ioctl = lttng_event_notifier_group_ioctl,
#endif
};
2280
/**
 * lttng_channel_ioctl - lttng syscall through ioctl
 *
 * @file: the file
 * @cmd: the command
 * @arg: command arg
 *
 * This ioctl implements lttng commands:
 * LTTNG_KERNEL_STREAM
 *	Returns an event stream file descriptor or failure.
 *	(typically, one event stream records events from one CPU)
 * LTTNG_KERNEL_EVENT
 *	Returns an event file descriptor or failure.
 * LTTNG_KERNEL_CONTEXT
 *	Prepend a context field to each event in the channel
 * LTTNG_KERNEL_ENABLE
 *	Enable recording for events in this channel (weak enable)
 * LTTNG_KERNEL_DISABLE
 *	Disable recording for events in this channel (strong disable)
 * LTTNG_KERNEL_SYSCALL_MASK
 *	Copy the channel's system call enable mask to user space.
 *
 * The LTTNG_KERNEL_OLD_* commands accept the legacy ABI structure
 * layouts, which are translated to the current layouts before being
 * handled by the same code paths as their current counterparts.
 *
 * Channel and event file descriptors also hold a reference on the session.
 */
static
long lttng_channel_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct lttng_channel *channel = file->private_data;

	switch (cmd) {
	case LTTNG_KERNEL_OLD_STREAM:
	case LTTNG_KERNEL_STREAM:
		return lttng_abi_open_stream(file);
	case LTTNG_KERNEL_OLD_EVENT:
	{
		struct lttng_kernel_event *uevent_param;
		struct lttng_kernel_old_event *old_uevent_param;
		int ret;

		/* Both structs are heap-allocated to keep stack usage low. */
		uevent_param = kmalloc(sizeof(struct lttng_kernel_event),
				GFP_KERNEL);
		if (!uevent_param) {
			ret = -ENOMEM;
			goto old_event_end;
		}
		old_uevent_param = kmalloc(
				sizeof(struct lttng_kernel_old_event),
				GFP_KERNEL);
		if (!old_uevent_param) {
			ret = -ENOMEM;
			goto old_event_error_free_param;
		}
		if (copy_from_user(old_uevent_param,
				(struct lttng_kernel_old_event __user *) arg,
				sizeof(struct lttng_kernel_old_event))) {
			ret = -EFAULT;
			goto old_event_error_free_old_param;
		}

		/* Translate the old ABI layout to the current one. */
		memcpy(uevent_param->name, old_uevent_param->name,
				sizeof(uevent_param->name));
		uevent_param->instrumentation =
			old_uevent_param->instrumentation;

		/* Only kprobe/kretprobe use instrumentation-specific fields. */
		switch (old_uevent_param->instrumentation) {
		case LTTNG_KERNEL_KPROBE:
			uevent_param->u.kprobe.addr =
				old_uevent_param->u.kprobe.addr;
			uevent_param->u.kprobe.offset =
				old_uevent_param->u.kprobe.offset;
			memcpy(uevent_param->u.kprobe.symbol_name,
					old_uevent_param->u.kprobe.symbol_name,
					sizeof(uevent_param->u.kprobe.symbol_name));
			break;
		case LTTNG_KERNEL_KRETPROBE:
			uevent_param->u.kretprobe.addr =
				old_uevent_param->u.kretprobe.addr;
			uevent_param->u.kretprobe.offset =
				old_uevent_param->u.kretprobe.offset;
			memcpy(uevent_param->u.kretprobe.symbol_name,
					old_uevent_param->u.kretprobe.symbol_name,
					sizeof(uevent_param->u.kretprobe.symbol_name));
			break;
		case LTTNG_KERNEL_FUNCTION:
			WARN_ON_ONCE(1);
			/* Not implemented. */
			break;
		default:
			break;
		}
		ret = lttng_abi_create_event(file, uevent_param);

		/* Cleanup in reverse order of allocation. */
old_event_error_free_old_param:
		kfree(old_uevent_param);
old_event_error_free_param:
		kfree(uevent_param);
old_event_end:
		return ret;
	}
	case LTTNG_KERNEL_EVENT:
	{
		struct lttng_kernel_event uevent_param;

		if (copy_from_user(&uevent_param,
				(struct lttng_kernel_event __user *) arg,
				sizeof(uevent_param)))
			return -EFAULT;
		return lttng_abi_create_event(file, &uevent_param);
	}
	case LTTNG_KERNEL_OLD_CONTEXT:
	{
		struct lttng_kernel_context *ucontext_param;
		struct lttng_kernel_old_context *old_ucontext_param;
		int ret;

		ucontext_param = kmalloc(sizeof(struct lttng_kernel_context),
				GFP_KERNEL);
		if (!ucontext_param) {
			ret = -ENOMEM;
			goto old_ctx_end;
		}
		old_ucontext_param = kmalloc(sizeof(struct lttng_kernel_old_context),
				GFP_KERNEL);
		if (!old_ucontext_param) {
			ret = -ENOMEM;
			goto old_ctx_error_free_param;
		}

		if (copy_from_user(old_ucontext_param,
				(struct lttng_kernel_old_context __user *) arg,
				sizeof(struct lttng_kernel_old_context))) {
			ret = -EFAULT;
			goto old_ctx_error_free_old_param;
		}
		/* Translate the old ABI layout to the current one. */
		ucontext_param->ctx = old_ucontext_param->ctx;
		memcpy(ucontext_param->padding, old_ucontext_param->padding,
				sizeof(ucontext_param->padding));
		/* only type that uses the union */
		if (old_ucontext_param->ctx == LTTNG_KERNEL_CONTEXT_PERF_COUNTER) {
			ucontext_param->u.perf_counter.type =
				old_ucontext_param->u.perf_counter.type;
			ucontext_param->u.perf_counter.config =
				old_ucontext_param->u.perf_counter.config;
			memcpy(ucontext_param->u.perf_counter.name,
					old_ucontext_param->u.perf_counter.name,
					sizeof(ucontext_param->u.perf_counter.name));
		}

		ret = lttng_abi_add_context(file,
				ucontext_param,
				&channel->ctx, channel->session);

		/* Cleanup in reverse order of allocation. */
old_ctx_error_free_old_param:
		kfree(old_ucontext_param);
old_ctx_error_free_param:
		kfree(ucontext_param);
old_ctx_end:
		return ret;
	}
	case LTTNG_KERNEL_CONTEXT:
	{
		struct lttng_kernel_context ucontext_param;

		if (copy_from_user(&ucontext_param,
				(struct lttng_kernel_context __user *) arg,
				sizeof(ucontext_param)))
			return -EFAULT;
		return lttng_abi_add_context(file,
				&ucontext_param,
				&channel->ctx, channel->session);
	}
	case LTTNG_KERNEL_OLD_ENABLE:
	case LTTNG_KERNEL_ENABLE:
		return lttng_channel_enable(channel);
	case LTTNG_KERNEL_OLD_DISABLE:
	case LTTNG_KERNEL_DISABLE:
		return lttng_channel_disable(channel);
	case LTTNG_KERNEL_SYSCALL_MASK:
		return lttng_channel_syscall_mask(channel,
			(struct lttng_kernel_syscall_mask __user *) arg);
	default:
		return -ENOIOCTLCMD;
	}
}
2463
2464 /**
2465 * lttng_metadata_ioctl - lttng syscall through ioctl
2466 *
2467 * @file: the file
2468 * @cmd: the command
2469 * @arg: command arg
2470 *
2471 * This ioctl implements lttng commands:
2472 * LTTNG_KERNEL_STREAM
2473 * Returns an event stream file descriptor or failure.
2474 *
2475 * Channel and event file descriptors also hold a reference on the session.
2476 */
2477 static
2478 long lttng_metadata_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2479 {
2480 switch (cmd) {
2481 case LTTNG_KERNEL_OLD_STREAM:
2482 case LTTNG_KERNEL_STREAM:
2483 return lttng_abi_open_metadata_stream(file);
2484 default:
2485 return -ENOIOCTLCMD;
2486 }
2487 }
2488
2489 /**
2490 * lttng_channel_poll - lttng stream addition/removal monitoring
2491 *
2492 * @file: the file
2493 * @wait: poll table
2494 */
2495 unsigned int lttng_channel_poll(struct file *file, poll_table *wait)
2496 {
2497 struct lttng_channel *channel = file->private_data;
2498 unsigned int mask = 0;
2499
2500 if (file->f_mode & FMODE_READ) {
2501 poll_wait_set_exclusive(wait);
2502 poll_wait(file, channel->ops->get_hp_wait_queue(channel->chan),
2503 wait);
2504
2505 if (channel->ops->is_disabled(channel->chan))
2506 return POLLERR;
2507 if (channel->ops->is_finalized(channel->chan))
2508 return POLLHUP;
2509 if (channel->ops->buffer_has_read_closed_stream(channel->chan))
2510 return POLLIN | POLLRDNORM;
2511 return 0;
2512 }
2513 return mask;
2514
2515 }
2516
2517 static
2518 int lttng_channel_release(struct inode *inode, struct file *file)
2519 {
2520 struct lttng_channel *channel = file->private_data;
2521
2522 if (channel)
2523 fput(channel->session->file);
2524 return 0;
2525 }
2526
2527 static
2528 int lttng_metadata_channel_release(struct inode *inode, struct file *file)
2529 {
2530 struct lttng_channel *channel = file->private_data;
2531
2532 if (channel) {
2533 fput(channel->session->file);
2534 lttng_metadata_channel_destroy(channel);
2535 }
2536
2537 return 0;
2538 }
2539
/* File operations for (data) channel file descriptors. */
static const struct file_operations lttng_channel_fops = {
	.owner = THIS_MODULE,
	.release = lttng_channel_release,
	.poll = lttng_channel_poll,
	.unlocked_ioctl = lttng_channel_ioctl,
#ifdef CONFIG_COMPAT
	/* The native handler is compat-safe; reused as compat_ioctl. */
	.compat_ioctl = lttng_channel_ioctl,
#endif
};
2549
/* File operations for metadata channel file descriptors (no poll). */
static const struct file_operations lttng_metadata_fops = {
	.owner = THIS_MODULE,
	.release = lttng_metadata_channel_release,
	.unlocked_ioctl = lttng_metadata_ioctl,
#ifdef CONFIG_COMPAT
	/* The native handler is compat-safe; reused as compat_ioctl. */
	.compat_ioctl = lttng_metadata_ioctl,
#endif
};
2558
/**
 * lttng_event_ioctl - lttng syscall through ioctl
 *
 * @file: the file
 * @cmd: the command
 * @arg: command arg
 *
 * This ioctl implements lttng commands:
 * LTTNG_KERNEL_CONTEXT
 *	Prepend a context field to each record of this event
 * LTTNG_KERNEL_ENABLE
 *	Enable recording for this event (weak enable)
 * LTTNG_KERNEL_DISABLE
 *	Disable recording for this event (strong disable)
 * LTTNG_KERNEL_FILTER
 *	Attach a filter bytecode to an enabler
 * LTTNG_KERNEL_ADD_CALLSITE
 *	Add a callsite to an event
 *
 * The file's private_data points either to a struct lttng_event or a
 * struct lttng_event_enabler; both start with an enum lttng_event_type
 * field, which is read first to dispatch to the right type.
 */
static
long lttng_event_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct lttng_event *event;
	struct lttng_event_enabler *event_enabler;
	enum lttng_event_type *evtype = file->private_data;

	switch (cmd) {
	case LTTNG_KERNEL_OLD_CONTEXT:
	{
		/* Not implemented */
		return -ENOSYS;
	}
	case LTTNG_KERNEL_CONTEXT:
	{
		/* Not implemented */
		return -ENOSYS;
	}
	case LTTNG_KERNEL_OLD_ENABLE:
	case LTTNG_KERNEL_ENABLE:
		/* Dispatch on the underlying object type. */
		switch (*evtype) {
		case LTTNG_TYPE_EVENT:
			event = file->private_data;
			return lttng_event_enable(event);
		case LTTNG_TYPE_ENABLER:
			event_enabler = file->private_data;
			return lttng_event_enabler_enable(event_enabler);
		default:
			WARN_ON_ONCE(1);
			return -ENOSYS;
		}
	case LTTNG_KERNEL_OLD_DISABLE:
	case LTTNG_KERNEL_DISABLE:
		switch (*evtype) {
		case LTTNG_TYPE_EVENT:
			event = file->private_data;
			return lttng_event_disable(event);
		case LTTNG_TYPE_ENABLER:
			event_enabler = file->private_data;
			return lttng_event_enabler_disable(event_enabler);
		default:
			WARN_ON_ONCE(1);
			return -ENOSYS;
		}
	case LTTNG_KERNEL_FILTER:
		/* Filters can only be attached to enablers. */
		switch (*evtype) {
		case LTTNG_TYPE_EVENT:
			return -EINVAL;
		case LTTNG_TYPE_ENABLER:
		{
			event_enabler = file->private_data;
			return lttng_event_enabler_attach_filter_bytecode(
				event_enabler,
				(struct lttng_kernel_filter_bytecode __user *) arg);
		}
		default:
			WARN_ON_ONCE(1);
			return -ENOSYS;
		}
	case LTTNG_KERNEL_ADD_CALLSITE:
		/* Callsites can only be added to events. */
		switch (*evtype) {
		case LTTNG_TYPE_EVENT:
			event = file->private_data;
			return lttng_event_add_callsite(event,
				(struct lttng_kernel_event_callsite __user *) arg);
		case LTTNG_TYPE_ENABLER:
			return -EINVAL;
		default:
			WARN_ON_ONCE(1);
			return -ENOSYS;
		}
	default:
		return -ENOIOCTLCMD;
	}
}
2649
2650 static
2651 int lttng_event_release(struct inode *inode, struct file *file)
2652 {
2653 struct lttng_event *event;
2654 struct lttng_event_enabler *event_enabler;
2655 enum lttng_event_type *evtype = file->private_data;
2656
2657 if (!evtype)
2658 return 0;
2659
2660 switch (*evtype) {
2661 case LTTNG_TYPE_EVENT:
2662 event = file->private_data;
2663 if (event)
2664 fput(event->chan->file);
2665 break;
2666 case LTTNG_TYPE_ENABLER:
2667 event_enabler = file->private_data;
2668 if (event_enabler)
2669 fput(event_enabler->chan->file);
2670 break;
2671 default:
2672 WARN_ON_ONCE(1);
2673 break;
2674 }
2675
2676 return 0;
2677 }
2678
/* File operations for event/enabler file descriptors. */
/* TODO: filter control ioctl */
static const struct file_operations lttng_event_fops = {
	.owner = THIS_MODULE,
	.release = lttng_event_release,
	.unlocked_ioctl = lttng_event_ioctl,
#ifdef CONFIG_COMPAT
	/* The native handler is compat-safe; reused as compat_ioctl. */
	.compat_ioctl = lttng_event_ioctl,
#endif
};
2688
/* Copy a 64-bit value to the user-space address passed as ioctl arg. */
static int put_u64(uint64_t val, unsigned long arg)
{
	return put_user(val, (uint64_t __user *) arg);
}
2693
/* Copy a 32-bit value to the user-space address passed as ioctl arg. */
static int put_u32(uint32_t val, unsigned long arg)
{
	return put_user(val, (uint32_t __user *) arg);
}
2698
/*
 * ioctl handler for stream (ring buffer) file descriptors.
 *
 * Implements the LTTng packet-information commands (timestamps, discarded
 * event count, content/packet size, stream/instance id, sequence number)
 * on top of the generic ring buffer ioctls; unknown commands are delegated
 * to lib_ring_buffer_file_operations. Each value is written back to user
 * space as a 64-bit integer at @arg.
 *
 * Note: when a channel op fails, the op's error code is discarded and
 * -ENOSYS is returned to user space.
 */
static long lttng_stream_ring_buffer_ioctl(struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	struct lib_ring_buffer *buf = filp->private_data;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	const struct lttng_channel_ops *ops = chan->backend.priv_ops;
	int ret;

	/* Reject operations on channels with recording disabled. */
	if (atomic_read(&chan->record_disabled))
		return -EIO;

	switch (cmd) {
	case LTTNG_RING_BUFFER_GET_TIMESTAMP_BEGIN:
	{
		uint64_t ts;

		ret = ops->timestamp_begin(config, buf, &ts);
		if (ret < 0)
			goto error;
		return put_u64(ts, arg);
	}
	case LTTNG_RING_BUFFER_GET_TIMESTAMP_END:
	{
		uint64_t ts;

		ret = ops->timestamp_end(config, buf, &ts);
		if (ret < 0)
			goto error;
		return put_u64(ts, arg);
	}
	case LTTNG_RING_BUFFER_GET_EVENTS_DISCARDED:
	{
		uint64_t ed;

		ret = ops->events_discarded(config, buf, &ed);
		if (ret < 0)
			goto error;
		return put_u64(ed, arg);
	}
	case LTTNG_RING_BUFFER_GET_CONTENT_SIZE:
	{
		uint64_t cs;

		ret = ops->content_size(config, buf, &cs);
		if (ret < 0)
			goto error;
		return put_u64(cs, arg);
	}
	case LTTNG_RING_BUFFER_GET_PACKET_SIZE:
	{
		uint64_t ps;

		ret = ops->packet_size(config, buf, &ps);
		if (ret < 0)
			goto error;
		return put_u64(ps, arg);
	}
	case LTTNG_RING_BUFFER_GET_STREAM_ID:
	{
		uint64_t si;

		ret = ops->stream_id(config, buf, &si);
		if (ret < 0)
			goto error;
		return put_u64(si, arg);
	}
	case LTTNG_RING_BUFFER_GET_CURRENT_TIMESTAMP:
	{
		uint64_t ts;

		ret = ops->current_timestamp(config, buf, &ts);
		if (ret < 0)
			goto error;
		return put_u64(ts, arg);
	}
	case LTTNG_RING_BUFFER_GET_SEQ_NUM:
	{
		uint64_t seq;

		ret = ops->sequence_number(config, buf, &seq);
		if (ret < 0)
			goto error;
		return put_u64(seq, arg);
	}
	case LTTNG_RING_BUFFER_INSTANCE_ID:
	{
		uint64_t id;

		ret = ops->instance_id(config, buf, &id);
		if (ret < 0)
			goto error;
		return put_u64(id, arg);
	}
	default:
		/* Fall back to the generic ring buffer ioctls. */
		return lib_ring_buffer_file_operations.unlocked_ioctl(filp,
				cmd, arg);
	}

error:
	return -ENOSYS;
}
2801
#ifdef CONFIG_COMPAT
/*
 * Compat (32-bit user space on 64-bit kernel) counterpart of
 * lttng_stream_ring_buffer_ioctl. Same commands and semantics, using the
 * LTTNG_RING_BUFFER_COMPAT_* ioctl numbers; unknown commands are delegated
 * to lib_ring_buffer_file_operations.compat_ioctl.
 */
static long lttng_stream_ring_buffer_compat_ioctl(struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	struct lib_ring_buffer *buf = filp->private_data;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	const struct lttng_channel_ops *ops = chan->backend.priv_ops;
	int ret;

	/* Reject operations on channels with recording disabled. */
	if (atomic_read(&chan->record_disabled))
		return -EIO;

	switch (cmd) {
	case LTTNG_RING_BUFFER_COMPAT_GET_TIMESTAMP_BEGIN:
	{
		uint64_t ts;

		ret = ops->timestamp_begin(config, buf, &ts);
		if (ret < 0)
			goto error;
		return put_u64(ts, arg);
	}
	case LTTNG_RING_BUFFER_COMPAT_GET_TIMESTAMP_END:
	{
		uint64_t ts;

		ret = ops->timestamp_end(config, buf, &ts);
		if (ret < 0)
			goto error;
		return put_u64(ts, arg);
	}
	case LTTNG_RING_BUFFER_COMPAT_GET_EVENTS_DISCARDED:
	{
		uint64_t ed;

		ret = ops->events_discarded(config, buf, &ed);
		if (ret < 0)
			goto error;
		return put_u64(ed, arg);
	}
	case LTTNG_RING_BUFFER_COMPAT_GET_CONTENT_SIZE:
	{
		uint64_t cs;

		ret = ops->content_size(config, buf, &cs);
		if (ret < 0)
			goto error;
		return put_u64(cs, arg);
	}
	case LTTNG_RING_BUFFER_COMPAT_GET_PACKET_SIZE:
	{
		uint64_t ps;

		ret = ops->packet_size(config, buf, &ps);
		if (ret < 0)
			goto error;
		return put_u64(ps, arg);
	}
	case LTTNG_RING_BUFFER_COMPAT_GET_STREAM_ID:
	{
		uint64_t si;

		ret = ops->stream_id(config, buf, &si);
		if (ret < 0)
			goto error;
		return put_u64(si, arg);
	}
	/*
	 * NOTE(review): this case uses the native (non-COMPAT) define,
	 * unlike the other cases — presumably its ioctl number encoding is
	 * identical in compat mode; confirm against the ABI definitions.
	 */
	case LTTNG_RING_BUFFER_GET_CURRENT_TIMESTAMP:
	{
		uint64_t ts;

		ret = ops->current_timestamp(config, buf, &ts);
		if (ret < 0)
			goto error;
		return put_u64(ts, arg);
	}
	case LTTNG_RING_BUFFER_COMPAT_GET_SEQ_NUM:
	{
		uint64_t seq;

		ret = ops->sequence_number(config, buf, &seq);
		if (ret < 0)
			goto error;
		return put_u64(seq, arg);
	}
	case LTTNG_RING_BUFFER_COMPAT_INSTANCE_ID:
	{
		uint64_t id;

		ret = ops->instance_id(config, buf, &id);
		if (ret < 0)
			goto error;
		return put_u64(id, arg);
	}
	default:
		/* Fall back to the generic ring buffer compat ioctls. */
		return lib_ring_buffer_file_operations.compat_ioctl(filp,
				cmd, arg);
	}

error:
	return -ENOSYS;
}
#endif /* CONFIG_COMPAT */
2906
/*
 * Populate lttng_stream_ring_buffer_file_operations with the generic ring
 * buffer implementations, overriding only the (unlocked/compat) ioctl
 * handlers with the LTTng-specific wrappers above.
 */
static void lttng_stream_override_ring_buffer_fops(void)
{
	lttng_stream_ring_buffer_file_operations.owner = THIS_MODULE;
	lttng_stream_ring_buffer_file_operations.open =
		lib_ring_buffer_file_operations.open;
	lttng_stream_ring_buffer_file_operations.release =
		lib_ring_buffer_file_operations.release;
	lttng_stream_ring_buffer_file_operations.poll =
		lib_ring_buffer_file_operations.poll;
	lttng_stream_ring_buffer_file_operations.splice_read =
		lib_ring_buffer_file_operations.splice_read;
	lttng_stream_ring_buffer_file_operations.mmap =
		lib_ring_buffer_file_operations.mmap;
	lttng_stream_ring_buffer_file_operations.unlocked_ioctl =
		lttng_stream_ring_buffer_ioctl;
	lttng_stream_ring_buffer_file_operations.llseek =
		lib_ring_buffer_file_operations.llseek;
#ifdef CONFIG_COMPAT
	lttng_stream_ring_buffer_file_operations.compat_ioctl =
		lttng_stream_ring_buffer_compat_ioctl;
#endif
}
2929
/*
 * Module-init entry point for the LTTng ABI: take a clock reference,
 * initialize the tracepoint memory pool, create the /proc/lttng control
 * file, and install the stream ring buffer file operations.
 * Returns 0 on success, a negative error code on failure.
 */
int __init lttng_abi_init(void)
{
	int ret = 0;

	wrapper_vmalloc_sync_mappings();
	lttng_clock_ref();

	ret = lttng_tp_mempool_init();
	if (ret) {
		goto error;
	}

	/* /proc/lttng is the control file ioctl()s are issued on. */
	lttng_proc_dentry = proc_create_data("lttng", S_IRUSR | S_IWUSR, NULL,
					&lttng_proc_ops, NULL);

	if (!lttng_proc_dentry) {
		printk(KERN_ERR "LTTng: Error creating control file\n");
		ret = -ENOMEM;
		goto error;
	}
	lttng_stream_override_ring_buffer_fops();
	return 0;

error:
	/* Unwind: mempool destroy is a no-op if init failed early. */
	lttng_tp_mempool_destroy();
	lttng_clock_unref();
	return ret;
}
2958
/* No __exit annotation because used by init error path too. */
/* Tear down the LTTng ABI: mempool, clock reference, /proc/lttng entry. */
void lttng_abi_exit(void)
{
	lttng_tp_mempool_destroy();
	lttng_clock_unref();
	/* The proc entry may not exist when called from the init error path. */
	if (lttng_proc_dentry)
		remove_proc_entry("lttng", NULL);
}