Cleanup: lttng_abi_create_event{,_notifier}: use switch/case rather than if
[lttng-modules.git] / src / lttng-abi.c
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-abi.c
4 *
5 * LTTng ABI
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Mimic system calls for:
10 * - session creation, returns a file descriptor or failure.
11 * - channel creation, returns a file descriptor or failure.
12 * - Operates on a session file descriptor
13 * - Takes all channel options as parameters.
14 * - stream get, returns a file descriptor or failure.
15 * - Operates on a channel file descriptor.
16 * - stream notifier get, returns a file descriptor or failure.
17 * - Operates on a channel file descriptor.
18 * - event creation, returns a file descriptor or failure.
19 * - Operates on a channel file descriptor
20 * - Takes an event name as parameter
21 * - Takes an instrumentation source as parameter
22 * - e.g. tracepoints, dynamic_probes...
23 * - Takes instrumentation source specific arguments.
24 */
25
26 #include <linux/module.h>
27 #include <linux/proc_fs.h>
28 #include <linux/anon_inodes.h>
29 #include <linux/file.h>
30 #include <linux/uaccess.h>
31 #include <linux/slab.h>
32 #include <linux/err.h>
33 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
34 #include <ringbuffer/vfs.h>
35 #include <ringbuffer/backend.h>
36 #include <ringbuffer/frontend.h>
37 #include <wrapper/poll.h>
38 #include <wrapper/file.h>
39 #include <wrapper/kref.h>
40 #include <wrapper/barrier.h>
41 #include <lttng/string-utils.h>
42 #include <lttng/abi.h>
43 #include <lttng/abi-old.h>
44 #include <lttng/events.h>
45 #include <lttng/tracer.h>
46 #include <lttng/tp-mempool.h>
47 #include <ringbuffer/frontend_types.h>
48 #include <ringbuffer/iterator.h>
49
50 /*
51 * This is LTTng's own personal way to create a system call as an external
52 * module. We use ioctl() on /proc/lttng.
53 */
54
55 static struct proc_dir_entry *lttng_proc_dentry;
56
57 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0))
58 static const struct proc_ops lttng_proc_ops;
59 #else
60 static const struct file_operations lttng_proc_ops;
61 #endif
62
63 static const struct file_operations lttng_session_fops;
64 static const struct file_operations lttng_event_notifier_group_fops;
65 static const struct file_operations lttng_channel_fops;
66 static const struct file_operations lttng_metadata_fops;
67 static const struct file_operations lttng_event_fops;
68 static struct file_operations lttng_stream_ring_buffer_file_operations;
69
70 static int put_u64(uint64_t val, unsigned long arg);
71 static int put_u32(uint32_t val, unsigned long arg);
72
/*
 * Return 0 when the len bytes at p are all zero, -1 as soon as any
 * non-zero byte is found. Used to reject ABI structs whose reserved
 * padding was not zero-filled by user-space.
 */
static int validate_zeroed_padding(char *p, size_t len)
{
	char *end = p + len;

	while (p < end) {
		if (*p++)
			return -1;
	}
	return 0;
}
83
84 /*
85 * Teardown management: opened file descriptors keep a refcount on the module,
86 * so it can only exit when all file descriptors are closed.
87 */
88
89 static
90 int lttng_abi_create_session(void)
91 {
92 struct lttng_session *session;
93 struct file *session_file;
94 int session_fd, ret;
95
96 session = lttng_session_create();
97 if (!session)
98 return -ENOMEM;
99 session_fd = lttng_get_unused_fd();
100 if (session_fd < 0) {
101 ret = session_fd;
102 goto fd_error;
103 }
104 session_file = anon_inode_getfile("[lttng_session]",
105 &lttng_session_fops,
106 session, O_RDWR);
107 if (IS_ERR(session_file)) {
108 ret = PTR_ERR(session_file);
109 goto file_error;
110 }
111 session->file = session_file;
112 fd_install(session_fd, session_file);
113 return session_fd;
114
115 file_error:
116 put_unused_fd(session_fd);
117 fd_error:
118 lttng_session_destroy(session);
119 return ret;
120 }
121
122 void event_notifier_send_notification_work_wakeup(struct irq_work *entry)
123 {
124 struct lttng_event_notifier_group *event_notifier_group =
125 container_of(entry, struct lttng_event_notifier_group,
126 wakeup_pending);
127 wake_up_interruptible(&event_notifier_group->read_wait);
128 }
129
130 static
131 int lttng_abi_create_event_notifier_group(void)
132 {
133 struct lttng_event_notifier_group *event_notifier_group;
134 struct file *event_notifier_group_file;
135 int event_notifier_group_fd, ret;
136
137 event_notifier_group = lttng_event_notifier_group_create();
138 if (!event_notifier_group)
139 return -ENOMEM;
140
141 event_notifier_group_fd = lttng_get_unused_fd();
142 if (event_notifier_group_fd < 0) {
143 ret = event_notifier_group_fd;
144 goto fd_error;
145 }
146 event_notifier_group_file = anon_inode_getfile("[lttng_event_notifier_group]",
147 &lttng_event_notifier_group_fops,
148 event_notifier_group, O_RDWR);
149 if (IS_ERR(event_notifier_group_file)) {
150 ret = PTR_ERR(event_notifier_group_file);
151 goto file_error;
152 }
153
154 event_notifier_group->file = event_notifier_group_file;
155 init_waitqueue_head(&event_notifier_group->read_wait);
156 init_irq_work(&event_notifier_group->wakeup_pending,
157 event_notifier_send_notification_work_wakeup);
158 fd_install(event_notifier_group_fd, event_notifier_group_file);
159 return event_notifier_group_fd;
160
161 file_error:
162 put_unused_fd(event_notifier_group_fd);
163 fd_error:
164 lttng_event_notifier_group_destroy(event_notifier_group);
165 return ret;
166 }
167
168 static
169 int lttng_abi_tracepoint_list(void)
170 {
171 struct file *tracepoint_list_file;
172 int file_fd, ret;
173
174 file_fd = lttng_get_unused_fd();
175 if (file_fd < 0) {
176 ret = file_fd;
177 goto fd_error;
178 }
179
180 tracepoint_list_file = anon_inode_getfile("[lttng_tracepoint_list]",
181 &lttng_tracepoint_list_fops,
182 NULL, O_RDWR);
183 if (IS_ERR(tracepoint_list_file)) {
184 ret = PTR_ERR(tracepoint_list_file);
185 goto file_error;
186 }
187 ret = lttng_tracepoint_list_fops.open(NULL, tracepoint_list_file);
188 if (ret < 0)
189 goto open_error;
190 fd_install(file_fd, tracepoint_list_file);
191 return file_fd;
192
193 open_error:
194 fput(tracepoint_list_file);
195 file_error:
196 put_unused_fd(file_fd);
197 fd_error:
198 return ret;
199 }
200
#ifndef CONFIG_HAVE_SYSCALL_TRACEPOINTS
/*
 * System call tracepoints are unavailable in this kernel
 * configuration: report ENOSYS instead of listing system calls.
 */
static inline
int lttng_abi_syscall_list(void)
{
	return -ENOSYS;
}
#else
/*
 * Return a file descriptor whose reads enumerate the system calls
 * instrumentable by LTTng, or a negative error code.
 */
static
int lttng_abi_syscall_list(void)
{
	struct file *syscall_list_file;
	int file_fd, ret;

	/* Reserve the descriptor slot before creating the backing file. */
	file_fd = lttng_get_unused_fd();
	if (file_fd < 0) {
		ret = file_fd;
		goto fd_error;
	}

	syscall_list_file = anon_inode_getfile("[lttng_syscall_list]",
			&lttng_syscall_list_fops,
			NULL, O_RDWR);
	if (IS_ERR(syscall_list_file)) {
		ret = PTR_ERR(syscall_list_file);
		goto file_error;
	}
	/*
	 * anon_inode_getfile() does not invoke fops->open(): call it by
	 * hand so the listing's open-time setup runs.
	 */
	ret = lttng_syscall_list_fops.open(NULL, syscall_list_file);
	if (ret < 0)
		goto open_error;
	fd_install(file_fd, syscall_list_file);
	return file_fd;

open_error:
	fput(syscall_list_file);
file_error:
	put_unused_fd(file_fd);
fd_error:
	return ret;
}
#endif
241
/*
 * Fill @v with the LTTng modules (tracer) version triplet, as reported
 * by the LTTNG_KERNEL_TRACER_VERSION ioctl.
 */
static
void lttng_abi_tracer_version(struct lttng_kernel_tracer_version *v)
{
	v->major = LTTNG_MODULES_MAJOR_VERSION;
	v->minor = LTTNG_MODULES_MINOR_VERSION;
	v->patchlevel = LTTNG_MODULES_PATCHLEVEL_VERSION;
}
249
/*
 * Fill @v with the LTTng kernel tracer ABI version (major/minor), as
 * reported by the LTTNG_KERNEL_TRACER_ABI_VERSION ioctl.
 */
static
void lttng_abi_tracer_abi_version(struct lttng_kernel_tracer_abi_version *v)
{
	v->major = LTTNG_MODULES_ABI_MAJOR_VERSION;
	v->minor = LTTNG_MODULES_ABI_MINOR_VERSION;
}
256
/*
 * Append the context field requested by @context_param to the context
 * list *ctx. Returns 0 on success, a negative error code on failure.
 * Refused with -EPERM once the session has been active.
 */
static
long lttng_abi_add_context(struct file *file,
	struct lttng_kernel_context *context_param,
	struct lttng_ctx **ctx, struct lttng_session *session)
{

	if (session->been_active)
		return -EPERM;

	/* Dispatch on the ABI context identifier. */
	switch (context_param->ctx) {
	case LTTNG_KERNEL_CONTEXT_PID:
		return lttng_add_pid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_PRIO:
		return lttng_add_prio_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_NICE:
		return lttng_add_nice_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_VPID:
		return lttng_add_vpid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_TID:
		return lttng_add_tid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_VTID:
		return lttng_add_vtid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_PPID:
		return lttng_add_ppid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_VPPID:
		return lttng_add_vppid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_PERF_COUNTER:
		/* Force NUL-termination of the user-supplied counter name. */
		context_param->u.perf_counter.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
		return lttng_add_perf_counter_to_ctx(context_param->u.perf_counter.type,
				context_param->u.perf_counter.config,
				context_param->u.perf_counter.name,
				ctx);
	case LTTNG_KERNEL_CONTEXT_PROCNAME:
		return lttng_add_procname_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_HOSTNAME:
		return lttng_add_hostname_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_CPU_ID:
		return lttng_add_cpu_id_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_INTERRUPTIBLE:
		return lttng_add_interruptible_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_NEED_RESCHEDULE:
		return lttng_add_need_reschedule_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_PREEMPTIBLE:
		return lttng_add_preemptible_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_MIGRATABLE:
		return lttng_add_migratable_to_ctx(ctx);
	/* Both callstack flavors share one helper, selected by ctx id. */
	case LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL:
	case LTTNG_KERNEL_CONTEXT_CALLSTACK_USER:
		return lttng_add_callstack_to_ctx(ctx, context_param->ctx);
	case LTTNG_KERNEL_CONTEXT_CGROUP_NS:
		return lttng_add_cgroup_ns_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_IPC_NS:
		return lttng_add_ipc_ns_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_MNT_NS:
		return lttng_add_mnt_ns_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_NET_NS:
		return lttng_add_net_ns_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_PID_NS:
		return lttng_add_pid_ns_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_USER_NS:
		return lttng_add_user_ns_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_UTS_NS:
		return lttng_add_uts_ns_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_UID:
		return lttng_add_uid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_EUID:
		return lttng_add_euid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_SUID:
		return lttng_add_suid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_GID:
		return lttng_add_gid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_EGID:
		return lttng_add_egid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_SGID:
		return lttng_add_sgid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_VUID:
		return lttng_add_vuid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_VEUID:
		return lttng_add_veuid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_VSUID:
		return lttng_add_vsuid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_VGID:
		return lttng_add_vgid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_VEGID:
		return lttng_add_vegid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_VSGID:
		return lttng_add_vsgid_to_ctx(ctx);
	case LTTNG_KERNEL_CONTEXT_TIME_NS:
		return lttng_add_time_ns_to_ctx(ctx);
	default:
		return -EINVAL;
	}
}
350
351 /**
352 * lttng_ioctl - lttng syscall through ioctl
353 *
354 * @file: the file
355 * @cmd: the command
356 * @arg: command arg
357 *
358 * This ioctl implements lttng commands:
359 * LTTNG_KERNEL_SESSION
360 * Returns a LTTng trace session file descriptor
361 * LTTNG_KERNEL_TRACER_VERSION
362 * Returns the LTTng kernel tracer version
363 * LTTNG_KERNEL_TRACEPOINT_LIST
364 * Returns a file descriptor listing available tracepoints
365 * LTTNG_KERNEL_WAIT_QUIESCENT
366 * Returns after all previously running probes have completed
367 * LTTNG_KERNEL_TRACER_ABI_VERSION
368 * Returns the LTTng kernel tracer ABI version
369 * LTTNG_KERNEL_EVENT_NOTIFIER_GROUP_CREATE
370 * Returns a LTTng event notifier group file descriptor
371 *
372 * The returned session will be deleted when its file descriptor is closed.
373 */
374 static
375 long lttng_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
376 {
377 switch (cmd) {
378 case LTTNG_KERNEL_OLD_SESSION:
379 case LTTNG_KERNEL_SESSION:
380 return lttng_abi_create_session();
381 case LTTNG_KERNEL_EVENT_NOTIFIER_GROUP_CREATE:
382 return lttng_abi_create_event_notifier_group();
383 case LTTNG_KERNEL_OLD_TRACER_VERSION:
384 {
385 struct lttng_kernel_tracer_version v;
386 struct lttng_kernel_old_tracer_version oldv;
387 struct lttng_kernel_old_tracer_version *uversion =
388 (struct lttng_kernel_old_tracer_version __user *) arg;
389
390 lttng_abi_tracer_version(&v);
391 oldv.major = v.major;
392 oldv.minor = v.minor;
393 oldv.patchlevel = v.patchlevel;
394
395 if (copy_to_user(uversion, &oldv, sizeof(oldv)))
396 return -EFAULT;
397 return 0;
398 }
399 case LTTNG_KERNEL_TRACER_VERSION:
400 {
401 struct lttng_kernel_tracer_version version;
402 struct lttng_kernel_tracer_version *uversion =
403 (struct lttng_kernel_tracer_version __user *) arg;
404
405 lttng_abi_tracer_version(&version);
406
407 if (copy_to_user(uversion, &version, sizeof(version)))
408 return -EFAULT;
409 return 0;
410 }
411 case LTTNG_KERNEL_TRACER_ABI_VERSION:
412 {
413 struct lttng_kernel_tracer_abi_version version;
414 struct lttng_kernel_tracer_abi_version *uversion =
415 (struct lttng_kernel_tracer_abi_version __user *) arg;
416
417 lttng_abi_tracer_abi_version(&version);
418
419 if (copy_to_user(uversion, &version, sizeof(version)))
420 return -EFAULT;
421 return 0;
422 }
423 case LTTNG_KERNEL_OLD_TRACEPOINT_LIST:
424 case LTTNG_KERNEL_TRACEPOINT_LIST:
425 return lttng_abi_tracepoint_list();
426 case LTTNG_KERNEL_SYSCALL_LIST:
427 return lttng_abi_syscall_list();
428 case LTTNG_KERNEL_OLD_WAIT_QUIESCENT:
429 case LTTNG_KERNEL_WAIT_QUIESCENT:
430 synchronize_trace();
431 return 0;
432 case LTTNG_KERNEL_OLD_CALIBRATE:
433 {
434 struct lttng_kernel_old_calibrate __user *ucalibrate =
435 (struct lttng_kernel_old_calibrate __user *) arg;
436 struct lttng_kernel_old_calibrate old_calibrate;
437 struct lttng_kernel_calibrate calibrate;
438 int ret;
439
440 if (copy_from_user(&old_calibrate, ucalibrate, sizeof(old_calibrate)))
441 return -EFAULT;
442 calibrate.type = old_calibrate.type;
443 ret = lttng_calibrate(&calibrate);
444 if (copy_to_user(ucalibrate, &old_calibrate, sizeof(old_calibrate)))
445 return -EFAULT;
446 return ret;
447 }
448 case LTTNG_KERNEL_CALIBRATE:
449 {
450 struct lttng_kernel_calibrate __user *ucalibrate =
451 (struct lttng_kernel_calibrate __user *) arg;
452 struct lttng_kernel_calibrate calibrate;
453 int ret;
454
455 if (copy_from_user(&calibrate, ucalibrate, sizeof(calibrate)))
456 return -EFAULT;
457 ret = lttng_calibrate(&calibrate);
458 if (copy_to_user(ucalibrate, &calibrate, sizeof(calibrate)))
459 return -EFAULT;
460 return ret;
461 }
462 default:
463 return -ENOIOCTLCMD;
464 }
465 }
466
/*
 * File operations backing /proc/lttng. Kernels >= 5.6 use the
 * dedicated proc_ops structure; older kernels register regular
 * file_operations.
 */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0))
static const struct proc_ops lttng_proc_ops = {
	.proc_ioctl = lttng_ioctl,
#ifdef CONFIG_COMPAT
	.proc_compat_ioctl = lttng_ioctl,
#endif /* CONFIG_COMPAT */
};
#else
static const struct file_operations lttng_proc_ops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = lttng_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = lttng_ioctl,
#endif /* CONFIG_COMPAT */
};
#endif
483
484 static
485 int lttng_abi_create_channel(struct file *session_file,
486 struct lttng_kernel_channel *chan_param,
487 enum channel_type channel_type)
488 {
489 struct lttng_session *session = session_file->private_data;
490 const struct file_operations *fops = NULL;
491 const char *transport_name;
492 struct lttng_channel *chan;
493 struct file *chan_file;
494 int chan_fd;
495 int ret = 0;
496
497 chan_fd = lttng_get_unused_fd();
498 if (chan_fd < 0) {
499 ret = chan_fd;
500 goto fd_error;
501 }
502 switch (channel_type) {
503 case PER_CPU_CHANNEL:
504 fops = &lttng_channel_fops;
505 break;
506 case METADATA_CHANNEL:
507 fops = &lttng_metadata_fops;
508 break;
509 }
510
511 chan_file = anon_inode_getfile("[lttng_channel]",
512 fops,
513 NULL, O_RDWR);
514 if (IS_ERR(chan_file)) {
515 ret = PTR_ERR(chan_file);
516 goto file_error;
517 }
518 switch (channel_type) {
519 case PER_CPU_CHANNEL:
520 if (chan_param->output == LTTNG_KERNEL_SPLICE) {
521 transport_name = chan_param->overwrite ?
522 "relay-overwrite" : "relay-discard";
523 } else if (chan_param->output == LTTNG_KERNEL_MMAP) {
524 transport_name = chan_param->overwrite ?
525 "relay-overwrite-mmap" : "relay-discard-mmap";
526 } else {
527 return -EINVAL;
528 }
529 break;
530 case METADATA_CHANNEL:
531 if (chan_param->output == LTTNG_KERNEL_SPLICE)
532 transport_name = "relay-metadata";
533 else if (chan_param->output == LTTNG_KERNEL_MMAP)
534 transport_name = "relay-metadata-mmap";
535 else
536 return -EINVAL;
537 break;
538 default:
539 transport_name = "<unknown>";
540 break;
541 }
542 if (!atomic_long_add_unless(&session_file->f_count, 1, LONG_MAX)) {
543 ret = -EOVERFLOW;
544 goto refcount_error;
545 }
546 /*
547 * We tolerate no failure path after channel creation. It will stay
548 * invariant for the rest of the session.
549 */
550 chan = lttng_channel_create(session, transport_name, NULL,
551 chan_param->subbuf_size,
552 chan_param->num_subbuf,
553 chan_param->switch_timer_interval,
554 chan_param->read_timer_interval,
555 channel_type);
556 if (!chan) {
557 ret = -EINVAL;
558 goto chan_error;
559 }
560 chan->file = chan_file;
561 chan_file->private_data = chan;
562 fd_install(chan_fd, chan_file);
563
564 return chan_fd;
565
566 chan_error:
567 atomic_long_dec(&session_file->f_count);
568 refcount_error:
569 fput(chan_file);
570 file_error:
571 put_unused_fd(chan_fd);
572 fd_error:
573 return ret;
574 }
575
576 static
577 int lttng_abi_session_set_name(struct lttng_session *session,
578 struct lttng_kernel_session_name *name)
579 {
580 size_t len;
581
582 len = strnlen(name->name, LTTNG_KERNEL_SESSION_NAME_LEN);
583
584 if (len == LTTNG_KERNEL_SESSION_NAME_LEN) {
585 /* Name is too long/malformed */
586 return -EINVAL;
587 }
588
589 strcpy(session->name, name->name);
590 return 0;
591 }
592
593 static
594 int lttng_abi_session_set_creation_time(struct lttng_session *session,
595 struct lttng_kernel_session_creation_time *time)
596 {
597 size_t len;
598
599 len = strnlen(time->iso8601, LTTNG_KERNEL_SESSION_CREATION_TIME_ISO8601_LEN);
600
601 if (len == LTTNG_KERNEL_SESSION_CREATION_TIME_ISO8601_LEN) {
602 /* Time is too long/malformed */
603 return -EINVAL;
604 }
605
606 strcpy(session->creation_time, time->iso8601);
607 return 0;
608 }
609
/*
 * Release handler for a counter file descriptor: drop the reference
 * this fd holds on the owner file. The counter object itself is not
 * freed here.
 */
static
int lttng_counter_release(struct inode *inode, struct file *file)
{
	struct lttng_counter *counter = file->private_data;

	if (counter) {
		/*
		 * Do not destroy the counter itself. Wait for the owner
		 * (event_notifier group) to be destroyed.
		 */
		fput(counter->owner);
	}

	return 0;
}
625
626 static
627 long lttng_counter_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
628 {
629 struct lttng_counter *counter = file->private_data;
630 size_t indexes[LTTNG_KERNEL_COUNTER_DIMENSION_MAX] = { 0 };
631 int i;
632
633 switch (cmd) {
634 case LTTNG_KERNEL_COUNTER_READ:
635 {
636 struct lttng_kernel_counter_read local_counter_read;
637 struct lttng_kernel_counter_read __user *ucounter_read =
638 (struct lttng_kernel_counter_read __user *) arg;
639 bool overflow, underflow;
640 int64_t value;
641 int32_t cpu;
642 int ret;
643
644 if (copy_from_user(&local_counter_read, ucounter_read,
645 sizeof(local_counter_read)))
646 return -EFAULT;
647 if (validate_zeroed_padding(local_counter_read.padding,
648 sizeof(local_counter_read.padding)))
649 return -EINVAL;
650
651 /* Cast all indexes into size_t. */
652 for (i = 0; i < local_counter_read.index.number_dimensions; i++)
653 indexes[i] = (size_t) local_counter_read.index.dimension_indexes[i];
654 cpu = local_counter_read.cpu;
655
656 ret = lttng_kernel_counter_read(counter, indexes, cpu, &value,
657 &overflow, &underflow);
658 if (ret)
659 return ret;
660 local_counter_read.value.value = value;
661 local_counter_read.value.overflow = overflow;
662 local_counter_read.value.underflow = underflow;
663
664 if (copy_to_user(&ucounter_read->value, &local_counter_read.value,
665 sizeof(local_counter_read.value)))
666 return -EFAULT;
667
668 return 0;
669 }
670 case LTTNG_KERNEL_COUNTER_AGGREGATE:
671 {
672 struct lttng_kernel_counter_aggregate local_counter_aggregate;
673 struct lttng_kernel_counter_aggregate __user *ucounter_aggregate =
674 (struct lttng_kernel_counter_aggregate __user *) arg;
675 bool overflow, underflow;
676 int64_t value;
677 int ret;
678
679 if (copy_from_user(&local_counter_aggregate, ucounter_aggregate,
680 sizeof(local_counter_aggregate)))
681 return -EFAULT;
682 if (validate_zeroed_padding(local_counter_aggregate.padding,
683 sizeof(local_counter_aggregate.padding)))
684 return -EINVAL;
685
686 /* Cast all indexes into size_t. */
687 for (i = 0; i < local_counter_aggregate.index.number_dimensions; i++)
688 indexes[i] = (size_t) local_counter_aggregate.index.dimension_indexes[i];
689
690 ret = lttng_kernel_counter_aggregate(counter, indexes, &value,
691 &overflow, &underflow);
692 if (ret)
693 return ret;
694 local_counter_aggregate.value.value = value;
695 local_counter_aggregate.value.overflow = overflow;
696 local_counter_aggregate.value.underflow = underflow;
697
698 if (copy_to_user(&ucounter_aggregate->value, &local_counter_aggregate.value,
699 sizeof(local_counter_aggregate.value)))
700 return -EFAULT;
701
702 return 0;
703 }
704 case LTTNG_KERNEL_COUNTER_CLEAR:
705 {
706 struct lttng_kernel_counter_clear local_counter_clear;
707 struct lttng_kernel_counter_clear __user *ucounter_clear =
708 (struct lttng_kernel_counter_clear __user *) arg;
709
710 if (copy_from_user(&local_counter_clear, ucounter_clear,
711 sizeof(local_counter_clear)))
712 return -EFAULT;
713 if (validate_zeroed_padding(local_counter_clear.padding,
714 sizeof(local_counter_clear.padding)))
715 return -EINVAL;
716
717 /* Cast all indexes into size_t. */
718 for (i = 0; i < local_counter_clear.index.number_dimensions; i++)
719 indexes[i] = (size_t) local_counter_clear.index.dimension_indexes[i];
720
721 return lttng_kernel_counter_clear(counter, indexes);
722 }
723 default:
724 WARN_ON_ONCE(1);
725 return -ENOSYS;
726 }
727 }
728
/* File operations backing a counter file descriptor. */
static const struct file_operations lttng_counter_fops = {
	.owner = THIS_MODULE,
	.release = lttng_counter_release,
	.unlocked_ioctl = lttng_counter_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = lttng_counter_ioctl,
#endif
};
737
738
739 static
740 enum tracker_type get_tracker_type(struct lttng_kernel_tracker_args *tracker)
741 {
742 switch (tracker->type) {
743 case LTTNG_KERNEL_TRACKER_PID:
744 return TRACKER_PID;
745 case LTTNG_KERNEL_TRACKER_VPID:
746 return TRACKER_VPID;
747 case LTTNG_KERNEL_TRACKER_UID:
748 return TRACKER_UID;
749 case LTTNG_KERNEL_TRACKER_VUID:
750 return TRACKER_VUID;
751 case LTTNG_KERNEL_TRACKER_GID:
752 return TRACKER_GID;
753 case LTTNG_KERNEL_TRACKER_VGID:
754 return TRACKER_VGID;
755 default:
756 return TRACKER_UNKNOWN;
757 }
758 }
759
760 /**
761 * lttng_session_ioctl - lttng session fd ioctl
762 *
763 * @file: the file
764 * @cmd: the command
765 * @arg: command arg
766 *
767 * This ioctl implements lttng commands:
768 * LTTNG_KERNEL_CHANNEL
769 * Returns a LTTng channel file descriptor
770 * LTTNG_KERNEL_ENABLE
771 * Enables tracing for a session (weak enable)
772 * LTTNG_KERNEL_DISABLE
773 * Disables tracing for a session (strong disable)
774 * LTTNG_KERNEL_METADATA
775 * Returns a LTTng metadata file descriptor
776 * LTTNG_KERNEL_SESSION_TRACK_PID
777 * Add PID to session PID tracker
778 * LTTNG_KERNEL_SESSION_UNTRACK_PID
779 * Remove PID from session PID tracker
780 * LTTNG_KERNEL_SESSION_TRACK_ID
781 * Add ID to tracker
782 * LTTNG_KERNEL_SESSION_UNTRACK_ID
783 * Remove ID from tracker
784 *
785 * The returned channel will be deleted when its file descriptor is closed.
786 */
787 static
788 long lttng_session_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
789 {
790 struct lttng_session *session = file->private_data;
791 struct lttng_kernel_channel chan_param;
792 struct lttng_kernel_old_channel old_chan_param;
793
794 switch (cmd) {
795 case LTTNG_KERNEL_OLD_CHANNEL:
796 {
797 if (copy_from_user(&old_chan_param,
798 (struct lttng_kernel_old_channel __user *) arg,
799 sizeof(struct lttng_kernel_old_channel)))
800 return -EFAULT;
801 chan_param.overwrite = old_chan_param.overwrite;
802 chan_param.subbuf_size = old_chan_param.subbuf_size;
803 chan_param.num_subbuf = old_chan_param.num_subbuf;
804 chan_param.switch_timer_interval = old_chan_param.switch_timer_interval;
805 chan_param.read_timer_interval = old_chan_param.read_timer_interval;
806 chan_param.output = old_chan_param.output;
807
808 return lttng_abi_create_channel(file, &chan_param,
809 PER_CPU_CHANNEL);
810 }
811 case LTTNG_KERNEL_CHANNEL:
812 {
813 if (copy_from_user(&chan_param,
814 (struct lttng_kernel_channel __user *) arg,
815 sizeof(struct lttng_kernel_channel)))
816 return -EFAULT;
817 return lttng_abi_create_channel(file, &chan_param,
818 PER_CPU_CHANNEL);
819 }
820 case LTTNG_KERNEL_OLD_SESSION_START:
821 case LTTNG_KERNEL_OLD_ENABLE:
822 case LTTNG_KERNEL_SESSION_START:
823 case LTTNG_KERNEL_ENABLE:
824 return lttng_session_enable(session);
825 case LTTNG_KERNEL_OLD_SESSION_STOP:
826 case LTTNG_KERNEL_OLD_DISABLE:
827 case LTTNG_KERNEL_SESSION_STOP:
828 case LTTNG_KERNEL_DISABLE:
829 return lttng_session_disable(session);
830 case LTTNG_KERNEL_OLD_METADATA:
831 {
832 if (copy_from_user(&old_chan_param,
833 (struct lttng_kernel_old_channel __user *) arg,
834 sizeof(struct lttng_kernel_old_channel)))
835 return -EFAULT;
836 chan_param.overwrite = old_chan_param.overwrite;
837 chan_param.subbuf_size = old_chan_param.subbuf_size;
838 chan_param.num_subbuf = old_chan_param.num_subbuf;
839 chan_param.switch_timer_interval = old_chan_param.switch_timer_interval;
840 chan_param.read_timer_interval = old_chan_param.read_timer_interval;
841 chan_param.output = old_chan_param.output;
842
843 return lttng_abi_create_channel(file, &chan_param,
844 METADATA_CHANNEL);
845 }
846 case LTTNG_KERNEL_METADATA:
847 {
848 if (copy_from_user(&chan_param,
849 (struct lttng_kernel_channel __user *) arg,
850 sizeof(struct lttng_kernel_channel)))
851 return -EFAULT;
852 return lttng_abi_create_channel(file, &chan_param,
853 METADATA_CHANNEL);
854 }
855 case LTTNG_KERNEL_SESSION_TRACK_PID:
856 return lttng_session_track_id(session, TRACKER_PID, (int) arg);
857 case LTTNG_KERNEL_SESSION_UNTRACK_PID:
858 return lttng_session_untrack_id(session, TRACKER_PID, (int) arg);
859 case LTTNG_KERNEL_SESSION_TRACK_ID:
860 {
861 struct lttng_kernel_tracker_args tracker;
862 enum tracker_type tracker_type;
863
864 if (copy_from_user(&tracker,
865 (struct lttng_kernel_tracker_args __user *) arg,
866 sizeof(struct lttng_kernel_tracker_args)))
867 return -EFAULT;
868 tracker_type = get_tracker_type(&tracker);
869 if (tracker_type == TRACKER_UNKNOWN)
870 return -EINVAL;
871 return lttng_session_track_id(session, tracker_type, tracker.id);
872 }
873 case LTTNG_KERNEL_SESSION_UNTRACK_ID:
874 {
875 struct lttng_kernel_tracker_args tracker;
876 enum tracker_type tracker_type;
877
878 if (copy_from_user(&tracker,
879 (struct lttng_kernel_tracker_args __user *) arg,
880 sizeof(struct lttng_kernel_tracker_args)))
881 return -EFAULT;
882 tracker_type = get_tracker_type(&tracker);
883 if (tracker_type == TRACKER_UNKNOWN)
884 return -EINVAL;
885 return lttng_session_untrack_id(session, tracker_type,
886 tracker.id);
887 }
888 case LTTNG_KERNEL_SESSION_LIST_TRACKER_PIDS:
889 return lttng_session_list_tracker_ids(session, TRACKER_PID);
890 case LTTNG_KERNEL_SESSION_LIST_TRACKER_IDS:
891 {
892 struct lttng_kernel_tracker_args tracker;
893 enum tracker_type tracker_type;
894
895 if (copy_from_user(&tracker,
896 (struct lttng_kernel_tracker_args __user *) arg,
897 sizeof(struct lttng_kernel_tracker_args)))
898 return -EFAULT;
899 tracker_type = get_tracker_type(&tracker);
900 if (tracker_type == TRACKER_UNKNOWN)
901 return -EINVAL;
902 return lttng_session_list_tracker_ids(session, tracker_type);
903 }
904 case LTTNG_KERNEL_SESSION_METADATA_REGEN:
905 return lttng_session_metadata_regenerate(session);
906 case LTTNG_KERNEL_SESSION_STATEDUMP:
907 return lttng_session_statedump(session);
908 case LTTNG_KERNEL_SESSION_SET_NAME:
909 {
910 struct lttng_kernel_session_name name;
911
912 if (copy_from_user(&name,
913 (struct lttng_kernel_session_name __user *) arg,
914 sizeof(struct lttng_kernel_session_name)))
915 return -EFAULT;
916 return lttng_abi_session_set_name(session, &name);
917 }
918 case LTTNG_KERNEL_SESSION_SET_CREATION_TIME:
919 {
920 struct lttng_kernel_session_creation_time time;
921
922 if (copy_from_user(&time,
923 (struct lttng_kernel_session_creation_time __user *) arg,
924 sizeof(struct lttng_kernel_session_creation_time)))
925 return -EFAULT;
926 return lttng_abi_session_set_creation_time(session, &time);
927 }
928 default:
929 return -ENOIOCTLCMD;
930 }
931 }
932
933 /*
934 * Called when the last file reference is dropped.
935 *
936 * Big fat note: channels and events are invariant for the whole session after
937 * their creation. So this session destruction also destroys all channel and
938 * event structures specific to this session (they are not destroyed when their
939 * individual file is released).
940 */
/*
 * Release handler for a session file descriptor: destroy the session
 * (and, per the note above, every channel and event it owns).
 * Tolerates a file with no attached session.
 */
static
int lttng_session_release(struct inode *inode, struct file *file)
{
	struct lttng_session *session = file->private_data;

	if (session)
		lttng_session_destroy(session);
	return 0;
}
950
/* File operations backing a session file descriptor. */
static const struct file_operations lttng_session_fops = {
	.owner = THIS_MODULE,
	.release = lttng_session_release,
	.unlocked_ioctl = lttng_session_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = lttng_session_ioctl,
#endif
};
959
960 /*
961 * When encountering empty buffer, flush current sub-buffer if non-empty
962 * and retry (if new data available to read after flush).
963 */
static
ssize_t lttng_event_notifier_group_notif_read(struct file *filp, char __user *user_buf,
		size_t count, loff_t *ppos)
{
	struct lttng_event_notifier_group *event_notifier_group = filp->private_data;
	struct channel *chan = event_notifier_group->chan;
	struct lib_ring_buffer *buf = event_notifier_group->buf;
	ssize_t read_count = 0, len;
	size_t read_offset;

	might_sleep();
	if (!lttng_access_ok(VERIFY_WRITE, user_buf, count))
		return -EFAULT;

	/*
	 * Finish copy of previous record: a non-zero *ppos means the
	 * previous read() stopped mid-record; resume from that offset
	 * with the remaining length saved in chan->iter.len_left.
	 */
	if (*ppos != 0) {
		if (read_count < count) {
			len = chan->iter.len_left;
			read_offset = *ppos;
			goto skip_get_next;
		}
	}

	while (read_count < count) {
		size_t copy_len, space_left;

		len = lib_ring_buffer_get_next_record(chan, buf);
len_test:
		if (len < 0) {
			/*
			 * Check if buffer is finalized (end of file).
			 */
			if (len == -ENODATA) {
				/* A 0 read_count will tell about end of file */
				goto nodata;
			}
			if (filp->f_flags & O_NONBLOCK) {
				/* Non-blocking: EAGAIN only if nothing was copied yet. */
				if (!read_count)
					read_count = -EAGAIN;
				goto nodata;
			} else {
				int error;

				/*
				 * No data available at the moment, return what
				 * we got.
				 */
				if (read_count)
					goto nodata;

				/*
				 * Wait for returned len to be >= 0 or -ENODATA.
				 */
				error = wait_event_interruptible(
					event_notifier_group->read_wait,
					((len = lib_ring_buffer_get_next_record(
						chan, buf)), len != -EAGAIN));
				CHAN_WARN_ON(chan, len == -EBUSY);
				if (error) {
					/* Interrupted by a signal. */
					read_count = error;
					goto nodata;
				}
				CHAN_WARN_ON(chan, len < 0 && len != -ENODATA);
				goto len_test;
			}
		}
		read_offset = buf->iter.read_offset;
skip_get_next:
		space_left = count - read_count;
		if (len <= space_left) {
			/* Whole record fits: clear the partial-read state. */
			copy_len = len;
			chan->iter.len_left = 0;
			*ppos = 0;
		} else {
			/* Partial copy: remember where the next read() resumes. */
			copy_len = space_left;
			chan->iter.len_left = len - copy_len;
			*ppos = read_offset + copy_len;
		}
		if (__lib_ring_buffer_copy_to_user(&buf->backend, read_offset,
				&user_buf[read_count],
				copy_len)) {
			/*
			 * Leave the len_left and ppos values at their current
			 * state, as we currently have a valid event to read.
			 */
			return -EFAULT;
		}
		read_count += copy_len;
	}
	goto put_record;

nodata:
	*ppos = 0;
	chan->iter.len_left = 0;

put_record:
	lib_ring_buffer_put_current_record(buf);
	return read_count;
}
1063
1064 /*
1065 * If the ring buffer is non empty (even just a partial subbuffer), return that
1066 * there is data available. Perform a ring buffer flush if we encounter a
 * non-empty ring buffer which does not have any consumable subbuffer available.
1068 */
static
unsigned int lttng_event_notifier_group_notif_poll(struct file *filp,
		poll_table *wait)
{
	unsigned int mask = 0;
	struct lttng_event_notifier_group *event_notifier_group = filp->private_data;
	struct channel *chan = event_notifier_group->chan;
	struct lib_ring_buffer *buf = event_notifier_group->buf;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	int finalized, disabled;
	unsigned long consumed, offset;
	size_t subbuffer_header_size = config->cb.subbuffer_header_size();

	if (filp->f_mode & FMODE_READ) {
		poll_wait_set_exclusive(wait);
		poll_wait(filp, &event_notifier_group->read_wait, wait);

		finalized = lib_ring_buffer_is_finalized(config, buf);
		disabled = lib_ring_buffer_channel_is_disabled(chan);

		/*
		 * lib_ring_buffer_is_finalized() contains a smp_rmb() ordering
		 * finalized load before offsets loads.
		 */
		WARN_ON(atomic_long_read(&buf->active_readers) != 1);
retry:
		if (disabled)
			return POLLERR;

		offset = lib_ring_buffer_get_offset(config, buf);
		consumed = lib_ring_buffer_get_consumed(config, buf);

		/*
		 * If there is no buffer available to consume.
		 */
		if (subbuf_trunc(offset, chan) - subbuf_trunc(consumed, chan) == 0) {
			/*
			 * If there is a non-empty subbuffer, flush and try again.
			 */
			if (subbuf_offset(offset, chan) > subbuffer_header_size) {
				lib_ring_buffer_switch_remote(buf);
				goto retry;
			}

			if (finalized)
				return POLLHUP;
			else {
				/*
				 * The memory barriers
				 * __wait_event()/wake_up_interruptible() take
				 * care of "raw_spin_is_locked" memory ordering.
				 */
				if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
					goto retry;
				else
					return 0;
			}
		} else {
			/*
			 * Data available: report urgent if the unconsumed
			 * span covers the whole buffer, plain readable
			 * otherwise.
			 */
			if (subbuf_trunc(offset, chan) - subbuf_trunc(consumed, chan)
					>= chan->backend.buf_size)
				return POLLPRI | POLLRDBAND;
			else
				return POLLIN | POLLRDNORM;
		}
	}

	return mask;
}
1137
1138 /**
1139 * lttng_event_notifier_group_notif_open - event_notifier ring buffer open file operation
1140 * @inode: opened inode
1141 * @file: opened file
1142 *
1143 * Open implementation. Makes sure only one open instance of a buffer is
1144 * done at a given moment.
1145 */
1146 static int lttng_event_notifier_group_notif_open(struct inode *inode, struct file *file)
1147 {
1148 struct lttng_event_notifier_group *event_notifier_group = inode->i_private;
1149 struct lib_ring_buffer *buf = event_notifier_group->buf;
1150
1151 file->private_data = event_notifier_group;
1152 return lib_ring_buffer_open(inode, file, buf);
1153 }
1154
1155 /**
1156 * lttng_event_notifier_group_notif_release - event_notifier ring buffer release file operation
1157 * @inode: opened inode
1158 * @file: opened file
1159 *
1160 * Release implementation.
1161 */
1162 static int lttng_event_notifier_group_notif_release(struct inode *inode, struct file *file)
1163 {
1164 struct lttng_event_notifier_group *event_notifier_group = file->private_data;
1165 struct lib_ring_buffer *buf = event_notifier_group->buf;
1166 int ret;
1167
1168 ret = lib_ring_buffer_release(inode, file, buf);
1169 if (ret)
1170 return ret;
1171 fput(event_notifier_group->file);
1172 return 0;
1173 }
1174
/*
 * File operations backing the event notifier notification stream fd
 * created by lttng_abi_open_event_notifier_group_stream().
 */
static const struct file_operations lttng_event_notifier_group_notif_fops = {
	.owner = THIS_MODULE,
	.open = lttng_event_notifier_group_notif_open,
	.release = lttng_event_notifier_group_notif_release,
	.read = lttng_event_notifier_group_notif_read,
	.poll = lttng_event_notifier_group_notif_poll,
};
1182
1183 /**
1184 * lttng_metadata_ring_buffer_poll - LTTng ring buffer poll file operation
1185 * @filp: the file
1186 * @wait: poll table
1187 *
1188 * Handles the poll operations for the metadata channels.
1189 */
static
unsigned int lttng_metadata_ring_buffer_poll(struct file *filp,
		poll_table *wait)
{
	struct lttng_metadata_stream *stream = filp->private_data;
	struct lib_ring_buffer *buf = stream->priv;
	int finalized;
	unsigned int mask = 0;

	if (filp->f_mode & FMODE_READ) {
		poll_wait_set_exclusive(wait);
		poll_wait(filp, &stream->read_wait, wait);

		finalized = stream->finalized;

		/*
		 * lib_ring_buffer_is_finalized() contains a smp_rmb()
		 * ordering finalized load before offsets loads.
		 */
		WARN_ON(atomic_long_read(&buf->active_readers) != 1);

		if (finalized)
			mask |= POLLHUP;

		/* Readable while cached metadata has not all been sent out. */
		mutex_lock(&stream->metadata_cache->lock);
		if (stream->metadata_cache->metadata_written >
				stream->metadata_out)
			mask |= POLLIN;
		mutex_unlock(&stream->metadata_cache->lock);
	}

	return mask;
}
1223
/*
 * Invoked after the lib ring buffer PUT_NEXT_SUBBUF ioctl succeeds:
 * mark the metadata handed to user-space (metadata_in) as fully
 * consumed (metadata_out). cmd and arg are unused; the signature
 * mirrors the ioctl handler calling convention.
 */
static
void lttng_metadata_ring_buffer_ioctl_put_next_subbuf(struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	struct lttng_metadata_stream *stream = filp->private_data;

	stream->metadata_out = stream->metadata_in;
}
1232
1233 /*
1234 * Reset the counter of how much metadata has been consumed to 0. That way,
1235 * the consumer receives the content of the metadata cache unchanged. This is
1236 * different from the metadata_regenerate where the offset from epoch is
1237 * resampled, here we want the exact same content as the last time the metadata
1238 * was generated. This command is only possible if all the metadata written
1239 * in the cache has been output to the metadata stream to avoid corrupting the
1240 * metadata file.
1241 *
1242 * Return 0 on success, a negative value on error.
1243 */
1244 static
1245 int lttng_metadata_cache_dump(struct lttng_metadata_stream *stream)
1246 {
1247 int ret;
1248 struct lttng_metadata_cache *cache = stream->metadata_cache;
1249
1250 mutex_lock(&cache->lock);
1251 if (stream->metadata_out != cache->metadata_written) {
1252 ret = -EBUSY;
1253 goto end;
1254 }
1255 stream->metadata_out = 0;
1256 stream->metadata_in = 0;
1257 wake_up_interruptible(&stream->read_wait);
1258 ret = 0;
1259
1260 end:
1261 mutex_unlock(&cache->lock);
1262 return ret;
1263 }
1264
/*
 * ioctl handler for the metadata stream: wraps the generic lib ring
 * buffer ioctls, adding metadata-specific work before (fill the ring
 * buffer from the metadata cache) and after (consumption accounting,
 * coherency reporting) the generic command.
 */
static
long lttng_metadata_ring_buffer_ioctl(struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	int ret;
	struct lttng_metadata_stream *stream = filp->private_data;
	struct lib_ring_buffer *buf = stream->priv;
	unsigned int rb_cmd;
	bool coherent;

	/*
	 * The "metadata check" variant maps onto the plain
	 * GET_NEXT_SUBBUF lib ring buffer command; coherency is reported
	 * separately once the generic ioctl succeeds.
	 */
	if (cmd == RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK)
		rb_cmd = RING_BUFFER_GET_NEXT_SUBBUF;
	else
		rb_cmd = cmd;

	switch (cmd) {
	case RING_BUFFER_GET_NEXT_SUBBUF:
	{
		struct lttng_metadata_stream *stream = filp->private_data;
		struct lib_ring_buffer *buf = stream->priv;
		struct channel *chan = buf->backend.chan;

		/*
		 * Produce metadata from the cache into the ring buffer
		 * before the consumer grabs the next sub-buffer. A positive
		 * return means data was written: flush so it becomes
		 * visible to the reader.
		 */
		ret = lttng_metadata_output_channel(stream, chan, NULL);
		if (ret > 0) {
			lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
			ret = 0;
		} else if (ret < 0)
			goto err;
		break;
	}
	case RING_BUFFER_GET_SUBBUF:
	{
		/*
		 * Random access is not allowed for metadata channel.
		 */
		return -ENOSYS;
	}
	case RING_BUFFER_FLUSH_EMPTY:	/* Fall-through. */
	case RING_BUFFER_FLUSH:
	{
		struct lttng_metadata_stream *stream = filp->private_data;
		struct lib_ring_buffer *buf = stream->priv;
		struct channel *chan = buf->backend.chan;

		/*
		 * Before doing the actual ring buffer flush, write up to one
		 * packet of metadata in the ring buffer.
		 */
		ret = lttng_metadata_output_channel(stream, chan, NULL);
		if (ret < 0)
			goto err;
		break;
	}
	case RING_BUFFER_GET_METADATA_VERSION:
	{
		struct lttng_metadata_stream *stream = filp->private_data;

		return put_u64(stream->version, arg);
	}
	case RING_BUFFER_METADATA_CACHE_DUMP:
	{
		struct lttng_metadata_stream *stream = filp->private_data;

		return lttng_metadata_cache_dump(stream);
	}
	case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
	{
		struct lttng_metadata_stream *stream = filp->private_data;
		struct lib_ring_buffer *buf = stream->priv;
		struct channel *chan = buf->backend.chan;

		/* Same as GET_NEXT_SUBBUF, but also samples coherency. */
		ret = lttng_metadata_output_channel(stream, chan, &coherent);
		if (ret > 0) {
			lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
			ret = 0;
		} else if (ret < 0) {
			goto err;
		}
		break;
	}
	default:
		break;
	}
	/* PUT_SUBBUF is the one from lib ring buffer, unmodified. */

	/* Performing lib ring buffer ioctl after our own. */
	ret = lib_ring_buffer_ioctl(filp, rb_cmd, arg, buf);
	if (ret < 0)
		goto err;

	switch (cmd) {
	case RING_BUFFER_PUT_NEXT_SUBBUF:
	{
		lttng_metadata_ring_buffer_ioctl_put_next_subbuf(filp,
				cmd, arg);
		break;
	}
	case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
	{
		/* Report the coherency state sampled above to user-space. */
		return put_u32(coherent, arg);
	}
	default:
		break;
	}
err:
	return ret;
}
1372
1373 #ifdef CONFIG_COMPAT
/*
 * Compat (32-bit user-space on 64-bit kernel) counterpart of
 * lttng_metadata_ring_buffer_ioctl(): identical metadata-specific
 * pre/post processing, but delegates the generic command to
 * lib_ring_buffer_compat_ioctl().
 */
static
long lttng_metadata_ring_buffer_compat_ioctl(struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	int ret;
	struct lttng_metadata_stream *stream = filp->private_data;
	struct lib_ring_buffer *buf = stream->priv;
	unsigned int rb_cmd;
	bool coherent;

	/* The "metadata check" variant maps onto plain GET_NEXT_SUBBUF. */
	if (cmd == RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK)
		rb_cmd = RING_BUFFER_GET_NEXT_SUBBUF;
	else
		rb_cmd = cmd;

	switch (cmd) {
	case RING_BUFFER_GET_NEXT_SUBBUF:
	{
		struct lttng_metadata_stream *stream = filp->private_data;
		struct lib_ring_buffer *buf = stream->priv;
		struct channel *chan = buf->backend.chan;

		/*
		 * Produce metadata from the cache before handing out the
		 * next sub-buffer; flush when new data was written.
		 */
		ret = lttng_metadata_output_channel(stream, chan, NULL);
		if (ret > 0) {
			lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
			ret = 0;
		} else if (ret < 0)
			goto err;
		break;
	}
	case RING_BUFFER_GET_SUBBUF:
	{
		/*
		 * Random access is not allowed for metadata channel.
		 */
		return -ENOSYS;
	}
	case RING_BUFFER_FLUSH_EMPTY:	/* Fall-through. */
	case RING_BUFFER_FLUSH:
	{
		struct lttng_metadata_stream *stream = filp->private_data;
		struct lib_ring_buffer *buf = stream->priv;
		struct channel *chan = buf->backend.chan;

		/*
		 * Before doing the actual ring buffer flush, write up to one
		 * packet of metadata in the ring buffer.
		 */
		ret = lttng_metadata_output_channel(stream, chan, NULL);
		if (ret < 0)
			goto err;
		break;
	}
	case RING_BUFFER_GET_METADATA_VERSION:
	{
		struct lttng_metadata_stream *stream = filp->private_data;

		return put_u64(stream->version, arg);
	}
	case RING_BUFFER_METADATA_CACHE_DUMP:
	{
		struct lttng_metadata_stream *stream = filp->private_data;

		return lttng_metadata_cache_dump(stream);
	}
	case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
	{
		struct lttng_metadata_stream *stream = filp->private_data;
		struct lib_ring_buffer *buf = stream->priv;
		struct channel *chan = buf->backend.chan;

		/* Same as GET_NEXT_SUBBUF, but also samples coherency. */
		ret = lttng_metadata_output_channel(stream, chan, &coherent);
		if (ret > 0) {
			lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
			ret = 0;
		} else if (ret < 0) {
			goto err;
		}
		break;
	}
	default:
		break;
	}
	/* PUT_SUBBUF is the one from lib ring buffer, unmodified. */

	/* Performing lib ring buffer ioctl after our own. */
	ret = lib_ring_buffer_compat_ioctl(filp, rb_cmd, arg, buf);
	if (ret < 0)
		goto err;

	switch (cmd) {
	case RING_BUFFER_PUT_NEXT_SUBBUF:
	{
		lttng_metadata_ring_buffer_ioctl_put_next_subbuf(filp,
				cmd, arg);
		break;
	}
	case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
	{
		/* Report the coherency state sampled above to user-space. */
		return put_u32(coherent, arg);
	}
	default:
		break;
	}
err:
	return ret;
}
1481 #endif
1482
1483 /*
1484 * This is not used by anonymous file descriptors. This code is left
1485 * there if we ever want to implement an inode with open() operation.
1486 */
1487 static
1488 int lttng_metadata_ring_buffer_open(struct inode *inode, struct file *file)
1489 {
1490 struct lttng_metadata_stream *stream = inode->i_private;
1491 struct lib_ring_buffer *buf = stream->priv;
1492
1493 file->private_data = buf;
1494 /*
1495 * Since life-time of metadata cache differs from that of
1496 * session, we need to keep our own reference on the transport.
1497 */
1498 if (!try_module_get(stream->transport->owner)) {
1499 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
1500 return -EBUSY;
1501 }
1502 return lib_ring_buffer_open(inode, file, buf);
1503 }
1504
/*
 * release() for the metadata stream fd: unlink the stream from the
 * cache's stream list under the cache lock, drop the cache and
 * transport references taken at stream creation, free the stream,
 * then let the ring buffer library release its reader state.
 */
static
int lttng_metadata_ring_buffer_release(struct inode *inode, struct file *file)
{
	struct lttng_metadata_stream *stream = file->private_data;
	struct lib_ring_buffer *buf = stream->priv;	/* Saved before kfree(stream). */

	mutex_lock(&stream->metadata_cache->lock);
	list_del(&stream->list);
	mutex_unlock(&stream->metadata_cache->lock);
	kref_put(&stream->metadata_cache->refcount, metadata_cache_destroy);
	module_put(stream->transport->owner);
	kfree(stream);
	return lib_ring_buffer_release(inode, file, buf);
}
1519
1520 static
1521 ssize_t lttng_metadata_ring_buffer_splice_read(struct file *in, loff_t *ppos,
1522 struct pipe_inode_info *pipe, size_t len,
1523 unsigned int flags)
1524 {
1525 struct lttng_metadata_stream *stream = in->private_data;
1526 struct lib_ring_buffer *buf = stream->priv;
1527
1528 return lib_ring_buffer_splice_read(in, ppos, pipe, len,
1529 flags, buf);
1530 }
1531
1532 static
1533 int lttng_metadata_ring_buffer_mmap(struct file *filp,
1534 struct vm_area_struct *vma)
1535 {
1536 struct lttng_metadata_stream *stream = filp->private_data;
1537 struct lib_ring_buffer *buf = stream->priv;
1538
1539 return lib_ring_buffer_mmap(filp, vma, buf);
1540 }
1541
/*
 * File operations backing the metadata stream fd created by
 * lttng_abi_open_metadata_stream(). Seeking is disallowed.
 */
static
const struct file_operations lttng_metadata_ring_buffer_file_operations = {
	.owner = THIS_MODULE,
	.open = lttng_metadata_ring_buffer_open,
	.release = lttng_metadata_ring_buffer_release,
	.poll = lttng_metadata_ring_buffer_poll,
	.splice_read = lttng_metadata_ring_buffer_splice_read,
	.mmap = lttng_metadata_ring_buffer_mmap,
	.unlocked_ioctl = lttng_metadata_ring_buffer_ioctl,
	.llseek = vfs_lib_ring_buffer_no_llseek,
#ifdef CONFIG_COMPAT
	.compat_ioctl = lttng_metadata_ring_buffer_compat_ioctl,
#endif
};
1556
/*
 * Allocate a file descriptor backed by an anonymous inode using the
 * given file operations, with stream_priv as the file's private data.
 * Returns the installed fd on success, a negative error code on
 * failure. channel_file is unused here (see the refcount comment
 * below).
 */
static
int lttng_abi_create_stream_fd(struct file *channel_file, void *stream_priv,
		const struct file_operations *fops, const char *name)
{
	int stream_fd, ret;
	struct file *stream_file;

	stream_fd = lttng_get_unused_fd();
	if (stream_fd < 0) {
		ret = stream_fd;
		goto fd_error;
	}
	stream_file = anon_inode_getfile(name, fops, stream_priv, O_RDWR);
	if (IS_ERR(stream_file)) {
		ret = PTR_ERR(stream_file);
		goto file_error;
	}
	/*
	 * OPEN_FMODE, called within anon_inode_getfile/alloc_file, don't honor
	 * FMODE_LSEEK, FMODE_PREAD nor FMODE_PWRITE. We need to read from this
	 * file descriptor, so we set FMODE_PREAD here.
	 */
	stream_file->f_mode |= FMODE_PREAD;
	fd_install(stream_fd, stream_file);
	/*
	 * The stream holds a reference to the channel within the generic ring
	 * buffer library, so no need to hold a refcount on the channel and
	 * session files here.
	 */
	return stream_fd;

file_error:
	put_unused_fd(stream_fd);
fd_error:
	return ret;
}
1593
1594 static
1595 int lttng_abi_open_stream(struct file *channel_file)
1596 {
1597 struct lttng_channel *channel = channel_file->private_data;
1598 struct lib_ring_buffer *buf;
1599 int ret;
1600 void *stream_priv;
1601
1602 buf = channel->ops->buffer_read_open(channel->chan);
1603 if (!buf)
1604 return -ENOENT;
1605
1606 stream_priv = buf;
1607 ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
1608 &lttng_stream_ring_buffer_file_operations,
1609 "[lttng_stream]");
1610 if (ret < 0)
1611 goto fd_error;
1612
1613 return ret;
1614
1615 fd_error:
1616 channel->ops->buffer_read_close(buf);
1617 return ret;
1618 }
1619
/*
 * Create a metadata stream fd on a metadata channel: allocate the
 * stream, take transport and metadata cache references, create the
 * anonymous-inode fd, and link the stream into the cache's stream
 * list. All resources are unwound in reverse order on failure.
 * Returns the installed fd on success, a negative error on failure.
 */
static
int lttng_abi_open_metadata_stream(struct file *channel_file)
{
	struct lttng_channel *channel = channel_file->private_data;
	struct lttng_session *session = channel->session;
	struct lib_ring_buffer *buf;
	int ret;
	struct lttng_metadata_stream *metadata_stream;
	void *stream_priv;

	buf = channel->ops->buffer_read_open(channel->chan);
	if (!buf)
		return -ENOENT;

	metadata_stream = kzalloc(sizeof(struct lttng_metadata_stream),
			GFP_KERNEL);
	if (!metadata_stream) {
		ret = -ENOMEM;
		goto nomem;
	}
	metadata_stream->metadata_cache = session->metadata_cache;
	init_waitqueue_head(&metadata_stream->read_wait);
	metadata_stream->priv = buf;
	stream_priv = metadata_stream;
	metadata_stream->transport = channel->transport;
	/* Initial state is an empty metadata, considered as incoherent. */
	metadata_stream->coherent = false;

	/*
	 * Since life-time of metadata cache differs from that of
	 * session, we need to keep our own reference on the transport.
	 */
	if (!try_module_get(metadata_stream->transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
		ret = -EINVAL;
		goto notransport;
	}

	if (!lttng_kref_get(&session->metadata_cache->refcount)) {
		ret = -EOVERFLOW;
		goto kref_error;
	}

	ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
			&lttng_metadata_ring_buffer_file_operations,
			"[lttng_metadata_stream]");
	if (ret < 0)
		goto fd_error;

	/*
	 * NOTE(review): the fd is already installed (visible to
	 * user-space) before the stream is linked into the cache's
	 * list — confirm this window is benign.
	 */
	mutex_lock(&session->metadata_cache->lock);
	list_add(&metadata_stream->list,
		&session->metadata_cache->metadata_stream);
	mutex_unlock(&session->metadata_cache->lock);
	return ret;

fd_error:
	kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
kref_error:
	module_put(metadata_stream->transport->owner);
notransport:
	kfree(metadata_stream);
nomem:
	channel->ops->buffer_read_close(buf);
	return ret;
}
1685
/*
 * Create the notification stream fd for an event notifier group.
 * The stream fd takes an extra reference on the group file so the
 * group outlives the stream; the reference is dropped on error or
 * in the stream's release(). Returns the installed fd on success,
 * a negative error code on failure.
 */
static
int lttng_abi_open_event_notifier_group_stream(struct file *notif_file)
{
	struct lttng_event_notifier_group *event_notifier_group = notif_file->private_data;
	struct channel *chan = event_notifier_group->chan;
	struct lib_ring_buffer *buf;
	int ret;
	void *stream_priv;

	buf = event_notifier_group->ops->buffer_read_open(chan);
	if (!buf)
		return -ENOENT;

	/* The event_notifier notification fd holds a reference on the event_notifier group */
	if (!atomic_long_add_unless(&notif_file->f_count, 1, LONG_MAX)) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}
	event_notifier_group->buf = buf;
	stream_priv = event_notifier_group;
	ret = lttng_abi_create_stream_fd(notif_file, stream_priv,
			&lttng_event_notifier_group_notif_fops,
			"[lttng_event_notifier_stream]");
	if (ret < 0)
		goto fd_error;

	return ret;

fd_error:
	atomic_long_dec(&notif_file->f_count);
refcount_error:
	event_notifier_group->ops->buffer_read_close(buf);
	return ret;
}
1720
/*
 * Validate that the instrumentation type and its type-specific
 * parameters received from user-space are within the set of features
 * implemented by this ABI. Returns 0 when valid, -EINVAL otherwise.
 */
static
int lttng_abi_validate_event_param(struct lttng_kernel_event *event_param)
{
	/* Limit ABI to implemented features. */
	switch (event_param->instrumentation) {
	case LTTNG_KERNEL_SYSCALL:
		switch (event_param->u.syscall.entryexit) {
		case LTTNG_KERNEL_SYSCALL_ENTRY:	/* Fall-through */
		case LTTNG_KERNEL_SYSCALL_EXIT:	/* Fall-through */
		case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
			break;
		default:
			return -EINVAL;
		}
		switch (event_param->u.syscall.abi) {
		case LTTNG_KERNEL_SYSCALL_ABI_ALL:
			break;
		default:
			return -EINVAL;
		}
		switch (event_param->u.syscall.match) {
		case LTTNG_KERNEL_SYSCALL_MATCH_NAME:
			break;
		default:
			return -EINVAL;
		}
		break;

	case LTTNG_KERNEL_KRETPROBE:
		/* Only coupled entry+exit kretprobes are accepted. */
		switch (event_param->u.kretprobe.entryexit) {
		case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
			break;
		case LTTNG_KERNEL_SYSCALL_ENTRY:	/* Fall-through */
		case LTTNG_KERNEL_SYSCALL_EXIT:	/* Fall-through */
		default:
			return -EINVAL;
		}
		break;

	case LTTNG_KERNEL_TRACEPOINT:	/* Fall-through */
	case LTTNG_KERNEL_KPROBE:	/* Fall-through */
	case LTTNG_KERNEL_UPROBE:
		break;

	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	case LTTNG_KERNEL_NOOP:	/* Fall-through */
	default:
		return -EINVAL;
	}
	return 0;
}
1772
/*
 * Create an event (or event enabler) fd within a channel.
 *
 * Tracepoint and syscall instrumentation create an enabler object (the
 * name may be a star-glob pattern); kprobe, kretprobe and uprobe create
 * the event immediately. The new fd holds a reference on the channel
 * file. Returns the installed fd on success, a negative error code on
 * failure; all resources are unwound in reverse order on error.
 */
static
int lttng_abi_create_event(struct file *channel_file,
		struct lttng_kernel_event *event_param)
{
	struct lttng_channel *channel = channel_file->private_data;
	int event_fd, ret;
	struct file *event_file;
	void *priv;

	/* Force NUL-termination of user-supplied names. */
	event_param->name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
	switch (event_param->instrumentation) {
	case LTTNG_KERNEL_KRETPROBE:
		event_param->u.kretprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
		break;
	case LTTNG_KERNEL_KPROBE:
		event_param->u.kprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
		break;
	case LTTNG_KERNEL_FUNCTION:
		WARN_ON_ONCE(1);
		/* Not implemented. */
		break;
	default:
		break;
	}
	event_fd = lttng_get_unused_fd();
	if (event_fd < 0) {
		ret = event_fd;
		goto fd_error;
	}
	event_file = anon_inode_getfile("[lttng_event]",
					&lttng_event_fops,
					NULL, O_RDWR);
	if (IS_ERR(event_file)) {
		ret = PTR_ERR(event_file);
		goto file_error;
	}
	/* The event holds a reference on the channel */
	if (!atomic_long_add_unless(&channel_file->f_count, 1, LONG_MAX)) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}
	ret = lttng_abi_validate_event_param(event_param);
	if (ret)
		goto event_error;

	switch (event_param->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:	/* Fall-through */
	case LTTNG_KERNEL_SYSCALL:
	{
		struct lttng_event_enabler *event_enabler;

		if (strutils_is_star_glob_pattern(event_param->name)) {
			/*
			 * If the event name is a star globbing pattern,
			 * we create the special star globbing enabler.
			 */
			event_enabler = lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_STAR_GLOB,
				event_param, channel);
		} else {
			event_enabler = lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_NAME,
				event_param, channel);
		}
		/*
		 * NOTE(review): enabler creation result is not checked;
		 * private_data may end up NULL on allocation failure —
		 * verify the event fops tolerate a NULL private_data.
		 */
		priv = event_enabler;
		break;
	}

	case LTTNG_KERNEL_KPROBE:	/* Fall-through */
	case LTTNG_KERNEL_KRETPROBE:	/* Fall-through */
	case LTTNG_KERNEL_UPROBE:
	{
		struct lttng_event *event;

		/*
		 * We tolerate no failure path after event creation. It
		 * will stay invariant for the rest of the session.
		 */
		event = lttng_event_create(channel, event_param,
				NULL, NULL,
				event_param->instrumentation);
		WARN_ON_ONCE(!event);
		if (IS_ERR(event)) {
			ret = PTR_ERR(event);
			goto event_error;
		}
		priv = event;
		break;
	}

	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	case LTTNG_KERNEL_NOOP:	/* Fall-through */
	default:
		ret = -EINVAL;
		goto event_error;
	}
	event_file->private_data = priv;
	fd_install(event_fd, event_file);
	return event_fd;

event_error:
	atomic_long_dec(&channel_file->f_count);
refcount_error:
	fput(event_file);
file_error:
	put_unused_fd(event_fd);
fd_error:
	return ret;
}
1880
/*
 * ioctl handler shared by event notifier and event notifier enabler
 * fds. file->private_data is first read as an enum lttng_event_type
 * (presumably the leading member of both object layouts — the same
 * pointer is then reinterpreted as the full object), which
 * discriminates LTTNG_TYPE_EVENT from LTTNG_TYPE_ENABLER.
 */
static
long lttng_event_notifier_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct lttng_event_notifier *event_notifier;
	struct lttng_event_notifier_enabler *event_notifier_enabler;
	enum lttng_event_type *evtype = file->private_data;

	switch (cmd) {
	case LTTNG_KERNEL_ENABLE:
		switch (*evtype) {
		case LTTNG_TYPE_EVENT:
			event_notifier = file->private_data;
			return lttng_event_notifier_enable(event_notifier);
		case LTTNG_TYPE_ENABLER:
			event_notifier_enabler = file->private_data;
			return lttng_event_notifier_enabler_enable(event_notifier_enabler);
		default:
			WARN_ON_ONCE(1);
			return -ENOSYS;
		}
	case LTTNG_KERNEL_DISABLE:
		switch (*evtype) {
		case LTTNG_TYPE_EVENT:
			event_notifier = file->private_data;
			return lttng_event_notifier_disable(event_notifier);
		case LTTNG_TYPE_ENABLER:
			event_notifier_enabler = file->private_data;
			return lttng_event_notifier_enabler_disable(event_notifier_enabler);
		default:
			WARN_ON_ONCE(1);
			return -ENOSYS;
		}
	case LTTNG_KERNEL_FILTER:
		switch (*evtype) {
		case LTTNG_TYPE_EVENT:
			/* Filter bytecode attaches to enablers only. */
			return -EINVAL;
		case LTTNG_TYPE_ENABLER:
			event_notifier_enabler = file->private_data;
			return lttng_event_notifier_enabler_attach_filter_bytecode(
				event_notifier_enabler,
				(struct lttng_kernel_filter_bytecode __user *) arg);
		default:
			WARN_ON_ONCE(1);
			return -ENOSYS;
		}

	case LTTNG_KERNEL_CAPTURE:
		switch (*evtype) {
		case LTTNG_TYPE_EVENT:
			/* Capture bytecode attaches to enablers only. */
			return -EINVAL;
		case LTTNG_TYPE_ENABLER:
			event_notifier_enabler = file->private_data;
			return lttng_event_notifier_enabler_attach_capture_bytecode(
				event_notifier_enabler,
				(struct lttng_kernel_capture_bytecode __user *) arg);
		default:
			WARN_ON_ONCE(1);
			return -ENOSYS;
		}
	case LTTNG_KERNEL_ADD_CALLSITE:
		switch (*evtype) {
		case LTTNG_TYPE_EVENT:
			event_notifier = file->private_data;
			return lttng_event_notifier_add_callsite(event_notifier,
				(struct lttng_kernel_event_callsite __user *) arg);
		case LTTNG_TYPE_ENABLER:
			/* Callsites apply to created notifiers only. */
			return -EINVAL;
		default:
			WARN_ON_ONCE(1);
			return -ENOSYS;
		}
	default:
		return -ENOIOCTLCMD;
	}
}
1956
/*
 * release() for event notifier / enabler fds: drop the reference taken
 * on the event notifier group file at creation time. private_data may
 * be NULL when fd setup failed before it was assigned.
 */
static
int lttng_event_notifier_release(struct inode *inode, struct file *file)
{
	struct lttng_event_notifier *event_notifier;
	struct lttng_event_notifier_enabler *event_notifier_enabler;
	enum lttng_event_type *evtype = file->private_data;

	if (!evtype)
		return 0;

	switch (*evtype) {
	case LTTNG_TYPE_EVENT:
		event_notifier = file->private_data;
		if (event_notifier)
			fput(event_notifier->group->file);
		break;
	case LTTNG_TYPE_ENABLER:
		event_notifier_enabler = file->private_data;
		if (event_notifier_enabler)
			fput(event_notifier_enabler->group->file);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return 0;
}
1985
/*
 * File operations for event notifier and event notifier enabler fds.
 * The same ioctl handler serves native and compat (32-bit) callers.
 */
static const struct file_operations lttng_event_notifier_fops = {
	.owner = THIS_MODULE,
	.release = lttng_event_notifier_release,
	.unlocked_ioctl = lttng_event_notifier_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = lttng_event_notifier_ioctl,
#endif
};
1994
/*
 * Create an event notifier (or event notifier enabler) fd within an
 * event notifier group.
 *
 * Tracepoint and syscall instrumentation create an enabler (the event
 * name may be a star-glob pattern); kprobe and uprobe create the
 * notifier immediately. The new fd holds a reference on the group
 * file. Returns the installed fd on success, a negative error code on
 * failure; resources are unwound in reverse order on error.
 */
static
int lttng_abi_create_event_notifier(struct file *event_notifier_group_file,
		struct lttng_kernel_event_notifier *event_notifier_param)
{
	struct lttng_event_notifier_group *event_notifier_group =
			event_notifier_group_file->private_data;
	int event_notifier_fd, ret;
	struct file *event_notifier_file;
	void *priv;

	/* Reject instrumentation types not supported for event notifiers. */
	switch (event_notifier_param->event.instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_UPROBE:
		break;
	case LTTNG_KERNEL_KPROBE:
		/* Force NUL-termination of the user-supplied symbol name. */
		event_notifier_param->event.u.kprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
		break;
	case LTTNG_KERNEL_SYSCALL:
		break;
	case LTTNG_KERNEL_KRETPROBE:
		/* Placing an event notifier on kretprobe is not supported. */
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	default:
		ret = -EINVAL;
		goto inval_instr;
	}

	/* Force NUL-termination of the user-supplied event name. */
	event_notifier_param->event.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';

	event_notifier_fd = lttng_get_unused_fd();
	if (event_notifier_fd < 0) {
		ret = event_notifier_fd;
		goto fd_error;
	}

	event_notifier_file = anon_inode_getfile("[lttng_event_notifier]",
					&lttng_event_notifier_fops,
					NULL, O_RDWR);
	if (IS_ERR(event_notifier_file)) {
		ret = PTR_ERR(event_notifier_file);
		goto file_error;
	}

	/* The event notifier holds a reference on the event notifier group. */
	if (!atomic_long_add_unless(&event_notifier_group_file->f_count, 1, LONG_MAX)) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}

	switch (event_notifier_param->event.instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:	/* Fall-through */
	case LTTNG_KERNEL_SYSCALL:
	{
		struct lttng_event_notifier_enabler *enabler;

		if (strutils_is_star_glob_pattern(event_notifier_param->event.name)) {
			/*
			 * If the event name is a star globbing pattern,
			 * we create the special star globbing enabler.
			 */
			enabler = lttng_event_notifier_enabler_create(
					event_notifier_group,
					LTTNG_ENABLER_FORMAT_STAR_GLOB,
					event_notifier_param);
		} else {
			enabler = lttng_event_notifier_enabler_create(
					event_notifier_group,
					LTTNG_ENABLER_FORMAT_NAME,
					event_notifier_param);
		}
		/*
		 * NOTE(review): enabler creation result is not checked;
		 * private_data may end up NULL on allocation failure —
		 * release() tolerates NULL, verify the ioctl path too.
		 */
		priv = enabler;
		break;
	}

	case LTTNG_KERNEL_KPROBE:	/* Fall-through */
	case LTTNG_KERNEL_KRETPROBE:	/* Fall-through (rejected above). */
	case LTTNG_KERNEL_UPROBE:
	{
		struct lttng_event_notifier *event_notifier;

		/*
		 * We tolerate no failure path after event notifier creation.
		 * It will stay invariant for the rest of the session.
		 */
		event_notifier = lttng_event_notifier_create(NULL,
				event_notifier_param->event.token,
				event_notifier_param->error_counter_index,
				event_notifier_group,
				event_notifier_param, NULL,
				event_notifier_param->event.instrumentation);
		WARN_ON_ONCE(!event_notifier);
		if (IS_ERR(event_notifier)) {
			ret = PTR_ERR(event_notifier);
			goto event_notifier_error;
		}
		priv = event_notifier;
		break;
	}

	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	case LTTNG_KERNEL_NOOP:	/* Fall-through */
	default:
		ret = -EINVAL;
		goto event_notifier_error;
	}
	event_notifier_file->private_data = priv;
	fd_install(event_notifier_fd, event_notifier_file);
	return event_notifier_fd;

event_notifier_error:
	atomic_long_dec(&event_notifier_group_file->f_count);
refcount_error:
	fput(event_notifier_file);
file_error:
	put_unused_fd(event_notifier_fd);
fd_error:
inval_instr:
	return ret;
}
2115
static
long lttng_abi_event_notifier_group_create_error_counter(
		struct file *event_notifier_group_file,
		const struct lttng_kernel_counter_conf *error_counter_conf)
{
	int counter_fd, ret;
	char *counter_transport_name;
	size_t counter_len;
	struct lttng_counter *counter = NULL;
	struct file *counter_file;
	struct lttng_event_notifier_group *event_notifier_group =
		(struct lttng_event_notifier_group *) event_notifier_group_file->private_data;

	/*
	 * Validate the userspace-provided counter configuration before
	 * allocating anything: only a 1-dimensional, modular-arithmetic
	 * per-cpu counter (32- or 64-bit) is supported as error counter.
	 */
	if (error_counter_conf->arithmetic != LTTNG_KERNEL_COUNTER_ARITHMETIC_MODULAR) {
		printk(KERN_ERR "LTTng: event_notifier: Error counter of the wrong arithmetic type.\n");
		return -EINVAL;
	}

	if (error_counter_conf->number_dimensions != 1) {
		printk(KERN_ERR "LTTng: event_notifier: Error counter has more than one dimension.\n");
		return -EINVAL;
	}

	switch (error_counter_conf->bitness) {
	case LTTNG_KERNEL_COUNTER_BITNESS_64:
		counter_transport_name = "counter-per-cpu-64-modular";
		break;
	case LTTNG_KERNEL_COUNTER_BITNESS_32:
		counter_transport_name = "counter-per-cpu-32-modular";
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Lock sessions to provide mutual exclusion against concurrent
	 * modification of event_notifier group, which would result in
	 * overwriting the error counter if set concurrently.
	 */
	lttng_lock_sessions();

	/* At most one error counter per event notifier group. */
	if (event_notifier_group->error_counter) {
		printk(KERN_ERR "Error counter already created in event_notifier group\n");
		ret = -EBUSY;
		goto fd_error;
	}

	counter_fd = lttng_get_unused_fd();
	if (counter_fd < 0) {
		ret = counter_fd;
		goto fd_error;
	}

	counter_file = anon_inode_getfile("[lttng_counter]",
				       &lttng_counter_fops,
				       NULL, O_RDONLY);
	if (IS_ERR(counter_file)) {
		ret = PTR_ERR(counter_file);
		goto file_error;
	}

	counter_len = error_counter_conf->dimensions[0].size;

	/*
	 * Pin the event notifier group file for the lifetime of the
	 * counter (the counter references the group). Balanced by the
	 * atomic_long_dec on the counter_error path, and by the group
	 * file release once the counter is torn down.
	 */
	if (!atomic_long_add_unless(&event_notifier_group_file->f_count, 1, LONG_MAX)) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}

	counter = lttng_kernel_counter_create(counter_transport_name,
			1, &counter_len);
	if (!counter) {
		ret = -EINVAL;
		goto counter_error;
	}

	event_notifier_group->error_counter_len = counter_len;
	/*
	 * store-release to publish error counter matches load-acquire
	 * in record_error. Ensures the counter is created and the
	 * error_counter_len is set before they are used.
	 */
	lttng_smp_store_release(&event_notifier_group->error_counter, counter);

	counter->file = counter_file;
	counter->owner = event_notifier_group->file;
	counter_file->private_data = counter;
	/* Ownership transferred. */
	counter = NULL;

	/* Publish the fd to userspace only once fully initialized. */
	fd_install(counter_fd, counter_file);
	lttng_unlock_sessions();

	return counter_fd;

counter_error:
	atomic_long_dec(&event_notifier_group_file->f_count);
refcount_error:
	fput(counter_file);
file_error:
	put_unused_fd(counter_fd);
fd_error:
	lttng_unlock_sessions();
	return ret;
}
2220
2221 static
2222 long lttng_event_notifier_group_ioctl(struct file *file, unsigned int cmd,
2223 unsigned long arg)
2224 {
2225 switch (cmd) {
2226 case LTTNG_KERNEL_EVENT_NOTIFIER_GROUP_NOTIFICATION_FD:
2227 {
2228 return lttng_abi_open_event_notifier_group_stream(file);
2229 }
2230 case LTTNG_KERNEL_EVENT_NOTIFIER_CREATE:
2231 {
2232 struct lttng_kernel_event_notifier uevent_notifier_param;
2233
2234 if (copy_from_user(&uevent_notifier_param,
2235 (struct lttng_kernel_event_notifier __user *) arg,
2236 sizeof(uevent_notifier_param)))
2237 return -EFAULT;
2238 return lttng_abi_create_event_notifier(file, &uevent_notifier_param);
2239 }
2240 case LTTNG_KERNEL_COUNTER:
2241 {
2242 struct lttng_kernel_counter_conf uerror_counter_conf;
2243
2244 if (copy_from_user(&uerror_counter_conf,
2245 (struct lttng_kernel_counter_conf __user *) arg,
2246 sizeof(uerror_counter_conf)))
2247 return -EFAULT;
2248 return lttng_abi_event_notifier_group_create_error_counter(file,
2249 &uerror_counter_conf);
2250 }
2251 default:
2252 return -ENOIOCTLCMD;
2253 }
2254 return 0;
2255 }
2256
2257 static
2258 int lttng_event_notifier_group_release(struct inode *inode, struct file *file)
2259 {
2260 struct lttng_event_notifier_group *event_notifier_group =
2261 file->private_data;
2262
2263 if (event_notifier_group)
2264 lttng_event_notifier_group_destroy(event_notifier_group);
2265 return 0;
2266 }
2267
/* File operations for event notifier group file descriptors. */
static const struct file_operations lttng_event_notifier_group_fops = {
	.owner = THIS_MODULE,
	.release = lttng_event_notifier_group_release,
	.unlocked_ioctl = lttng_event_notifier_group_ioctl,
#ifdef CONFIG_COMPAT
	/* Structures are layout-compatible: reuse the native handler. */
	.compat_ioctl = lttng_event_notifier_group_ioctl,
#endif
};
2276
/**
 * lttng_channel_ioctl - lttng syscall through ioctl
 *
 *	@file: the file
 *	@cmd: the command
 *	@arg: command arg
 *
 *	This ioctl implements lttng commands:
 *	LTTNG_KERNEL_STREAM
 *		Returns an event stream file descriptor or failure.
 *		(typically, one event stream records events from one CPU)
 *	LTTNG_KERNEL_EVENT
 *		Returns an event file descriptor or failure.
 *	LTTNG_KERNEL_CONTEXT
 *		Prepend a context field to each event in the channel
 *	LTTNG_KERNEL_ENABLE
 *		Enable recording for events in this channel (weak enable)
 *	LTTNG_KERNEL_DISABLE
 *		Disable recording for events in this channel (strong disable)
 *
 * The LTTNG_KERNEL_OLD_* commands accept the legacy ABI structure
 * layouts and translate them to the current layouts before dispatching
 * to the same implementation as their current counterparts.
 *
 * Channel and event file descriptors also hold a reference on the session.
 */
static
long lttng_channel_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct lttng_channel *channel = file->private_data;

	switch (cmd) {
	case LTTNG_KERNEL_OLD_STREAM:
	case LTTNG_KERNEL_STREAM:
		return lttng_abi_open_stream(file);
	case LTTNG_KERNEL_OLD_EVENT:
	{
		struct lttng_kernel_event *uevent_param;
		struct lttng_kernel_old_event *old_uevent_param;
		int ret;

		/*
		 * Heap-allocate both the old and translated parameter
		 * structures (too large for the kernel stack).
		 *
		 * NOTE(review): uevent_param is kmalloc'd, so fields not
		 * explicitly copied below (e.g. the union for non-probe
		 * instrumentation) stay uninitialized — presumably
		 * lttng_abi_create_event only reads the translated
		 * fields for the given instrumentation type; confirm.
		 */
		uevent_param = kmalloc(sizeof(struct lttng_kernel_event),
				GFP_KERNEL);
		if (!uevent_param) {
			ret = -ENOMEM;
			goto old_event_end;
		}
		old_uevent_param = kmalloc(
				sizeof(struct lttng_kernel_old_event),
				GFP_KERNEL);
		if (!old_uevent_param) {
			ret = -ENOMEM;
			goto old_event_error_free_param;
		}
		if (copy_from_user(old_uevent_param,
				(struct lttng_kernel_old_event __user *) arg,
				sizeof(struct lttng_kernel_old_event))) {
			ret = -EFAULT;
			goto old_event_error_free_old_param;
		}

		/* Translate old ABI layout to the current one. */
		memcpy(uevent_param->name, old_uevent_param->name,
				sizeof(uevent_param->name));
		uevent_param->instrumentation =
			old_uevent_param->instrumentation;

		switch (old_uevent_param->instrumentation) {
		case LTTNG_KERNEL_KPROBE:
			uevent_param->u.kprobe.addr =
				old_uevent_param->u.kprobe.addr;
			uevent_param->u.kprobe.offset =
				old_uevent_param->u.kprobe.offset;
			memcpy(uevent_param->u.kprobe.symbol_name,
				old_uevent_param->u.kprobe.symbol_name,
				sizeof(uevent_param->u.kprobe.symbol_name));
			break;
		case LTTNG_KERNEL_KRETPROBE:
			uevent_param->u.kretprobe.addr =
				old_uevent_param->u.kretprobe.addr;
			uevent_param->u.kretprobe.offset =
				old_uevent_param->u.kretprobe.offset;
			memcpy(uevent_param->u.kretprobe.symbol_name,
				old_uevent_param->u.kretprobe.symbol_name,
				sizeof(uevent_param->u.kretprobe.symbol_name));
			break;
		case LTTNG_KERNEL_FUNCTION:
			WARN_ON_ONCE(1);
			/* Not implemented. */
			break;
		default:
			/* Instrumentation types without union payload. */
			break;
		}
		ret = lttng_abi_create_event(file, uevent_param);

old_event_error_free_old_param:
		kfree(old_uevent_param);
old_event_error_free_param:
		kfree(uevent_param);
old_event_end:
		return ret;
	}
	case LTTNG_KERNEL_EVENT:
	{
		struct lttng_kernel_event uevent_param;

		if (copy_from_user(&uevent_param,
				(struct lttng_kernel_event __user *) arg,
				sizeof(uevent_param)))
			return -EFAULT;
		return lttng_abi_create_event(file, &uevent_param);
	}
	case LTTNG_KERNEL_OLD_CONTEXT:
	{
		struct lttng_kernel_context *ucontext_param;
		struct lttng_kernel_old_context *old_ucontext_param;
		int ret;

		ucontext_param = kmalloc(sizeof(struct lttng_kernel_context),
				GFP_KERNEL);
		if (!ucontext_param) {
			ret = -ENOMEM;
			goto old_ctx_end;
		}
		old_ucontext_param = kmalloc(sizeof(struct lttng_kernel_old_context),
				GFP_KERNEL);
		if (!old_ucontext_param) {
			ret = -ENOMEM;
			goto old_ctx_error_free_param;
		}

		if (copy_from_user(old_ucontext_param,
				(struct lttng_kernel_old_context __user *) arg,
				sizeof(struct lttng_kernel_old_context))) {
			ret = -EFAULT;
			goto old_ctx_error_free_old_param;
		}
		/* Translate old ABI layout to the current one. */
		ucontext_param->ctx = old_ucontext_param->ctx;
		memcpy(ucontext_param->padding, old_ucontext_param->padding,
				sizeof(ucontext_param->padding));
		/* only type that uses the union */
		if (old_ucontext_param->ctx == LTTNG_KERNEL_CONTEXT_PERF_COUNTER) {
			ucontext_param->u.perf_counter.type =
				old_ucontext_param->u.perf_counter.type;
			ucontext_param->u.perf_counter.config =
				old_ucontext_param->u.perf_counter.config;
			memcpy(ucontext_param->u.perf_counter.name,
				old_ucontext_param->u.perf_counter.name,
				sizeof(ucontext_param->u.perf_counter.name));
		}

		ret = lttng_abi_add_context(file,
				ucontext_param,
				&channel->ctx, channel->session);

old_ctx_error_free_old_param:
		kfree(old_ucontext_param);
old_ctx_error_free_param:
		kfree(ucontext_param);
old_ctx_end:
		return ret;
	}
	case LTTNG_KERNEL_CONTEXT:
	{
		struct lttng_kernel_context ucontext_param;

		if (copy_from_user(&ucontext_param,
				(struct lttng_kernel_context __user *) arg,
				sizeof(ucontext_param)))
			return -EFAULT;
		return lttng_abi_add_context(file,
				&ucontext_param,
				&channel->ctx, channel->session);
	}
	case LTTNG_KERNEL_OLD_ENABLE:
	case LTTNG_KERNEL_ENABLE:
		return lttng_channel_enable(channel);
	case LTTNG_KERNEL_OLD_DISABLE:
	case LTTNG_KERNEL_DISABLE:
		return lttng_channel_disable(channel);
	case LTTNG_KERNEL_SYSCALL_MASK:
		return lttng_channel_syscall_mask(channel,
			(struct lttng_kernel_syscall_mask __user *) arg);
	default:
		return -ENOIOCTLCMD;
	}
}
2459
2460 /**
2461 * lttng_metadata_ioctl - lttng syscall through ioctl
2462 *
2463 * @file: the file
2464 * @cmd: the command
2465 * @arg: command arg
2466 *
2467 * This ioctl implements lttng commands:
2468 * LTTNG_KERNEL_STREAM
2469 * Returns an event stream file descriptor or failure.
2470 *
2471 * Channel and event file descriptors also hold a reference on the session.
2472 */
2473 static
2474 long lttng_metadata_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2475 {
2476 switch (cmd) {
2477 case LTTNG_KERNEL_OLD_STREAM:
2478 case LTTNG_KERNEL_STREAM:
2479 return lttng_abi_open_metadata_stream(file);
2480 default:
2481 return -ENOIOCTLCMD;
2482 }
2483 }
2484
2485 /**
2486 * lttng_channel_poll - lttng stream addition/removal monitoring
2487 *
2488 * @file: the file
2489 * @wait: poll table
2490 */
2491 unsigned int lttng_channel_poll(struct file *file, poll_table *wait)
2492 {
2493 struct lttng_channel *channel = file->private_data;
2494 unsigned int mask = 0;
2495
2496 if (file->f_mode & FMODE_READ) {
2497 poll_wait_set_exclusive(wait);
2498 poll_wait(file, channel->ops->get_hp_wait_queue(channel->chan),
2499 wait);
2500
2501 if (channel->ops->is_disabled(channel->chan))
2502 return POLLERR;
2503 if (channel->ops->is_finalized(channel->chan))
2504 return POLLHUP;
2505 if (channel->ops->buffer_has_read_closed_stream(channel->chan))
2506 return POLLIN | POLLRDNORM;
2507 return 0;
2508 }
2509 return mask;
2510
2511 }
2512
2513 static
2514 int lttng_channel_release(struct inode *inode, struct file *file)
2515 {
2516 struct lttng_channel *channel = file->private_data;
2517
2518 if (channel)
2519 fput(channel->session->file);
2520 return 0;
2521 }
2522
2523 static
2524 int lttng_metadata_channel_release(struct inode *inode, struct file *file)
2525 {
2526 struct lttng_channel *channel = file->private_data;
2527
2528 if (channel) {
2529 fput(channel->session->file);
2530 lttng_metadata_channel_destroy(channel);
2531 }
2532
2533 return 0;
2534 }
2535
/* File operations for data channel file descriptors. */
static const struct file_operations lttng_channel_fops = {
	.owner = THIS_MODULE,
	.release = lttng_channel_release,
	.poll = lttng_channel_poll,
	.unlocked_ioctl = lttng_channel_ioctl,
#ifdef CONFIG_COMPAT
	/* Structures are layout-compatible: reuse the native handler. */
	.compat_ioctl = lttng_channel_ioctl,
#endif
};
2545
/* File operations for the metadata channel file descriptor (no poll). */
static const struct file_operations lttng_metadata_fops = {
	.owner = THIS_MODULE,
	.release = lttng_metadata_channel_release,
	.unlocked_ioctl = lttng_metadata_ioctl,
#ifdef CONFIG_COMPAT
	/* Structures are layout-compatible: reuse the native handler. */
	.compat_ioctl = lttng_metadata_ioctl,
#endif
};
2554
/**
 * lttng_event_ioctl - lttng syscall through ioctl
 *
 *	@file: the file
 *	@cmd: the command
 *	@arg: command arg
 *
 *	This ioctl implements lttng commands:
 *	LTTNG_KERNEL_CONTEXT
 *		Prepend a context field to each record of this event
 *	LTTNG_KERNEL_ENABLE
 *		Enable recording for this event (weak enable)
 *	LTTNG_KERNEL_DISABLE
 *		Disable recording for this event (strong disable)
 *
 * The file's private_data points either at a struct lttng_event or a
 * struct lttng_event_enabler; both start with an enum lttng_event_type
 * discriminant, which each command switches on to pick the right
 * implementation.
 */
static
long lttng_event_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct lttng_event *event;
	struct lttng_event_enabler *event_enabler;
	enum lttng_event_type *evtype = file->private_data;

	switch (cmd) {
	case LTTNG_KERNEL_OLD_CONTEXT:
	{
		/* Not implemented */
		return -ENOSYS;
	}
	case LTTNG_KERNEL_CONTEXT:
	{
		/* Not implemented */
		return -ENOSYS;
	}
	case LTTNG_KERNEL_OLD_ENABLE:
	case LTTNG_KERNEL_ENABLE:
		switch (*evtype) {
		case LTTNG_TYPE_EVENT:
			event = file->private_data;
			return lttng_event_enable(event);
		case LTTNG_TYPE_ENABLER:
			event_enabler = file->private_data;
			return lttng_event_enabler_enable(event_enabler);
		default:
			WARN_ON_ONCE(1);
			return -ENOSYS;
		}
	case LTTNG_KERNEL_OLD_DISABLE:
	case LTTNG_KERNEL_DISABLE:
		switch (*evtype) {
		case LTTNG_TYPE_EVENT:
			event = file->private_data;
			return lttng_event_disable(event);
		case LTTNG_TYPE_ENABLER:
			event_enabler = file->private_data;
			return lttng_event_enabler_disable(event_enabler);
		default:
			WARN_ON_ONCE(1);
			return -ENOSYS;
		}
	case LTTNG_KERNEL_FILTER:
		/* Filters attach to enablers only, never to a created event. */
		switch (*evtype) {
		case LTTNG_TYPE_EVENT:
			return -EINVAL;
		case LTTNG_TYPE_ENABLER:
		{
			event_enabler = file->private_data;
			return lttng_event_enabler_attach_filter_bytecode(
				event_enabler,
				(struct lttng_kernel_filter_bytecode __user *) arg);
		}
		default:
			WARN_ON_ONCE(1);
			return -ENOSYS;
		}
	case LTTNG_KERNEL_ADD_CALLSITE:
		/* Callsites attach to events only, never to an enabler. */
		switch (*evtype) {
		case LTTNG_TYPE_EVENT:
			event = file->private_data;
			return lttng_event_add_callsite(event,
				(struct lttng_kernel_event_callsite __user *) arg);
		case LTTNG_TYPE_ENABLER:
			return -EINVAL;
		default:
			WARN_ON_ONCE(1);
			return -ENOSYS;
		}
	default:
		return -ENOIOCTLCMD;
	}
}
2645
2646 static
2647 int lttng_event_release(struct inode *inode, struct file *file)
2648 {
2649 struct lttng_event *event;
2650 struct lttng_event_enabler *event_enabler;
2651 enum lttng_event_type *evtype = file->private_data;
2652
2653 if (!evtype)
2654 return 0;
2655
2656 switch (*evtype) {
2657 case LTTNG_TYPE_EVENT:
2658 event = file->private_data;
2659 if (event)
2660 fput(event->chan->file);
2661 break;
2662 case LTTNG_TYPE_ENABLER:
2663 event_enabler = file->private_data;
2664 if (event_enabler)
2665 fput(event_enabler->chan->file);
2666 break;
2667 default:
2668 WARN_ON_ONCE(1);
2669 break;
2670 }
2671
2672 return 0;
2673 }
2674
/* TODO: filter control ioctl */
/* File operations for event / event enabler file descriptors. */
static const struct file_operations lttng_event_fops = {
	.owner = THIS_MODULE,
	.release = lttng_event_release,
	.unlocked_ioctl = lttng_event_ioctl,
#ifdef CONFIG_COMPAT
	/* Structures are layout-compatible: reuse the native handler. */
	.compat_ioctl = lttng_event_ioctl,
#endif
};
2684
2685 static int put_u64(uint64_t val, unsigned long arg)
2686 {
2687 return put_user(val, (uint64_t __user *) arg);
2688 }
2689
2690 static int put_u32(uint32_t val, unsigned long arg)
2691 {
2692 return put_user(val, (uint32_t __user *) arg);
2693 }
2694
/*
 * lttng_stream_ring_buffer_ioctl - stream fd ioctl dispatch
 *
 * Implements the LTTng-specific packet-introspection commands
 * (timestamps, discarded event count, packet/content sizes, stream and
 * instance ids, sequence number) on top of the generic ring buffer
 * ioctls, to which unknown commands are forwarded.
 *
 * NOTE(review): on the error path, the specific error code returned by
 * the ops callback (in "ret") is discarded and -ENOSYS is returned
 * instead — confirm this flattening of error codes is intentional.
 */
static long lttng_stream_ring_buffer_ioctl(struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	struct lib_ring_buffer *buf = filp->private_data;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	const struct lttng_channel_ops *ops = chan->backend.priv_ops;
	int ret;

	if (atomic_read(&chan->record_disabled))
		return -EIO;

	switch (cmd) {
	case LTTNG_RING_BUFFER_GET_TIMESTAMP_BEGIN:
	{
		uint64_t ts;

		ret = ops->timestamp_begin(config, buf, &ts);
		if (ret < 0)
			goto error;
		return put_u64(ts, arg);
	}
	case LTTNG_RING_BUFFER_GET_TIMESTAMP_END:
	{
		uint64_t ts;

		ret = ops->timestamp_end(config, buf, &ts);
		if (ret < 0)
			goto error;
		return put_u64(ts, arg);
	}
	case LTTNG_RING_BUFFER_GET_EVENTS_DISCARDED:
	{
		uint64_t ed;

		ret = ops->events_discarded(config, buf, &ed);
		if (ret < 0)
			goto error;
		return put_u64(ed, arg);
	}
	case LTTNG_RING_BUFFER_GET_CONTENT_SIZE:
	{
		uint64_t cs;

		ret = ops->content_size(config, buf, &cs);
		if (ret < 0)
			goto error;
		return put_u64(cs, arg);
	}
	case LTTNG_RING_BUFFER_GET_PACKET_SIZE:
	{
		uint64_t ps;

		ret = ops->packet_size(config, buf, &ps);
		if (ret < 0)
			goto error;
		return put_u64(ps, arg);
	}
	case LTTNG_RING_BUFFER_GET_STREAM_ID:
	{
		uint64_t si;

		ret = ops->stream_id(config, buf, &si);
		if (ret < 0)
			goto error;
		return put_u64(si, arg);
	}
	case LTTNG_RING_BUFFER_GET_CURRENT_TIMESTAMP:
	{
		uint64_t ts;

		ret = ops->current_timestamp(config, buf, &ts);
		if (ret < 0)
			goto error;
		return put_u64(ts, arg);
	}
	case LTTNG_RING_BUFFER_GET_SEQ_NUM:
	{
		uint64_t seq;

		ret = ops->sequence_number(config, buf, &seq);
		if (ret < 0)
			goto error;
		return put_u64(seq, arg);
	}
	case LTTNG_RING_BUFFER_INSTANCE_ID:
	{
		uint64_t id;

		ret = ops->instance_id(config, buf, &id);
		if (ret < 0)
			goto error;
		return put_u64(id, arg);
	}
	default:
		/* Delegate generic ring buffer commands. */
		return lib_ring_buffer_file_operations.unlocked_ioctl(filp,
				cmd, arg);
	}

error:
	return -ENOSYS;
}
2797
#ifdef CONFIG_COMPAT
/*
 * Compat (32-bit userspace on 64-bit kernel) counterpart of
 * lttng_stream_ring_buffer_ioctl. Mirrors the native handler, using
 * the *_COMPAT_* command numbers; unknown commands are forwarded to
 * the generic ring buffer compat ioctl.
 *
 * NOTE(review): the current-timestamp case uses the non-COMPAT
 * constant LTTNG_RING_BUFFER_GET_CURRENT_TIMESTAMP, unlike its
 * siblings — verify this is the intended command number for the
 * compat path.
 *
 * NOTE(review): as in the native handler, the error path discards the
 * specific "ret" value and returns -ENOSYS — confirm intentional.
 */
static long lttng_stream_ring_buffer_compat_ioctl(struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	struct lib_ring_buffer *buf = filp->private_data;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	const struct lttng_channel_ops *ops = chan->backend.priv_ops;
	int ret;

	if (atomic_read(&chan->record_disabled))
		return -EIO;

	switch (cmd) {
	case LTTNG_RING_BUFFER_COMPAT_GET_TIMESTAMP_BEGIN:
	{
		uint64_t ts;

		ret = ops->timestamp_begin(config, buf, &ts);
		if (ret < 0)
			goto error;
		return put_u64(ts, arg);
	}
	case LTTNG_RING_BUFFER_COMPAT_GET_TIMESTAMP_END:
	{
		uint64_t ts;

		ret = ops->timestamp_end(config, buf, &ts);
		if (ret < 0)
			goto error;
		return put_u64(ts, arg);
	}
	case LTTNG_RING_BUFFER_COMPAT_GET_EVENTS_DISCARDED:
	{
		uint64_t ed;

		ret = ops->events_discarded(config, buf, &ed);
		if (ret < 0)
			goto error;
		return put_u64(ed, arg);
	}
	case LTTNG_RING_BUFFER_COMPAT_GET_CONTENT_SIZE:
	{
		uint64_t cs;

		ret = ops->content_size(config, buf, &cs);
		if (ret < 0)
			goto error;
		return put_u64(cs, arg);
	}
	case LTTNG_RING_BUFFER_COMPAT_GET_PACKET_SIZE:
	{
		uint64_t ps;

		ret = ops->packet_size(config, buf, &ps);
		if (ret < 0)
			goto error;
		return put_u64(ps, arg);
	}
	case LTTNG_RING_BUFFER_COMPAT_GET_STREAM_ID:
	{
		uint64_t si;

		ret = ops->stream_id(config, buf, &si);
		if (ret < 0)
			goto error;
		return put_u64(si, arg);
	}
	case LTTNG_RING_BUFFER_GET_CURRENT_TIMESTAMP:
	{
		uint64_t ts;

		ret = ops->current_timestamp(config, buf, &ts);
		if (ret < 0)
			goto error;
		return put_u64(ts, arg);
	}
	case LTTNG_RING_BUFFER_COMPAT_GET_SEQ_NUM:
	{
		uint64_t seq;

		ret = ops->sequence_number(config, buf, &seq);
		if (ret < 0)
			goto error;
		return put_u64(seq, arg);
	}
	case LTTNG_RING_BUFFER_COMPAT_INSTANCE_ID:
	{
		uint64_t id;

		ret = ops->instance_id(config, buf, &id);
		if (ret < 0)
			goto error;
		return put_u64(id, arg);
	}
	default:
		/* Delegate generic ring buffer compat commands. */
		return lib_ring_buffer_file_operations.compat_ioctl(filp,
				cmd, arg);
	}

error:
	return -ENOSYS;
}
#endif /* CONFIG_COMPAT */
2902
/*
 * Populate lttng_stream_ring_buffer_file_operations from the generic
 * ring buffer file operations, overriding only the ioctl entry points
 * with the LTTng-specific dispatchers defined above. Called once from
 * lttng_abi_init().
 */
static void lttng_stream_override_ring_buffer_fops(void)
{
	lttng_stream_ring_buffer_file_operations.owner = THIS_MODULE;
	lttng_stream_ring_buffer_file_operations.open =
		lib_ring_buffer_file_operations.open;
	lttng_stream_ring_buffer_file_operations.release =
		lib_ring_buffer_file_operations.release;
	lttng_stream_ring_buffer_file_operations.poll =
		lib_ring_buffer_file_operations.poll;
	lttng_stream_ring_buffer_file_operations.splice_read =
		lib_ring_buffer_file_operations.splice_read;
	lttng_stream_ring_buffer_file_operations.mmap =
		lib_ring_buffer_file_operations.mmap;
	/* LTTng-specific override: stream ioctl dispatcher. */
	lttng_stream_ring_buffer_file_operations.unlocked_ioctl =
		lttng_stream_ring_buffer_ioctl;
	lttng_stream_ring_buffer_file_operations.llseek =
		lib_ring_buffer_file_operations.llseek;
#ifdef CONFIG_COMPAT
	lttng_stream_ring_buffer_file_operations.compat_ioctl =
		lttng_stream_ring_buffer_compat_ioctl;
#endif
}
2925
/*
 * Module ABI initialization: sync vmalloc mappings, take a trace-clock
 * reference, set up the tracepoint memory pool, create the /proc/lttng
 * control file, and wire up the stream ring buffer file operations.
 * Returns 0 on success, negative errno on failure (with partial state
 * rolled back).
 */
int __init lttng_abi_init(void)
{
	int ret = 0;

	wrapper_vmalloc_sync_mappings();
	lttng_clock_ref();

	ret = lttng_tp_mempool_init();
	if (ret) {
		/*
		 * NOTE(review): the error path below also calls
		 * lttng_tp_mempool_destroy() after a failed init —
		 * presumably safe on a partially-initialized pool;
		 * confirm.
		 */
		goto error;
	}

	lttng_proc_dentry = proc_create_data("lttng", S_IRUSR | S_IWUSR, NULL,
					&lttng_proc_ops, NULL);

	if (!lttng_proc_dentry) {
		printk(KERN_ERR "LTTng: Error creating control file\n");
		ret = -ENOMEM;
		goto error;
	}
	lttng_stream_override_ring_buffer_fops();
	return 0;

error:
	lttng_tp_mempool_destroy();
	lttng_clock_unref();
	return ret;
}
2954
2955 /* No __exit annotation because used by init error path too. */
2956 void lttng_abi_exit(void)
2957 {
2958 lttng_tp_mempool_destroy();
2959 lttng_clock_unref();
2960 if (lttng_proc_dentry)
2961 remove_proc_entry("lttng", NULL);
2962 }