1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-abi.c
4 *
5 * LTTng ABI
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Mimic system calls for:
10 * - session creation, returns a file descriptor or failure.
11 * - channel creation, returns a file descriptor or failure.
12 * - Operates on a session file descriptor
13 * - Takes all channel options as parameters.
14 * - stream get, returns a file descriptor or failure.
15 * - Operates on a channel file descriptor.
16 * - stream notifier get, returns a file descriptor or failure.
17 * - Operates on a channel file descriptor.
18 * - event creation, returns a file descriptor or failure.
19 * - Operates on a channel file descriptor
20 * - Takes an event name as parameter
21 * - Takes an instrumentation source as parameter
22 * - e.g. tracepoints, dynamic_probes...
23 * - Takes instrumentation source specific arguments.
24 */
25
26 #include <linux/module.h>
27 #include <linux/proc_fs.h>
28 #include <linux/anon_inodes.h>
29 #include <linux/file.h>
30 #include <linux/uaccess.h>
31 #include <linux/slab.h>
32 #include <linux/err.h>
33 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
34 #include <ringbuffer/vfs.h>
35 #include <ringbuffer/backend.h>
36 #include <ringbuffer/frontend.h>
37 #include <wrapper/poll.h>
38 #include <wrapper/file.h>
39 #include <wrapper/kref.h>
40 #include <wrapper/barrier.h>
41 #include <lttng/string-utils.h>
42 #include <lttng/abi.h>
43 #include <lttng/abi-old.h>
44 #include <lttng/events.h>
45 #include <lttng/tracer.h>
46 #include <lttng/tp-mempool.h>
47 #include <ringbuffer/frontend_types.h>
48 #include <ringbuffer/iterator.h>
49
50 /*
51 * This is LTTng's own personal way to create a system call as an external
52 * module. We use ioctl() on /proc/lttng.
53 */
54
55 static struct proc_dir_entry *lttng_proc_dentry;
56
57 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0))
58 static const struct proc_ops lttng_proc_ops;
59 #else
60 static const struct file_operations lttng_proc_ops;
61 #endif
62
63 static const struct file_operations lttng_session_fops;
64 static const struct file_operations lttng_event_notifier_group_fops;
65 static const struct file_operations lttng_channel_fops;
66 static const struct file_operations lttng_metadata_fops;
67 static const struct file_operations lttng_event_fops;
68 static struct file_operations lttng_stream_ring_buffer_file_operations;
69
70 static int put_u64(uint64_t val, unsigned long arg);
71 static int put_u32(uint32_t val, unsigned long arg);
72
73 static int validate_zeroed_padding(char *p, size_t len)
74 {
75 size_t i;
76
77 for (i = 0; i < len; i++) {
78 if (p[i])
79 return -1;
80 }
81 return 0;
82 }
83
84 /*
85 * Teardown management: opened file descriptors keep a refcount on the module,
86 * so it can only exit when all file descriptors are closed.
87 */
88
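/*
 * Create a tracing session object and wrap it in an anonymous inode
 * file. Returns the new file descriptor on success, a negative error
 * code on failure (the session object is destroyed on any error).
 */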
89 static
90 int lttng_abi_create_session(void)
91 {
92 struct lttng_session *session;
93 struct file *session_file;
94 int session_fd, ret;
95
96 session = lttng_session_create();
97 if (!session)
98 return -ENOMEM;
99 session_fd = lttng_get_unused_fd();
100 if (session_fd < 0) {
101 ret = session_fd;
102 goto fd_error;
103 }
104 session_file = anon_inode_getfile("[lttng_session]",
105 &lttng_session_fops,
106 session, O_RDWR);
107 if (IS_ERR(session_file)) {
108 ret = PTR_ERR(session_file);
109 goto file_error;
110 }
111 session->file = session_file;
112 fd_install(session_fd, session_file);
113 return session_fd;
114
115 file_error:
116 put_unused_fd(session_fd);
117 fd_error:
118 lttng_session_destroy(session);
119 return ret;
120 }
121
122 static void event_notifier_send_notification_work_wakeup(struct irq_work *entry)
123 {
124 struct lttng_event_notifier_group *event_notifier_group =
125 container_of(entry, struct lttng_event_notifier_group,
126 wakeup_pending);
127 wake_up_interruptible(&event_notifier_group->read_wait);
128 }
129
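/*
 * Create an event notifier group and wrap it in an anonymous inode
 * file. The group read wait queue and irq_work wakeup handler are
 * initialized before the file descriptor is installed.
 */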
130 static
131 int lttng_abi_create_event_notifier_group(void)
132 {
133 struct lttng_event_notifier_group *event_notifier_group;
134 struct file *event_notifier_group_file;
135 int event_notifier_group_fd, ret;
136
137 event_notifier_group = lttng_event_notifier_group_create();
138 if (!event_notifier_group)
139 return -ENOMEM;
140
141 event_notifier_group_fd = lttng_get_unused_fd();
142 if (event_notifier_group_fd < 0) {
143 ret = event_notifier_group_fd;
144 goto fd_error;
145 }
146 event_notifier_group_file = anon_inode_getfile("[lttng_event_notifier_group]",
147 &lttng_event_notifier_group_fops,
148 event_notifier_group, O_RDWR);
149 if (IS_ERR(event_notifier_group_file)) {
150 ret = PTR_ERR(event_notifier_group_file);
151 goto file_error;
152 }
153
154 event_notifier_group->file = event_notifier_group_file;
155 init_waitqueue_head(&event_notifier_group->read_wait);
156 init_irq_work(&event_notifier_group->wakeup_pending,
157 event_notifier_send_notification_work_wakeup);
158 fd_install(event_notifier_group_fd, event_notifier_group_file);
159 return event_notifier_group_fd;
160
161 file_error:
162 put_unused_fd(event_notifier_group_fd);
163 fd_error:
164 lttng_event_notifier_group_destroy(event_notifier_group);
165 return ret;
166 }
167
168 static
169 int lttng_abi_tracepoint_list(void)
170 {
171 struct file *tracepoint_list_file;
172 int file_fd, ret;
173
174 file_fd = lttng_get_unused_fd();
175 if (file_fd < 0) {
176 ret = file_fd;
177 goto fd_error;
178 }
179
180 tracepoint_list_file = anon_inode_getfile("[lttng_tracepoint_list]",
181 &lttng_tracepoint_list_fops,
182 NULL, O_RDWR);
183 if (IS_ERR(tracepoint_list_file)) {
184 ret = PTR_ERR(tracepoint_list_file);
185 goto file_error;
186 }
187 ret = lttng_tracepoint_list_fops.open(NULL, tracepoint_list_file);
188 if (ret < 0)
189 goto open_error;
190 fd_install(file_fd, tracepoint_list_file);
191 return file_fd;
192
193 open_error:
194 fput(tracepoint_list_file);
195 file_error:
196 put_unused_fd(file_fd);
197 fd_error:
198 return ret;
199 }
200
201 #ifndef CONFIG_HAVE_SYSCALL_TRACEPOINTS
202 static inline
203 int lttng_abi_syscall_list(void)
204 {
205 return -ENOSYS;
206 }
207 #else
208 static
209 int lttng_abi_syscall_list(void)
210 {
211 struct file *syscall_list_file;
212 int file_fd, ret;
213
214 file_fd = lttng_get_unused_fd();
215 if (file_fd < 0) {
216 ret = file_fd;
217 goto fd_error;
218 }
219
220 syscall_list_file = anon_inode_getfile("[lttng_syscall_list]",
221 &lttng_syscall_list_fops,
222 NULL, O_RDWR);
223 if (IS_ERR(syscall_list_file)) {
224 ret = PTR_ERR(syscall_list_file);
225 goto file_error;
226 }
227 ret = lttng_syscall_list_fops.open(NULL, syscall_list_file);
228 if (ret < 0)
229 goto open_error;
230 fd_install(file_fd, syscall_list_file);
231 return file_fd;
232
233 open_error:
234 fput(syscall_list_file);
235 file_error:
236 put_unused_fd(file_fd);
237 fd_error:
238 return ret;
239 }
240 #endif
241
242 static
243 void lttng_abi_tracer_version(struct lttng_kernel_tracer_version *v)
244 {
245 v->major = LTTNG_MODULES_MAJOR_VERSION;
246 v->minor = LTTNG_MODULES_MINOR_VERSION;
247 v->patchlevel = LTTNG_MODULES_PATCHLEVEL_VERSION;
248 }
249
250 static
251 void lttng_abi_tracer_abi_version(struct lttng_kernel_tracer_abi_version *v)
252 {
253 v->major = LTTNG_MODULES_ABI_MAJOR_VERSION;
254 v->minor = LTTNG_MODULES_ABI_MINOR_VERSION;
255 }
256
257 static
258 long lttng_abi_add_context(struct file *file,
259 struct lttng_kernel_context *context_param,
260 struct lttng_ctx **ctx, struct lttng_session *session)
261 {
262
263 if (session->been_active)
264 return -EPERM;
265
266 switch (context_param->ctx) {
267 case LTTNG_KERNEL_CONTEXT_PID:
268 return lttng_add_pid_to_ctx(ctx);
269 case LTTNG_KERNEL_CONTEXT_PRIO:
270 return lttng_add_prio_to_ctx(ctx);
271 case LTTNG_KERNEL_CONTEXT_NICE:
272 return lttng_add_nice_to_ctx(ctx);
273 case LTTNG_KERNEL_CONTEXT_VPID:
274 return lttng_add_vpid_to_ctx(ctx);
275 case LTTNG_KERNEL_CONTEXT_TID:
276 return lttng_add_tid_to_ctx(ctx);
277 case LTTNG_KERNEL_CONTEXT_VTID:
278 return lttng_add_vtid_to_ctx(ctx);
279 case LTTNG_KERNEL_CONTEXT_PPID:
280 return lttng_add_ppid_to_ctx(ctx);
281 case LTTNG_KERNEL_CONTEXT_VPPID:
282 return lttng_add_vppid_to_ctx(ctx);
283 case LTTNG_KERNEL_CONTEXT_PERF_COUNTER:
284 context_param->u.perf_counter.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
285 return lttng_add_perf_counter_to_ctx(context_param->u.perf_counter.type,
286 context_param->u.perf_counter.config,
287 context_param->u.perf_counter.name,
288 ctx);
289 case LTTNG_KERNEL_CONTEXT_PROCNAME:
290 return lttng_add_procname_to_ctx(ctx);
291 case LTTNG_KERNEL_CONTEXT_HOSTNAME:
292 return lttng_add_hostname_to_ctx(ctx);
293 case LTTNG_KERNEL_CONTEXT_CPU_ID:
294 return lttng_add_cpu_id_to_ctx(ctx);
295 case LTTNG_KERNEL_CONTEXT_INTERRUPTIBLE:
296 return lttng_add_interruptible_to_ctx(ctx);
297 case LTTNG_KERNEL_CONTEXT_NEED_RESCHEDULE:
298 return lttng_add_need_reschedule_to_ctx(ctx);
299 case LTTNG_KERNEL_CONTEXT_PREEMPTIBLE:
300 return lttng_add_preemptible_to_ctx(ctx);
301 case LTTNG_KERNEL_CONTEXT_MIGRATABLE:
302 return lttng_add_migratable_to_ctx(ctx);
303 case LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL:
304 case LTTNG_KERNEL_CONTEXT_CALLSTACK_USER:
305 return lttng_add_callstack_to_ctx(ctx, context_param->ctx);
306 case LTTNG_KERNEL_CONTEXT_CGROUP_NS:
307 return lttng_add_cgroup_ns_to_ctx(ctx);
308 case LTTNG_KERNEL_CONTEXT_IPC_NS:
309 return lttng_add_ipc_ns_to_ctx(ctx);
310 case LTTNG_KERNEL_CONTEXT_MNT_NS:
311 return lttng_add_mnt_ns_to_ctx(ctx);
312 case LTTNG_KERNEL_CONTEXT_NET_NS:
313 return lttng_add_net_ns_to_ctx(ctx);
314 case LTTNG_KERNEL_CONTEXT_PID_NS:
315 return lttng_add_pid_ns_to_ctx(ctx);
316 case LTTNG_KERNEL_CONTEXT_USER_NS:
317 return lttng_add_user_ns_to_ctx(ctx);
318 case LTTNG_KERNEL_CONTEXT_UTS_NS:
319 return lttng_add_uts_ns_to_ctx(ctx);
320 case LTTNG_KERNEL_CONTEXT_UID:
321 return lttng_add_uid_to_ctx(ctx);
322 case LTTNG_KERNEL_CONTEXT_EUID:
323 return lttng_add_euid_to_ctx(ctx);
324 case LTTNG_KERNEL_CONTEXT_SUID:
325 return lttng_add_suid_to_ctx(ctx);
326 case LTTNG_KERNEL_CONTEXT_GID:
327 return lttng_add_gid_to_ctx(ctx);
328 case LTTNG_KERNEL_CONTEXT_EGID:
329 return lttng_add_egid_to_ctx(ctx);
330 case LTTNG_KERNEL_CONTEXT_SGID:
331 return lttng_add_sgid_to_ctx(ctx);
332 case LTTNG_KERNEL_CONTEXT_VUID:
333 return lttng_add_vuid_to_ctx(ctx);
334 case LTTNG_KERNEL_CONTEXT_VEUID:
335 return lttng_add_veuid_to_ctx(ctx);
336 case LTTNG_KERNEL_CONTEXT_VSUID:
337 return lttng_add_vsuid_to_ctx(ctx);
338 case LTTNG_KERNEL_CONTEXT_VGID:
339 return lttng_add_vgid_to_ctx(ctx);
340 case LTTNG_KERNEL_CONTEXT_VEGID:
341 return lttng_add_vegid_to_ctx(ctx);
342 case LTTNG_KERNEL_CONTEXT_VSGID:
343 return lttng_add_vsgid_to_ctx(ctx);
344 case LTTNG_KERNEL_CONTEXT_TIME_NS:
345 return lttng_add_time_ns_to_ctx(ctx);
346 default:
347 return -EINVAL;
348 }
349 }
350
351 /**
352 * lttng_ioctl - lttng syscall through ioctl
353 *
354 * @file: the file
355 * @cmd: the command
356 * @arg: command arg
357 *
358 * This ioctl implements lttng commands:
359 * LTTNG_KERNEL_SESSION
360 * Returns a LTTng trace session file descriptor
361 * LTTNG_KERNEL_TRACER_VERSION
362 * Returns the LTTng kernel tracer version
363 * LTTNG_KERNEL_TRACEPOINT_LIST
364 * Returns a file descriptor listing available tracepoints
365 * LTTNG_KERNEL_WAIT_QUIESCENT
366 * Returns after all previously running probes have completed
367 * LTTNG_KERNEL_TRACER_ABI_VERSION
368 * Returns the LTTng kernel tracer ABI version
369 * LTTNG_KERNEL_EVENT_NOTIFIER_GROUP_CREATE
370 * Returns a LTTng event notifier group file descriptor
371 *
372 * The returned session will be deleted when its file descriptor is closed.
373 */
374 static
375 long lttng_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
376 {
377 switch (cmd) {
378 case LTTNG_KERNEL_OLD_SESSION:
379 case LTTNG_KERNEL_SESSION:
380 return lttng_abi_create_session();
381 case LTTNG_KERNEL_EVENT_NOTIFIER_GROUP_CREATE:
382 return lttng_abi_create_event_notifier_group();
383 case LTTNG_KERNEL_OLD_TRACER_VERSION:
384 {
385 struct lttng_kernel_tracer_version v;
386 struct lttng_kernel_old_tracer_version oldv;
387 struct lttng_kernel_old_tracer_version __user *uversion =
388 (struct lttng_kernel_old_tracer_version __user *) arg;
389
390 lttng_abi_tracer_version(&v);
391 oldv.major = v.major;
392 oldv.minor = v.minor;
393 oldv.patchlevel = v.patchlevel;
394
395 if (copy_to_user(uversion, &oldv, sizeof(oldv)))
396 return -EFAULT;
397 return 0;
398 }
399 case LTTNG_KERNEL_TRACER_VERSION:
400 {
401 struct lttng_kernel_tracer_version version;
402 struct lttng_kernel_tracer_version __user *uversion =
403 (struct lttng_kernel_tracer_version __user *) arg;
404
405 lttng_abi_tracer_version(&version);
406
407 if (copy_to_user(uversion, &version, sizeof(version)))
408 return -EFAULT;
409 return 0;
410 }
411 case LTTNG_KERNEL_TRACER_ABI_VERSION:
412 {
413 struct lttng_kernel_tracer_abi_version version;
414 struct lttng_kernel_tracer_abi_version __user *uversion =
415 (struct lttng_kernel_tracer_abi_version __user *) arg;
416
417 lttng_abi_tracer_abi_version(&version);
418
419 if (copy_to_user(uversion, &version, sizeof(version)))
420 return -EFAULT;
421 return 0;
422 }
423 case LTTNG_KERNEL_OLD_TRACEPOINT_LIST:
424 case LTTNG_KERNEL_TRACEPOINT_LIST:
425 return lttng_abi_tracepoint_list();
426 case LTTNG_KERNEL_SYSCALL_LIST:
427 return lttng_abi_syscall_list();
428 case LTTNG_KERNEL_OLD_WAIT_QUIESCENT:
429 case LTTNG_KERNEL_WAIT_QUIESCENT:
430 synchronize_trace();
431 return 0;
432 case LTTNG_KERNEL_OLD_CALIBRATE:
433 {
434 struct lttng_kernel_old_calibrate __user *ucalibrate =
435 (struct lttng_kernel_old_calibrate __user *) arg;
436 struct lttng_kernel_old_calibrate old_calibrate;
437 struct lttng_kernel_calibrate calibrate;
438 int ret;
439
440 if (copy_from_user(&old_calibrate, ucalibrate, sizeof(old_calibrate)))
441 return -EFAULT;
442 calibrate.type = old_calibrate.type;
443 ret = lttng_calibrate(&calibrate);
444 if (copy_to_user(ucalibrate, &old_calibrate, sizeof(old_calibrate)))
445 return -EFAULT;
446 return ret;
447 }
448 case LTTNG_KERNEL_CALIBRATE:
449 {
450 struct lttng_kernel_calibrate __user *ucalibrate =
451 (struct lttng_kernel_calibrate __user *) arg;
452 struct lttng_kernel_calibrate calibrate;
453 int ret;
454
455 if (copy_from_user(&calibrate, ucalibrate, sizeof(calibrate)))
456 return -EFAULT;
457 ret = lttng_calibrate(&calibrate);
458 if (copy_to_user(ucalibrate, &calibrate, sizeof(calibrate)))
459 return -EFAULT;
460 return ret;
461 }
462 default:
463 return -ENOIOCTLCMD;
464 }
465 }
466
467 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0))
468 static const struct proc_ops lttng_proc_ops = {
469 .proc_ioctl = lttng_ioctl,
470 #ifdef CONFIG_COMPAT
471 .proc_compat_ioctl = lttng_ioctl,
472 #endif /* CONFIG_COMPAT */
473 };
474 #else
475 static const struct file_operations lttng_proc_ops = {
476 .owner = THIS_MODULE,
477 .unlocked_ioctl = lttng_ioctl,
478 #ifdef CONFIG_COMPAT
479 .compat_ioctl = lttng_ioctl,
480 #endif /* CONFIG_COMPAT */
481 };
482 #endif
483
484 static
485 int lttng_abi_create_channel(struct file *session_file,
486 struct lttng_kernel_channel *chan_param,
487 enum channel_type channel_type)
488 {
489 struct lttng_session *session = session_file->private_data;
490 const struct file_operations *fops = NULL;
491 const char *transport_name;
492 struct lttng_channel *chan;
493 struct file *chan_file;
494 int chan_fd;
495 int ret = 0;
496
497 chan_fd = lttng_get_unused_fd();
498 if (chan_fd < 0) {
499 ret = chan_fd;
500 goto fd_error;
501 }
502 switch (channel_type) {
503 case PER_CPU_CHANNEL:
504 fops = &lttng_channel_fops;
505 break;
506 case METADATA_CHANNEL:
507 fops = &lttng_metadata_fops;
508 break;
509 }
510
511 chan_file = anon_inode_getfile("[lttng_channel]",
512 fops,
513 NULL, O_RDWR);
514 if (IS_ERR(chan_file)) {
515 ret = PTR_ERR(chan_file);
516 goto file_error;
517 }
518 switch (channel_type) {
519 case PER_CPU_CHANNEL:
520 if (chan_param->output == LTTNG_KERNEL_SPLICE) {
521 transport_name = chan_param->overwrite ?
522 "relay-overwrite" : "relay-discard";
523 } else if (chan_param->output == LTTNG_KERNEL_MMAP) {
524 transport_name = chan_param->overwrite ?
525 "relay-overwrite-mmap" : "relay-discard-mmap";
526 } else {
527 ret = -EINVAL;
    goto refcount_error;
528 }
529 break;
530 case METADATA_CHANNEL:
531 if (chan_param->output == LTTNG_KERNEL_SPLICE)
532 transport_name = "relay-metadata";
533 else if (chan_param->output == LTTNG_KERNEL_MMAP)
534 transport_name = "relay-metadata-mmap";
535 else {
536 ret = -EINVAL;
    goto refcount_error;
    }
537 break;
538 default:
539 transport_name = "<unknown>";
540 break;
541 }
542 if (!atomic_long_add_unless(&session_file->f_count, 1, LONG_MAX)) {
543 ret = -EOVERFLOW;
544 goto refcount_error;
545 }
546 /*
547 * We tolerate no failure path after channel creation. It will stay
548 * invariant for the rest of the session.
549 */
550 chan = lttng_channel_create(session, transport_name, NULL,
551 chan_param->subbuf_size,
552 chan_param->num_subbuf,
553 chan_param->switch_timer_interval,
554 chan_param->read_timer_interval,
555 channel_type);
556 if (!chan) {
557 ret = -EINVAL;
558 goto chan_error;
559 }
560 chan->file = chan_file;
561 chan_file->private_data = chan;
562 fd_install(chan_fd, chan_file);
563
564 return chan_fd;
565
566 chan_error:
567 atomic_long_dec(&session_file->f_count);
568 refcount_error:
569 fput(chan_file);
570 file_error:
571 put_unused_fd(chan_fd);
572 fd_error:
573 return ret;
574 }
575
576 static
577 int lttng_abi_session_set_name(struct lttng_session *session,
578 struct lttng_kernel_session_name *name)
579 {
580 size_t len;
581
582 len = strnlen(name->name, LTTNG_KERNEL_SESSION_NAME_LEN);
583
584 if (len == LTTNG_KERNEL_SESSION_NAME_LEN) {
585 /* Name is too long/malformed */
586 return -EINVAL;
587 }
588
589 strcpy(session->name, name->name);
590 return 0;
591 }
592
593 static
594 int lttng_abi_session_set_creation_time(struct lttng_session *session,
595 struct lttng_kernel_session_creation_time *time)
596 {
597 size_t len;
598
599 len = strnlen(time->iso8601, LTTNG_KERNEL_SESSION_CREATION_TIME_ISO8601_LEN);
600
601 if (len == LTTNG_KERNEL_SESSION_CREATION_TIME_ISO8601_LEN) {
602 /* Time is too long/malformed */
603 return -EINVAL;
604 }
605
606 strcpy(session->creation_time, time->iso8601);
607 return 0;
608 }
609
610 static
611 int lttng_counter_release(struct inode *inode, struct file *file)
612 {
613 struct lttng_counter *counter = file->private_data;
614
615 if (counter) {
616 /*
617 * Do not destroy the counter itself. Wait for the owner
618 * (event_notifier group) to be destroyed.
619 */
620 fput(counter->owner);
621 }
622
623 return 0;
624 }
625
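/*
 * Counter file descriptor ioctl: read one counter cell
 * (LTTNG_KERNEL_COUNTER_READ), aggregate a cell across CPUs
 * (LTTNG_KERNEL_COUNTER_AGGREGATE), or clear a cell
 * (LTTNG_KERNEL_COUNTER_CLEAR). The structure padding is validated to
 * be zeroed and the dimension indexes are cast to size_t before use.
 */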
626 static
627 long lttng_counter_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
628 {
629 struct lttng_counter *counter = file->private_data;
630 size_t indexes[LTTNG_KERNEL_COUNTER_DIMENSION_MAX] = { 0 };
631 int i;
632
633 switch (cmd) {
634 case LTTNG_KERNEL_COUNTER_READ:
635 {
636 struct lttng_kernel_counter_read local_counter_read;
637 struct lttng_kernel_counter_read __user *ucounter_read =
638 (struct lttng_kernel_counter_read __user *) arg;
639 bool overflow, underflow;
640 int64_t value;
641 int32_t cpu;
642 int ret;
643
644 if (copy_from_user(&local_counter_read, ucounter_read,
645 sizeof(local_counter_read)))
646 return -EFAULT;
647 if (validate_zeroed_padding(local_counter_read.padding,
648 sizeof(local_counter_read.padding)))
649 return -EINVAL;
650
651 /* Cast all indexes into size_t. */
652 for (i = 0; i < local_counter_read.index.number_dimensions; i++)
653 indexes[i] = (size_t) local_counter_read.index.dimension_indexes[i];
654 cpu = local_counter_read.cpu;
655
656 ret = lttng_kernel_counter_read(counter, indexes, cpu, &value,
657 &overflow, &underflow);
658 if (ret)
659 return ret;
660 local_counter_read.value.value = value;
661 local_counter_read.value.overflow = overflow;
662 local_counter_read.value.underflow = underflow;
663
664 if (copy_to_user(&ucounter_read->value, &local_counter_read.value,
665 sizeof(local_counter_read.value)))
666 return -EFAULT;
667
668 return 0;
669 }
670 case LTTNG_KERNEL_COUNTER_AGGREGATE:
671 {
672 struct lttng_kernel_counter_aggregate local_counter_aggregate;
673 struct lttng_kernel_counter_aggregate __user *ucounter_aggregate =
674 (struct lttng_kernel_counter_aggregate __user *) arg;
675 bool overflow, underflow;
676 int64_t value;
677 int ret;
678
679 if (copy_from_user(&local_counter_aggregate, ucounter_aggregate,
680 sizeof(local_counter_aggregate)))
681 return -EFAULT;
682 if (validate_zeroed_padding(local_counter_aggregate.padding,
683 sizeof(local_counter_aggregate.padding)))
684 return -EINVAL;
685
686 /* Cast all indexes into size_t. */
687 for (i = 0; i < local_counter_aggregate.index.number_dimensions; i++)
688 indexes[i] = (size_t) local_counter_aggregate.index.dimension_indexes[i];
689
690 ret = lttng_kernel_counter_aggregate(counter, indexes, &value,
691 &overflow, &underflow);
692 if (ret)
693 return ret;
694 local_counter_aggregate.value.value = value;
695 local_counter_aggregate.value.overflow = overflow;
696 local_counter_aggregate.value.underflow = underflow;
697
698 if (copy_to_user(&ucounter_aggregate->value, &local_counter_aggregate.value,
699 sizeof(local_counter_aggregate.value)))
700 return -EFAULT;
701
702 return 0;
703 }
704 case LTTNG_KERNEL_COUNTER_CLEAR:
705 {
706 struct lttng_kernel_counter_clear local_counter_clear;
707 struct lttng_kernel_counter_clear __user *ucounter_clear =
708 (struct lttng_kernel_counter_clear __user *) arg;
709
710 if (copy_from_user(&local_counter_clear, ucounter_clear,
711 sizeof(local_counter_clear)))
712 return -EFAULT;
713 if (validate_zeroed_padding(local_counter_clear.padding,
714 sizeof(local_counter_clear.padding)))
715 return -EINVAL;
716
717 /* Cast all indexes into size_t. */
718 for (i = 0; i < local_counter_clear.index.number_dimensions; i++)
719 indexes[i] = (size_t) local_counter_clear.index.dimension_indexes[i];
720
721 return lttng_kernel_counter_clear(counter, indexes);
722 }
723 default:
724 WARN_ON_ONCE(1);
725 return -ENOSYS;
726 }
727 }
728
729 static const struct file_operations lttng_counter_fops = {
730 .owner = THIS_MODULE,
731 .release = lttng_counter_release,
732 .unlocked_ioctl = lttng_counter_ioctl,
733 #ifdef CONFIG_COMPAT
734 .compat_ioctl = lttng_counter_ioctl,
735 #endif
736 };
737
738
739 static
740 enum tracker_type get_tracker_type(struct lttng_kernel_tracker_args *tracker)
741 {
742 switch (tracker->type) {
743 case LTTNG_KERNEL_TRACKER_PID:
744 return TRACKER_PID;
745 case LTTNG_KERNEL_TRACKER_VPID:
746 return TRACKER_VPID;
747 case LTTNG_KERNEL_TRACKER_UID:
748 return TRACKER_UID;
749 case LTTNG_KERNEL_TRACKER_VUID:
750 return TRACKER_VUID;
751 case LTTNG_KERNEL_TRACKER_GID:
752 return TRACKER_GID;
753 case LTTNG_KERNEL_TRACKER_VGID:
754 return TRACKER_VGID;
755 default:
756 return TRACKER_UNKNOWN;
757 }
758 }
759
760 /**
761 * lttng_session_ioctl - lttng session fd ioctl
762 *
763 * @file: the file
764 * @cmd: the command
765 * @arg: command arg
766 *
767 * This ioctl implements lttng commands:
768 * LTTNG_KERNEL_CHANNEL
769 * Returns a LTTng channel file descriptor
770 * LTTNG_KERNEL_ENABLE
771 * Enables tracing for a session (weak enable)
772 * LTTNG_KERNEL_DISABLE
773 * Disables tracing for a session (strong disable)
774 * LTTNG_KERNEL_METADATA
775 * Returns a LTTng metadata file descriptor
776 * LTTNG_KERNEL_SESSION_TRACK_PID
777 * Add PID to session PID tracker
778 * LTTNG_KERNEL_SESSION_UNTRACK_PID
779 * Remove PID from session PID tracker
780 * LTTNG_KERNEL_SESSION_TRACK_ID
781 * Add ID to tracker
782 * LTTNG_KERNEL_SESSION_UNTRACK_ID
783 * Remove ID from tracker
784 *
785 * The returned channel will be deleted when its file descriptor is closed.
786 */
787 static
788 long lttng_session_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
789 {
790 struct lttng_session *session = file->private_data;
791 struct lttng_kernel_channel chan_param;
792 struct lttng_kernel_old_channel old_chan_param;
793
794 switch (cmd) {
795 case LTTNG_KERNEL_OLD_CHANNEL:
796 {
797 if (copy_from_user(&old_chan_param,
798 (struct lttng_kernel_old_channel __user *) arg,
799 sizeof(struct lttng_kernel_old_channel)))
800 return -EFAULT;
801 chan_param.overwrite = old_chan_param.overwrite;
802 chan_param.subbuf_size = old_chan_param.subbuf_size;
803 chan_param.num_subbuf = old_chan_param.num_subbuf;
804 chan_param.switch_timer_interval = old_chan_param.switch_timer_interval;
805 chan_param.read_timer_interval = old_chan_param.read_timer_interval;
806 chan_param.output = old_chan_param.output;
807
808 return lttng_abi_create_channel(file, &chan_param,
809 PER_CPU_CHANNEL);
810 }
811 case LTTNG_KERNEL_CHANNEL:
812 {
813 if (copy_from_user(&chan_param,
814 (struct lttng_kernel_channel __user *) arg,
815 sizeof(struct lttng_kernel_channel)))
816 return -EFAULT;
817 return lttng_abi_create_channel(file, &chan_param,
818 PER_CPU_CHANNEL);
819 }
820 case LTTNG_KERNEL_OLD_SESSION_START:
821 case LTTNG_KERNEL_OLD_ENABLE:
822 case LTTNG_KERNEL_SESSION_START:
823 case LTTNG_KERNEL_ENABLE:
824 return lttng_session_enable(session);
825 case LTTNG_KERNEL_OLD_SESSION_STOP:
826 case LTTNG_KERNEL_OLD_DISABLE:
827 case LTTNG_KERNEL_SESSION_STOP:
828 case LTTNG_KERNEL_DISABLE:
829 return lttng_session_disable(session);
830 case LTTNG_KERNEL_OLD_METADATA:
831 {
832 if (copy_from_user(&old_chan_param,
833 (struct lttng_kernel_old_channel __user *) arg,
834 sizeof(struct lttng_kernel_old_channel)))
835 return -EFAULT;
836 chan_param.overwrite = old_chan_param.overwrite;
837 chan_param.subbuf_size = old_chan_param.subbuf_size;
838 chan_param.num_subbuf = old_chan_param.num_subbuf;
839 chan_param.switch_timer_interval = old_chan_param.switch_timer_interval;
840 chan_param.read_timer_interval = old_chan_param.read_timer_interval;
841 chan_param.output = old_chan_param.output;
842
843 return lttng_abi_create_channel(file, &chan_param,
844 METADATA_CHANNEL);
845 }
846 case LTTNG_KERNEL_METADATA:
847 {
848 if (copy_from_user(&chan_param,
849 (struct lttng_kernel_channel __user *) arg,
850 sizeof(struct lttng_kernel_channel)))
851 return -EFAULT;
852 return lttng_abi_create_channel(file, &chan_param,
853 METADATA_CHANNEL);
854 }
855 case LTTNG_KERNEL_SESSION_TRACK_PID:
856 return lttng_session_track_id(session, TRACKER_PID, (int) arg);
857 case LTTNG_KERNEL_SESSION_UNTRACK_PID:
858 return lttng_session_untrack_id(session, TRACKER_PID, (int) arg);
859 case LTTNG_KERNEL_SESSION_TRACK_ID:
860 {
861 struct lttng_kernel_tracker_args tracker;
862 enum tracker_type tracker_type;
863
864 if (copy_from_user(&tracker,
865 (struct lttng_kernel_tracker_args __user *) arg,
866 sizeof(struct lttng_kernel_tracker_args)))
867 return -EFAULT;
868 tracker_type = get_tracker_type(&tracker);
869 if (tracker_type == TRACKER_UNKNOWN)
870 return -EINVAL;
871 return lttng_session_track_id(session, tracker_type, tracker.id);
872 }
873 case LTTNG_KERNEL_SESSION_UNTRACK_ID:
874 {
875 struct lttng_kernel_tracker_args tracker;
876 enum tracker_type tracker_type;
877
878 if (copy_from_user(&tracker,
879 (struct lttng_kernel_tracker_args __user *) arg,
880 sizeof(struct lttng_kernel_tracker_args)))
881 return -EFAULT;
882 tracker_type = get_tracker_type(&tracker);
883 if (tracker_type == TRACKER_UNKNOWN)
884 return -EINVAL;
885 return lttng_session_untrack_id(session, tracker_type,
886 tracker.id);
887 }
888 case LTTNG_KERNEL_SESSION_LIST_TRACKER_PIDS:
889 return lttng_session_list_tracker_ids(session, TRACKER_PID);
890 case LTTNG_KERNEL_SESSION_LIST_TRACKER_IDS:
891 {
892 struct lttng_kernel_tracker_args tracker;
893 enum tracker_type tracker_type;
894
895 if (copy_from_user(&tracker,
896 (struct lttng_kernel_tracker_args __user *) arg,
897 sizeof(struct lttng_kernel_tracker_args)))
898 return -EFAULT;
899 tracker_type = get_tracker_type(&tracker);
900 if (tracker_type == TRACKER_UNKNOWN)
901 return -EINVAL;
902 return lttng_session_list_tracker_ids(session, tracker_type);
903 }
904 case LTTNG_KERNEL_SESSION_METADATA_REGEN:
905 return lttng_session_metadata_regenerate(session);
906 case LTTNG_KERNEL_SESSION_STATEDUMP:
907 return lttng_session_statedump(session);
908 case LTTNG_KERNEL_SESSION_SET_NAME:
909 {
910 struct lttng_kernel_session_name name;
911
912 if (copy_from_user(&name,
913 (struct lttng_kernel_session_name __user *) arg,
914 sizeof(struct lttng_kernel_session_name)))
915 return -EFAULT;
916 return lttng_abi_session_set_name(session, &name);
917 }
918 case LTTNG_KERNEL_SESSION_SET_CREATION_TIME:
919 {
920 struct lttng_kernel_session_creation_time time;
921
922 if (copy_from_user(&time,
923 (struct lttng_kernel_session_creation_time __user *) arg,
924 sizeof(struct lttng_kernel_session_creation_time)))
925 return -EFAULT;
926 return lttng_abi_session_set_creation_time(session, &time);
927 }
928 default:
929 return -ENOIOCTLCMD;
930 }
931 }
932
933 /*
934 * Called when the last file reference is dropped.
935 *
936 * Big fat note: channels and events are invariant for the whole session after
937 * their creation. So this session destruction also destroys all channel and
938 * event structures specific to this session (they are not destroyed when their
939 * individual file is released).
940 */
941 static
942 int lttng_session_release(struct inode *inode, struct file *file)
943 {
944 struct lttng_session *session = file->private_data;
945
946 if (session)
947 lttng_session_destroy(session);
948 return 0;
949 }
950
951 static const struct file_operations lttng_session_fops = {
952 .owner = THIS_MODULE,
953 .release = lttng_session_release,
954 .unlocked_ioctl = lttng_session_ioctl,
955 #ifdef CONFIG_COMPAT
956 .compat_ioctl = lttng_session_ioctl,
957 #endif
958 };
959
960 /*
961 * When encountering an empty buffer, flush the current sub-buffer if
962 * non-empty and retry (if new data is available to read after the flush).
963 */
964 static
965 ssize_t lttng_event_notifier_group_notif_read(struct file *filp, char __user *user_buf,
966 size_t count, loff_t *ppos)
967 {
968 struct lttng_event_notifier_group *event_notifier_group = filp->private_data;
969 struct channel *chan = event_notifier_group->chan;
970 struct lib_ring_buffer *buf = event_notifier_group->buf;
971 ssize_t read_count = 0, len;
972 size_t read_offset;
973
974 might_sleep();
975 if (!lttng_access_ok(VERIFY_WRITE, user_buf, count))
976 return -EFAULT;
977
978 /* Finish copy of previous record */
979 if (*ppos != 0) {
980 if (read_count < count) {
981 len = chan->iter.len_left;
982 read_offset = *ppos;
983 goto skip_get_next;
984 }
985 }
986
987 while (read_count < count) {
988 size_t copy_len, space_left;
989
990 len = lib_ring_buffer_get_next_record(chan, buf);
991 len_test:
992 if (len < 0) {
993 /*
994 * Check if buffer is finalized (end of file).
995 */
996 if (len == -ENODATA) {
997 /* A 0 read_count will tell about end of file */
998 goto nodata;
999 }
1000 if (filp->f_flags & O_NONBLOCK) {
1001 if (!read_count)
1002 read_count = -EAGAIN;
1003 goto nodata;
1004 } else {
1005 int error;
1006
1007 /*
1008 * No data available at the moment, return what
1009 * we got.
1010 */
1011 if (read_count)
1012 goto nodata;
1013
1014 /*
1015 * Wait for returned len to be >= 0 or -ENODATA.
1016 */
1017 error = wait_event_interruptible(
1018 event_notifier_group->read_wait,
1019 ((len = lib_ring_buffer_get_next_record(
1020 chan, buf)), len != -EAGAIN));
1021 CHAN_WARN_ON(chan, len == -EBUSY);
1022 if (error) {
1023 read_count = error;
1024 goto nodata;
1025 }
1026 CHAN_WARN_ON(chan, len < 0 && len != -ENODATA);
1027 goto len_test;
1028 }
1029 }
1030 read_offset = buf->iter.read_offset;
1031 skip_get_next:
1032 space_left = count - read_count;
1033 if (len <= space_left) {
1034 copy_len = len;
1035 chan->iter.len_left = 0;
1036 *ppos = 0;
1037 } else {
1038 copy_len = space_left;
1039 chan->iter.len_left = len - copy_len;
1040 *ppos = read_offset + copy_len;
1041 }
1042 if (__lib_ring_buffer_copy_to_user(&buf->backend, read_offset,
1043 &user_buf[read_count],
1044 copy_len)) {
1045 /*
1046 * Leave the len_left and ppos values at their current
1047 * state, as we currently have a valid event to read.
1048 */
1049 return -EFAULT;
1050 }
1051 read_count += copy_len;
1052 }
1053 goto put_record;
1054
1055 nodata:
1056 *ppos = 0;
1057 chan->iter.len_left = 0;
1058
1059 put_record:
1060 lib_ring_buffer_put_current_record(buf);
1061 return read_count;
1062 }
1063
1064 /*
1065 * If the ring buffer is non-empty (even just a partial subbuffer), return that
1066 * there is data available. Perform a ring buffer flush if we encounter a
1067 * non-empty ring buffer which does not have any consumable subbuffer available.
1068 */
1069 static
1070 unsigned int lttng_event_notifier_group_notif_poll(struct file *filp,
1071 poll_table *wait)
1072 {
1073 unsigned int mask = 0;
1074 struct lttng_event_notifier_group *event_notifier_group = filp->private_data;
1075 struct channel *chan = event_notifier_group->chan;
1076 struct lib_ring_buffer *buf = event_notifier_group->buf;
1077 const struct lib_ring_buffer_config *config = &chan->backend.config;
1078 int finalized, disabled;
1079 unsigned long consumed, offset;
1080 size_t subbuffer_header_size = config->cb.subbuffer_header_size();
1081
1082 if (filp->f_mode & FMODE_READ) {
1083 poll_wait_set_exclusive(wait);
1084 poll_wait(filp, &event_notifier_group->read_wait, wait);
1085
1086 finalized = lib_ring_buffer_is_finalized(config, buf);
1087 disabled = lib_ring_buffer_channel_is_disabled(chan);
1088
1089 /*
1090 * lib_ring_buffer_is_finalized() contains a smp_rmb() ordering
1091 * finalized load before offsets loads.
1092 */
1093 WARN_ON(atomic_long_read(&buf->active_readers) != 1);
1094 retry:
1095 if (disabled)
1096 return POLLERR;
1097
1098 offset = lib_ring_buffer_get_offset(config, buf);
1099 consumed = lib_ring_buffer_get_consumed(config, buf);
1100
1101 /*
1102 * If there is no buffer available to consume.
1103 */
1104 if (subbuf_trunc(offset, chan) - subbuf_trunc(consumed, chan) == 0) {
1105 /*
1106 * If there is a non-empty subbuffer, flush and try again.
1107 */
1108 if (subbuf_offset(offset, chan) > subbuffer_header_size) {
1109 lib_ring_buffer_switch_remote(buf);
1110 goto retry;
1111 }
1112
1113 if (finalized)
1114 return POLLHUP;
1115 else {
1116 /*
1117 * The memory barriers
1118 * __wait_event()/wake_up_interruptible() take
1119 * care of "raw_spin_is_locked" memory ordering.
1120 */
1121 if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
1122 goto retry;
1123 else
1124 return 0;
1125 }
1126 } else {
1127 if (subbuf_trunc(offset, chan) - subbuf_trunc(consumed, chan)
1128 >= chan->backend.buf_size)
1129 return POLLPRI | POLLRDBAND;
1130 else
1131 return POLLIN | POLLRDNORM;
1132 }
1133 }
1134
1135 return mask;
1136 }
1137
1138 /**
1139 * lttng_event_notifier_group_notif_open - event_notifier ring buffer open file operation
1140 * @inode: opened inode
1141 * @file: opened file
1142 *
1143 * Open implementation. Makes sure only one open instance of a buffer is
1144 * done at a given moment.
1145 */
1146 static int lttng_event_notifier_group_notif_open(struct inode *inode, struct file *file)
1147 {
1148 struct lttng_event_notifier_group *event_notifier_group = inode->i_private;
1149 struct lib_ring_buffer *buf = event_notifier_group->buf;
1150
1151 file->private_data = event_notifier_group;
1152 return lib_ring_buffer_open(inode, file, buf);
1153 }
1154
1155 /**
1156 * lttng_event_notifier_group_notif_release - event_notifier ring buffer release file operation
1157 * @inode: opened inode
1158 * @file: opened file
1159 *
1160 * Release implementation.
1161 */
1162 static int lttng_event_notifier_group_notif_release(struct inode *inode, struct file *file)
1163 {
1164 struct lttng_event_notifier_group *event_notifier_group = file->private_data;
1165 struct lib_ring_buffer *buf = event_notifier_group->buf;
1166 int ret;
1167
1168 ret = lib_ring_buffer_release(inode, file, buf);
1169 if (ret)
1170 return ret;
1171 fput(event_notifier_group->file);
1172 return 0;
1173 }
1174
1175 static const struct file_operations lttng_event_notifier_group_notif_fops = {
1176 .owner = THIS_MODULE,
1177 .open = lttng_event_notifier_group_notif_open,
1178 .release = lttng_event_notifier_group_notif_release,
1179 .read = lttng_event_notifier_group_notif_read,
1180 .poll = lttng_event_notifier_group_notif_poll,
1181 };
1182
1183 /**
1184 * lttng_metadata_ring_buffer_poll - LTTng ring buffer poll file operation
1185 * @filp: the file
1186 * @wait: poll table
1187 *
1188 * Handles the poll operations for the metadata channels.
1189 */
1190 static
1191 unsigned int lttng_metadata_ring_buffer_poll(struct file *filp,
1192 poll_table *wait)
1193 {
1194 struct lttng_metadata_stream *stream = filp->private_data;
1195 struct lib_ring_buffer *buf = stream->priv;
1196 int finalized;
1197 unsigned int mask = 0;
1198
1199 if (filp->f_mode & FMODE_READ) {
1200 poll_wait_set_exclusive(wait);
1201 poll_wait(filp, &stream->read_wait, wait);
1202
1203 finalized = stream->finalized;
1204
1205 /*
1206 * lib_ring_buffer_is_finalized() contains a smp_rmb()
1207 * ordering finalized load before offsets loads.
1208 */
1209 WARN_ON(atomic_long_read(&buf->active_readers) != 1);
1210
1211 if (finalized)
1212 mask |= POLLHUP;
1213
1214 mutex_lock(&stream->metadata_cache->lock);
1215 if (stream->metadata_cache->metadata_written >
1216 stream->metadata_out)
1217 mask |= POLLIN;
1218 mutex_unlock(&stream->metadata_cache->lock);
1219 }
1220
1221 return mask;
1222 }
1223
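/*
 * When the consumer releases a sub-buffer, account all metadata copied
 * into the ring buffer (metadata_in) as consumed (metadata_out).
 */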
1224 static
1225 void lttng_metadata_ring_buffer_ioctl_put_next_subbuf(struct file *filp,
1226 unsigned int cmd, unsigned long arg)
1227 {
1228 struct lttng_metadata_stream *stream = filp->private_data;
1229
1230 stream->metadata_out = stream->metadata_in;
1231 }
1232
1233 /*
1234 * Reset the counter of how much metadata has been consumed to 0. That way,
1235 * the consumer receives the content of the metadata cache unchanged. This is
1236 * different from the metadata_regenerate where the offset from epoch is
1237 * resampled, here we want the exact same content as the last time the metadata
1238 * was generated. This command is only possible if all the metadata written
1239 * in the cache has been output to the metadata stream to avoid corrupting the
1240 * metadata file.
1241 *
1242 * Return 0 on success, a negative value on error.
1243 */
1244 static
1245 int lttng_metadata_cache_dump(struct lttng_metadata_stream *stream)
1246 {
1247 int ret;
1248 struct lttng_metadata_cache *cache = stream->metadata_cache;
1249
1250 mutex_lock(&cache->lock);
1251 if (stream->metadata_out != cache->metadata_written) {
1252 ret = -EBUSY;
1253 goto end;
1254 }
1255 stream->metadata_out = 0;
1256 stream->metadata_in = 0;
1257 wake_up_interruptible(&stream->read_wait);
1258 ret = 0;
1259
1260 end:
1261 mutex_unlock(&cache->lock);
1262 return ret;
1263 }
1264
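/*
 * Metadata stream ioctl: for commands which consume data (get next
 * sub-buffer, flush, ...), first produce up to one packet of metadata
 * from the cache into the ring buffer, then delegate to the generic
 * lib_ring_buffer_ioctl() handler.
 */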
1265 static
1266 long lttng_metadata_ring_buffer_ioctl(struct file *filp,
1267 unsigned int cmd, unsigned long arg)
1268 {
1269 int ret;
1270 struct lttng_metadata_stream *stream = filp->private_data;
1271 struct lib_ring_buffer *buf = stream->priv;
1272 unsigned int rb_cmd;
1273 bool coherent;
1274
1275 if (cmd == RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK)
1276 rb_cmd = RING_BUFFER_GET_NEXT_SUBBUF;
1277 else
1278 rb_cmd = cmd;
1279
1280 switch (cmd) {
1281 case RING_BUFFER_GET_NEXT_SUBBUF:
1282 {
1283 struct lttng_metadata_stream *stream = filp->private_data;
1284 struct lib_ring_buffer *buf = stream->priv;
1285 struct channel *chan = buf->backend.chan;
1286
1287 ret = lttng_metadata_output_channel(stream, chan, NULL);
1288 if (ret > 0) {
1289 lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
1290 ret = 0;
1291 } else if (ret < 0)
1292 goto err;
1293 break;
1294 }
1295 case RING_BUFFER_GET_SUBBUF:
1296 {
1297 /*
1298 * Random access is not allowed for metadata channel.
1299 */
1300 return -ENOSYS;
1301 }
1302 case RING_BUFFER_FLUSH_EMPTY: /* Fall-through. */
1303 case RING_BUFFER_FLUSH:
1304 {
1305 struct lttng_metadata_stream *stream = filp->private_data;
1306 struct lib_ring_buffer *buf = stream->priv;
1307 struct channel *chan = buf->backend.chan;
1308
1309 /*
1310 * Before doing the actual ring buffer flush, write up to one
1311 * packet of metadata in the ring buffer.
1312 */
1313 ret = lttng_metadata_output_channel(stream, chan, NULL);
1314 if (ret < 0)
1315 goto err;
1316 break;
1317 }
1318 case RING_BUFFER_GET_METADATA_VERSION:
1319 {
1320 struct lttng_metadata_stream *stream = filp->private_data;
1321
1322 return put_u64(stream->version, arg);
1323 }
1324 case RING_BUFFER_METADATA_CACHE_DUMP:
1325 {
1326 struct lttng_metadata_stream *stream = filp->private_data;
1327
1328 return lttng_metadata_cache_dump(stream);
1329 }
1330 case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
1331 {
1332 struct lttng_metadata_stream *stream = filp->private_data;
1333 struct lib_ring_buffer *buf = stream->priv;
1334 struct channel *chan = buf->backend.chan;
1335
1336 ret = lttng_metadata_output_channel(stream, chan, &coherent);
1337 if (ret > 0) {
1338 lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
1339 ret = 0;
1340 } else if (ret < 0) {
1341 goto err;
1342 }
1343 break;
1344 }
1345 default:
1346 break;
1347 }
1348 /* PUT_SUBBUF is the one from lib ring buffer, unmodified. */
1349
1350 /* Performing lib ring buffer ioctl after our own. */
1351 ret = lib_ring_buffer_ioctl(filp, rb_cmd, arg, buf);
1352 if (ret < 0)
1353 goto err;
1354
1355 switch (cmd) {
1356 case RING_BUFFER_PUT_NEXT_SUBBUF:
1357 {
1358 lttng_metadata_ring_buffer_ioctl_put_next_subbuf(filp,
1359 cmd, arg);
1360 break;
1361 }
1362 case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
1363 {
1364 return put_u32(coherent, arg);
1365 }
1366 default:
1367 break;
1368 }
1369 err:
1370 return ret;
1371 }
1372
1373 #ifdef CONFIG_COMPAT
1374 static
1375 long lttng_metadata_ring_buffer_compat_ioctl(struct file *filp,
1376 unsigned int cmd, unsigned long arg)
1377 {
1378 int ret;
1379 struct lttng_metadata_stream *stream = filp->private_data;
1380 struct lib_ring_buffer *buf = stream->priv;
1381 unsigned int rb_cmd;
1382 bool coherent;
1383
1384 if (cmd == RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK)
1385 rb_cmd = RING_BUFFER_GET_NEXT_SUBBUF;
1386 else
1387 rb_cmd = cmd;
1388
1389 switch (cmd) {
1390 case RING_BUFFER_GET_NEXT_SUBBUF:
1391 {
1392 struct lttng_metadata_stream *stream = filp->private_data;
1393 struct lib_ring_buffer *buf = stream->priv;
1394 struct channel *chan = buf->backend.chan;
1395
1396 ret = lttng_metadata_output_channel(stream, chan, NULL);
1397 if (ret > 0) {
1398 lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
1399 ret = 0;
1400 } else if (ret < 0)
1401 goto err;
1402 break;
1403 }
1404 case RING_BUFFER_GET_SUBBUF:
1405 {
1406 /*
1407 * Random access is not allowed for metadata channel.
1408 */
1409 return -ENOSYS;
1410 }
1411 case RING_BUFFER_FLUSH_EMPTY: /* Fall-through. */
1412 case RING_BUFFER_FLUSH:
1413 {
1414 struct lttng_metadata_stream *stream = filp->private_data;
1415 struct lib_ring_buffer *buf = stream->priv;
1416 struct channel *chan = buf->backend.chan;
1417
1418 /*
1419 * Before doing the actual ring buffer flush, write up to one
1420 * packet of metadata in the ring buffer.
1421 */
1422 ret = lttng_metadata_output_channel(stream, chan, NULL);
1423 if (ret < 0)
1424 goto err;
1425 break;
1426 }
1427 case RING_BUFFER_GET_METADATA_VERSION:
1428 {
1429 struct lttng_metadata_stream *stream = filp->private_data;
1430
1431 return put_u64(stream->version, arg);
1432 }
1433 case RING_BUFFER_METADATA_CACHE_DUMP:
1434 {
1435 struct lttng_metadata_stream *stream = filp->private_data;
1436
1437 return lttng_metadata_cache_dump(stream);
1438 }
1439 case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
1440 {
1441 struct lttng_metadata_stream *stream = filp->private_data;
1442 struct lib_ring_buffer *buf = stream->priv;
1443 struct channel *chan = buf->backend.chan;
1444
1445 ret = lttng_metadata_output_channel(stream, chan, &coherent);
1446 if (ret > 0) {
1447 lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
1448 ret = 0;
1449 } else if (ret < 0) {
1450 goto err;
1451 }
1452 break;
1453 }
1454 default:
1455 break;
1456 }
1457 /* PUT_SUBBUF is the one from lib ring buffer, unmodified. */
1458
1459 /* Performing lib ring buffer ioctl after our own. */
1460 ret = lib_ring_buffer_compat_ioctl(filp, rb_cmd, arg, buf);
1461 if (ret < 0)
1462 goto err;
1463
1464 switch (cmd) {
1465 case RING_BUFFER_PUT_NEXT_SUBBUF:
1466 {
1467 lttng_metadata_ring_buffer_ioctl_put_next_subbuf(filp,
1468 cmd, arg);
1469 break;
1470 }
1471 case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
1472 {
1473 return put_u32(coherent, arg);
1474 }
1475 default:
1476 break;
1477 }
1478 err:
1479 return ret;
1480 }
1481 #endif
1482
1483 /*
1484 * This is not used by anonymous file descriptors. This code is left
1485 * in place in case we ever want to implement an inode with an open() operation.
1486 */
1487 static
1488 int lttng_metadata_ring_buffer_open(struct inode *inode, struct file *file)
1489 {
1490 struct lttng_metadata_stream *stream = inode->i_private;
1491 struct lib_ring_buffer *buf = stream->priv;
1492
1493 file->private_data = buf;
1494 /*
1495 * Since the lifetime of the metadata cache differs from that of the
1496 * session, we need to keep our own reference on the transport.
1497 */
1498 if (!try_module_get(stream->transport->owner)) {
1499 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
1500 return -EBUSY;
1501 }
1502 return lib_ring_buffer_open(inode, file, buf);
1503 }
1504
1505 static
1506 int lttng_metadata_ring_buffer_release(struct inode *inode, struct file *file)
1507 {
1508 struct lttng_metadata_stream *stream = file->private_data;
1509 struct lib_ring_buffer *buf = stream->priv;
1510
1511 mutex_lock(&stream->metadata_cache->lock);
1512 list_del(&stream->list);
1513 mutex_unlock(&stream->metadata_cache->lock);
1514 kref_put(&stream->metadata_cache->refcount, metadata_cache_destroy);
1515 module_put(stream->transport->owner);
1516 kfree(stream);
1517 return lib_ring_buffer_release(inode, file, buf);
1518 }
1519
1520 static
1521 ssize_t lttng_metadata_ring_buffer_splice_read(struct file *in, loff_t *ppos,
1522 struct pipe_inode_info *pipe, size_t len,
1523 unsigned int flags)
1524 {
1525 struct lttng_metadata_stream *stream = in->private_data;
1526 struct lib_ring_buffer *buf = stream->priv;
1527
1528 return lib_ring_buffer_splice_read(in, ppos, pipe, len,
1529 flags, buf);
1530 }
1531
1532 static
1533 int lttng_metadata_ring_buffer_mmap(struct file *filp,
1534 struct vm_area_struct *vma)
1535 {
1536 struct lttng_metadata_stream *stream = filp->private_data;
1537 struct lib_ring_buffer *buf = stream->priv;
1538
1539 return lib_ring_buffer_mmap(filp, vma, buf);
1540 }
1541
1542 static
1543 const struct file_operations lttng_metadata_ring_buffer_file_operations = {
1544 .owner = THIS_MODULE,
1545 .open = lttng_metadata_ring_buffer_open,
1546 .release = lttng_metadata_ring_buffer_release,
1547 .poll = lttng_metadata_ring_buffer_poll,
1548 .splice_read = lttng_metadata_ring_buffer_splice_read,
1549 .mmap = lttng_metadata_ring_buffer_mmap,
1550 .unlocked_ioctl = lttng_metadata_ring_buffer_ioctl,
1551 .llseek = vfs_lib_ring_buffer_no_llseek,
1552 #ifdef CONFIG_COMPAT
1553 .compat_ioctl = lttng_metadata_ring_buffer_compat_ioctl,
1554 #endif
1555 };
1556
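/*
 * Wrap a ring buffer stream into an anonymous inode file with the given
 * file operations and install it into a newly allocated file descriptor.
 */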
1557 static
1558 int lttng_abi_create_stream_fd(struct file *channel_file, void *stream_priv,
1559 const struct file_operations *fops, const char *name)
1560 {
1561 int stream_fd, ret;
1562 struct file *stream_file;
1563
1564 stream_fd = lttng_get_unused_fd();
1565 if (stream_fd < 0) {
1566 ret = stream_fd;
1567 goto fd_error;
1568 }
1569 stream_file = anon_inode_getfile(name, fops, stream_priv, O_RDWR);
1570 if (IS_ERR(stream_file)) {
1571 ret = PTR_ERR(stream_file);
1572 goto file_error;
1573 }
1574 /*
1575 * OPEN_FMODE, called within anon_inode_getfile/alloc_file, does not honor
1576 * FMODE_LSEEK, FMODE_PREAD nor FMODE_PWRITE. We need to read from this
1577 * file descriptor, so we set FMODE_PREAD here.
1578 */
1579 stream_file->f_mode |= FMODE_PREAD;
1580 fd_install(stream_fd, stream_file);
1581 /*
1582 * The stream holds a reference to the channel within the generic ring
1583 * buffer library, so no need to hold a refcount on the channel and
1584 * session files here.
1585 */
1586 return stream_fd;
1587
1588 file_error:
1589 put_unused_fd(stream_fd);
1590 fd_error:
1591 return ret;
1592 }
1593
1594 static
1595 int lttng_abi_open_stream(struct file *channel_file)
1596 {
1597 struct lttng_channel *channel = channel_file->private_data;
1598 struct lib_ring_buffer *buf;
1599 int ret;
1600 void *stream_priv;
1601
1602 buf = channel->ops->buffer_read_open(channel->chan);
1603 if (!buf)
1604 return -ENOENT;
1605
1606 stream_priv = buf;
1607 ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
1608 &lttng_stream_ring_buffer_file_operations,
1609 "[lttng_stream]");
1610 if (ret < 0)
1611 goto fd_error;
1612
1613 return ret;
1614
1615 fd_error:
1616 channel->ops->buffer_read_close(buf);
1617 return ret;
1618 }
1619
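/*
 * Open a stream on the metadata channel: allocate the metadata stream,
 * take references on the transport module and on the metadata cache,
 * create the stream file descriptor, and register the stream on the
 * cache's stream list.
 */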
1620 static
1621 int lttng_abi_open_metadata_stream(struct file *channel_file)
1622 {
1623 struct lttng_channel *channel = channel_file->private_data;
1624 struct lttng_session *session = channel->session;
1625 struct lib_ring_buffer *buf;
1626 int ret;
1627 struct lttng_metadata_stream *metadata_stream;
1628 void *stream_priv;
1629
1630 buf = channel->ops->buffer_read_open(channel->chan);
1631 if (!buf)
1632 return -ENOENT;
1633
1634 metadata_stream = kzalloc(sizeof(struct lttng_metadata_stream),
1635 GFP_KERNEL);
1636 if (!metadata_stream) {
1637 ret = -ENOMEM;
1638 goto nomem;
1639 }
1640 metadata_stream->metadata_cache = session->metadata_cache;
1641 init_waitqueue_head(&metadata_stream->read_wait);
1642 metadata_stream->priv = buf;
1643 stream_priv = metadata_stream;
1644 metadata_stream->transport = channel->transport;
1645 /* Initial state is empty metadata, considered incoherent. */
1646 metadata_stream->coherent = false;
1647
1648 /*
1649 * Since the lifetime of the metadata cache differs from that of the
1650 * session, we need to keep our own reference on the transport.
1651 */
1652 if (!try_module_get(metadata_stream->transport->owner)) {
1653 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
1654 ret = -EINVAL;
1655 goto notransport;
1656 }
1657
1658 if (!lttng_kref_get(&session->metadata_cache->refcount)) {
1659 ret = -EOVERFLOW;
1660 goto kref_error;
1661 }
1662
1663 ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
1664 &lttng_metadata_ring_buffer_file_operations,
1665 "[lttng_metadata_stream]");
1666 if (ret < 0)
1667 goto fd_error;
1668
1669 mutex_lock(&session->metadata_cache->lock);
1670 list_add(&metadata_stream->list,
1671 &session->metadata_cache->metadata_stream);
1672 mutex_unlock(&session->metadata_cache->lock);
1673 return ret;
1674
1675 fd_error:
1676 kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
1677 kref_error:
1678 module_put(metadata_stream->transport->owner);
1679 notransport:
1680 kfree(metadata_stream);
1681 nomem:
1682 channel->ops->buffer_read_close(buf);
1683 return ret;
1684 }
1685
1686 static
1687 int lttng_abi_open_event_notifier_group_stream(struct file *notif_file)
1688 {
1689 struct lttng_event_notifier_group *event_notifier_group = notif_file->private_data;
1690 struct channel *chan = event_notifier_group->chan;
1691 struct lib_ring_buffer *buf;
1692 int ret;
1693 void *stream_priv;
1694
1695 buf = event_notifier_group->ops->buffer_read_open(chan);
1696 if (!buf)
1697 return -ENOENT;
1698
1699 /* The event_notifier notification fd holds a reference on the event_notifier group */
1700 if (!atomic_long_add_unless(&notif_file->f_count, 1, LONG_MAX)) {
1701 ret = -EOVERFLOW;
1702 goto refcount_error;
1703 }
1704 event_notifier_group->buf = buf;
1705 stream_priv = event_notifier_group;
1706 ret = lttng_abi_create_stream_fd(notif_file, stream_priv,
1707 &lttng_event_notifier_group_notif_fops,
1708 "[lttng_event_notifier_stream]");
1709 if (ret < 0)
1710 goto fd_error;
1711
1712 return ret;
1713
1714 fd_error:
1715 atomic_long_dec(&notif_file->f_count);
1716 refcount_error:
1717 event_notifier_group->ops->buffer_read_close(buf);
1718 return ret;
1719 }
1720
1721 static
1722 int lttng_abi_validate_event_param(struct lttng_kernel_event *event_param)
1723 {
1724 /* Limit ABI to implemented features. */
1725 switch (event_param->instrumentation) {
1726 case LTTNG_KERNEL_SYSCALL:
1727 switch (event_param->u.syscall.entryexit) {
1728 case LTTNG_KERNEL_SYSCALL_ENTRY: /* Fall-through */
1729 case LTTNG_KERNEL_SYSCALL_EXIT: /* Fall-through */
1730 case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
1731 break;
1732 default:
1733 return -EINVAL;
1734 }
1735 switch (event_param->u.syscall.abi) {
1736 case LTTNG_KERNEL_SYSCALL_ABI_ALL:
1737 break;
1738 default:
1739 return -EINVAL;
1740 }
1741 switch (event_param->u.syscall.match) {
1742 case LTTNG_KERNEL_SYSCALL_MATCH_NAME:
1743 break;
1744 default:
1745 return -EINVAL;
1746 }
1747 break;
1748
1749 case LTTNG_KERNEL_KRETPROBE:
1750 switch (event_param->u.kretprobe.entryexit) {
1751 case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
1752 break;
1753 case LTTNG_KERNEL_SYSCALL_ENTRY: /* Fall-through */
1754 case LTTNG_KERNEL_SYSCALL_EXIT: /* Fall-through */
1755 default:
1756 return -EINVAL;
1757 }
1758 break;
1759
1760 case LTTNG_KERNEL_TRACEPOINT: /* Fall-through */
1761 case LTTNG_KERNEL_KPROBE: /* Fall-through */
1762 case LTTNG_KERNEL_UPROBE:
1763 break;
1764
1765 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1766 case LTTNG_KERNEL_NOOP: /* Fall-through */
1767 default:
1768 return -EINVAL;
1769 }
1770 return 0;
1771 }
1772
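/*
 * Create an event file descriptor on a channel. Tracepoint and system
 * call instrumentation create an enabler (name or star-glob match);
 * other instrumentation types create the event immediately.
 */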
1773 static
1774 int lttng_abi_create_event(struct file *channel_file,
1775 struct lttng_kernel_event *event_param)
1776 {
1777 struct lttng_channel *channel = channel_file->private_data;
1778 int event_fd, ret;
1779 struct file *event_file;
1780 void *priv;
1781
1782 event_param->name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
1783 switch (event_param->instrumentation) {
1784 case LTTNG_KERNEL_KRETPROBE:
1785 event_param->u.kretprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
1786 break;
1787 case LTTNG_KERNEL_KPROBE:
1788 event_param->u.kprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
1789 break;
1790 case LTTNG_KERNEL_FUNCTION:
1791 WARN_ON_ONCE(1);
1792 /* Not implemented. */
1793 break;
1794 default:
1795 break;
1796 }
1797 event_fd = lttng_get_unused_fd();
1798 if (event_fd < 0) {
1799 ret = event_fd;
1800 goto fd_error;
1801 }
1802 event_file = anon_inode_getfile("[lttng_event]",
1803 &lttng_event_fops,
1804 NULL, O_RDWR);
1805 if (IS_ERR(event_file)) {
1806 ret = PTR_ERR(event_file);
1807 goto file_error;
1808 }
1809 /* The event holds a reference on the channel */
1810 if (!atomic_long_add_unless(&channel_file->f_count, 1, LONG_MAX)) {
1811 ret = -EOVERFLOW;
1812 goto refcount_error;
1813 }
1814 ret = lttng_abi_validate_event_param(event_param);
1815 if (ret)
1816 goto event_error;
1817 if (event_param->instrumentation == LTTNG_KERNEL_TRACEPOINT
1818 || event_param->instrumentation == LTTNG_KERNEL_SYSCALL) {
1819 struct lttng_event_enabler *event_enabler;
1820
1821 if (strutils_is_star_glob_pattern(event_param->name)) {
1822 /*
1823 * If the event name is a star globbing pattern,
1824 * we create the special star globbing enabler.
1825 */
1826 event_enabler = lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_STAR_GLOB,
1827 event_param, channel);
1828 } else {
1829 event_enabler = lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_NAME,
1830 event_param, channel);
1831 }
1832 priv = event_enabler;
1833 } else {
1834 struct lttng_event *event;
1835
1836 /*
1837 * We tolerate no failure path after event creation. It
1838 * will stay invariant for the rest of the session.
1839 */
1840 event = lttng_event_create(channel, event_param,
1841 NULL, NULL,
1842 event_param->instrumentation);
1843 WARN_ON_ONCE(!event);
1844 if (IS_ERR(event)) {
1845 ret = PTR_ERR(event);
1846 goto event_error;
1847 }
1848 priv = event;
1849 }
1850 event_file->private_data = priv;
1851 fd_install(event_fd, event_file);
1852 return event_fd;
1853
1854 event_error:
1855 atomic_long_dec(&channel_file->f_count);
1856 refcount_error:
1857 fput(event_file);
1858 file_error:
1859 put_unused_fd(event_fd);
1860 fd_error:
1861 return ret;
1862 }
1863
1864 static
1865 long lttng_event_notifier_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1866 {
1867 struct lttng_event_notifier *event_notifier;
1868 struct lttng_event_notifier_enabler *event_notifier_enabler;
1869 enum lttng_event_type *evtype = file->private_data;
1870
1871 switch (cmd) {
1872 case LTTNG_KERNEL_ENABLE:
1873 switch (*evtype) {
1874 case LTTNG_TYPE_EVENT:
1875 event_notifier = file->private_data;
1876 return lttng_event_notifier_enable(event_notifier);
1877 case LTTNG_TYPE_ENABLER:
1878 event_notifier_enabler = file->private_data;
1879 return lttng_event_notifier_enabler_enable(event_notifier_enabler);
1880 default:
1881 WARN_ON_ONCE(1);
1882 return -ENOSYS;
1883 }
1884 case LTTNG_KERNEL_DISABLE:
1885 switch (*evtype) {
1886 case LTTNG_TYPE_EVENT:
1887 event_notifier = file->private_data;
1888 return lttng_event_notifier_disable(event_notifier);
1889 case LTTNG_TYPE_ENABLER:
1890 event_notifier_enabler = file->private_data;
1891 return lttng_event_notifier_enabler_disable(event_notifier_enabler);
1892 default:
1893 WARN_ON_ONCE(1);
1894 return -ENOSYS;
1895 }
1896 case LTTNG_KERNEL_FILTER:
1897 switch (*evtype) {
1898 case LTTNG_TYPE_EVENT:
1899 return -EINVAL;
1900 case LTTNG_TYPE_ENABLER:
1901 event_notifier_enabler = file->private_data;
1902 return lttng_event_notifier_enabler_attach_filter_bytecode(
1903 event_notifier_enabler,
1904 (struct lttng_kernel_filter_bytecode __user *) arg);
1905 default:
1906 WARN_ON_ONCE(1);
1907 return -ENOSYS;
1908 }
1909
1910 case LTTNG_KERNEL_CAPTURE:
1911 switch (*evtype) {
1912 case LTTNG_TYPE_EVENT:
1913 return -EINVAL;
1914 case LTTNG_TYPE_ENABLER:
1915 event_notifier_enabler = file->private_data;
1916 return lttng_event_notifier_enabler_attach_capture_bytecode(
1917 event_notifier_enabler,
1918 (struct lttng_kernel_capture_bytecode __user *) arg);
1919 default:
1920 WARN_ON_ONCE(1);
1921 return -ENOSYS;
1922 }
1923 case LTTNG_KERNEL_ADD_CALLSITE:
1924 switch (*evtype) {
1925 case LTTNG_TYPE_EVENT:
1926 event_notifier = file->private_data;
1927 return lttng_event_notifier_add_callsite(event_notifier,
1928 (struct lttng_kernel_event_callsite __user *) arg);
1929 case LTTNG_TYPE_ENABLER:
1930 return -EINVAL;
1931 default:
1932 WARN_ON_ONCE(1);
1933 return -ENOSYS;
1934 }
1935 default:
1936 return -ENOIOCTLCMD;
1937 }
1938 }
1939
1940 static
1941 int lttng_event_notifier_release(struct inode *inode, struct file *file)
1942 {
1943 struct lttng_event_notifier *event_notifier;
1944 struct lttng_event_notifier_enabler *event_notifier_enabler;
1945 enum lttng_event_type *evtype = file->private_data;
1946
1947 if (!evtype)
1948 return 0;
1949
1950 switch (*evtype) {
1951 case LTTNG_TYPE_EVENT:
1952 event_notifier = file->private_data;
1953 if (event_notifier)
1954 fput(event_notifier->group->file);
1955 break;
1956 case LTTNG_TYPE_ENABLER:
1957 event_notifier_enabler = file->private_data;
1958 if (event_notifier_enabler)
1959 fput(event_notifier_enabler->group->file);
1960 break;
1961 default:
1962 WARN_ON_ONCE(1);
1963 break;
1964 }
1965
1966 return 0;
1967 }
1968
1969 static const struct file_operations lttng_event_notifier_fops = {
1970 .owner = THIS_MODULE,
1971 .release = lttng_event_notifier_release,
1972 .unlocked_ioctl = lttng_event_notifier_ioctl,
1973 #ifdef CONFIG_COMPAT
1974 .compat_ioctl = lttng_event_notifier_ioctl,
1975 #endif
1976 };
1977
1978 static
1979 int lttng_abi_create_event_notifier(struct file *event_notifier_group_file,
1980 struct lttng_kernel_event_notifier *event_notifier_param)
1981 {
1982 struct lttng_event_notifier_group *event_notifier_group =
1983 event_notifier_group_file->private_data;
1984 int event_notifier_fd, ret;
1985 struct file *event_notifier_file;
1986 void *priv;
1987
1988 switch (event_notifier_param->event.instrumentation) {
1989 case LTTNG_KERNEL_TRACEPOINT:
1990 case LTTNG_KERNEL_UPROBE:
1991 break;
1992 case LTTNG_KERNEL_KPROBE:
1993 event_notifier_param->event.u.kprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
1994 break;
1995 case LTTNG_KERNEL_SYSCALL:
1996 break;
1997 case LTTNG_KERNEL_KRETPROBE:
1998 /* Placing an event notifier on kretprobe is not supported. */
1999 case LTTNG_KERNEL_FUNCTION:
2000 case LTTNG_KERNEL_NOOP:
2001 default:
2002 ret = -EINVAL;
2003 goto inval_instr;
2004 }
2005
2006 event_notifier_param->event.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
2007
2008 event_notifier_fd = lttng_get_unused_fd();
2009 if (event_notifier_fd < 0) {
2010 ret = event_notifier_fd;
2011 goto fd_error;
2012 }
2013
2014 event_notifier_file = anon_inode_getfile("[lttng_event_notifier]",
2015 &lttng_event_notifier_fops,
2016 NULL, O_RDWR);
2017 if (IS_ERR(event_notifier_file)) {
2018 ret = PTR_ERR(event_notifier_file);
2019 goto file_error;
2020 }
2021
2022 /* The event notifier holds a reference on the event notifier group. */
2023 if (!atomic_long_add_unless(&event_notifier_group_file->f_count, 1, LONG_MAX)) {
2024 ret = -EOVERFLOW;
2025 goto refcount_error;
2026 }
2027
2028 if (event_notifier_param->event.instrumentation == LTTNG_KERNEL_TRACEPOINT
2029 || event_notifier_param->event.instrumentation == LTTNG_KERNEL_SYSCALL) {
2030 struct lttng_event_notifier_enabler *enabler;
2031
2032 if (strutils_is_star_glob_pattern(event_notifier_param->event.name)) {
2033 /*
2034 * If the event name is a star globbing pattern,
2035 * we create the special star globbing enabler.
2036 */
2037 enabler = lttng_event_notifier_enabler_create(
2038 event_notifier_group,
2039 LTTNG_ENABLER_FORMAT_STAR_GLOB,
2040 event_notifier_param);
2041 } else {
2042 enabler = lttng_event_notifier_enabler_create(
2043 event_notifier_group,
2044 LTTNG_ENABLER_FORMAT_NAME,
2045 event_notifier_param);
2046 }
2047 priv = enabler;
2048 } else {
2049 struct lttng_event_notifier *event_notifier;
2050
2051 /*
2052 * We tolerate no failure path after event notifier creation.
2053 * It will stay invariant for the rest of the session.
2054 */
2055 event_notifier = lttng_event_notifier_create(NULL,
2056 event_notifier_param->event.token,
2057 event_notifier_param->error_counter_index,
2058 event_notifier_group,
2059 event_notifier_param, NULL,
2060 event_notifier_param->event.instrumentation);
2061 WARN_ON_ONCE(!event_notifier);
2062 if (IS_ERR(event_notifier)) {
2063 ret = PTR_ERR(event_notifier);
2064 goto event_notifier_error;
2065 }
2066 priv = event_notifier;
2067 }
2068 event_notifier_file->private_data = priv;
2069 fd_install(event_notifier_fd, event_notifier_file);
2070 return event_notifier_fd;
2071
2072 event_notifier_error:
2073 atomic_long_dec(&event_notifier_group_file->f_count);
2074 refcount_error:
2075 fput(event_notifier_file);
2076 file_error:
2077 put_unused_fd(event_notifier_fd);
2078 fd_error:
2079 inval_instr:
2080 return ret;
2081 }
2082
2083 static
2084 long lttng_abi_event_notifier_group_create_error_counter(
2085 struct file *event_notifier_group_file,
2086 const struct lttng_kernel_counter_conf *error_counter_conf)
2087 {
2088 int counter_fd, ret;
2089 char *counter_transport_name;
2090 size_t counter_len;
2091 struct lttng_counter *counter = NULL;
2092 struct file *counter_file;
2093 struct lttng_event_notifier_group *event_notifier_group =
2094 (struct lttng_event_notifier_group *) event_notifier_group_file->private_data;
2095
2096 if (error_counter_conf->arithmetic != LTTNG_KERNEL_COUNTER_ARITHMETIC_MODULAR) {
2097 printk(KERN_ERR "LTTng: event_notifier: Error counter of the wrong arithmetic type.\n");
2098 return -EINVAL;
2099 }
2100
2101 if (error_counter_conf->number_dimensions != 1) {
2102 		printk(KERN_ERR "LTTng: event_notifier: Error counter must have exactly one dimension.\n");
2103 return -EINVAL;
2104 }
2105
2106 switch (error_counter_conf->bitness) {
2107 case LTTNG_KERNEL_COUNTER_BITNESS_64:
2108 counter_transport_name = "counter-per-cpu-64-modular";
2109 break;
2110 case LTTNG_KERNEL_COUNTER_BITNESS_32:
2111 counter_transport_name = "counter-per-cpu-32-modular";
2112 break;
2113 default:
2114 return -EINVAL;
2115 }
2116
2117 /*
2118 * Lock sessions to provide mutual exclusion against concurrent
2119 * modification of event_notifier group, which would result in
2120 * overwriting the error counter if set concurrently.
2121 */
2122 lttng_lock_sessions();
2123
2124 if (event_notifier_group->error_counter) {
2125 		printk(KERN_ERR "LTTng: event_notifier: Error counter already created in event_notifier group.\n");
2126 ret = -EBUSY;
2127 goto fd_error;
2128 }
2129
2130 counter_fd = lttng_get_unused_fd();
2131 if (counter_fd < 0) {
2132 ret = counter_fd;
2133 goto fd_error;
2134 }
2135
2136 counter_file = anon_inode_getfile("[lttng_counter]",
2137 &lttng_counter_fops,
2138 NULL, O_RDONLY);
2139 if (IS_ERR(counter_file)) {
2140 ret = PTR_ERR(counter_file);
2141 goto file_error;
2142 }
2143
2144 counter_len = error_counter_conf->dimensions[0].size;
2145
2146 if (!atomic_long_add_unless(&event_notifier_group_file->f_count, 1, LONG_MAX)) {
2147 ret = -EOVERFLOW;
2148 goto refcount_error;
2149 }
2150
2151 counter = lttng_kernel_counter_create(counter_transport_name,
2152 1, &counter_len);
2153 if (!counter) {
2154 ret = -EINVAL;
2155 goto counter_error;
2156 }
2157
2158 event_notifier_group->error_counter_len = counter_len;
2159 /*
2160 * store-release to publish error counter matches load-acquire
2161 * in record_error. Ensures the counter is created and the
2162 * error_counter_len is set before they are used.
2163 */
2164 lttng_smp_store_release(&event_notifier_group->error_counter, counter);
2165
2166 counter->file = counter_file;
2167 counter->owner = event_notifier_group->file;
2168 counter_file->private_data = counter;
2169 /* Ownership transferred. */
2170 counter = NULL;
2171
2172 fd_install(counter_fd, counter_file);
2173 lttng_unlock_sessions();
2174
2175 return counter_fd;
2176
2177 counter_error:
2178 atomic_long_dec(&event_notifier_group_file->f_count);
2179 refcount_error:
2180 fput(counter_file);
2181 file_error:
2182 put_unused_fd(counter_fd);
2183 fd_error:
2184 lttng_unlock_sessions();
2185 return ret;
2186 }
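
/*
 * Editor's note: minimal sketch, not part of the original file, of a
 * counter configuration accepted by the checks above.  The field names are
 * the ones this function dereferences; the dimension size (4096 here) is an
 * arbitrary illustrative value.
 *
 *	struct lttng_kernel_counter_conf conf;
 *	int counter_fd;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	conf.arithmetic = LTTNG_KERNEL_COUNTER_ARITHMETIC_MODULAR;
 *	conf.bitness = LTTNG_KERNEL_COUNTER_BITNESS_64;
 *	conf.number_dimensions = 1;
 *	conf.dimensions[0].size = 4096;
 *
 *	counter_fd = ioctl(group_fd, LTTNG_KERNEL_COUNTER, &conf);
 *
 * A second LTTNG_KERNEL_COUNTER on the same event notifier group fd fails
 * with -EBUSY: a group owns at most one error counter.
 */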
2187
2188 static
2189 long lttng_event_notifier_group_ioctl(struct file *file, unsigned int cmd,
2190 unsigned long arg)
2191 {
2192 switch (cmd) {
2193 case LTTNG_KERNEL_EVENT_NOTIFIER_GROUP_NOTIFICATION_FD:
2194 {
2195 return lttng_abi_open_event_notifier_group_stream(file);
2196 }
2197 case LTTNG_KERNEL_EVENT_NOTIFIER_CREATE:
2198 {
2199 struct lttng_kernel_event_notifier uevent_notifier_param;
2200
2201 if (copy_from_user(&uevent_notifier_param,
2202 (struct lttng_kernel_event_notifier __user *) arg,
2203 sizeof(uevent_notifier_param)))
2204 return -EFAULT;
2205 return lttng_abi_create_event_notifier(file, &uevent_notifier_param);
2206 }
2207 case LTTNG_KERNEL_COUNTER:
2208 {
2209 struct lttng_kernel_counter_conf uerror_counter_conf;
2210
2211 if (copy_from_user(&uerror_counter_conf,
2212 (struct lttng_kernel_counter_conf __user *) arg,
2213 sizeof(uerror_counter_conf)))
2214 return -EFAULT;
2215 return lttng_abi_event_notifier_group_create_error_counter(file,
2216 &uerror_counter_conf);
2217 }
2218 default:
2219 return -ENOIOCTLCMD;
2220 }
2221 return 0;
2222 }
2223
2224 static
2225 int lttng_event_notifier_group_release(struct inode *inode, struct file *file)
2226 {
2227 struct lttng_event_notifier_group *event_notifier_group =
2228 file->private_data;
2229
2230 if (event_notifier_group)
2231 lttng_event_notifier_group_destroy(event_notifier_group);
2232 return 0;
2233 }
2234
2235 static const struct file_operations lttng_event_notifier_group_fops = {
2236 .owner = THIS_MODULE,
2237 .release = lttng_event_notifier_group_release,
2238 .unlocked_ioctl = lttng_event_notifier_group_ioctl,
2239 #ifdef CONFIG_COMPAT
2240 .compat_ioctl = lttng_event_notifier_group_ioctl,
2241 #endif
2242 };
2243
2244 /**
2245 * lttng_channel_ioctl - lttng syscall through ioctl
2246 *
2247 * @file: the file
2248 * @cmd: the command
2249 * @arg: command arg
2250 *
2251 * This ioctl implements lttng commands:
2252 * LTTNG_KERNEL_STREAM
2253 * Returns an event stream file descriptor or failure.
2254 * (typically, one event stream records events from one CPU)
2255 * LTTNG_KERNEL_EVENT
2256 * Returns an event file descriptor or failure.
2257 * LTTNG_KERNEL_CONTEXT
2258 * Prepend a context field to each event in the channel
2259 * LTTNG_KERNEL_ENABLE
2260 * Enable recording for events in this channel (weak enable)
2261 * LTTNG_KERNEL_DISABLE
2262 * Disable recording for events in this channel (strong disable)
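 *	LTTNG_KERNEL_SYSCALL_MASK
 *		Returns the mask of system calls traced by this channel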
2263 *
2264 * Channel and event file descriptors also hold a reference on the session.
2265 */
2266 static
2267 long lttng_channel_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2268 {
2269 struct lttng_channel *channel = file->private_data;
2270
2271 switch (cmd) {
2272 case LTTNG_KERNEL_OLD_STREAM:
2273 case LTTNG_KERNEL_STREAM:
2274 return lttng_abi_open_stream(file);
2275 case LTTNG_KERNEL_OLD_EVENT:
2276 {
2277 struct lttng_kernel_event *uevent_param;
2278 struct lttng_kernel_old_event *old_uevent_param;
2279 int ret;
2280
2281 uevent_param = kmalloc(sizeof(struct lttng_kernel_event),
2282 GFP_KERNEL);
2283 if (!uevent_param) {
2284 ret = -ENOMEM;
2285 goto old_event_end;
2286 }
2287 old_uevent_param = kmalloc(
2288 sizeof(struct lttng_kernel_old_event),
2289 GFP_KERNEL);
2290 if (!old_uevent_param) {
2291 ret = -ENOMEM;
2292 goto old_event_error_free_param;
2293 }
2294 if (copy_from_user(old_uevent_param,
2295 (struct lttng_kernel_old_event __user *) arg,
2296 sizeof(struct lttng_kernel_old_event))) {
2297 ret = -EFAULT;
2298 goto old_event_error_free_old_param;
2299 }
2300
2301 memcpy(uevent_param->name, old_uevent_param->name,
2302 sizeof(uevent_param->name));
2303 uevent_param->instrumentation =
2304 old_uevent_param->instrumentation;
2305
2306 switch (old_uevent_param->instrumentation) {
2307 case LTTNG_KERNEL_KPROBE:
2308 uevent_param->u.kprobe.addr =
2309 old_uevent_param->u.kprobe.addr;
2310 uevent_param->u.kprobe.offset =
2311 old_uevent_param->u.kprobe.offset;
2312 memcpy(uevent_param->u.kprobe.symbol_name,
2313 old_uevent_param->u.kprobe.symbol_name,
2314 sizeof(uevent_param->u.kprobe.symbol_name));
2315 break;
2316 case LTTNG_KERNEL_KRETPROBE:
2317 uevent_param->u.kretprobe.addr =
2318 old_uevent_param->u.kretprobe.addr;
2319 uevent_param->u.kretprobe.offset =
2320 old_uevent_param->u.kretprobe.offset;
2321 memcpy(uevent_param->u.kretprobe.symbol_name,
2322 old_uevent_param->u.kretprobe.symbol_name,
2323 sizeof(uevent_param->u.kretprobe.symbol_name));
2324 break;
2325 case LTTNG_KERNEL_FUNCTION:
2326 WARN_ON_ONCE(1);
2327 /* Not implemented. */
2328 break;
2329 default:
2330 break;
2331 }
2332 ret = lttng_abi_create_event(file, uevent_param);
2333
2334 old_event_error_free_old_param:
2335 kfree(old_uevent_param);
2336 old_event_error_free_param:
2337 kfree(uevent_param);
2338 old_event_end:
2339 return ret;
2340 }
2341 case LTTNG_KERNEL_EVENT:
2342 {
2343 struct lttng_kernel_event uevent_param;
2344
2345 if (copy_from_user(&uevent_param,
2346 (struct lttng_kernel_event __user *) arg,
2347 sizeof(uevent_param)))
2348 return -EFAULT;
2349 return lttng_abi_create_event(file, &uevent_param);
2350 }
2351 case LTTNG_KERNEL_OLD_CONTEXT:
2352 {
2353 struct lttng_kernel_context *ucontext_param;
2354 struct lttng_kernel_old_context *old_ucontext_param;
2355 int ret;
2356
2357 ucontext_param = kmalloc(sizeof(struct lttng_kernel_context),
2358 GFP_KERNEL);
2359 if (!ucontext_param) {
2360 ret = -ENOMEM;
2361 goto old_ctx_end;
2362 }
2363 old_ucontext_param = kmalloc(sizeof(struct lttng_kernel_old_context),
2364 GFP_KERNEL);
2365 if (!old_ucontext_param) {
2366 ret = -ENOMEM;
2367 goto old_ctx_error_free_param;
2368 }
2369
2370 if (copy_from_user(old_ucontext_param,
2371 (struct lttng_kernel_old_context __user *) arg,
2372 sizeof(struct lttng_kernel_old_context))) {
2373 ret = -EFAULT;
2374 goto old_ctx_error_free_old_param;
2375 }
2376 ucontext_param->ctx = old_ucontext_param->ctx;
2377 memcpy(ucontext_param->padding, old_ucontext_param->padding,
2378 sizeof(ucontext_param->padding));
2379 /* only type that uses the union */
2380 if (old_ucontext_param->ctx == LTTNG_KERNEL_CONTEXT_PERF_COUNTER) {
2381 ucontext_param->u.perf_counter.type =
2382 old_ucontext_param->u.perf_counter.type;
2383 ucontext_param->u.perf_counter.config =
2384 old_ucontext_param->u.perf_counter.config;
2385 memcpy(ucontext_param->u.perf_counter.name,
2386 old_ucontext_param->u.perf_counter.name,
2387 sizeof(ucontext_param->u.perf_counter.name));
2388 }
2389
2390 ret = lttng_abi_add_context(file,
2391 ucontext_param,
2392 &channel->ctx, channel->session);
2393
2394 old_ctx_error_free_old_param:
2395 kfree(old_ucontext_param);
2396 old_ctx_error_free_param:
2397 kfree(ucontext_param);
2398 old_ctx_end:
2399 return ret;
2400 }
2401 case LTTNG_KERNEL_CONTEXT:
2402 {
2403 struct lttng_kernel_context ucontext_param;
2404
2405 if (copy_from_user(&ucontext_param,
2406 (struct lttng_kernel_context __user *) arg,
2407 sizeof(ucontext_param)))
2408 return -EFAULT;
2409 return lttng_abi_add_context(file,
2410 &ucontext_param,
2411 &channel->ctx, channel->session);
2412 }
2413 case LTTNG_KERNEL_OLD_ENABLE:
2414 case LTTNG_KERNEL_ENABLE:
2415 return lttng_channel_enable(channel);
2416 case LTTNG_KERNEL_OLD_DISABLE:
2417 case LTTNG_KERNEL_DISABLE:
2418 return lttng_channel_disable(channel);
2419 case LTTNG_KERNEL_SYSCALL_MASK:
2420 return lttng_channel_syscall_mask(channel,
2421 (struct lttng_kernel_syscall_mask __user *) arg);
2422 default:
2423 return -ENOIOCTLCMD;
2424 }
2425 }
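
/*
 * Editor's note: illustrative userspace sketch, not part of the original
 * file, showing how the lttng_channel_ioctl() commands above are driven.
 * It assumes a channel file descriptor "chan_fd" obtained through the
 * session ABI and the command/structure definitions from the LTTng ABI
 * header (lttng/abi.h in this tree); only the "name" and "instrumentation"
 * fields of struct lttng_kernel_event are shown, the rest stays zeroed.
 *
 *	struct lttng_kernel_event ev;
 *	int stream_fd, event_fd;
 *
 *	memset(&ev, 0, sizeof(ev));
 *	strncpy(ev.name, "sched_switch", sizeof(ev.name) - 1);
 *	ev.instrumentation = LTTNG_KERNEL_TRACEPOINT;
 *
 *	stream_fd = ioctl(chan_fd, LTTNG_KERNEL_STREAM);	(per-CPU stream fd)
 *	event_fd = ioctl(chan_fd, LTTNG_KERNEL_EVENT, &ev);	(event/enabler fd)
 *
 * As lttng_abi_create_event() shows above, the event fd takes a reference
 * on the channel file, so the channel cannot be torn down while the event
 * fd remains open.
 */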
2426
2427 /**
2428 * lttng_metadata_ioctl - lttng syscall through ioctl
2429 *
2430 * @file: the file
2431 * @cmd: the command
2432 * @arg: command arg
2433 *
2434 * This ioctl implements lttng commands:
2435 * LTTNG_KERNEL_STREAM
2436 * Returns an event stream file descriptor or failure.
2437 *
2438 * Channel and event file descriptors also hold a reference on the session.
2439 */
2440 static
2441 long lttng_metadata_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2442 {
2443 switch (cmd) {
2444 case LTTNG_KERNEL_OLD_STREAM:
2445 case LTTNG_KERNEL_STREAM:
2446 return lttng_abi_open_metadata_stream(file);
2447 default:
2448 return -ENOIOCTLCMD;
2449 }
2450 }
2451
2452 /**
2453 * lttng_channel_poll - lttng stream addition/removal monitoring
2454 *
2455 * @file: the file
2456 * @wait: poll table
2457 */
2458 unsigned int lttng_channel_poll(struct file *file, poll_table *wait)
2459 {
2460 struct lttng_channel *channel = file->private_data;
2461 unsigned int mask = 0;
2462
2463 if (file->f_mode & FMODE_READ) {
2464 poll_wait_set_exclusive(wait);
2465 poll_wait(file, channel->ops->get_hp_wait_queue(channel->chan),
2466 wait);
2467
2468 if (channel->ops->is_disabled(channel->chan))
2469 return POLLERR;
2470 if (channel->ops->is_finalized(channel->chan))
2471 return POLLHUP;
2472 if (channel->ops->buffer_has_read_closed_stream(channel->chan))
2473 return POLLIN | POLLRDNORM;
2474 return 0;
2475 }
2476 return mask;
2477
2478 }
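
/*
 * Editor's note: illustrative sketch, not part of the original file.  A
 * consumer monitors the channel fd with poll(2); the returned events map
 * directly to the checks in lttng_channel_poll() above:
 *
 *	struct pollfd pfd = { .fd = chan_fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);
 *	(POLLERR: channel disabled, POLLHUP: channel finalized,
 *	 POLLIN | POLLRDNORM: buffer_has_read_closed_stream() reported a stream)
 */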
2479
2480 static
2481 int lttng_channel_release(struct inode *inode, struct file *file)
2482 {
2483 struct lttng_channel *channel = file->private_data;
2484
2485 if (channel)
2486 fput(channel->session->file);
2487 return 0;
2488 }
2489
2490 static
2491 int lttng_metadata_channel_release(struct inode *inode, struct file *file)
2492 {
2493 struct lttng_channel *channel = file->private_data;
2494
2495 if (channel) {
2496 fput(channel->session->file);
2497 lttng_metadata_channel_destroy(channel);
2498 }
2499
2500 return 0;
2501 }
2502
2503 static const struct file_operations lttng_channel_fops = {
2504 .owner = THIS_MODULE,
2505 .release = lttng_channel_release,
2506 .poll = lttng_channel_poll,
2507 .unlocked_ioctl = lttng_channel_ioctl,
2508 #ifdef CONFIG_COMPAT
2509 .compat_ioctl = lttng_channel_ioctl,
2510 #endif
2511 };
2512
2513 static const struct file_operations lttng_metadata_fops = {
2514 .owner = THIS_MODULE,
2515 .release = lttng_metadata_channel_release,
2516 .unlocked_ioctl = lttng_metadata_ioctl,
2517 #ifdef CONFIG_COMPAT
2518 .compat_ioctl = lttng_metadata_ioctl,
2519 #endif
2520 };
2521
2522 /**
2523 * lttng_event_ioctl - lttng syscall through ioctl
2524 *
2525 * @file: the file
2526 * @cmd: the command
2527 * @arg: command arg
2528 *
2529 * This ioctl implements lttng commands:
2530 * LTTNG_KERNEL_CONTEXT
2531 * Prepend a context field to each record of this event
2532 * LTTNG_KERNEL_ENABLE
2533 * Enable recording for this event (weak enable)
2534 * LTTNG_KERNEL_DISABLE
2535 * Disable recording for this event (strong disable)
2536 */
2537 static
2538 long lttng_event_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2539 {
2540 struct lttng_event *event;
2541 struct lttng_event_enabler *event_enabler;
2542 enum lttng_event_type *evtype = file->private_data;
2543
2544 switch (cmd) {
2545 case LTTNG_KERNEL_OLD_CONTEXT:
2546 {
2547 /* Not implemented */
2548 return -ENOSYS;
2549 }
2550 case LTTNG_KERNEL_CONTEXT:
2551 {
2552 /* Not implemented */
2553 return -ENOSYS;
2554 }
2555 case LTTNG_KERNEL_OLD_ENABLE:
2556 case LTTNG_KERNEL_ENABLE:
2557 switch (*evtype) {
2558 case LTTNG_TYPE_EVENT:
2559 event = file->private_data;
2560 return lttng_event_enable(event);
2561 case LTTNG_TYPE_ENABLER:
2562 event_enabler = file->private_data;
2563 return lttng_event_enabler_enable(event_enabler);
2564 default:
2565 WARN_ON_ONCE(1);
2566 return -ENOSYS;
2567 }
2568 case LTTNG_KERNEL_OLD_DISABLE:
2569 case LTTNG_KERNEL_DISABLE:
2570 switch (*evtype) {
2571 case LTTNG_TYPE_EVENT:
2572 event = file->private_data;
2573 return lttng_event_disable(event);
2574 case LTTNG_TYPE_ENABLER:
2575 event_enabler = file->private_data;
2576 return lttng_event_enabler_disable(event_enabler);
2577 default:
2578 WARN_ON_ONCE(1);
2579 return -ENOSYS;
2580 }
2581 case LTTNG_KERNEL_FILTER:
2582 switch (*evtype) {
2583 case LTTNG_TYPE_EVENT:
2584 return -EINVAL;
2585 case LTTNG_TYPE_ENABLER:
2586 {
2587 event_enabler = file->private_data;
2588 return lttng_event_enabler_attach_filter_bytecode(
2589 event_enabler,
2590 (struct lttng_kernel_filter_bytecode __user *) arg);
2591 }
2592 default:
2593 WARN_ON_ONCE(1);
2594 return -ENOSYS;
2595 }
2596 case LTTNG_KERNEL_ADD_CALLSITE:
2597 switch (*evtype) {
2598 case LTTNG_TYPE_EVENT:
2599 event = file->private_data;
2600 return lttng_event_add_callsite(event,
2601 (struct lttng_kernel_event_callsite __user *) arg);
2602 case LTTNG_TYPE_ENABLER:
2603 return -EINVAL;
2604 default:
2605 WARN_ON_ONCE(1);
2606 return -ENOSYS;
2607 }
2608 default:
2609 return -ENOIOCTLCMD;
2610 }
2611 }
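
/*
 * Editor's note: illustrative sketch, not part of the original file.  The
 * fd returned by LTTNG_KERNEL_EVENT is driven with the commands handled in
 * lttng_event_ioctl() above, e.g.:
 *
 *	ioctl(event_fd, LTTNG_KERNEL_DISABLE);	(strong disable)
 *	ioctl(event_fd, LTTNG_KERNEL_ENABLE);	(weak enable)
 *
 * LTTNG_KERNEL_FILTER only succeeds while the fd still refers to an
 * enabler (LTTNG_TYPE_ENABLER); on an LTTNG_TYPE_EVENT it returns -EINVAL,
 * and LTTNG_KERNEL_ADD_CALLSITE has the opposite constraint.
 */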
2612
2613 static
2614 int lttng_event_release(struct inode *inode, struct file *file)
2615 {
2616 struct lttng_event *event;
2617 struct lttng_event_enabler *event_enabler;
2618 enum lttng_event_type *evtype = file->private_data;
2619
2620 if (!evtype)
2621 return 0;
2622
2623 switch (*evtype) {
2624 case LTTNG_TYPE_EVENT:
2625 event = file->private_data;
2626 if (event)
2627 fput(event->chan->file);
2628 break;
2629 case LTTNG_TYPE_ENABLER:
2630 event_enabler = file->private_data;
2631 if (event_enabler)
2632 fput(event_enabler->chan->file);
2633 break;
2634 default:
2635 WARN_ON_ONCE(1);
2636 break;
2637 }
2638
2639 return 0;
2640 }
2641
2642 /* TODO: filter control ioctl */
2643 static const struct file_operations lttng_event_fops = {
2644 .owner = THIS_MODULE,
2645 .release = lttng_event_release,
2646 .unlocked_ioctl = lttng_event_ioctl,
2647 #ifdef CONFIG_COMPAT
2648 .compat_ioctl = lttng_event_ioctl,
2649 #endif
2650 };
2651
2652 static int put_u64(uint64_t val, unsigned long arg)
2653 {
2654 return put_user(val, (uint64_t __user *) arg);
2655 }
2656
2657 static int put_u32(uint32_t val, unsigned long arg)
2658 {
2659 return put_user(val, (uint32_t __user *) arg);
2660 }
2661
2662 static long lttng_stream_ring_buffer_ioctl(struct file *filp,
2663 unsigned int cmd, unsigned long arg)
2664 {
2665 struct lib_ring_buffer *buf = filp->private_data;
2666 struct channel *chan = buf->backend.chan;
2667 const struct lib_ring_buffer_config *config = &chan->backend.config;
2668 const struct lttng_channel_ops *ops = chan->backend.priv_ops;
2669 int ret;
2670
2671 if (atomic_read(&chan->record_disabled))
2672 return -EIO;
2673
2674 switch (cmd) {
2675 case LTTNG_RING_BUFFER_GET_TIMESTAMP_BEGIN:
2676 {
2677 uint64_t ts;
2678
2679 ret = ops->timestamp_begin(config, buf, &ts);
2680 if (ret < 0)
2681 goto error;
2682 return put_u64(ts, arg);
2683 }
2684 case LTTNG_RING_BUFFER_GET_TIMESTAMP_END:
2685 {
2686 uint64_t ts;
2687
2688 ret = ops->timestamp_end(config, buf, &ts);
2689 if (ret < 0)
2690 goto error;
2691 return put_u64(ts, arg);
2692 }
2693 case LTTNG_RING_BUFFER_GET_EVENTS_DISCARDED:
2694 {
2695 uint64_t ed;
2696
2697 ret = ops->events_discarded(config, buf, &ed);
2698 if (ret < 0)
2699 goto error;
2700 return put_u64(ed, arg);
2701 }
2702 case LTTNG_RING_BUFFER_GET_CONTENT_SIZE:
2703 {
2704 uint64_t cs;
2705
2706 ret = ops->content_size(config, buf, &cs);
2707 if (ret < 0)
2708 goto error;
2709 return put_u64(cs, arg);
2710 }
2711 case LTTNG_RING_BUFFER_GET_PACKET_SIZE:
2712 {
2713 uint64_t ps;
2714
2715 ret = ops->packet_size(config, buf, &ps);
2716 if (ret < 0)
2717 goto error;
2718 return put_u64(ps, arg);
2719 }
2720 case LTTNG_RING_BUFFER_GET_STREAM_ID:
2721 {
2722 uint64_t si;
2723
2724 ret = ops->stream_id(config, buf, &si);
2725 if (ret < 0)
2726 goto error;
2727 return put_u64(si, arg);
2728 }
2729 case LTTNG_RING_BUFFER_GET_CURRENT_TIMESTAMP:
2730 {
2731 uint64_t ts;
2732
2733 ret = ops->current_timestamp(config, buf, &ts);
2734 if (ret < 0)
2735 goto error;
2736 return put_u64(ts, arg);
2737 }
2738 case LTTNG_RING_BUFFER_GET_SEQ_NUM:
2739 {
2740 uint64_t seq;
2741
2742 ret = ops->sequence_number(config, buf, &seq);
2743 if (ret < 0)
2744 goto error;
2745 return put_u64(seq, arg);
2746 }
2747 case LTTNG_RING_BUFFER_INSTANCE_ID:
2748 {
2749 uint64_t id;
2750
2751 ret = ops->instance_id(config, buf, &id);
2752 if (ret < 0)
2753 goto error;
2754 return put_u64(id, arg);
2755 }
2756 default:
2757 return lib_ring_buffer_file_operations.unlocked_ioctl(filp,
2758 cmd, arg);
2759 }
2760
2761 error:
2762 return -ENOSYS;
2763 }
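
/*
 * Editor's note: illustrative sketch, not part of the original file.  Each
 * command handled above writes a single uint64_t value back to user space:
 *
 *	uint64_t ts_begin;
 *
 *	if (ioctl(stream_fd, LTTNG_RING_BUFFER_GET_TIMESTAMP_BEGIN, &ts_begin) < 0)
 *		perror("timestamp_begin");
 *
 * (errno is ENOSYS when the channel ops cannot supply the requested value
 *  and EIO when recording is disabled on the channel.)  Any other command
 * falls through to the generic ring buffer ioctl handler.
 */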
2764
2765 #ifdef CONFIG_COMPAT
2766 static long lttng_stream_ring_buffer_compat_ioctl(struct file *filp,
2767 unsigned int cmd, unsigned long arg)
2768 {
2769 struct lib_ring_buffer *buf = filp->private_data;
2770 struct channel *chan = buf->backend.chan;
2771 const struct lib_ring_buffer_config *config = &chan->backend.config;
2772 const struct lttng_channel_ops *ops = chan->backend.priv_ops;
2773 int ret;
2774
2775 if (atomic_read(&chan->record_disabled))
2776 return -EIO;
2777
2778 switch (cmd) {
2779 case LTTNG_RING_BUFFER_COMPAT_GET_TIMESTAMP_BEGIN:
2780 {
2781 uint64_t ts;
2782
2783 ret = ops->timestamp_begin(config, buf, &ts);
2784 if (ret < 0)
2785 goto error;
2786 return put_u64(ts, arg);
2787 }
2788 case LTTNG_RING_BUFFER_COMPAT_GET_TIMESTAMP_END:
2789 {
2790 uint64_t ts;
2791
2792 ret = ops->timestamp_end(config, buf, &ts);
2793 if (ret < 0)
2794 goto error;
2795 return put_u64(ts, arg);
2796 }
2797 case LTTNG_RING_BUFFER_COMPAT_GET_EVENTS_DISCARDED:
2798 {
2799 uint64_t ed;
2800
2801 ret = ops->events_discarded(config, buf, &ed);
2802 if (ret < 0)
2803 goto error;
2804 return put_u64(ed, arg);
2805 }
2806 case LTTNG_RING_BUFFER_COMPAT_GET_CONTENT_SIZE:
2807 {
2808 uint64_t cs;
2809
2810 ret = ops->content_size(config, buf, &cs);
2811 if (ret < 0)
2812 goto error;
2813 return put_u64(cs, arg);
2814 }
2815 case LTTNG_RING_BUFFER_COMPAT_GET_PACKET_SIZE:
2816 {
2817 uint64_t ps;
2818
2819 ret = ops->packet_size(config, buf, &ps);
2820 if (ret < 0)
2821 goto error;
2822 return put_u64(ps, arg);
2823 }
2824 case LTTNG_RING_BUFFER_COMPAT_GET_STREAM_ID:
2825 {
2826 uint64_t si;
2827
2828 ret = ops->stream_id(config, buf, &si);
2829 if (ret < 0)
2830 goto error;
2831 return put_u64(si, arg);
2832 }
2833 case LTTNG_RING_BUFFER_GET_CURRENT_TIMESTAMP:
2834 {
2835 uint64_t ts;
2836
2837 ret = ops->current_timestamp(config, buf, &ts);
2838 if (ret < 0)
2839 goto error;
2840 return put_u64(ts, arg);
2841 }
2842 case LTTNG_RING_BUFFER_COMPAT_GET_SEQ_NUM:
2843 {
2844 uint64_t seq;
2845
2846 ret = ops->sequence_number(config, buf, &seq);
2847 if (ret < 0)
2848 goto error;
2849 return put_u64(seq, arg);
2850 }
2851 case LTTNG_RING_BUFFER_COMPAT_INSTANCE_ID:
2852 {
2853 uint64_t id;
2854
2855 ret = ops->instance_id(config, buf, &id);
2856 if (ret < 0)
2857 goto error;
2858 return put_u64(id, arg);
2859 }
2860 default:
2861 return lib_ring_buffer_file_operations.compat_ioctl(filp,
2862 cmd, arg);
2863 }
2864
2865 error:
2866 return -ENOSYS;
2867 }
2868 #endif /* CONFIG_COMPAT */
2869
2870 static void lttng_stream_override_ring_buffer_fops(void)
2871 {
2872 lttng_stream_ring_buffer_file_operations.owner = THIS_MODULE;
2873 lttng_stream_ring_buffer_file_operations.open =
2874 lib_ring_buffer_file_operations.open;
2875 lttng_stream_ring_buffer_file_operations.release =
2876 lib_ring_buffer_file_operations.release;
2877 lttng_stream_ring_buffer_file_operations.poll =
2878 lib_ring_buffer_file_operations.poll;
2879 lttng_stream_ring_buffer_file_operations.splice_read =
2880 lib_ring_buffer_file_operations.splice_read;
2881 lttng_stream_ring_buffer_file_operations.mmap =
2882 lib_ring_buffer_file_operations.mmap;
2883 lttng_stream_ring_buffer_file_operations.unlocked_ioctl =
2884 lttng_stream_ring_buffer_ioctl;
2885 lttng_stream_ring_buffer_file_operations.llseek =
2886 lib_ring_buffer_file_operations.llseek;
2887 #ifdef CONFIG_COMPAT
2888 lttng_stream_ring_buffer_file_operations.compat_ioctl =
2889 lttng_stream_ring_buffer_compat_ioctl;
2890 #endif
2891 }
2892
2893 int __init lttng_abi_init(void)
2894 {
2895 int ret = 0;
2896
2897 wrapper_vmalloc_sync_mappings();
2898 lttng_clock_ref();
2899
2900 ret = lttng_tp_mempool_init();
2901 if (ret) {
2902 goto error;
2903 }
2904
2905 lttng_proc_dentry = proc_create_data("lttng", S_IRUSR | S_IWUSR, NULL,
2906 &lttng_proc_ops, NULL);
2907
2908 if (!lttng_proc_dentry) {
2909 printk(KERN_ERR "LTTng: Error creating control file\n");
2910 ret = -ENOMEM;
2911 goto error;
2912 }
2913 lttng_stream_override_ring_buffer_fops();
2914 return 0;
2915
2916 error:
2917 lttng_tp_mempool_destroy();
2918 lttng_clock_unref();
2919 return ret;
2920 }
2921
2922 /* No __exit annotation because used by init error path too. */
2923 void lttng_abi_exit(void)
2924 {
2925 lttng_tp_mempool_destroy();
2926 lttng_clock_unref();
2927 if (lttng_proc_dentry)
2928 remove_proc_entry("lttng", NULL);
2929 }