lttng-modules: src/lttng-abi.c (blob at commit "Refactoring: type description structures")
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-abi.c
4 *
5 * LTTng ABI
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Mimic system calls for:
10 * - session creation, returns a file descriptor or failure.
11 * - channel creation, returns a file descriptor or failure.
12 * - Operates on a session file descriptor
13 * - Takes all channel options as parameters.
14 * - stream get, returns a file descriptor or failure.
15 * - Operates on a channel file descriptor.
16 * - stream notifier get, returns a file descriptor or failure.
17 * - Operates on a channel file descriptor.
18 * - event creation, returns a file descriptor or failure.
19 * - Operates on a channel file descriptor
20 * - Takes an event name as parameter
21 * - Takes an instrumentation source as parameter
22 * - e.g. tracepoints, dynamic_probes...
23 * - Takes instrumentation source specific arguments.
24 */
25
26 #include <linux/module.h>
27 #include <linux/proc_fs.h>
28 #include <linux/anon_inodes.h>
29 #include <linux/file.h>
30 #include <linux/uaccess.h>
31 #include <linux/slab.h>
32 #include <linux/err.h>
33 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
34 #include <ringbuffer/vfs.h>
35 #include <ringbuffer/backend.h>
36 #include <ringbuffer/frontend.h>
37 #include <wrapper/poll.h>
38 #include <wrapper/file.h>
39 #include <wrapper/kref.h>
40 #include <wrapper/barrier.h>
41 #include <lttng/string-utils.h>
42 #include <lttng/abi.h>
43 #include <lttng/abi-old.h>
44 #include <lttng/events.h>
45 #include <lttng/events-internal.h>
46 #include <lttng/tracer.h>
47 #include <lttng/tp-mempool.h>
48 #include <ringbuffer/frontend_types.h>
49 #include <ringbuffer/iterator.h>
50
51 /*
 52 * This is LTTng's way of exposing a system-call-like interface from an
 53 * external module: we use ioctl() on /proc/lttng.
54 */
55
56 static struct proc_dir_entry *lttng_proc_dentry;
57
58 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0))
59 static const struct proc_ops lttng_proc_ops;
60 #else
61 static const struct file_operations lttng_proc_ops;
62 #endif
63
64 static const struct file_operations lttng_session_fops;
65 static const struct file_operations lttng_event_notifier_group_fops;
66 static const struct file_operations lttng_channel_fops;
67 static const struct file_operations lttng_metadata_fops;
68 static const struct file_operations lttng_event_fops;
69 static struct file_operations lttng_stream_ring_buffer_file_operations;
70
71 static int put_u64(uint64_t val, unsigned long arg);
72 static int put_u32(uint32_t val, unsigned long arg);
73
74 static int validate_zeroed_padding(char *p, size_t len)
75 {
76 size_t i;
77
78 for (i = 0; i < len; i++) {
79 if (p[i])
80 return -1;
81 }
82 return 0;
83 }
84
85 /*
86 * Teardown management: opened file descriptors keep a refcount on the module,
 87 * so it can only be unloaded once all file descriptors are closed.
88 */
89
90 static
91 int lttng_abi_create_session(void)
92 {
93 struct lttng_session *session;
94 struct file *session_file;
95 int session_fd, ret;
96
97 session = lttng_session_create();
98 if (!session)
99 return -ENOMEM;
100 session_fd = lttng_get_unused_fd();
101 if (session_fd < 0) {
102 ret = session_fd;
103 goto fd_error;
104 }
105 session_file = anon_inode_getfile("[lttng_session]",
106 &lttng_session_fops,
107 session, O_RDWR);
108 if (IS_ERR(session_file)) {
109 ret = PTR_ERR(session_file);
110 goto file_error;
111 }
112 session->file = session_file;
113 fd_install(session_fd, session_file);
114 return session_fd;
115
116 file_error:
117 put_unused_fd(session_fd);
118 fd_error:
119 lttng_session_destroy(session);
120 return ret;
121 }
122
123 void event_notifier_send_notification_work_wakeup(struct irq_work *entry)
124 {
125 struct lttng_event_notifier_group *event_notifier_group =
126 container_of(entry, struct lttng_event_notifier_group,
127 wakeup_pending);
128 wake_up_interruptible(&event_notifier_group->read_wait);
129 }
130
131 static
132 int lttng_abi_create_event_notifier_group(void)
133 {
134 struct lttng_event_notifier_group *event_notifier_group;
135 struct file *event_notifier_group_file;
136 int event_notifier_group_fd, ret;
137
138 event_notifier_group = lttng_event_notifier_group_create();
139 if (!event_notifier_group)
140 return -ENOMEM;
141
142 event_notifier_group_fd = lttng_get_unused_fd();
143 if (event_notifier_group_fd < 0) {
144 ret = event_notifier_group_fd;
145 goto fd_error;
146 }
147 event_notifier_group_file = anon_inode_getfile("[lttng_event_notifier_group]",
148 &lttng_event_notifier_group_fops,
149 event_notifier_group, O_RDWR);
150 if (IS_ERR(event_notifier_group_file)) {
151 ret = PTR_ERR(event_notifier_group_file);
152 goto file_error;
153 }
154
155 event_notifier_group->file = event_notifier_group_file;
156 init_waitqueue_head(&event_notifier_group->read_wait);
157 init_irq_work(&event_notifier_group->wakeup_pending,
158 event_notifier_send_notification_work_wakeup);
159 fd_install(event_notifier_group_fd, event_notifier_group_file);
160 return event_notifier_group_fd;
161
162 file_error:
163 put_unused_fd(event_notifier_group_fd);
164 fd_error:
165 lttng_event_notifier_group_destroy(event_notifier_group);
166 return ret;
167 }
168
169 static
170 int lttng_abi_tracepoint_list(void)
171 {
172 struct file *tracepoint_list_file;
173 int file_fd, ret;
174
175 file_fd = lttng_get_unused_fd();
176 if (file_fd < 0) {
177 ret = file_fd;
178 goto fd_error;
179 }
180
181 tracepoint_list_file = anon_inode_getfile("[lttng_tracepoint_list]",
182 &lttng_tracepoint_list_fops,
183 NULL, O_RDWR);
184 if (IS_ERR(tracepoint_list_file)) {
185 ret = PTR_ERR(tracepoint_list_file);
186 goto file_error;
187 }
188 ret = lttng_tracepoint_list_fops.open(NULL, tracepoint_list_file);
189 if (ret < 0)
190 goto open_error;
191 fd_install(file_fd, tracepoint_list_file);
192 return file_fd;
193
194 open_error:
195 fput(tracepoint_list_file);
196 file_error:
197 put_unused_fd(file_fd);
198 fd_error:
199 return ret;
200 }
201
202 #ifndef CONFIG_HAVE_SYSCALL_TRACEPOINTS
203 static inline
204 int lttng_abi_syscall_list(void)
205 {
206 return -ENOSYS;
207 }
208 #else
209 static
210 int lttng_abi_syscall_list(void)
211 {
212 struct file *syscall_list_file;
213 int file_fd, ret;
214
215 file_fd = lttng_get_unused_fd();
216 if (file_fd < 0) {
217 ret = file_fd;
218 goto fd_error;
219 }
220
221 syscall_list_file = anon_inode_getfile("[lttng_syscall_list]",
222 &lttng_syscall_list_fops,
223 NULL, O_RDWR);
224 if (IS_ERR(syscall_list_file)) {
225 ret = PTR_ERR(syscall_list_file);
226 goto file_error;
227 }
228 ret = lttng_syscall_list_fops.open(NULL, syscall_list_file);
229 if (ret < 0)
230 goto open_error;
231 fd_install(file_fd, syscall_list_file);
232 return file_fd;
233
234 open_error:
235 fput(syscall_list_file);
236 file_error:
237 put_unused_fd(file_fd);
238 fd_error:
239 return ret;
240 }
241 #endif
242
243 static
244 void lttng_abi_tracer_version(struct lttng_kernel_tracer_version *v)
245 {
246 v->major = LTTNG_MODULES_MAJOR_VERSION;
247 v->minor = LTTNG_MODULES_MINOR_VERSION;
248 v->patchlevel = LTTNG_MODULES_PATCHLEVEL_VERSION;
249 }
250
251 static
252 void lttng_abi_tracer_abi_version(struct lttng_kernel_tracer_abi_version *v)
253 {
254 v->major = LTTNG_MODULES_ABI_MAJOR_VERSION;
255 v->minor = LTTNG_MODULES_ABI_MINOR_VERSION;
256 }
257
258 static
259 long lttng_abi_add_context(struct file *file,
260 struct lttng_kernel_context *context_param,
261 struct lttng_kernel_ctx **ctx, struct lttng_session *session)
262 {
263
264 if (session->been_active)
265 return -EPERM;
266
267 switch (context_param->ctx) {
268 case LTTNG_KERNEL_CONTEXT_PID:
269 return lttng_add_pid_to_ctx(ctx);
270 case LTTNG_KERNEL_CONTEXT_PRIO:
271 return lttng_add_prio_to_ctx(ctx);
272 case LTTNG_KERNEL_CONTEXT_NICE:
273 return lttng_add_nice_to_ctx(ctx);
274 case LTTNG_KERNEL_CONTEXT_VPID:
275 return lttng_add_vpid_to_ctx(ctx);
276 case LTTNG_KERNEL_CONTEXT_TID:
277 return lttng_add_tid_to_ctx(ctx);
278 case LTTNG_KERNEL_CONTEXT_VTID:
279 return lttng_add_vtid_to_ctx(ctx);
280 case LTTNG_KERNEL_CONTEXT_PPID:
281 return lttng_add_ppid_to_ctx(ctx);
282 case LTTNG_KERNEL_CONTEXT_VPPID:
283 return lttng_add_vppid_to_ctx(ctx);
284 case LTTNG_KERNEL_CONTEXT_PERF_COUNTER:
285 context_param->u.perf_counter.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
286 return lttng_add_perf_counter_to_ctx(context_param->u.perf_counter.type,
287 context_param->u.perf_counter.config,
288 context_param->u.perf_counter.name,
289 ctx);
290 case LTTNG_KERNEL_CONTEXT_PROCNAME:
291 return lttng_add_procname_to_ctx(ctx);
292 case LTTNG_KERNEL_CONTEXT_HOSTNAME:
293 return lttng_add_hostname_to_ctx(ctx);
294 case LTTNG_KERNEL_CONTEXT_CPU_ID:
295 return lttng_add_cpu_id_to_ctx(ctx);
296 case LTTNG_KERNEL_CONTEXT_INTERRUPTIBLE:
297 return lttng_add_interruptible_to_ctx(ctx);
298 case LTTNG_KERNEL_CONTEXT_NEED_RESCHEDULE:
299 return lttng_add_need_reschedule_to_ctx(ctx);
300 case LTTNG_KERNEL_CONTEXT_PREEMPTIBLE:
301 return lttng_add_preemptible_to_ctx(ctx);
302 case LTTNG_KERNEL_CONTEXT_MIGRATABLE:
303 return lttng_add_migratable_to_ctx(ctx);
304 case LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL:
305 case LTTNG_KERNEL_CONTEXT_CALLSTACK_USER:
306 return lttng_add_callstack_to_ctx(ctx, context_param->ctx);
307 case LTTNG_KERNEL_CONTEXT_CGROUP_NS:
308 return lttng_add_cgroup_ns_to_ctx(ctx);
309 case LTTNG_KERNEL_CONTEXT_IPC_NS:
310 return lttng_add_ipc_ns_to_ctx(ctx);
311 case LTTNG_KERNEL_CONTEXT_MNT_NS:
312 return lttng_add_mnt_ns_to_ctx(ctx);
313 case LTTNG_KERNEL_CONTEXT_NET_NS:
314 return lttng_add_net_ns_to_ctx(ctx);
315 case LTTNG_KERNEL_CONTEXT_PID_NS:
316 return lttng_add_pid_ns_to_ctx(ctx);
317 case LTTNG_KERNEL_CONTEXT_USER_NS:
318 return lttng_add_user_ns_to_ctx(ctx);
319 case LTTNG_KERNEL_CONTEXT_UTS_NS:
320 return lttng_add_uts_ns_to_ctx(ctx);
321 case LTTNG_KERNEL_CONTEXT_UID:
322 return lttng_add_uid_to_ctx(ctx);
323 case LTTNG_KERNEL_CONTEXT_EUID:
324 return lttng_add_euid_to_ctx(ctx);
325 case LTTNG_KERNEL_CONTEXT_SUID:
326 return lttng_add_suid_to_ctx(ctx);
327 case LTTNG_KERNEL_CONTEXT_GID:
328 return lttng_add_gid_to_ctx(ctx);
329 case LTTNG_KERNEL_CONTEXT_EGID:
330 return lttng_add_egid_to_ctx(ctx);
331 case LTTNG_KERNEL_CONTEXT_SGID:
332 return lttng_add_sgid_to_ctx(ctx);
333 case LTTNG_KERNEL_CONTEXT_VUID:
334 return lttng_add_vuid_to_ctx(ctx);
335 case LTTNG_KERNEL_CONTEXT_VEUID:
336 return lttng_add_veuid_to_ctx(ctx);
337 case LTTNG_KERNEL_CONTEXT_VSUID:
338 return lttng_add_vsuid_to_ctx(ctx);
339 case LTTNG_KERNEL_CONTEXT_VGID:
340 return lttng_add_vgid_to_ctx(ctx);
341 case LTTNG_KERNEL_CONTEXT_VEGID:
342 return lttng_add_vegid_to_ctx(ctx);
343 case LTTNG_KERNEL_CONTEXT_VSGID:
344 return lttng_add_vsgid_to_ctx(ctx);
345 case LTTNG_KERNEL_CONTEXT_TIME_NS:
346 return lttng_add_time_ns_to_ctx(ctx);
347 default:
348 return -EINVAL;
349 }
350 }
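
/*
 * Illustrative user-space sketch for the context types handled above:
 * request the perf counter context on a channel. LTTNG_KERNEL_CONTEXT
 * is the channel-level ioctl assumed to route here, and "chan_fd" is a
 * channel file descriptor obtained elsewhere; the field names follow
 * struct lttng_kernel_context as used by lttng_abi_add_context().
 *
 *	struct lttng_kernel_context ctx_param;
 *
 *	memset(&ctx_param, 0, sizeof(ctx_param));
 *	ctx_param.ctx = LTTNG_KERNEL_CONTEXT_PERF_COUNTER;
 *	ctx_param.u.perf_counter.type = PERF_TYPE_HARDWARE;
 *	ctx_param.u.perf_counter.config = PERF_COUNT_HW_CPU_CYCLES;
 *	strncpy(ctx_param.u.perf_counter.name, "cpu_cycles",
 *		LTTNG_KERNEL_SYM_NAME_LEN - 1);
 *	if (ioctl(chan_fd, LTTNG_KERNEL_CONTEXT, &ctx_param) < 0)
 *		perror("add context");
 */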
351
352 /**
353 * lttng_ioctl - lttng syscall through ioctl
354 *
355 * @file: the file
356 * @cmd: the command
357 * @arg: command arg
358 *
359 * This ioctl implements lttng commands:
360 * LTTNG_KERNEL_SESSION
361 * Returns a LTTng trace session file descriptor
362 * LTTNG_KERNEL_TRACER_VERSION
363 * Returns the LTTng kernel tracer version
364 * LTTNG_KERNEL_TRACEPOINT_LIST
365 * Returns a file descriptor listing available tracepoints
366 * LTTNG_KERNEL_WAIT_QUIESCENT
367 * Returns after all previously running probes have completed
368 * LTTNG_KERNEL_TRACER_ABI_VERSION
369 * Returns the LTTng kernel tracer ABI version
370 * LTTNG_KERNEL_EVENT_NOTIFIER_GROUP_CREATE
371 * Returns a LTTng event notifier group file descriptor
372 *
373 * The returned session will be deleted when its file descriptor is closed.
374 */
375 static
376 long lttng_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
377 {
378 switch (cmd) {
379 case LTTNG_KERNEL_OLD_SESSION:
380 case LTTNG_KERNEL_SESSION:
381 return lttng_abi_create_session();
382 case LTTNG_KERNEL_EVENT_NOTIFIER_GROUP_CREATE:
383 return lttng_abi_create_event_notifier_group();
384 case LTTNG_KERNEL_OLD_TRACER_VERSION:
385 {
386 struct lttng_kernel_tracer_version v;
387 struct lttng_kernel_old_tracer_version oldv;
388 struct lttng_kernel_old_tracer_version *uversion =
389 (struct lttng_kernel_old_tracer_version __user *) arg;
390
391 lttng_abi_tracer_version(&v);
392 oldv.major = v.major;
393 oldv.minor = v.minor;
394 oldv.patchlevel = v.patchlevel;
395
396 if (copy_to_user(uversion, &oldv, sizeof(oldv)))
397 return -EFAULT;
398 return 0;
399 }
400 case LTTNG_KERNEL_TRACER_VERSION:
401 {
402 struct lttng_kernel_tracer_version version;
403 struct lttng_kernel_tracer_version *uversion =
404 (struct lttng_kernel_tracer_version __user *) arg;
405
406 lttng_abi_tracer_version(&version);
407
408 if (copy_to_user(uversion, &version, sizeof(version)))
409 return -EFAULT;
410 return 0;
411 }
412 case LTTNG_KERNEL_TRACER_ABI_VERSION:
413 {
414 struct lttng_kernel_tracer_abi_version version;
415 struct lttng_kernel_tracer_abi_version *uversion =
416 (struct lttng_kernel_tracer_abi_version __user *) arg;
417
418 lttng_abi_tracer_abi_version(&version);
419
420 if (copy_to_user(uversion, &version, sizeof(version)))
421 return -EFAULT;
422 return 0;
423 }
424 case LTTNG_KERNEL_OLD_TRACEPOINT_LIST:
425 case LTTNG_KERNEL_TRACEPOINT_LIST:
426 return lttng_abi_tracepoint_list();
427 case LTTNG_KERNEL_SYSCALL_LIST:
428 return lttng_abi_syscall_list();
429 case LTTNG_KERNEL_OLD_WAIT_QUIESCENT:
430 case LTTNG_KERNEL_WAIT_QUIESCENT:
431 synchronize_trace();
432 return 0;
433 case LTTNG_KERNEL_OLD_CALIBRATE:
434 {
435 struct lttng_kernel_old_calibrate __user *ucalibrate =
436 (struct lttng_kernel_old_calibrate __user *) arg;
437 struct lttng_kernel_old_calibrate old_calibrate;
438 struct lttng_kernel_calibrate calibrate;
439 int ret;
440
441 if (copy_from_user(&old_calibrate, ucalibrate, sizeof(old_calibrate)))
442 return -EFAULT;
443 calibrate.type = old_calibrate.type;
444 ret = lttng_calibrate(&calibrate);
445 if (copy_to_user(ucalibrate, &old_calibrate, sizeof(old_calibrate)))
446 return -EFAULT;
447 return ret;
448 }
449 case LTTNG_KERNEL_CALIBRATE:
450 {
451 struct lttng_kernel_calibrate __user *ucalibrate =
452 (struct lttng_kernel_calibrate __user *) arg;
453 struct lttng_kernel_calibrate calibrate;
454 int ret;
455
456 if (copy_from_user(&calibrate, ucalibrate, sizeof(calibrate)))
457 return -EFAULT;
458 ret = lttng_calibrate(&calibrate);
459 if (copy_to_user(ucalibrate, &calibrate, sizeof(calibrate)))
460 return -EFAULT;
461 return ret;
462 }
463 default:
464 return -ENOIOCTLCMD;
465 }
466 }
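
/*
 * Illustrative user-space sketch of the control path implemented by
 * lttng_ioctl() above: open the /proc/lttng control file and create a
 * tracing session. Error handling is minimal, and the exact header to
 * include for the LTTNG_KERNEL_* definitions is an assumption (the ABI
 * is declared in lttng/abi.h within this tree).
 *
 *	int lttng_fd, session_fd;
 *
 *	lttng_fd = open("/proc/lttng", O_RDWR);
 *	if (lttng_fd < 0)
 *		return -errno;
 *	session_fd = ioctl(lttng_fd, LTTNG_KERNEL_SESSION);
 *	if (session_fd < 0)
 *		return -errno;
 *
 * The returned session_fd refers to a "[lttng_session]" anon inode;
 * closing it destroys the session (see lttng_session_release() below).
 */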
467
468 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0))
469 static const struct proc_ops lttng_proc_ops = {
470 .proc_ioctl = lttng_ioctl,
471 #ifdef CONFIG_COMPAT
472 .proc_compat_ioctl = lttng_ioctl,
473 #endif /* CONFIG_COMPAT */
474 };
475 #else
476 static const struct file_operations lttng_proc_ops = {
477 .owner = THIS_MODULE,
478 .unlocked_ioctl = lttng_ioctl,
479 #ifdef CONFIG_COMPAT
480 .compat_ioctl = lttng_ioctl,
481 #endif /* CONFIG_COMPAT */
482 };
483 #endif
484
485 static
486 int lttng_abi_create_channel(struct file *session_file,
487 struct lttng_kernel_channel *chan_param,
488 enum channel_type channel_type)
489 {
490 struct lttng_session *session = session_file->private_data;
491 const struct file_operations *fops = NULL;
492 const char *transport_name;
493 struct lttng_channel *chan;
494 struct file *chan_file;
495 int chan_fd;
496 int ret = 0;
497
498 chan_fd = lttng_get_unused_fd();
499 if (chan_fd < 0) {
500 ret = chan_fd;
501 goto fd_error;
502 }
503 switch (channel_type) {
504 case PER_CPU_CHANNEL:
505 fops = &lttng_channel_fops;
506 break;
507 case METADATA_CHANNEL:
508 fops = &lttng_metadata_fops;
509 break;
510 }
511
512 chan_file = anon_inode_getfile("[lttng_channel]",
513 fops,
514 NULL, O_RDWR);
515 if (IS_ERR(chan_file)) {
516 ret = PTR_ERR(chan_file);
517 goto file_error;
518 }
519 switch (channel_type) {
520 case PER_CPU_CHANNEL:
521 if (chan_param->output == LTTNG_KERNEL_SPLICE) {
522 transport_name = chan_param->overwrite ?
523 "relay-overwrite" : "relay-discard";
524 } else if (chan_param->output == LTTNG_KERNEL_MMAP) {
525 transport_name = chan_param->overwrite ?
526 "relay-overwrite-mmap" : "relay-discard-mmap";
527 } else {
528 return -EINVAL;
529 }
530 break;
531 case METADATA_CHANNEL:
532 if (chan_param->output == LTTNG_KERNEL_SPLICE)
533 transport_name = "relay-metadata";
534 else if (chan_param->output == LTTNG_KERNEL_MMAP)
535 transport_name = "relay-metadata-mmap";
536 else
537 return -EINVAL;
538 break;
539 default:
540 transport_name = "<unknown>";
541 break;
542 }
543 if (!atomic_long_add_unless(&session_file->f_count, 1, LONG_MAX)) {
544 ret = -EOVERFLOW;
545 goto refcount_error;
546 }
547 /*
548 * We tolerate no failure path after channel creation. It will stay
549 * invariant for the rest of the session.
550 */
551 chan = lttng_channel_create(session, transport_name, NULL,
552 chan_param->subbuf_size,
553 chan_param->num_subbuf,
554 chan_param->switch_timer_interval,
555 chan_param->read_timer_interval,
556 channel_type);
557 if (!chan) {
558 ret = -EINVAL;
559 goto chan_error;
560 }
561 chan->file = chan_file;
562 chan_file->private_data = chan;
563 fd_install(chan_fd, chan_file);
564
565 return chan_fd;
566
567 chan_error:
568 atomic_long_dec(&session_file->f_count);
569 refcount_error:
570 fput(chan_file);
571 file_error:
572 put_unused_fd(chan_fd);
573 fd_error:
574 return ret;
575 }
576
577 static
578 int lttng_abi_session_set_name(struct lttng_session *session,
579 struct lttng_kernel_session_name *name)
580 {
581 size_t len;
582
583 len = strnlen(name->name, LTTNG_KERNEL_SESSION_NAME_LEN);
584
585 if (len == LTTNG_KERNEL_SESSION_NAME_LEN) {
586 /* Name is too long/malformed */
587 return -EINVAL;
588 }
589
590 strcpy(session->name, name->name);
591 return 0;
592 }
593
594 static
595 int lttng_abi_session_set_creation_time(struct lttng_session *session,
596 struct lttng_kernel_session_creation_time *time)
597 {
598 size_t len;
599
600 len = strnlen(time->iso8601, LTTNG_KERNEL_SESSION_CREATION_TIME_ISO8601_LEN);
601
602 if (len == LTTNG_KERNEL_SESSION_CREATION_TIME_ISO8601_LEN) {
603 /* Time is too long/malformed */
604 return -EINVAL;
605 }
606
607 strcpy(session->creation_time, time->iso8601);
608 return 0;
609 }
610
611 static
612 int lttng_counter_release(struct inode *inode, struct file *file)
613 {
614 struct lttng_counter *counter = file->private_data;
615
616 if (counter) {
617 /*
 618 * Do not destroy the counter itself. Wait for the owner
619 * (event_notifier group) to be destroyed.
620 */
621 fput(counter->owner);
622 }
623
624 return 0;
625 }
626
627 static
628 long lttng_counter_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
629 {
630 struct lttng_counter *counter = file->private_data;
631 size_t indexes[LTTNG_KERNEL_COUNTER_DIMENSION_MAX] = { 0 };
632 int i;
633
634 switch (cmd) {
635 case LTTNG_KERNEL_COUNTER_READ:
636 {
637 struct lttng_kernel_counter_read local_counter_read;
638 struct lttng_kernel_counter_read __user *ucounter_read =
639 (struct lttng_kernel_counter_read __user *) arg;
640 bool overflow, underflow;
641 int64_t value;
642 int32_t cpu;
643 int ret;
644
645 if (copy_from_user(&local_counter_read, ucounter_read,
646 sizeof(local_counter_read)))
647 return -EFAULT;
648 if (validate_zeroed_padding(local_counter_read.padding,
649 sizeof(local_counter_read.padding)))
650 return -EINVAL;
651
652 /* Cast all indexes into size_t. */
653 for (i = 0; i < local_counter_read.index.number_dimensions; i++)
654 indexes[i] = (size_t) local_counter_read.index.dimension_indexes[i];
655 cpu = local_counter_read.cpu;
656
657 ret = lttng_kernel_counter_read(counter, indexes, cpu, &value,
658 &overflow, &underflow);
659 if (ret)
660 return ret;
661 local_counter_read.value.value = value;
662 local_counter_read.value.overflow = overflow;
663 local_counter_read.value.underflow = underflow;
664
665 if (copy_to_user(&ucounter_read->value, &local_counter_read.value,
666 sizeof(local_counter_read.value)))
667 return -EFAULT;
668
669 return 0;
670 }
671 case LTTNG_KERNEL_COUNTER_AGGREGATE:
672 {
673 struct lttng_kernel_counter_aggregate local_counter_aggregate;
674 struct lttng_kernel_counter_aggregate __user *ucounter_aggregate =
675 (struct lttng_kernel_counter_aggregate __user *) arg;
676 bool overflow, underflow;
677 int64_t value;
678 int ret;
679
680 if (copy_from_user(&local_counter_aggregate, ucounter_aggregate,
681 sizeof(local_counter_aggregate)))
682 return -EFAULT;
683 if (validate_zeroed_padding(local_counter_aggregate.padding,
684 sizeof(local_counter_aggregate.padding)))
685 return -EINVAL;
686
687 /* Cast all indexes into size_t. */
688 for (i = 0; i < local_counter_aggregate.index.number_dimensions; i++)
689 indexes[i] = (size_t) local_counter_aggregate.index.dimension_indexes[i];
690
691 ret = lttng_kernel_counter_aggregate(counter, indexes, &value,
692 &overflow, &underflow);
693 if (ret)
694 return ret;
695 local_counter_aggregate.value.value = value;
696 local_counter_aggregate.value.overflow = overflow;
697 local_counter_aggregate.value.underflow = underflow;
698
699 if (copy_to_user(&ucounter_aggregate->value, &local_counter_aggregate.value,
700 sizeof(local_counter_aggregate.value)))
701 return -EFAULT;
702
703 return 0;
704 }
705 case LTTNG_KERNEL_COUNTER_CLEAR:
706 {
707 struct lttng_kernel_counter_clear local_counter_clear;
708 struct lttng_kernel_counter_clear __user *ucounter_clear =
709 (struct lttng_kernel_counter_clear __user *) arg;
710
711 if (copy_from_user(&local_counter_clear, ucounter_clear,
712 sizeof(local_counter_clear)))
713 return -EFAULT;
714 if (validate_zeroed_padding(local_counter_clear.padding,
715 sizeof(local_counter_clear.padding)))
716 return -EINVAL;
717
718 /* Cast all indexes into size_t. */
719 for (i = 0; i < local_counter_clear.index.number_dimensions; i++)
720 indexes[i] = (size_t) local_counter_clear.index.dimension_indexes[i];
721
722 return lttng_kernel_counter_clear(counter, indexes);
723 }
724 default:
725 WARN_ON_ONCE(1);
726 return -ENOSYS;
727 }
728 }
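
/*
 * Illustrative user-space sketch for LTTNG_KERNEL_COUNTER_READ as
 * handled above, assuming a one-dimensional counter and a counter file
 * descriptor "counter_fd" obtained elsewhere. The padding must be
 * zeroed (see validate_zeroed_padding()).
 *
 *	struct lttng_kernel_counter_read read_arg;
 *
 *	memset(&read_arg, 0, sizeof(read_arg));
 *	read_arg.index.number_dimensions = 1;
 *	read_arg.index.dimension_indexes[0] = 42;	// example bucket
 *	read_arg.cpu = 0;
 *	if (ioctl(counter_fd, LTTNG_KERNEL_COUNTER_READ, &read_arg) < 0)
 *		perror("counter read");
 *	else
 *		printf("value=%lld overflow=%d underflow=%d\n",
 *			(long long) read_arg.value.value,
 *			read_arg.value.overflow, read_arg.value.underflow);
 */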
729
730 static const struct file_operations lttng_counter_fops = {
731 .owner = THIS_MODULE,
732 .release = lttng_counter_release,
733 .unlocked_ioctl = lttng_counter_ioctl,
734 #ifdef CONFIG_COMPAT
735 .compat_ioctl = lttng_counter_ioctl,
736 #endif
737 };
738
739
740 static
741 enum tracker_type get_tracker_type(struct lttng_kernel_tracker_args *tracker)
742 {
743 switch (tracker->type) {
744 case LTTNG_KERNEL_TRACKER_PID:
745 return TRACKER_PID;
746 case LTTNG_KERNEL_TRACKER_VPID:
747 return TRACKER_VPID;
748 case LTTNG_KERNEL_TRACKER_UID:
749 return TRACKER_UID;
750 case LTTNG_KERNEL_TRACKER_VUID:
751 return TRACKER_VUID;
752 case LTTNG_KERNEL_TRACKER_GID:
753 return TRACKER_GID;
754 case LTTNG_KERNEL_TRACKER_VGID:
755 return TRACKER_VGID;
756 default:
757 return TRACKER_UNKNOWN;
758 }
759 }
760
761 /**
762 * lttng_session_ioctl - lttng session fd ioctl
763 *
764 * @file: the file
765 * @cmd: the command
766 * @arg: command arg
767 *
768 * This ioctl implements lttng commands:
769 * LTTNG_KERNEL_CHANNEL
770 * Returns a LTTng channel file descriptor
771 * LTTNG_KERNEL_ENABLE
772 * Enables tracing for a session (weak enable)
773 * LTTNG_KERNEL_DISABLE
774 * Disables tracing for a session (strong disable)
775 * LTTNG_KERNEL_METADATA
776 * Returns a LTTng metadata file descriptor
777 * LTTNG_KERNEL_SESSION_TRACK_PID
778 * Add PID to session PID tracker
779 * LTTNG_KERNEL_SESSION_UNTRACK_PID
780 * Remove PID from session PID tracker
781 * LTTNG_KERNEL_SESSION_TRACK_ID
782 * Add ID to tracker
783 * LTTNG_KERNEL_SESSION_UNTRACK_ID
784 * Remove ID from tracker
785 *
786 * The returned channel will be deleted when its file descriptor is closed.
787 */
788 static
789 long lttng_session_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
790 {
791 struct lttng_session *session = file->private_data;
792 struct lttng_kernel_channel chan_param;
793 struct lttng_kernel_old_channel old_chan_param;
794
795 switch (cmd) {
796 case LTTNG_KERNEL_OLD_CHANNEL:
797 {
798 if (copy_from_user(&old_chan_param,
799 (struct lttng_kernel_old_channel __user *) arg,
800 sizeof(struct lttng_kernel_old_channel)))
801 return -EFAULT;
802 chan_param.overwrite = old_chan_param.overwrite;
803 chan_param.subbuf_size = old_chan_param.subbuf_size;
804 chan_param.num_subbuf = old_chan_param.num_subbuf;
805 chan_param.switch_timer_interval = old_chan_param.switch_timer_interval;
806 chan_param.read_timer_interval = old_chan_param.read_timer_interval;
807 chan_param.output = old_chan_param.output;
808
809 return lttng_abi_create_channel(file, &chan_param,
810 PER_CPU_CHANNEL);
811 }
812 case LTTNG_KERNEL_CHANNEL:
813 {
814 if (copy_from_user(&chan_param,
815 (struct lttng_kernel_channel __user *) arg,
816 sizeof(struct lttng_kernel_channel)))
817 return -EFAULT;
818 return lttng_abi_create_channel(file, &chan_param,
819 PER_CPU_CHANNEL);
820 }
821 case LTTNG_KERNEL_OLD_SESSION_START:
822 case LTTNG_KERNEL_OLD_ENABLE:
823 case LTTNG_KERNEL_SESSION_START:
824 case LTTNG_KERNEL_ENABLE:
825 return lttng_session_enable(session);
826 case LTTNG_KERNEL_OLD_SESSION_STOP:
827 case LTTNG_KERNEL_OLD_DISABLE:
828 case LTTNG_KERNEL_SESSION_STOP:
829 case LTTNG_KERNEL_DISABLE:
830 return lttng_session_disable(session);
831 case LTTNG_KERNEL_OLD_METADATA:
832 {
833 if (copy_from_user(&old_chan_param,
834 (struct lttng_kernel_old_channel __user *) arg,
835 sizeof(struct lttng_kernel_old_channel)))
836 return -EFAULT;
837 chan_param.overwrite = old_chan_param.overwrite;
838 chan_param.subbuf_size = old_chan_param.subbuf_size;
839 chan_param.num_subbuf = old_chan_param.num_subbuf;
840 chan_param.switch_timer_interval = old_chan_param.switch_timer_interval;
841 chan_param.read_timer_interval = old_chan_param.read_timer_interval;
842 chan_param.output = old_chan_param.output;
843
844 return lttng_abi_create_channel(file, &chan_param,
845 METADATA_CHANNEL);
846 }
847 case LTTNG_KERNEL_METADATA:
848 {
849 if (copy_from_user(&chan_param,
850 (struct lttng_kernel_channel __user *) arg,
851 sizeof(struct lttng_kernel_channel)))
852 return -EFAULT;
853 return lttng_abi_create_channel(file, &chan_param,
854 METADATA_CHANNEL);
855 }
856 case LTTNG_KERNEL_SESSION_TRACK_PID:
857 return lttng_session_track_id(session, TRACKER_PID, (int) arg);
858 case LTTNG_KERNEL_SESSION_UNTRACK_PID:
859 return lttng_session_untrack_id(session, TRACKER_PID, (int) arg);
860 case LTTNG_KERNEL_SESSION_TRACK_ID:
861 {
862 struct lttng_kernel_tracker_args tracker;
863 enum tracker_type tracker_type;
864
865 if (copy_from_user(&tracker,
866 (struct lttng_kernel_tracker_args __user *) arg,
867 sizeof(struct lttng_kernel_tracker_args)))
868 return -EFAULT;
869 tracker_type = get_tracker_type(&tracker);
870 if (tracker_type == TRACKER_UNKNOWN)
871 return -EINVAL;
872 return lttng_session_track_id(session, tracker_type, tracker.id);
873 }
874 case LTTNG_KERNEL_SESSION_UNTRACK_ID:
875 {
876 struct lttng_kernel_tracker_args tracker;
877 enum tracker_type tracker_type;
878
879 if (copy_from_user(&tracker,
880 (struct lttng_kernel_tracker_args __user *) arg,
881 sizeof(struct lttng_kernel_tracker_args)))
882 return -EFAULT;
883 tracker_type = get_tracker_type(&tracker);
884 if (tracker_type == TRACKER_UNKNOWN)
885 return -EINVAL;
886 return lttng_session_untrack_id(session, tracker_type,
887 tracker.id);
888 }
889 case LTTNG_KERNEL_SESSION_LIST_TRACKER_PIDS:
890 return lttng_session_list_tracker_ids(session, TRACKER_PID);
891 case LTTNG_KERNEL_SESSION_LIST_TRACKER_IDS:
892 {
893 struct lttng_kernel_tracker_args tracker;
894 enum tracker_type tracker_type;
895
896 if (copy_from_user(&tracker,
897 (struct lttng_kernel_tracker_args __user *) arg,
898 sizeof(struct lttng_kernel_tracker_args)))
899 return -EFAULT;
900 tracker_type = get_tracker_type(&tracker);
901 if (tracker_type == TRACKER_UNKNOWN)
902 return -EINVAL;
903 return lttng_session_list_tracker_ids(session, tracker_type);
904 }
905 case LTTNG_KERNEL_SESSION_METADATA_REGEN:
906 return lttng_session_metadata_regenerate(session);
907 case LTTNG_KERNEL_SESSION_STATEDUMP:
908 return lttng_session_statedump(session);
909 case LTTNG_KERNEL_SESSION_SET_NAME:
910 {
911 struct lttng_kernel_session_name name;
912
913 if (copy_from_user(&name,
914 (struct lttng_kernel_session_name __user *) arg,
915 sizeof(struct lttng_kernel_session_name)))
916 return -EFAULT;
917 return lttng_abi_session_set_name(session, &name);
918 }
919 case LTTNG_KERNEL_SESSION_SET_CREATION_TIME:
920 {
921 struct lttng_kernel_session_creation_time time;
922
923 if (copy_from_user(&time,
924 (struct lttng_kernel_session_creation_time __user *) arg,
925 sizeof(struct lttng_kernel_session_creation_time)))
926 return -EFAULT;
927 return lttng_abi_session_set_creation_time(session, &time);
928 }
929 default:
930 return -ENOIOCTLCMD;
931 }
932 }
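
/*
 * Illustrative user-space sketch for the session-level commands above:
 * create a per-CPU channel, then track a single PID. The sizes are
 * arbitrary example values; "session_fd" is assumed to come from the
 * LTTNG_KERNEL_SESSION command on /proc/lttng.
 *
 *	struct lttng_kernel_channel chan_param;
 *	int chan_fd;
 *
 *	memset(&chan_param, 0, sizeof(chan_param));
 *	chan_param.overwrite = 0;			// discard mode
 *	chan_param.subbuf_size = 262144;		// 256 kB sub-buffers
 *	chan_param.num_subbuf = 4;
 *	chan_param.switch_timer_interval = 0;
 *	chan_param.read_timer_interval = 200000;
 *	chan_param.output = LTTNG_KERNEL_MMAP;
 *	chan_fd = ioctl(session_fd, LTTNG_KERNEL_CHANNEL, &chan_param);
 *	if (chan_fd < 0)
 *		perror("create channel");
 *
 *	if (ioctl(session_fd, LTTNG_KERNEL_SESSION_TRACK_PID, 1234) < 0)
 *		perror("track pid");
 */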
933
934 /*
935 * Called when the last file reference is dropped.
936 *
937 * Big fat note: channels and events are invariant for the whole session after
938 * their creation. So this session destruction also destroys all channel and
939 * event structures specific to this session (they are not destroyed when their
940 * individual file is released).
941 */
942 static
943 int lttng_session_release(struct inode *inode, struct file *file)
944 {
945 struct lttng_session *session = file->private_data;
946
947 if (session)
948 lttng_session_destroy(session);
949 return 0;
950 }
951
952 static const struct file_operations lttng_session_fops = {
953 .owner = THIS_MODULE,
954 .release = lttng_session_release,
955 .unlocked_ioctl = lttng_session_ioctl,
956 #ifdef CONFIG_COMPAT
957 .compat_ioctl = lttng_session_ioctl,
958 #endif
959 };
960
961 /*
 962 * When encountering an empty buffer, flush the current sub-buffer if it is
 963 * non-empty and retry (if new data is available to read after the flush).
964 */
965 static
966 ssize_t lttng_event_notifier_group_notif_read(struct file *filp, char __user *user_buf,
967 size_t count, loff_t *ppos)
968 {
969 struct lttng_event_notifier_group *event_notifier_group = filp->private_data;
970 struct channel *chan = event_notifier_group->chan;
971 struct lib_ring_buffer *buf = event_notifier_group->buf;
972 ssize_t read_count = 0, len;
973 size_t read_offset;
974
975 might_sleep();
976 if (!lttng_access_ok(VERIFY_WRITE, user_buf, count))
977 return -EFAULT;
978
979 /* Finish copy of previous record */
980 if (*ppos != 0) {
981 if (read_count < count) {
982 len = chan->iter.len_left;
983 read_offset = *ppos;
984 goto skip_get_next;
985 }
986 }
987
988 while (read_count < count) {
989 size_t copy_len, space_left;
990
991 len = lib_ring_buffer_get_next_record(chan, buf);
992 len_test:
993 if (len < 0) {
994 /*
995 * Check if buffer is finalized (end of file).
996 */
997 if (len == -ENODATA) {
 998 /* A read_count of 0 tells the caller about end of file */
999 goto nodata;
1000 }
1001 if (filp->f_flags & O_NONBLOCK) {
1002 if (!read_count)
1003 read_count = -EAGAIN;
1004 goto nodata;
1005 } else {
1006 int error;
1007
1008 /*
1009 * No data available at the moment, return what
1010 * we got.
1011 */
1012 if (read_count)
1013 goto nodata;
1014
1015 /*
1016 * Wait for returned len to be >= 0 or -ENODATA.
1017 */
1018 error = wait_event_interruptible(
1019 event_notifier_group->read_wait,
1020 ((len = lib_ring_buffer_get_next_record(
1021 chan, buf)), len != -EAGAIN));
1022 CHAN_WARN_ON(chan, len == -EBUSY);
1023 if (error) {
1024 read_count = error;
1025 goto nodata;
1026 }
1027 CHAN_WARN_ON(chan, len < 0 && len != -ENODATA);
1028 goto len_test;
1029 }
1030 }
1031 read_offset = buf->iter.read_offset;
1032 skip_get_next:
1033 space_left = count - read_count;
1034 if (len <= space_left) {
1035 copy_len = len;
1036 chan->iter.len_left = 0;
1037 *ppos = 0;
1038 } else {
1039 copy_len = space_left;
1040 chan->iter.len_left = len - copy_len;
1041 *ppos = read_offset + copy_len;
1042 }
1043 if (__lib_ring_buffer_copy_to_user(&buf->backend, read_offset,
1044 &user_buf[read_count],
1045 copy_len)) {
1046 /*
1047 * Leave the len_left and ppos values at their current
1048 * state, as we currently have a valid event to read.
1049 */
1050 return -EFAULT;
1051 }
1052 read_count += copy_len;
1053 }
1054 goto put_record;
1055
1056 nodata:
1057 *ppos = 0;
1058 chan->iter.len_left = 0;
1059
1060 put_record:
1061 lib_ring_buffer_put_current_record(buf);
1062 return read_count;
1063 }
1064
1065 /*
 1066 * If the ring buffer is non-empty (even just a partial subbuffer), return that
 1067 * there is data available. Perform a ring buffer flush if we encounter a
 1068 * non-empty ring buffer which does not have any consumable subbuffer available.
1069 */
1070 static
1071 unsigned int lttng_event_notifier_group_notif_poll(struct file *filp,
1072 poll_table *wait)
1073 {
1074 unsigned int mask = 0;
1075 struct lttng_event_notifier_group *event_notifier_group = filp->private_data;
1076 struct channel *chan = event_notifier_group->chan;
1077 struct lib_ring_buffer *buf = event_notifier_group->buf;
1078 const struct lib_ring_buffer_config *config = &chan->backend.config;
1079 int finalized, disabled;
1080 unsigned long consumed, offset;
1081 size_t subbuffer_header_size = config->cb.subbuffer_header_size();
1082
1083 if (filp->f_mode & FMODE_READ) {
1084 poll_wait_set_exclusive(wait);
1085 poll_wait(filp, &event_notifier_group->read_wait, wait);
1086
1087 finalized = lib_ring_buffer_is_finalized(config, buf);
1088 disabled = lib_ring_buffer_channel_is_disabled(chan);
1089
1090 /*
1091 * lib_ring_buffer_is_finalized() contains a smp_rmb() ordering
1092 * finalized load before offsets loads.
1093 */
1094 WARN_ON(atomic_long_read(&buf->active_readers) != 1);
1095 retry:
1096 if (disabled)
1097 return POLLERR;
1098
1099 offset = lib_ring_buffer_get_offset(config, buf);
1100 consumed = lib_ring_buffer_get_consumed(config, buf);
1101
1102 /*
1103 * If there is no buffer available to consume.
1104 */
1105 if (subbuf_trunc(offset, chan) - subbuf_trunc(consumed, chan) == 0) {
1106 /*
1107 * If there is a non-empty subbuffer, flush and try again.
1108 */
1109 if (subbuf_offset(offset, chan) > subbuffer_header_size) {
1110 lib_ring_buffer_switch_remote(buf);
1111 goto retry;
1112 }
1113
1114 if (finalized)
1115 return POLLHUP;
1116 else {
1117 /*
1118 * The memory barriers
1119 * __wait_event()/wake_up_interruptible() take
1120 * care of "raw_spin_is_locked" memory ordering.
1121 */
1122 if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
1123 goto retry;
1124 else
1125 return 0;
1126 }
1127 } else {
1128 if (subbuf_trunc(offset, chan) - subbuf_trunc(consumed, chan)
1129 >= chan->backend.buf_size)
1130 return POLLPRI | POLLRDBAND;
1131 else
1132 return POLLIN | POLLRDNORM;
1133 }
1134 }
1135
1136 return mask;
1137 }
1138
1139 /**
1140 * lttng_event_notifier_group_notif_open - event_notifier ring buffer open file operation
1141 * @inode: opened inode
1142 * @file: opened file
1143 *
 1144 * Open implementation. Makes sure only one open instance of a buffer
 1145 * exists at a given moment.
1146 */
1147 static int lttng_event_notifier_group_notif_open(struct inode *inode, struct file *file)
1148 {
1149 struct lttng_event_notifier_group *event_notifier_group = inode->i_private;
1150 struct lib_ring_buffer *buf = event_notifier_group->buf;
1151
1152 file->private_data = event_notifier_group;
1153 return lib_ring_buffer_open(inode, file, buf);
1154 }
1155
1156 /**
1157 * lttng_event_notifier_group_notif_release - event_notifier ring buffer release file operation
1158 * @inode: opened inode
1159 * @file: opened file
1160 *
1161 * Release implementation.
1162 */
1163 static int lttng_event_notifier_group_notif_release(struct inode *inode, struct file *file)
1164 {
1165 struct lttng_event_notifier_group *event_notifier_group = file->private_data;
1166 struct lib_ring_buffer *buf = event_notifier_group->buf;
1167 int ret;
1168
1169 ret = lib_ring_buffer_release(inode, file, buf);
1170 if (ret)
1171 return ret;
1172 fput(event_notifier_group->file);
1173 return 0;
1174 }
1175
1176 static const struct file_operations lttng_event_notifier_group_notif_fops = {
1177 .owner = THIS_MODULE,
1178 .open = lttng_event_notifier_group_notif_open,
1179 .release = lttng_event_notifier_group_notif_release,
1180 .read = lttng_event_notifier_group_notif_read,
1181 .poll = lttng_event_notifier_group_notif_poll,
1182 };
1183
1184 /**
1185 * lttng_metadata_ring_buffer_poll - LTTng ring buffer poll file operation
1186 * @filp: the file
1187 * @wait: poll table
1188 *
1189 * Handles the poll operations for the metadata channels.
1190 */
1191 static
1192 unsigned int lttng_metadata_ring_buffer_poll(struct file *filp,
1193 poll_table *wait)
1194 {
1195 struct lttng_metadata_stream *stream = filp->private_data;
1196 struct lib_ring_buffer *buf = stream->priv;
1197 int finalized;
1198 unsigned int mask = 0;
1199
1200 if (filp->f_mode & FMODE_READ) {
1201 poll_wait_set_exclusive(wait);
1202 poll_wait(filp, &stream->read_wait, wait);
1203
1204 finalized = stream->finalized;
1205
1206 /*
1207 * lib_ring_buffer_is_finalized() contains a smp_rmb()
1208 * ordering finalized load before offsets loads.
1209 */
1210 WARN_ON(atomic_long_read(&buf->active_readers) != 1);
1211
1212 if (finalized)
1213 mask |= POLLHUP;
1214
1215 mutex_lock(&stream->metadata_cache->lock);
1216 if (stream->metadata_cache->metadata_written >
1217 stream->metadata_out)
1218 mask |= POLLIN;
1219 mutex_unlock(&stream->metadata_cache->lock);
1220 }
1221
1222 return mask;
1223 }
1224
1225 static
1226 void lttng_metadata_ring_buffer_ioctl_put_next_subbuf(struct file *filp,
1227 unsigned int cmd, unsigned long arg)
1228 {
1229 struct lttng_metadata_stream *stream = filp->private_data;
1230
1231 stream->metadata_out = stream->metadata_in;
1232 }
1233
1234 /*
1235 * Reset the counter of how much metadata has been consumed to 0. That way,
1236 * the consumer receives the content of the metadata cache unchanged. This is
 1237 * different from metadata_regenerate, where the offset from epoch is
 1238 * resampled; here we want the exact same content as the last time the metadata
 1239 * was generated. This command is only possible if all the metadata written
1240 * in the cache has been output to the metadata stream to avoid corrupting the
1241 * metadata file.
1242 *
1243 * Return 0 on success, a negative value on error.
1244 */
1245 static
1246 int lttng_metadata_cache_dump(struct lttng_metadata_stream *stream)
1247 {
1248 int ret;
1249 struct lttng_metadata_cache *cache = stream->metadata_cache;
1250
1251 mutex_lock(&cache->lock);
1252 if (stream->metadata_out != cache->metadata_written) {
1253 ret = -EBUSY;
1254 goto end;
1255 }
1256 stream->metadata_out = 0;
1257 stream->metadata_in = 0;
1258 wake_up_interruptible(&stream->read_wait);
1259 ret = 0;
1260
1261 end:
1262 mutex_unlock(&cache->lock);
1263 return ret;
1264 }
1265
1266 static
1267 long lttng_metadata_ring_buffer_ioctl(struct file *filp,
1268 unsigned int cmd, unsigned long arg)
1269 {
1270 int ret;
1271 struct lttng_metadata_stream *stream = filp->private_data;
1272 struct lib_ring_buffer *buf = stream->priv;
1273 unsigned int rb_cmd;
1274 bool coherent;
1275
1276 if (cmd == RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK)
1277 rb_cmd = RING_BUFFER_GET_NEXT_SUBBUF;
1278 else
1279 rb_cmd = cmd;
1280
1281 switch (cmd) {
1282 case RING_BUFFER_GET_NEXT_SUBBUF:
1283 {
1284 struct lttng_metadata_stream *stream = filp->private_data;
1285 struct lib_ring_buffer *buf = stream->priv;
1286 struct channel *chan = buf->backend.chan;
1287
1288 ret = lttng_metadata_output_channel(stream, chan, NULL);
1289 if (ret > 0) {
1290 lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
1291 ret = 0;
1292 } else if (ret < 0)
1293 goto err;
1294 break;
1295 }
1296 case RING_BUFFER_GET_SUBBUF:
1297 {
1298 /*
1299 * Random access is not allowed for metadata channel.
1300 */
1301 return -ENOSYS;
1302 }
1303 case RING_BUFFER_FLUSH_EMPTY: /* Fall-through. */
1304 case RING_BUFFER_FLUSH:
1305 {
1306 struct lttng_metadata_stream *stream = filp->private_data;
1307 struct lib_ring_buffer *buf = stream->priv;
1308 struct channel *chan = buf->backend.chan;
1309
1310 /*
1311 * Before doing the actual ring buffer flush, write up to one
1312 * packet of metadata in the ring buffer.
1313 */
1314 ret = lttng_metadata_output_channel(stream, chan, NULL);
1315 if (ret < 0)
1316 goto err;
1317 break;
1318 }
1319 case RING_BUFFER_GET_METADATA_VERSION:
1320 {
1321 struct lttng_metadata_stream *stream = filp->private_data;
1322
1323 return put_u64(stream->version, arg);
1324 }
1325 case RING_BUFFER_METADATA_CACHE_DUMP:
1326 {
1327 struct lttng_metadata_stream *stream = filp->private_data;
1328
1329 return lttng_metadata_cache_dump(stream);
1330 }
1331 case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
1332 {
1333 struct lttng_metadata_stream *stream = filp->private_data;
1334 struct lib_ring_buffer *buf = stream->priv;
1335 struct channel *chan = buf->backend.chan;
1336
1337 ret = lttng_metadata_output_channel(stream, chan, &coherent);
1338 if (ret > 0) {
1339 lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
1340 ret = 0;
1341 } else if (ret < 0) {
1342 goto err;
1343 }
1344 break;
1345 }
1346 default:
1347 break;
1348 }
1349 /* PUT_SUBBUF is the one from lib ring buffer, unmodified. */
1350
1351 /* Performing lib ring buffer ioctl after our own. */
1352 ret = lib_ring_buffer_ioctl(filp, rb_cmd, arg, buf);
1353 if (ret < 0)
1354 goto err;
1355
1356 switch (cmd) {
1357 case RING_BUFFER_PUT_NEXT_SUBBUF:
1358 {
1359 lttng_metadata_ring_buffer_ioctl_put_next_subbuf(filp,
1360 cmd, arg);
1361 break;
1362 }
1363 case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
1364 {
1365 return put_u32(coherent, arg);
1366 }
1367 default:
1368 break;
1369 }
1370 err:
1371 return ret;
1372 }
1373
1374 #ifdef CONFIG_COMPAT
1375 static
1376 long lttng_metadata_ring_buffer_compat_ioctl(struct file *filp,
1377 unsigned int cmd, unsigned long arg)
1378 {
1379 int ret;
1380 struct lttng_metadata_stream *stream = filp->private_data;
1381 struct lib_ring_buffer *buf = stream->priv;
1382 unsigned int rb_cmd;
1383 bool coherent;
1384
1385 if (cmd == RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK)
1386 rb_cmd = RING_BUFFER_GET_NEXT_SUBBUF;
1387 else
1388 rb_cmd = cmd;
1389
1390 switch (cmd) {
1391 case RING_BUFFER_GET_NEXT_SUBBUF:
1392 {
1393 struct lttng_metadata_stream *stream = filp->private_data;
1394 struct lib_ring_buffer *buf = stream->priv;
1395 struct channel *chan = buf->backend.chan;
1396
1397 ret = lttng_metadata_output_channel(stream, chan, NULL);
1398 if (ret > 0) {
1399 lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
1400 ret = 0;
1401 } else if (ret < 0)
1402 goto err;
1403 break;
1404 }
1405 case RING_BUFFER_GET_SUBBUF:
1406 {
1407 /*
1408 * Random access is not allowed for metadata channel.
1409 */
1410 return -ENOSYS;
1411 }
1412 case RING_BUFFER_FLUSH_EMPTY: /* Fall-through. */
1413 case RING_BUFFER_FLUSH:
1414 {
1415 struct lttng_metadata_stream *stream = filp->private_data;
1416 struct lib_ring_buffer *buf = stream->priv;
1417 struct channel *chan = buf->backend.chan;
1418
1419 /*
1420 * Before doing the actual ring buffer flush, write up to one
1421 * packet of metadata in the ring buffer.
1422 */
1423 ret = lttng_metadata_output_channel(stream, chan, NULL);
1424 if (ret < 0)
1425 goto err;
1426 break;
1427 }
1428 case RING_BUFFER_GET_METADATA_VERSION:
1429 {
1430 struct lttng_metadata_stream *stream = filp->private_data;
1431
1432 return put_u64(stream->version, arg);
1433 }
1434 case RING_BUFFER_METADATA_CACHE_DUMP:
1435 {
1436 struct lttng_metadata_stream *stream = filp->private_data;
1437
1438 return lttng_metadata_cache_dump(stream);
1439 }
1440 case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
1441 {
1442 struct lttng_metadata_stream *stream = filp->private_data;
1443 struct lib_ring_buffer *buf = stream->priv;
1444 struct channel *chan = buf->backend.chan;
1445
1446 ret = lttng_metadata_output_channel(stream, chan, &coherent);
1447 if (ret > 0) {
1448 lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
1449 ret = 0;
1450 } else if (ret < 0) {
1451 goto err;
1452 }
1453 break;
1454 }
1455 default:
1456 break;
1457 }
1458 /* PUT_SUBBUF is the one from lib ring buffer, unmodified. */
1459
1460 /* Performing lib ring buffer ioctl after our own. */
1461 ret = lib_ring_buffer_compat_ioctl(filp, rb_cmd, arg, buf);
1462 if (ret < 0)
1463 goto err;
1464
1465 switch (cmd) {
1466 case RING_BUFFER_PUT_NEXT_SUBBUF:
1467 {
1468 lttng_metadata_ring_buffer_ioctl_put_next_subbuf(filp,
1469 cmd, arg);
1470 break;
1471 }
1472 case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
1473 {
1474 return put_u32(coherent, arg);
1475 }
1476 default:
1477 break;
1478 }
1479 err:
1480 return ret;
1481 }
1482 #endif
1483
1484 /*
 1485 * This is not used by anonymous file descriptors. This code is left
 1486 * here in case we ever want to implement an inode with an open() operation.
1487 */
1488 static
1489 int lttng_metadata_ring_buffer_open(struct inode *inode, struct file *file)
1490 {
1491 struct lttng_metadata_stream *stream = inode->i_private;
1492 struct lib_ring_buffer *buf = stream->priv;
1493
1494 file->private_data = buf;
1495 /*
 1496 * Since the lifetime of the metadata cache differs from that of the
 1497 * session, we need to keep our own reference on the transport.
1498 */
1499 if (!try_module_get(stream->transport->owner)) {
1500 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
1501 return -EBUSY;
1502 }
1503 return lib_ring_buffer_open(inode, file, buf);
1504 }
1505
1506 static
1507 int lttng_metadata_ring_buffer_release(struct inode *inode, struct file *file)
1508 {
1509 struct lttng_metadata_stream *stream = file->private_data;
1510 struct lib_ring_buffer *buf = stream->priv;
1511
1512 mutex_lock(&stream->metadata_cache->lock);
1513 list_del(&stream->list);
1514 mutex_unlock(&stream->metadata_cache->lock);
1515 kref_put(&stream->metadata_cache->refcount, metadata_cache_destroy);
1516 module_put(stream->transport->owner);
1517 kfree(stream);
1518 return lib_ring_buffer_release(inode, file, buf);
1519 }
1520
1521 static
1522 ssize_t lttng_metadata_ring_buffer_splice_read(struct file *in, loff_t *ppos,
1523 struct pipe_inode_info *pipe, size_t len,
1524 unsigned int flags)
1525 {
1526 struct lttng_metadata_stream *stream = in->private_data;
1527 struct lib_ring_buffer *buf = stream->priv;
1528
1529 return lib_ring_buffer_splice_read(in, ppos, pipe, len,
1530 flags, buf);
1531 }
1532
1533 static
1534 int lttng_metadata_ring_buffer_mmap(struct file *filp,
1535 struct vm_area_struct *vma)
1536 {
1537 struct lttng_metadata_stream *stream = filp->private_data;
1538 struct lib_ring_buffer *buf = stream->priv;
1539
1540 return lib_ring_buffer_mmap(filp, vma, buf);
1541 }
1542
1543 static
1544 const struct file_operations lttng_metadata_ring_buffer_file_operations = {
1545 .owner = THIS_MODULE,
1546 .open = lttng_metadata_ring_buffer_open,
1547 .release = lttng_metadata_ring_buffer_release,
1548 .poll = lttng_metadata_ring_buffer_poll,
1549 .splice_read = lttng_metadata_ring_buffer_splice_read,
1550 .mmap = lttng_metadata_ring_buffer_mmap,
1551 .unlocked_ioctl = lttng_metadata_ring_buffer_ioctl,
1552 .llseek = vfs_lib_ring_buffer_no_llseek,
1553 #ifdef CONFIG_COMPAT
1554 .compat_ioctl = lttng_metadata_ring_buffer_compat_ioctl,
1555 #endif
1556 };
1557
1558 static
1559 int lttng_abi_create_stream_fd(struct file *channel_file, void *stream_priv,
1560 const struct file_operations *fops, const char *name)
1561 {
1562 int stream_fd, ret;
1563 struct file *stream_file;
1564
1565 stream_fd = lttng_get_unused_fd();
1566 if (stream_fd < 0) {
1567 ret = stream_fd;
1568 goto fd_error;
1569 }
1570 stream_file = anon_inode_getfile(name, fops, stream_priv, O_RDWR);
1571 if (IS_ERR(stream_file)) {
1572 ret = PTR_ERR(stream_file);
1573 goto file_error;
1574 }
1575 /*
 1576 * OPEN_FMODE, called within anon_inode_getfile/alloc_file, doesn't honor
1577 * FMODE_LSEEK, FMODE_PREAD nor FMODE_PWRITE. We need to read from this
1578 * file descriptor, so we set FMODE_PREAD here.
1579 */
1580 stream_file->f_mode |= FMODE_PREAD;
1581 fd_install(stream_fd, stream_file);
1582 /*
1583 * The stream holds a reference to the channel within the generic ring
1584 * buffer library, so no need to hold a refcount on the channel and
1585 * session files here.
1586 */
1587 return stream_fd;
1588
1589 file_error:
1590 put_unused_fd(stream_fd);
1591 fd_error:
1592 return ret;
1593 }
1594
1595 static
1596 int lttng_abi_open_stream(struct file *channel_file)
1597 {
1598 struct lttng_channel *channel = channel_file->private_data;
1599 struct lib_ring_buffer *buf;
1600 int ret;
1601 void *stream_priv;
1602
1603 buf = channel->ops->buffer_read_open(channel->chan);
1604 if (!buf)
1605 return -ENOENT;
1606
1607 stream_priv = buf;
1608 ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
1609 &lttng_stream_ring_buffer_file_operations,
1610 "[lttng_stream]");
1611 if (ret < 0)
1612 goto fd_error;
1613
1614 return ret;
1615
1616 fd_error:
1617 channel->ops->buffer_read_close(buf);
1618 return ret;
1619 }
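
/*
 * Illustrative user-space sketch: obtaining a stream file descriptor
 * from a channel. LTTNG_KERNEL_STREAM is the channel-level ioctl
 * assumed to dispatch to lttng_abi_open_stream() above; "chan_fd" is a
 * channel file descriptor. When no buffer is available to open, the
 * request fails with ENOENT (see the check on buffer_read_open()).
 *
 *	int stream_fd;
 *
 *	stream_fd = ioctl(chan_fd, LTTNG_KERNEL_STREAM);
 *	if (stream_fd < 0)
 *		perror("open stream");
 */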
1620
1621 static
1622 int lttng_abi_open_metadata_stream(struct file *channel_file)
1623 {
1624 struct lttng_channel *channel = channel_file->private_data;
1625 struct lttng_session *session = channel->session;
1626 struct lib_ring_buffer *buf;
1627 int ret;
1628 struct lttng_metadata_stream *metadata_stream;
1629 void *stream_priv;
1630
1631 buf = channel->ops->buffer_read_open(channel->chan);
1632 if (!buf)
1633 return -ENOENT;
1634
1635 metadata_stream = kzalloc(sizeof(struct lttng_metadata_stream),
1636 GFP_KERNEL);
1637 if (!metadata_stream) {
1638 ret = -ENOMEM;
1639 goto nomem;
1640 }
1641 metadata_stream->metadata_cache = session->metadata_cache;
1642 init_waitqueue_head(&metadata_stream->read_wait);
1643 metadata_stream->priv = buf;
1644 stream_priv = metadata_stream;
1645 metadata_stream->transport = channel->transport;
 1646 /* The initial state is empty metadata, considered incoherent. */
1647 metadata_stream->coherent = false;
1648
1649 /*
 1650 * Since the lifetime of the metadata cache differs from that of the
 1651 * session, we need to keep our own reference on the transport.
1652 */
1653 if (!try_module_get(metadata_stream->transport->owner)) {
1654 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
1655 ret = -EINVAL;
1656 goto notransport;
1657 }
1658
1659 if (!lttng_kref_get(&session->metadata_cache->refcount)) {
1660 ret = -EOVERFLOW;
1661 goto kref_error;
1662 }
1663
1664 ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
1665 &lttng_metadata_ring_buffer_file_operations,
1666 "[lttng_metadata_stream]");
1667 if (ret < 0)
1668 goto fd_error;
1669
1670 mutex_lock(&session->metadata_cache->lock);
1671 list_add(&metadata_stream->list,
1672 &session->metadata_cache->metadata_stream);
1673 mutex_unlock(&session->metadata_cache->lock);
1674 return ret;
1675
1676 fd_error:
1677 kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
1678 kref_error:
1679 module_put(metadata_stream->transport->owner);
1680 notransport:
1681 kfree(metadata_stream);
1682 nomem:
1683 channel->ops->buffer_read_close(buf);
1684 return ret;
1685 }
1686
1687 static
1688 int lttng_abi_open_event_notifier_group_stream(struct file *notif_file)
1689 {
1690 struct lttng_event_notifier_group *event_notifier_group = notif_file->private_data;
1691 struct channel *chan = event_notifier_group->chan;
1692 struct lib_ring_buffer *buf;
1693 int ret;
1694 void *stream_priv;
1695
1696 buf = event_notifier_group->ops->buffer_read_open(chan);
1697 if (!buf)
1698 return -ENOENT;
1699
1700 /* The event_notifier notification fd holds a reference on the event_notifier group */
1701 if (!atomic_long_add_unless(&notif_file->f_count, 1, LONG_MAX)) {
1702 ret = -EOVERFLOW;
1703 goto refcount_error;
1704 }
1705 event_notifier_group->buf = buf;
1706 stream_priv = event_notifier_group;
1707 ret = lttng_abi_create_stream_fd(notif_file, stream_priv,
1708 &lttng_event_notifier_group_notif_fops,
1709 "[lttng_event_notifier_stream]");
1710 if (ret < 0)
1711 goto fd_error;
1712
1713 return ret;
1714
1715 fd_error:
1716 atomic_long_dec(&notif_file->f_count);
1717 refcount_error:
1718 event_notifier_group->ops->buffer_read_close(buf);
1719 return ret;
1720 }
1721
1722 static
1723 int lttng_abi_validate_event_param(struct lttng_kernel_event *event_param)
1724 {
1725 /* Limit ABI to implemented features. */
1726 switch (event_param->instrumentation) {
1727 case LTTNG_KERNEL_SYSCALL:
1728 switch (event_param->u.syscall.entryexit) {
1729 case LTTNG_KERNEL_SYSCALL_ENTRY: /* Fall-through */
1730 case LTTNG_KERNEL_SYSCALL_EXIT: /* Fall-through */
1731 case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
1732 break;
1733 default:
1734 return -EINVAL;
1735 }
1736 switch (event_param->u.syscall.abi) {
1737 case LTTNG_KERNEL_SYSCALL_ABI_ALL:
1738 break;
1739 default:
1740 return -EINVAL;
1741 }
1742 switch (event_param->u.syscall.match) {
1743 case LTTNG_KERNEL_SYSCALL_MATCH_NAME:
1744 break;
1745 default:
1746 return -EINVAL;
1747 }
1748 break;
1749
1750 case LTTNG_KERNEL_KRETPROBE:
1751 switch (event_param->u.kretprobe.entryexit) {
1752 case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
1753 break;
1754 case LTTNG_KERNEL_SYSCALL_ENTRY: /* Fall-through */
1755 case LTTNG_KERNEL_SYSCALL_EXIT: /* Fall-through */
1756 default:
1757 return -EINVAL;
1758 }
1759 break;
1760
1761 case LTTNG_KERNEL_TRACEPOINT: /* Fall-through */
1762 case LTTNG_KERNEL_KPROBE: /* Fall-through */
1763 case LTTNG_KERNEL_UPROBE:
1764 break;
1765
1766 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1767 case LTTNG_KERNEL_NOOP: /* Fall-through */
1768 default:
1769 return -EINVAL;
1770 }
1771 return 0;
1772 }
1773
1774 static
1775 int lttng_abi_create_event(struct file *channel_file,
1776 struct lttng_kernel_event *event_param)
1777 {
1778 struct lttng_channel *channel = channel_file->private_data;
1779 int event_fd, ret;
1780 struct file *event_file;
1781 void *priv;
1782
1783 event_param->name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
1784 switch (event_param->instrumentation) {
1785 case LTTNG_KERNEL_KRETPROBE:
1786 event_param->u.kretprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
1787 break;
1788 case LTTNG_KERNEL_KPROBE:
1789 event_param->u.kprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
1790 break;
1791 case LTTNG_KERNEL_FUNCTION:
1792 WARN_ON_ONCE(1);
1793 /* Not implemented. */
1794 break;
1795 default:
1796 break;
1797 }
1798 event_fd = lttng_get_unused_fd();
1799 if (event_fd < 0) {
1800 ret = event_fd;
1801 goto fd_error;
1802 }
1803 event_file = anon_inode_getfile("[lttng_event]",
1804 &lttng_event_fops,
1805 NULL, O_RDWR);
1806 if (IS_ERR(event_file)) {
1807 ret = PTR_ERR(event_file);
1808 goto file_error;
1809 }
1810 /* The event holds a reference on the channel */
1811 if (!atomic_long_add_unless(&channel_file->f_count, 1, LONG_MAX)) {
1812 ret = -EOVERFLOW;
1813 goto refcount_error;
1814 }
1815 ret = lttng_abi_validate_event_param(event_param);
1816 if (ret)
1817 goto event_error;
1818
1819 switch (event_param->instrumentation) {
1820 case LTTNG_KERNEL_TRACEPOINT: /* Fall-through */
1821 case LTTNG_KERNEL_SYSCALL:
1822 {
1823 struct lttng_event_enabler *event_enabler;
1824
1825 if (strutils_is_star_glob_pattern(event_param->name)) {
1826 /*
1827 * If the event name is a star globbing pattern,
1828 * we create the special star globbing enabler.
1829 */
1830 event_enabler = lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_STAR_GLOB,
1831 event_param, channel);
1832 } else {
1833 event_enabler = lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_NAME,
1834 event_param, channel);
1835 }
1836 priv = event_enabler;
1837 break;
1838 }
1839
1840 case LTTNG_KERNEL_KPROBE: /* Fall-through */
1841 case LTTNG_KERNEL_KRETPROBE: /* Fall-through */
1842 case LTTNG_KERNEL_UPROBE:
1843 {
1844 struct lttng_event *event;
1845
1846 /*
1847 * We tolerate no failure path after event creation. It
1848 * will stay invariant for the rest of the session.
1849 */
1850 event = lttng_event_create(channel, event_param,
1851 NULL, NULL,
1852 event_param->instrumentation);
1853 WARN_ON_ONCE(!event);
1854 if (IS_ERR(event)) {
1855 ret = PTR_ERR(event);
1856 goto event_error;
1857 }
1858 priv = event;
1859 break;
1860 }
1861
1862 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
1863 case LTTNG_KERNEL_NOOP: /* Fall-through */
1864 default:
1865 ret = -EINVAL;
1866 goto event_error;
1867 }
1868 event_file->private_data = priv;
1869 fd_install(event_fd, event_file);
1870 return event_fd;
1871
1872 event_error:
1873 atomic_long_dec(&channel_file->f_count);
1874 refcount_error:
1875 fput(event_file);
1876 file_error:
1877 put_unused_fd(event_fd);
1878 fd_error:
1879 return ret;
1880 }
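
/*
 * Illustrative user-space sketch for event creation as handled above:
 * enable every "sched_*" tracepoint on a channel with a star-glob name.
 * LTTNG_KERNEL_EVENT is the channel-level ioctl assumed to route here,
 * and "chan_fd" is a channel file descriptor.
 *
 *	struct lttng_kernel_event ev;
 *	int event_fd;
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.instrumentation = LTTNG_KERNEL_TRACEPOINT;
 *	strncpy(ev.name, "sched_*", LTTNG_KERNEL_SYM_NAME_LEN - 1);
 *	event_fd = ioctl(chan_fd, LTTNG_KERNEL_EVENT, &ev);
 *	if (event_fd < 0)
 *		perror("create event");
 *
 * Because "sched_*" is a star-glob pattern, the code above creates a
 * star-glob enabler rather than a single named event enabler.
 */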
1881
1882 static
1883 long lttng_event_notifier_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1884 {
1885 struct lttng_event_notifier *event_notifier;
1886 struct lttng_event_notifier_enabler *event_notifier_enabler;
1887 enum lttng_event_type *evtype = file->private_data;
1888
1889 switch (cmd) {
1890 case LTTNG_KERNEL_ENABLE:
1891 switch (*evtype) {
1892 case LTTNG_TYPE_EVENT:
1893 event_notifier = file->private_data;
1894 return lttng_event_notifier_enable(event_notifier);
1895 case LTTNG_TYPE_ENABLER:
1896 event_notifier_enabler = file->private_data;
1897 return lttng_event_notifier_enabler_enable(event_notifier_enabler);
1898 default:
1899 WARN_ON_ONCE(1);
1900 return -ENOSYS;
1901 }
1902 case LTTNG_KERNEL_DISABLE:
1903 switch (*evtype) {
1904 case LTTNG_TYPE_EVENT:
1905 event_notifier = file->private_data;
1906 return lttng_event_notifier_disable(event_notifier);
1907 case LTTNG_TYPE_ENABLER:
1908 event_notifier_enabler = file->private_data;
1909 return lttng_event_notifier_enabler_disable(event_notifier_enabler);
1910 default:
1911 WARN_ON_ONCE(1);
1912 return -ENOSYS;
1913 }
1914 case LTTNG_KERNEL_FILTER:
1915 switch (*evtype) {
1916 case LTTNG_TYPE_EVENT:
1917 return -EINVAL;
1918 case LTTNG_TYPE_ENABLER:
1919 event_notifier_enabler = file->private_data;
1920 return lttng_event_notifier_enabler_attach_filter_bytecode(
1921 event_notifier_enabler,
1922 (struct lttng_kernel_filter_bytecode __user *) arg);
1923 default:
1924 WARN_ON_ONCE(1);
1925 return -ENOSYS;
1926 }
1927
1928 case LTTNG_KERNEL_CAPTURE:
1929 switch (*evtype) {
1930 case LTTNG_TYPE_EVENT:
1931 return -EINVAL;
1932 case LTTNG_TYPE_ENABLER:
1933 event_notifier_enabler = file->private_data;
1934 return lttng_event_notifier_enabler_attach_capture_bytecode(
1935 event_notifier_enabler,
1936 (struct lttng_kernel_capture_bytecode __user *) arg);
1937 default:
1938 WARN_ON_ONCE(1);
1939 return -ENOSYS;
1940 }
1941 case LTTNG_KERNEL_ADD_CALLSITE:
1942 switch (*evtype) {
1943 case LTTNG_TYPE_EVENT:
1944 event_notifier = file->private_data;
1945 return lttng_event_notifier_add_callsite(event_notifier,
1946 (struct lttng_kernel_event_callsite __user *) arg);
1947 case LTTNG_TYPE_ENABLER:
1948 return -EINVAL;
1949 default:
1950 WARN_ON_ONCE(1);
1951 return -ENOSYS;
1952 }
1953 default:
1954 return -ENOIOCTLCMD;
1955 }
1956 }
1957
1958 static
1959 int lttng_event_notifier_release(struct inode *inode, struct file *file)
1960 {
1961 struct lttng_event_notifier *event_notifier;
1962 struct lttng_event_notifier_enabler *event_notifier_enabler;
1963 enum lttng_event_type *evtype = file->private_data;
1964
1965 if (!evtype)
1966 return 0;
1967
1968 switch (*evtype) {
1969 case LTTNG_TYPE_EVENT:
1970 event_notifier = file->private_data;
1971 if (event_notifier)
1972 fput(event_notifier->group->file);
1973 break;
1974 case LTTNG_TYPE_ENABLER:
1975 event_notifier_enabler = file->private_data;
1976 if (event_notifier_enabler)
1977 fput(event_notifier_enabler->group->file);
1978 break;
1979 default:
1980 WARN_ON_ONCE(1);
1981 break;
1982 }
1983
1984 return 0;
1985 }
1986
1987 static const struct file_operations lttng_event_notifier_fops = {
1988 .owner = THIS_MODULE,
1989 .release = lttng_event_notifier_release,
1990 .unlocked_ioctl = lttng_event_notifier_ioctl,
1991 #ifdef CONFIG_COMPAT
1992 .compat_ioctl = lttng_event_notifier_ioctl,
1993 #endif
1994 };
1995
1996 static
1997 int lttng_abi_create_event_notifier(struct file *event_notifier_group_file,
1998 struct lttng_kernel_event_notifier *event_notifier_param)
1999 {
2000 struct lttng_event_notifier_group *event_notifier_group =
2001 event_notifier_group_file->private_data;
2002 int event_notifier_fd, ret;
2003 struct file *event_notifier_file;
2004 void *priv;
2005
2006 switch (event_notifier_param->event.instrumentation) {
2007 case LTTNG_KERNEL_TRACEPOINT:
2008 case LTTNG_KERNEL_UPROBE:
2009 break;
2010 case LTTNG_KERNEL_KPROBE:
2011 event_notifier_param->event.u.kprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
2012 break;
2013 case LTTNG_KERNEL_SYSCALL:
2014 break;
2015 case LTTNG_KERNEL_KRETPROBE:
2016 /* Placing an event notifier on a kretprobe is not supported. */
2017 case LTTNG_KERNEL_FUNCTION:
2018 case LTTNG_KERNEL_NOOP:
2019 default:
2020 ret = -EINVAL;
2021 goto inval_instr;
2022 }
2023
2024 event_notifier_param->event.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
2025
2026 event_notifier_fd = lttng_get_unused_fd();
2027 if (event_notifier_fd < 0) {
2028 ret = event_notifier_fd;
2029 goto fd_error;
2030 }
2031
2032 event_notifier_file = anon_inode_getfile("[lttng_event_notifier]",
2033 &lttng_event_notifier_fops,
2034 NULL, O_RDWR);
2035 if (IS_ERR(event_notifier_file)) {
2036 ret = PTR_ERR(event_notifier_file);
2037 goto file_error;
2038 }
2039
2040 /* The event notifier holds a reference on the event notifier group. */
2041 if (!atomic_long_add_unless(&event_notifier_group_file->f_count, 1, LONG_MAX)) {
2042 ret = -EOVERFLOW;
2043 goto refcount_error;
2044 }
2045
2046 ret = lttng_abi_validate_event_param(&event_notifier_param->event);
2047 if (ret)
2048 goto event_notifier_error;
2049
2050 switch (event_notifier_param->event.instrumentation) {
2051 case LTTNG_KERNEL_TRACEPOINT: /* Fall-through */
2052 case LTTNG_KERNEL_SYSCALL:
2053 {
2054 struct lttng_event_notifier_enabler *enabler;
2055
2056 if (strutils_is_star_glob_pattern(event_notifier_param->event.name)) {
2057 /*
2058 * If the event name is a star globbing pattern,
2059 * we create the special star globbing enabler.
2060 */
2061 enabler = lttng_event_notifier_enabler_create(
2062 event_notifier_group,
2063 LTTNG_ENABLER_FORMAT_STAR_GLOB,
2064 event_notifier_param);
2065 } else {
2066 enabler = lttng_event_notifier_enabler_create(
2067 event_notifier_group,
2068 LTTNG_ENABLER_FORMAT_NAME,
2069 event_notifier_param);
2070 }
2071 priv = enabler;
2072 break;
2073 }
2074
2075 case LTTNG_KERNEL_KPROBE: /* Fall-through */
2076 case LTTNG_KERNEL_KRETPROBE: /* Fall-through */
2077 case LTTNG_KERNEL_UPROBE:
2078 {
2079 struct lttng_event_notifier *event_notifier;
2080
2081 /*
2082 * No failure path is tolerated after event notifier creation: once
2083 * created, the event notifier stays in place for the rest of the session.
2084 */
2085 event_notifier = lttng_event_notifier_create(NULL,
2086 event_notifier_param->event.token,
2087 event_notifier_param->error_counter_index,
2088 event_notifier_group,
2089 event_notifier_param, NULL,
2090 event_notifier_param->event.instrumentation);
2091 WARN_ON_ONCE(!event_notifier);
2092 if (IS_ERR(event_notifier)) {
2093 ret = PTR_ERR(event_notifier);
2094 goto event_notifier_error;
2095 }
2096 priv = event_notifier;
2097 break;
2098 }
2099
2100 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
2101 case LTTNG_KERNEL_NOOP: /* Fall-through */
2102 default:
2103 ret = -EINVAL;
2104 goto event_notifier_error;
2105 }
2106 event_notifier_file->private_data = priv;
2107 fd_install(event_notifier_fd, event_notifier_file);
2108 return event_notifier_fd;
2109
2110 event_notifier_error:
2111 atomic_long_dec(&event_notifier_group_file->f_count);
2112 refcount_error:
2113 fput(event_notifier_file);
2114 file_error:
2115 put_unused_fd(event_notifier_fd);
2116 fd_error:
2117 inval_instr:
2118 return ret;
2119 }
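
/*
 * Hypothetical userspace sketch (assumptions: an event notifier group fd
 * "group_fd" already exists, the LTTng ABI definitions are available to
 * userspace, and the token/index fields are 64-bit as used above), showing
 * how the creation path above is reached through the
 * LTTNG_KERNEL_EVENT_NOTIFIER_CREATE ioctl handled further down.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *
 *	static int create_tracepoint_notifier(int group_fd, const char *name,
 *			uint64_t token, uint64_t error_counter_index)
 *	{
 *		struct lttng_kernel_event_notifier notif;
 *
 *		memset(&notif, 0, sizeof(notif));
 *		strncpy(notif.event.name, name, LTTNG_KERNEL_SYM_NAME_LEN - 1);
 *		notif.event.instrumentation = LTTNG_KERNEL_TRACEPOINT;
 *		notif.event.token = token;
 *		notif.error_counter_index = error_counter_index;
 *		return ioctl(group_fd, LTTNG_KERNEL_EVENT_NOTIFIER_CREATE,
 *			     &notif);
 *	}
 *
 * On success, the return value is a new event notifier file descriptor.
 */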
2120
2121 static
2122 long lttng_abi_event_notifier_group_create_error_counter(
2123 struct file *event_notifier_group_file,
2124 const struct lttng_kernel_counter_conf *error_counter_conf)
2125 {
2126 int counter_fd, ret;
2127 char *counter_transport_name;
2128 size_t counter_len;
2129 struct lttng_counter *counter = NULL;
2130 struct file *counter_file;
2131 struct lttng_event_notifier_group *event_notifier_group =
2132 (struct lttng_event_notifier_group *) event_notifier_group_file->private_data;
2133
2134 if (error_counter_conf->arithmetic != LTTNG_KERNEL_COUNTER_ARITHMETIC_MODULAR) {
2135 printk(KERN_ERR "LTTng: event_notifier: Error counter of the wrong arithmetic type.\n");
2136 return -EINVAL;
2137 }
2138
2139 if (error_counter_conf->number_dimensions != 1) {
2140 printk(KERN_ERR "LTTng: event_notifier: Error counter has more than one dimension.\n");
2141 return -EINVAL;
2142 }
2143
2144 switch (error_counter_conf->bitness) {
2145 case LTTNG_KERNEL_COUNTER_BITNESS_64:
2146 counter_transport_name = "counter-per-cpu-64-modular";
2147 break;
2148 case LTTNG_KERNEL_COUNTER_BITNESS_32:
2149 counter_transport_name = "counter-per-cpu-32-modular";
2150 break;
2151 default:
2152 return -EINVAL;
2153 }
2154
2155 /*
2156 * Take the sessions lock to serialize against concurrent modification
2157 * of the event_notifier group: without it, two concurrent creations
2158 * could overwrite each other's error counter.
2159 */
2160 lttng_lock_sessions();
2161
2162 if (event_notifier_group->error_counter) {
2163 printk(KERN_ERR "LTTng: event_notifier: Error counter already created in event_notifier group.\n");
2164 ret = -EBUSY;
2165 goto fd_error;
2166 }
2167
2168 counter_fd = lttng_get_unused_fd();
2169 if (counter_fd < 0) {
2170 ret = counter_fd;
2171 goto fd_error;
2172 }
2173
2174 counter_file = anon_inode_getfile("[lttng_counter]",
2175 &lttng_counter_fops,
2176 NULL, O_RDONLY);
2177 if (IS_ERR(counter_file)) {
2178 ret = PTR_ERR(counter_file);
2179 goto file_error;
2180 }
2181
2182 counter_len = error_counter_conf->dimensions[0].size;
2183
2184 if (!atomic_long_add_unless(&event_notifier_group_file->f_count, 1, LONG_MAX)) {
2185 ret = -EOVERFLOW;
2186 goto refcount_error;
2187 }
2188
2189 counter = lttng_kernel_counter_create(counter_transport_name,
2190 1, &counter_len);
2191 if (!counter) {
2192 ret = -EINVAL;
2193 goto counter_error;
2194 }
2195
2196 event_notifier_group->error_counter_len = counter_len;
2197 /*
2198 * The store-release publishing the error counter pairs with the
2199 * load-acquire in record_error(): it ensures the counter and
2200 * error_counter_len are fully initialized before they can be observed.
2201 */
2202 lttng_smp_store_release(&event_notifier_group->error_counter, counter);
2203
2204 counter->file = counter_file;
2205 counter->owner = event_notifier_group->file;
2206 counter_file->private_data = counter;
2207 /* Ownership transferred. */
2208 counter = NULL;
2209
2210 fd_install(counter_fd, counter_file);
2211 lttng_unlock_sessions();
2212
2213 return counter_fd;
2214
2215 counter_error:
2216 atomic_long_dec(&event_notifier_group_file->f_count);
2217 refcount_error:
2218 fput(counter_file);
2219 file_error:
2220 put_unused_fd(counter_fd);
2221 fd_error:
2222 lttng_unlock_sessions();
2223 return ret;
2224 }
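
/*
 * Hypothetical userspace sketch (assuming a "group_fd" event notifier group
 * file descriptor and the LTTng ABI definitions): requesting the per-group
 * error counter through the LTTNG_KERNEL_COUNTER ioctl dispatched below. The
 * function above only accepts a one-dimensional counter using modular
 * arithmetic.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *
 *	static int create_error_counter(int group_fd, uint64_t nr_slots)
 *	{
 *		struct lttng_kernel_counter_conf conf;
 *
 *		memset(&conf, 0, sizeof(conf));
 *		conf.arithmetic = LTTNG_KERNEL_COUNTER_ARITHMETIC_MODULAR;
 *		conf.bitness = LTTNG_KERNEL_COUNTER_BITNESS_64;
 *		conf.number_dimensions = 1;
 *		conf.dimensions[0].size = nr_slots;
 *		return ioctl(group_fd, LTTNG_KERNEL_COUNTER, &conf);
 *	}
 */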
2225
2226 static
2227 long lttng_event_notifier_group_ioctl(struct file *file, unsigned int cmd,
2228 unsigned long arg)
2229 {
2230 switch (cmd) {
2231 case LTTNG_KERNEL_EVENT_NOTIFIER_GROUP_NOTIFICATION_FD:
2232 {
2233 return lttng_abi_open_event_notifier_group_stream(file);
2234 }
2235 case LTTNG_KERNEL_EVENT_NOTIFIER_CREATE:
2236 {
2237 struct lttng_kernel_event_notifier uevent_notifier_param;
2238
2239 if (copy_from_user(&uevent_notifier_param,
2240 (struct lttng_kernel_event_notifier __user *) arg,
2241 sizeof(uevent_notifier_param)))
2242 return -EFAULT;
2243 return lttng_abi_create_event_notifier(file, &uevent_notifier_param);
2244 }
2245 case LTTNG_KERNEL_COUNTER:
2246 {
2247 struct lttng_kernel_counter_conf uerror_counter_conf;
2248
2249 if (copy_from_user(&uerror_counter_conf,
2250 (struct lttng_kernel_counter_conf __user *) arg,
2251 sizeof(uerror_counter_conf)))
2252 return -EFAULT;
2253 return lttng_abi_event_notifier_group_create_error_counter(file,
2254 &uerror_counter_conf);
2255 }
2256 default:
2257 return -ENOIOCTLCMD;
2258 }
2259 return 0;
2260 }
2261
2262 static
2263 int lttng_event_notifier_group_release(struct inode *inode, struct file *file)
2264 {
2265 struct lttng_event_notifier_group *event_notifier_group =
2266 file->private_data;
2267
2268 if (event_notifier_group)
2269 lttng_event_notifier_group_destroy(event_notifier_group);
2270 return 0;
2271 }
2272
2273 static const struct file_operations lttng_event_notifier_group_fops = {
2274 .owner = THIS_MODULE,
2275 .release = lttng_event_notifier_group_release,
2276 .unlocked_ioctl = lttng_event_notifier_group_ioctl,
2277 #ifdef CONFIG_COMPAT
2278 .compat_ioctl = lttng_event_notifier_group_ioctl,
2279 #endif
2280 };
2281
2282 /**
2283 * lttng_channel_ioctl - lttng syscall through ioctl
2284 *
2285 * @file: the file
2286 * @cmd: the command
2287 * @arg: command arg
2288 *
2289 * This ioctl implements lttng commands:
2290 * LTTNG_KERNEL_STREAM
2291 * Returns an event stream file descriptor or failure.
2292 * (typically, one event stream records events from one CPU)
2293 * LTTNG_KERNEL_EVENT
2294 * Returns an event file descriptor or failure.
2295 * LTTNG_KERNEL_CONTEXT
2296 * Prepend a context field to each event in the channel
2297 * LTTNG_KERNEL_ENABLE
2298 * Enable recording for events in this channel (weak enable)
2299 * LTTNG_KERNEL_DISABLE
2300 * Disable recording for events in this channel (strong disable)
2301 *
2302 * Channel and event file descriptors also hold a reference on the session.
2303 */
2304 static
2305 long lttng_channel_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2306 {
2307 struct lttng_channel *channel = file->private_data;
2308
2309 switch (cmd) {
2310 case LTTNG_KERNEL_OLD_STREAM:
2311 case LTTNG_KERNEL_STREAM:
2312 return lttng_abi_open_stream(file);
2313 case LTTNG_KERNEL_OLD_EVENT:
2314 {
2315 struct lttng_kernel_event *uevent_param;
2316 struct lttng_kernel_old_event *old_uevent_param;
2317 int ret;
2318
2319 uevent_param = kmalloc(sizeof(struct lttng_kernel_event),
2320 GFP_KERNEL);
2321 if (!uevent_param) {
2322 ret = -ENOMEM;
2323 goto old_event_end;
2324 }
2325 old_uevent_param = kmalloc(
2326 sizeof(struct lttng_kernel_old_event),
2327 GFP_KERNEL);
2328 if (!old_uevent_param) {
2329 ret = -ENOMEM;
2330 goto old_event_error_free_param;
2331 }
2332 if (copy_from_user(old_uevent_param,
2333 (struct lttng_kernel_old_event __user *) arg,
2334 sizeof(struct lttng_kernel_old_event))) {
2335 ret = -EFAULT;
2336 goto old_event_error_free_old_param;
2337 }
2338
2339 memcpy(uevent_param->name, old_uevent_param->name,
2340 sizeof(uevent_param->name));
2341 uevent_param->instrumentation =
2342 old_uevent_param->instrumentation;
2343
2344 switch (old_uevent_param->instrumentation) {
2345 case LTTNG_KERNEL_KPROBE:
2346 uevent_param->u.kprobe.addr =
2347 old_uevent_param->u.kprobe.addr;
2348 uevent_param->u.kprobe.offset =
2349 old_uevent_param->u.kprobe.offset;
2350 memcpy(uevent_param->u.kprobe.symbol_name,
2351 old_uevent_param->u.kprobe.symbol_name,
2352 sizeof(uevent_param->u.kprobe.symbol_name));
2353 break;
2354 case LTTNG_KERNEL_KRETPROBE:
2355 uevent_param->u.kretprobe.addr =
2356 old_uevent_param->u.kretprobe.addr;
2357 uevent_param->u.kretprobe.offset =
2358 old_uevent_param->u.kretprobe.offset;
2359 memcpy(uevent_param->u.kretprobe.symbol_name,
2360 old_uevent_param->u.kretprobe.symbol_name,
2361 sizeof(uevent_param->u.kretprobe.symbol_name));
2362 break;
2363 case LTTNG_KERNEL_FUNCTION:
2364 WARN_ON_ONCE(1);
2365 /* Not implemented. */
2366 break;
2367 default:
2368 break;
2369 }
2370 ret = lttng_abi_create_event(file, uevent_param);
2371
2372 old_event_error_free_old_param:
2373 kfree(old_uevent_param);
2374 old_event_error_free_param:
2375 kfree(uevent_param);
2376 old_event_end:
2377 return ret;
2378 }
2379 case LTTNG_KERNEL_EVENT:
2380 {
2381 struct lttng_kernel_event uevent_param;
2382
2383 if (copy_from_user(&uevent_param,
2384 (struct lttng_kernel_event __user *) arg,
2385 sizeof(uevent_param)))
2386 return -EFAULT;
2387 return lttng_abi_create_event(file, &uevent_param);
2388 }
2389 case LTTNG_KERNEL_OLD_CONTEXT:
2390 {
2391 struct lttng_kernel_context *ucontext_param;
2392 struct lttng_kernel_old_context *old_ucontext_param;
2393 int ret;
2394
2395 ucontext_param = kmalloc(sizeof(struct lttng_kernel_context),
2396 GFP_KERNEL);
2397 if (!ucontext_param) {
2398 ret = -ENOMEM;
2399 goto old_ctx_end;
2400 }
2401 old_ucontext_param = kmalloc(sizeof(struct lttng_kernel_old_context),
2402 GFP_KERNEL);
2403 if (!old_ucontext_param) {
2404 ret = -ENOMEM;
2405 goto old_ctx_error_free_param;
2406 }
2407
2408 if (copy_from_user(old_ucontext_param,
2409 (struct lttng_kernel_old_context __user *) arg,
2410 sizeof(struct lttng_kernel_old_context))) {
2411 ret = -EFAULT;
2412 goto old_ctx_error_free_old_param;
2413 }
2414 ucontext_param->ctx = old_ucontext_param->ctx;
2415 memcpy(ucontext_param->padding, old_ucontext_param->padding,
2416 sizeof(ucontext_param->padding));
2417 /* Perf counter is the only context type that uses the union. */
2418 if (old_ucontext_param->ctx == LTTNG_KERNEL_CONTEXT_PERF_COUNTER) {
2419 ucontext_param->u.perf_counter.type =
2420 old_ucontext_param->u.perf_counter.type;
2421 ucontext_param->u.perf_counter.config =
2422 old_ucontext_param->u.perf_counter.config;
2423 memcpy(ucontext_param->u.perf_counter.name,
2424 old_ucontext_param->u.perf_counter.name,
2425 sizeof(ucontext_param->u.perf_counter.name));
2426 }
2427
2428 ret = lttng_abi_add_context(file,
2429 ucontext_param,
2430 &channel->ctx, channel->session);
2431
2432 old_ctx_error_free_old_param:
2433 kfree(old_ucontext_param);
2434 old_ctx_error_free_param:
2435 kfree(ucontext_param);
2436 old_ctx_end:
2437 return ret;
2438 }
2439 case LTTNG_KERNEL_CONTEXT:
2440 {
2441 struct lttng_kernel_context ucontext_param;
2442
2443 if (copy_from_user(&ucontext_param,
2444 (struct lttng_kernel_context __user *) arg,
2445 sizeof(ucontext_param)))
2446 return -EFAULT;
2447 return lttng_abi_add_context(file,
2448 &ucontext_param,
2449 &channel->ctx, channel->session);
2450 }
2451 case LTTNG_KERNEL_OLD_ENABLE:
2452 case LTTNG_KERNEL_ENABLE:
2453 return lttng_channel_enable(channel);
2454 case LTTNG_KERNEL_OLD_DISABLE:
2455 case LTTNG_KERNEL_DISABLE:
2456 return lttng_channel_disable(channel);
2457 case LTTNG_KERNEL_SYSCALL_MASK:
2458 return lttng_channel_syscall_mask(channel,
2459 (struct lttng_kernel_syscall_mask __user *) arg);
2460 default:
2461 return -ENOIOCTLCMD;
2462 }
2463 }
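
/*
 * Hypothetical userspace sketch (assuming a channel fd "chan_fd" and the
 * LTTng ABI definitions): the usual sequence driven through
 * lttng_channel_ioctl() above is to enable the channel and then fetch stream
 * file descriptors, typically one per CPU.
 *
 *	#include <sys/ioctl.h>
 *
 *	static int enable_and_get_stream(int chan_fd)
 *	{
 *		int ret;
 *
 *		ret = ioctl(chan_fd, LTTNG_KERNEL_ENABLE);
 *		if (ret < 0)
 *			return ret;
 *		return ioctl(chan_fd, LTTNG_KERNEL_STREAM);
 *	}
 */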
2464
2465 /**
2466 * lttng_metadata_ioctl - lttng syscall through ioctl
2467 *
2468 * @file: the file
2469 * @cmd: the command
2470 * @arg: command arg
2471 *
2472 * This ioctl implements lttng commands:
2473 * LTTNG_KERNEL_STREAM
2474 * Returns an event stream file descriptor or failure.
2475 *
2476 * Channel and event file descriptors also hold a reference on the session.
2477 */
2478 static
2479 long lttng_metadata_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2480 {
2481 switch (cmd) {
2482 case LTTNG_KERNEL_OLD_STREAM:
2483 case LTTNG_KERNEL_STREAM:
2484 return lttng_abi_open_metadata_stream(file);
2485 default:
2486 return -ENOIOCTLCMD;
2487 }
2488 }
2489
2490 /**
2491 * lttng_channel_poll - lttng stream addition/removal monitoring
2492 *
2493 * @file: the file
2494 * @wait: poll table
2495 */
2496 unsigned int lttng_channel_poll(struct file *file, poll_table *wait)
2497 {
2498 struct lttng_channel *channel = file->private_data;
2499 unsigned int mask = 0;
2500
2501 if (file->f_mode & FMODE_READ) {
2502 poll_wait_set_exclusive(wait);
2503 poll_wait(file, channel->ops->get_hp_wait_queue(channel->chan),
2504 wait);
2505
2506 if (channel->ops->is_disabled(channel->chan))
2507 return POLLERR;
2508 if (channel->ops->is_finalized(channel->chan))
2509 return POLLHUP;
2510 if (channel->ops->buffer_has_read_closed_stream(channel->chan))
2511 return POLLIN | POLLRDNORM;
2512 return 0;
2513 }
2514 return mask;
2515
2516 }
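
/*
 * Hypothetical userspace sketch: waiting on the channel fd with poll(2), as
 * serviced by lttng_channel_poll() above. POLLIN/POLLRDNORM indicates that a
 * stream was closed on the read side, POLLHUP that the channel is finalized,
 * POLLERR that it is disabled.
 *
 *	#include <poll.h>
 *
 *	static int wait_for_channel_activity(int chan_fd, int timeout_ms)
 *	{
 *		struct pollfd pfd = { .fd = chan_fd, .events = POLLIN };
 *
 *		return poll(&pfd, 1, timeout_ms);
 *	}
 */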
2517
2518 static
2519 int lttng_channel_release(struct inode *inode, struct file *file)
2520 {
2521 struct lttng_channel *channel = file->private_data;
2522
2523 if (channel)
2524 fput(channel->session->file);
2525 return 0;
2526 }
2527
2528 static
2529 int lttng_metadata_channel_release(struct inode *inode, struct file *file)
2530 {
2531 struct lttng_channel *channel = file->private_data;
2532
2533 if (channel) {
2534 fput(channel->session->file);
2535 lttng_metadata_channel_destroy(channel);
2536 }
2537
2538 return 0;
2539 }
2540
2541 static const struct file_operations lttng_channel_fops = {
2542 .owner = THIS_MODULE,
2543 .release = lttng_channel_release,
2544 .poll = lttng_channel_poll,
2545 .unlocked_ioctl = lttng_channel_ioctl,
2546 #ifdef CONFIG_COMPAT
2547 .compat_ioctl = lttng_channel_ioctl,
2548 #endif
2549 };
2550
2551 static const struct file_operations lttng_metadata_fops = {
2552 .owner = THIS_MODULE,
2553 .release = lttng_metadata_channel_release,
2554 .unlocked_ioctl = lttng_metadata_ioctl,
2555 #ifdef CONFIG_COMPAT
2556 .compat_ioctl = lttng_metadata_ioctl,
2557 #endif
2558 };
2559
2560 /**
2561 * lttng_event_ioctl - lttng syscall through ioctl
2562 *
2563 * @file: the file
2564 * @cmd: the command
2565 * @arg: command arg
2566 *
2567 * This ioctl implements lttng commands:
2568 * LTTNG_KERNEL_CONTEXT
2569 * Prepend a context field to each record of this event
2570 * LTTNG_KERNEL_ENABLE
2571 * Enable recording for this event (weak enable)
2572 * LTTNG_KERNEL_DISABLE
2573 * Disable recording for this event (strong disable)
2574 */
2575 static
2576 long lttng_event_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2577 {
2578 struct lttng_event *event;
2579 struct lttng_event_enabler *event_enabler;
2580 enum lttng_event_type *evtype = file->private_data;
2581
2582 switch (cmd) {
2583 case LTTNG_KERNEL_OLD_CONTEXT:
2584 {
2585 /* Not implemented */
2586 return -ENOSYS;
2587 }
2588 case LTTNG_KERNEL_CONTEXT:
2589 {
2590 /* Not implemented */
2591 return -ENOSYS;
2592 }
2593 case LTTNG_KERNEL_OLD_ENABLE:
2594 case LTTNG_KERNEL_ENABLE:
2595 switch (*evtype) {
2596 case LTTNG_TYPE_EVENT:
2597 event = file->private_data;
2598 return lttng_event_enable(event);
2599 case LTTNG_TYPE_ENABLER:
2600 event_enabler = file->private_data;
2601 return lttng_event_enabler_enable(event_enabler);
2602 default:
2603 WARN_ON_ONCE(1);
2604 return -ENOSYS;
2605 }
2606 case LTTNG_KERNEL_OLD_DISABLE:
2607 case LTTNG_KERNEL_DISABLE:
2608 switch (*evtype) {
2609 case LTTNG_TYPE_EVENT:
2610 event = file->private_data;
2611 return lttng_event_disable(event);
2612 case LTTNG_TYPE_ENABLER:
2613 event_enabler = file->private_data;
2614 return lttng_event_enabler_disable(event_enabler);
2615 default:
2616 WARN_ON_ONCE(1);
2617 return -ENOSYS;
2618 }
2619 case LTTNG_KERNEL_FILTER:
2620 switch (*evtype) {
2621 case LTTNG_TYPE_EVENT:
2622 return -EINVAL;
2623 case LTTNG_TYPE_ENABLER:
2624 {
2625 event_enabler = file->private_data;
2626 return lttng_event_enabler_attach_filter_bytecode(
2627 event_enabler,
2628 (struct lttng_kernel_filter_bytecode __user *) arg);
2629 }
2630 default:
2631 WARN_ON_ONCE(1);
2632 return -ENOSYS;
2633 }
2634 case LTTNG_KERNEL_ADD_CALLSITE:
2635 switch (*evtype) {
2636 case LTTNG_TYPE_EVENT:
2637 event = file->private_data;
2638 return lttng_event_add_callsite(event,
2639 (struct lttng_kernel_event_callsite __user *) arg);
2640 case LTTNG_TYPE_ENABLER:
2641 return -EINVAL;
2642 default:
2643 WARN_ON_ONCE(1);
2644 return -ENOSYS;
2645 }
2646 default:
2647 return -ENOIOCTLCMD;
2648 }
2649 }
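
/*
 * Hypothetical userspace sketch (assuming an event fd "event_fd" returned by
 * event creation on a channel): toggling recording for a single event or
 * enabler through the ioctls handled above.
 *
 *	#include <sys/ioctl.h>
 *
 *	static int set_event_enabled(int event_fd, int enable)
 *	{
 *		return ioctl(event_fd,
 *			enable ? LTTNG_KERNEL_ENABLE : LTTNG_KERNEL_DISABLE);
 *	}
 */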
2650
2651 static
2652 int lttng_event_release(struct inode *inode, struct file *file)
2653 {
2654 struct lttng_event *event;
2655 struct lttng_event_enabler *event_enabler;
2656 enum lttng_event_type *evtype = file->private_data;
2657
2658 if (!evtype)
2659 return 0;
2660
2661 switch (*evtype) {
2662 case LTTNG_TYPE_EVENT:
2663 event = file->private_data;
2664 if (event)
2665 fput(event->chan->file);
2666 break;
2667 case LTTNG_TYPE_ENABLER:
2668 event_enabler = file->private_data;
2669 if (event_enabler)
2670 fput(event_enabler->chan->file);
2671 break;
2672 default:
2673 WARN_ON_ONCE(1);
2674 break;
2675 }
2676
2677 return 0;
2678 }
2679
2680 /* TODO: filter control ioctl */
2681 static const struct file_operations lttng_event_fops = {
2682 .owner = THIS_MODULE,
2683 .release = lttng_event_release,
2684 .unlocked_ioctl = lttng_event_ioctl,
2685 #ifdef CONFIG_COMPAT
2686 .compat_ioctl = lttng_event_ioctl,
2687 #endif
2688 };
2689
2690 static int put_u64(uint64_t val, unsigned long arg)
2691 {
2692 return put_user(val, (uint64_t __user *) arg);
2693 }
2694
2695 static int put_u32(uint32_t val, unsigned long arg)
2696 {
2697 return put_user(val, (uint32_t __user *) arg);
2698 }
2699
2700 static long lttng_stream_ring_buffer_ioctl(struct file *filp,
2701 unsigned int cmd, unsigned long arg)
2702 {
2703 struct lib_ring_buffer *buf = filp->private_data;
2704 struct channel *chan = buf->backend.chan;
2705 const struct lib_ring_buffer_config *config = &chan->backend.config;
2706 const struct lttng_channel_ops *ops = chan->backend.priv_ops;
2707 int ret;
2708
2709 if (atomic_read(&chan->record_disabled))
2710 return -EIO;
2711
2712 switch (cmd) {
2713 case LTTNG_RING_BUFFER_GET_TIMESTAMP_BEGIN:
2714 {
2715 uint64_t ts;
2716
2717 ret = ops->timestamp_begin(config, buf, &ts);
2718 if (ret < 0)
2719 goto error;
2720 return put_u64(ts, arg);
2721 }
2722 case LTTNG_RING_BUFFER_GET_TIMESTAMP_END:
2723 {
2724 uint64_t ts;
2725
2726 ret = ops->timestamp_end(config, buf, &ts);
2727 if (ret < 0)
2728 goto error;
2729 return put_u64(ts, arg);
2730 }
2731 case LTTNG_RING_BUFFER_GET_EVENTS_DISCARDED:
2732 {
2733 uint64_t ed;
2734
2735 ret = ops->events_discarded(config, buf, &ed);
2736 if (ret < 0)
2737 goto error;
2738 return put_u64(ed, arg);
2739 }
2740 case LTTNG_RING_BUFFER_GET_CONTENT_SIZE:
2741 {
2742 uint64_t cs;
2743
2744 ret = ops->content_size(config, buf, &cs);
2745 if (ret < 0)
2746 goto error;
2747 return put_u64(cs, arg);
2748 }
2749 case LTTNG_RING_BUFFER_GET_PACKET_SIZE:
2750 {
2751 uint64_t ps;
2752
2753 ret = ops->packet_size(config, buf, &ps);
2754 if (ret < 0)
2755 goto error;
2756 return put_u64(ps, arg);
2757 }
2758 case LTTNG_RING_BUFFER_GET_STREAM_ID:
2759 {
2760 uint64_t si;
2761
2762 ret = ops->stream_id(config, buf, &si);
2763 if (ret < 0)
2764 goto error;
2765 return put_u64(si, arg);
2766 }
2767 case LTTNG_RING_BUFFER_GET_CURRENT_TIMESTAMP:
2768 {
2769 uint64_t ts;
2770
2771 ret = ops->current_timestamp(config, buf, &ts);
2772 if (ret < 0)
2773 goto error;
2774 return put_u64(ts, arg);
2775 }
2776 case LTTNG_RING_BUFFER_GET_SEQ_NUM:
2777 {
2778 uint64_t seq;
2779
2780 ret = ops->sequence_number(config, buf, &seq);
2781 if (ret < 0)
2782 goto error;
2783 return put_u64(seq, arg);
2784 }
2785 case LTTNG_RING_BUFFER_INSTANCE_ID:
2786 {
2787 uint64_t id;
2788
2789 ret = ops->instance_id(config, buf, &id);
2790 if (ret < 0)
2791 goto error;
2792 return put_u64(id, arg);
2793 }
2794 default:
2795 return lib_ring_buffer_file_operations.unlocked_ioctl(filp,
2796 cmd, arg);
2797 }
2798
2799 error:
2800 return -ENOSYS;
2801 }
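
/*
 * Hypothetical userspace sketch (assuming a stream fd "stream_fd" obtained
 * through LTTNG_KERNEL_STREAM): querying per-packet information through the
 * ioctls handled above. Each query fills a 64-bit value via put_u64().
 *
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *
 *	static int get_packet_timestamps(int stream_fd, uint64_t *ts_begin,
 *			uint64_t *ts_end)
 *	{
 *		int ret;
 *
 *		ret = ioctl(stream_fd, LTTNG_RING_BUFFER_GET_TIMESTAMP_BEGIN,
 *				ts_begin);
 *		if (ret < 0)
 *			return ret;
 *		return ioctl(stream_fd, LTTNG_RING_BUFFER_GET_TIMESTAMP_END,
 *				ts_end);
 *	}
 */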
2802
2803 #ifdef CONFIG_COMPAT
2804 static long lttng_stream_ring_buffer_compat_ioctl(struct file *filp,
2805 unsigned int cmd, unsigned long arg)
2806 {
2807 struct lib_ring_buffer *buf = filp->private_data;
2808 struct channel *chan = buf->backend.chan;
2809 const struct lib_ring_buffer_config *config = &chan->backend.config;
2810 const struct lttng_channel_ops *ops = chan->backend.priv_ops;
2811 int ret;
2812
2813 if (atomic_read(&chan->record_disabled))
2814 return -EIO;
2815
2816 switch (cmd) {
2817 case LTTNG_RING_BUFFER_COMPAT_GET_TIMESTAMP_BEGIN:
2818 {
2819 uint64_t ts;
2820
2821 ret = ops->timestamp_begin(config, buf, &ts);
2822 if (ret < 0)
2823 goto error;
2824 return put_u64(ts, arg);
2825 }
2826 case LTTNG_RING_BUFFER_COMPAT_GET_TIMESTAMP_END:
2827 {
2828 uint64_t ts;
2829
2830 ret = ops->timestamp_end(config, buf, &ts);
2831 if (ret < 0)
2832 goto error;
2833 return put_u64(ts, arg);
2834 }
2835 case LTTNG_RING_BUFFER_COMPAT_GET_EVENTS_DISCARDED:
2836 {
2837 uint64_t ed;
2838
2839 ret = ops->events_discarded(config, buf, &ed);
2840 if (ret < 0)
2841 goto error;
2842 return put_u64(ed, arg);
2843 }
2844 case LTTNG_RING_BUFFER_COMPAT_GET_CONTENT_SIZE:
2845 {
2846 uint64_t cs;
2847
2848 ret = ops->content_size(config, buf, &cs);
2849 if (ret < 0)
2850 goto error;
2851 return put_u64(cs, arg);
2852 }
2853 case LTTNG_RING_BUFFER_COMPAT_GET_PACKET_SIZE:
2854 {
2855 uint64_t ps;
2856
2857 ret = ops->packet_size(config, buf, &ps);
2858 if (ret < 0)
2859 goto error;
2860 return put_u64(ps, arg);
2861 }
2862 case LTTNG_RING_BUFFER_COMPAT_GET_STREAM_ID:
2863 {
2864 uint64_t si;
2865
2866 ret = ops->stream_id(config, buf, &si);
2867 if (ret < 0)
2868 goto error;
2869 return put_u64(si, arg);
2870 }
2871 case LTTNG_RING_BUFFER_GET_CURRENT_TIMESTAMP:
2872 {
2873 uint64_t ts;
2874
2875 ret = ops->current_timestamp(config, buf, &ts);
2876 if (ret < 0)
2877 goto error;
2878 return put_u64(ts, arg);
2879 }
2880 case LTTNG_RING_BUFFER_COMPAT_GET_SEQ_NUM:
2881 {
2882 uint64_t seq;
2883
2884 ret = ops->sequence_number(config, buf, &seq);
2885 if (ret < 0)
2886 goto error;
2887 return put_u64(seq, arg);
2888 }
2889 case LTTNG_RING_BUFFER_COMPAT_INSTANCE_ID:
2890 {
2891 uint64_t id;
2892
2893 ret = ops->instance_id(config, buf, &id);
2894 if (ret < 0)
2895 goto error;
2896 return put_u64(id, arg);
2897 }
2898 default:
2899 return lib_ring_buffer_file_operations.compat_ioctl(filp,
2900 cmd, arg);
2901 }
2902
2903 error:
2904 return -ENOSYS;
2905 }
2906 #endif /* CONFIG_COMPAT */
2907
2908 static void lttng_stream_override_ring_buffer_fops(void)
2909 {
2910 lttng_stream_ring_buffer_file_operations.owner = THIS_MODULE;
2911 lttng_stream_ring_buffer_file_operations.open =
2912 lib_ring_buffer_file_operations.open;
2913 lttng_stream_ring_buffer_file_operations.release =
2914 lib_ring_buffer_file_operations.release;
2915 lttng_stream_ring_buffer_file_operations.poll =
2916 lib_ring_buffer_file_operations.poll;
2917 lttng_stream_ring_buffer_file_operations.splice_read =
2918 lib_ring_buffer_file_operations.splice_read;
2919 lttng_stream_ring_buffer_file_operations.mmap =
2920 lib_ring_buffer_file_operations.mmap;
2921 lttng_stream_ring_buffer_file_operations.unlocked_ioctl =
2922 lttng_stream_ring_buffer_ioctl;
2923 lttng_stream_ring_buffer_file_operations.llseek =
2924 lib_ring_buffer_file_operations.llseek;
2925 #ifdef CONFIG_COMPAT
2926 lttng_stream_ring_buffer_file_operations.compat_ioctl =
2927 lttng_stream_ring_buffer_compat_ioctl;
2928 #endif
2929 }
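
/*
 * Design note: the stream file operations reuse the ring buffer library's
 * open/release/poll/splice_read/mmap/llseek handlers and only override the
 * ioctl entry points, which themselves fall back to
 * lib_ring_buffer_file_operations for any command they do not recognize (see
 * the default cases above). This keeps the LTTng-specific ABI additions in
 * one place without duplicating the generic ring buffer plumbing.
 */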
2930
2931 int __init lttng_abi_init(void)
2932 {
2933 int ret = 0;
2934
2935 wrapper_vmalloc_sync_mappings();
2936 lttng_clock_ref();
2937
2938 ret = lttng_tp_mempool_init();
2939 if (ret) {
2940 goto error;
2941 }
2942
2943 lttng_proc_dentry = proc_create_data("lttng", S_IRUSR | S_IWUSR, NULL,
2944 &lttng_proc_ops, NULL);
2945
2946 if (!lttng_proc_dentry) {
2947 printk(KERN_ERR "LTTng: Error creating control file\n");
2948 ret = -ENOMEM;
2949 goto error;
2950 }
2951 lttng_stream_override_ring_buffer_fops();
2952 return 0;
2953
2954 error:
2955 lttng_tp_mempool_destroy();
2956 lttng_clock_unref();
2957 return ret;
2958 }
2959
2960 /* No __exit annotation because used by init error path too. */
2961 void lttng_abi_exit(void)
2962 {
2963 lttng_tp_mempool_destroy();
2964 lttng_clock_unref();
2965 if (lttng_proc_dentry)
2966 remove_proc_entry("lttng", NULL);
2967 }