Fix: event-notifier: do not flush packet if it only contains subbuf header
[lttng-modules.git] / src / lttng-abi.c
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-abi.c
4 *
5 * LTTng ABI
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Mimic system calls for:
10 * - session creation, returns a file descriptor or failure.
11 * - channel creation, returns a file descriptor or failure.
12 * - Operates on a session file descriptor
13 * - Takes all channel options as parameters.
14 * - stream get, returns a file descriptor or failure.
15 * - Operates on a channel file descriptor.
16 * - stream notifier get, returns a file descriptor or failure.
17 * - Operates on a channel file descriptor.
18 * - event creation, returns a file descriptor or failure.
19 * - Operates on a channel file descriptor
20 * - Takes an event name as parameter
21 * - Takes an instrumentation source as parameter
22 * - e.g. tracepoints, dynamic_probes...
23 * - Takes instrumentation source specific arguments.
24 */
25
26 #include <linux/module.h>
27 #include <linux/proc_fs.h>
28 #include <linux/anon_inodes.h>
29 #include <linux/file.h>
30 #include <linux/uaccess.h>
31 #include <linux/slab.h>
32 #include <linux/err.h>
33 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
34 #include <ringbuffer/vfs.h>
35 #include <ringbuffer/backend.h>
36 #include <ringbuffer/frontend.h>
37 #include <wrapper/poll.h>
38 #include <wrapper/file.h>
39 #include <wrapper/kref.h>
40 #include <lttng/string-utils.h>
41 #include <lttng/abi.h>
42 #include <lttng/abi-old.h>
43 #include <lttng/events.h>
44 #include <lttng/tracer.h>
45 #include <lttng/tp-mempool.h>
46 #include <ringbuffer/frontend_types.h>
47 #include <ringbuffer/iterator.h>
48
49 /*
50 * This is LTTng's own personal way to create a system call as an external
51 * module. We use ioctl() on /proc/lttng.
52 */
53
54 static struct proc_dir_entry *lttng_proc_dentry;
55
56 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
57 static const struct proc_ops lttng_proc_ops;
58 #else
59 static const struct file_operations lttng_proc_ops;
60 #endif
61
62 static const struct file_operations lttng_session_fops;
63 static const struct file_operations lttng_event_notifier_group_fops;
64 static const struct file_operations lttng_channel_fops;
65 static const struct file_operations lttng_metadata_fops;
66 static const struct file_operations lttng_event_fops;
67 static struct file_operations lttng_stream_ring_buffer_file_operations;
68
69 static int put_u64(uint64_t val, unsigned long arg);
70 static int put_u32(uint32_t val, unsigned long arg);
71
72 /*
73 * Teardown management: opened file descriptors keep a refcount on the module,
74 * so it can only exit when all file descriptors are closed.
75 */
76
77 static
78 int lttng_abi_create_session(void)
79 {
80 struct lttng_session *session;
81 struct file *session_file;
82 int session_fd, ret;
83
84 session = lttng_session_create();
85 if (!session)
86 return -ENOMEM;
87 session_fd = lttng_get_unused_fd();
88 if (session_fd < 0) {
89 ret = session_fd;
90 goto fd_error;
91 }
92 session_file = anon_inode_getfile("[lttng_session]",
93 &lttng_session_fops,
94 session, O_RDWR);
95 if (IS_ERR(session_file)) {
96 ret = PTR_ERR(session_file);
97 goto file_error;
98 }
99 session->file = session_file;
100 fd_install(session_fd, session_file);
101 return session_fd;
102
103 file_error:
104 put_unused_fd(session_fd);
105 fd_error:
106 lttng_session_destroy(session);
107 return ret;
108 }
109
110 void event_notifier_send_notification_work_wakeup(struct irq_work *entry)
111 {
112 struct lttng_event_notifier_group *event_notifier_group =
113 container_of(entry, struct lttng_event_notifier_group,
114 wakeup_pending);
115 wake_up_interruptible(&event_notifier_group->read_wait);
116 }
117
118 static
119 int lttng_abi_create_event_notifier_group(void)
120 {
121 struct lttng_event_notifier_group *event_notifier_group;
122 struct file *event_notifier_group_file;
123 int event_notifier_group_fd, ret;
124
125 event_notifier_group = lttng_event_notifier_group_create();
126 if (!event_notifier_group)
127 return -ENOMEM;
128
129 event_notifier_group_fd = lttng_get_unused_fd();
130 if (event_notifier_group_fd < 0) {
131 ret = event_notifier_group_fd;
132 goto fd_error;
133 }
134 event_notifier_group_file = anon_inode_getfile("[lttng_event_notifier_group]",
135 &lttng_event_notifier_group_fops,
136 event_notifier_group, O_RDWR);
137 if (IS_ERR(event_notifier_group_file)) {
138 ret = PTR_ERR(event_notifier_group_file);
139 goto file_error;
140 }
141
142 event_notifier_group->file = event_notifier_group_file;
143 init_waitqueue_head(&event_notifier_group->read_wait);
144 init_irq_work(&event_notifier_group->wakeup_pending,
145 event_notifier_send_notification_work_wakeup);
146 fd_install(event_notifier_group_fd, event_notifier_group_file);
147 return event_notifier_group_fd;
148
149 file_error:
150 put_unused_fd(event_notifier_group_fd);
151 fd_error:
152 lttng_event_notifier_group_destroy(event_notifier_group);
153 return ret;
154 }
155
156 static
157 int lttng_abi_tracepoint_list(void)
158 {
159 struct file *tracepoint_list_file;
160 int file_fd, ret;
161
162 file_fd = lttng_get_unused_fd();
163 if (file_fd < 0) {
164 ret = file_fd;
165 goto fd_error;
166 }
167
168 tracepoint_list_file = anon_inode_getfile("[lttng_tracepoint_list]",
169 &lttng_tracepoint_list_fops,
170 NULL, O_RDWR);
171 if (IS_ERR(tracepoint_list_file)) {
172 ret = PTR_ERR(tracepoint_list_file);
173 goto file_error;
174 }
175 ret = lttng_tracepoint_list_fops.open(NULL, tracepoint_list_file);
176 if (ret < 0)
177 goto open_error;
178 fd_install(file_fd, tracepoint_list_file);
179 return file_fd;
180
181 open_error:
182 fput(tracepoint_list_file);
183 file_error:
184 put_unused_fd(file_fd);
185 fd_error:
186 return ret;
187 }
188
189 #ifndef CONFIG_HAVE_SYSCALL_TRACEPOINTS
190 static inline
191 int lttng_abi_syscall_list(void)
192 {
193 return -ENOSYS;
194 }
195 #else
196 static
197 int lttng_abi_syscall_list(void)
198 {
199 struct file *syscall_list_file;
200 int file_fd, ret;
201
202 file_fd = lttng_get_unused_fd();
203 if (file_fd < 0) {
204 ret = file_fd;
205 goto fd_error;
206 }
207
208 syscall_list_file = anon_inode_getfile("[lttng_syscall_list]",
209 &lttng_syscall_list_fops,
210 NULL, O_RDWR);
211 if (IS_ERR(syscall_list_file)) {
212 ret = PTR_ERR(syscall_list_file);
213 goto file_error;
214 }
215 ret = lttng_syscall_list_fops.open(NULL, syscall_list_file);
216 if (ret < 0)
217 goto open_error;
218 fd_install(file_fd, syscall_list_file);
219 return file_fd;
220
221 open_error:
222 fput(syscall_list_file);
223 file_error:
224 put_unused_fd(file_fd);
225 fd_error:
226 return ret;
227 }
228 #endif
229
230 static
231 void lttng_abi_tracer_version(struct lttng_kernel_tracer_version *v)
232 {
233 v->major = LTTNG_MODULES_MAJOR_VERSION;
234 v->minor = LTTNG_MODULES_MINOR_VERSION;
235 v->patchlevel = LTTNG_MODULES_PATCHLEVEL_VERSION;
236 }
237
238 static
239 void lttng_abi_tracer_abi_version(struct lttng_kernel_tracer_abi_version *v)
240 {
241 v->major = LTTNG_MODULES_ABI_MAJOR_VERSION;
242 v->minor = LTTNG_MODULES_ABI_MINOR_VERSION;
243 }
244
245 static
246 long lttng_abi_add_context(struct file *file,
247 struct lttng_kernel_context *context_param,
248 struct lttng_ctx **ctx, struct lttng_session *session)
249 {
250
251 if (session->been_active)
252 return -EPERM;
253
254 switch (context_param->ctx) {
255 case LTTNG_KERNEL_CONTEXT_PID:
256 return lttng_add_pid_to_ctx(ctx);
257 case LTTNG_KERNEL_CONTEXT_PRIO:
258 return lttng_add_prio_to_ctx(ctx);
259 case LTTNG_KERNEL_CONTEXT_NICE:
260 return lttng_add_nice_to_ctx(ctx);
261 case LTTNG_KERNEL_CONTEXT_VPID:
262 return lttng_add_vpid_to_ctx(ctx);
263 case LTTNG_KERNEL_CONTEXT_TID:
264 return lttng_add_tid_to_ctx(ctx);
265 case LTTNG_KERNEL_CONTEXT_VTID:
266 return lttng_add_vtid_to_ctx(ctx);
267 case LTTNG_KERNEL_CONTEXT_PPID:
268 return lttng_add_ppid_to_ctx(ctx);
269 case LTTNG_KERNEL_CONTEXT_VPPID:
270 return lttng_add_vppid_to_ctx(ctx);
271 case LTTNG_KERNEL_CONTEXT_PERF_COUNTER:
272 context_param->u.perf_counter.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
273 return lttng_add_perf_counter_to_ctx(context_param->u.perf_counter.type,
274 context_param->u.perf_counter.config,
275 context_param->u.perf_counter.name,
276 ctx);
277 case LTTNG_KERNEL_CONTEXT_PROCNAME:
278 return lttng_add_procname_to_ctx(ctx);
279 case LTTNG_KERNEL_CONTEXT_HOSTNAME:
280 return lttng_add_hostname_to_ctx(ctx);
281 case LTTNG_KERNEL_CONTEXT_CPU_ID:
282 return lttng_add_cpu_id_to_ctx(ctx);
283 case LTTNG_KERNEL_CONTEXT_INTERRUPTIBLE:
284 return lttng_add_interruptible_to_ctx(ctx);
285 case LTTNG_KERNEL_CONTEXT_NEED_RESCHEDULE:
286 return lttng_add_need_reschedule_to_ctx(ctx);
287 case LTTNG_KERNEL_CONTEXT_PREEMPTIBLE:
288 return lttng_add_preemptible_to_ctx(ctx);
289 case LTTNG_KERNEL_CONTEXT_MIGRATABLE:
290 return lttng_add_migratable_to_ctx(ctx);
291 case LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL:
292 case LTTNG_KERNEL_CONTEXT_CALLSTACK_USER:
293 return lttng_add_callstack_to_ctx(ctx, context_param->ctx);
294 case LTTNG_KERNEL_CONTEXT_CGROUP_NS:
295 return lttng_add_cgroup_ns_to_ctx(ctx);
296 case LTTNG_KERNEL_CONTEXT_IPC_NS:
297 return lttng_add_ipc_ns_to_ctx(ctx);
298 case LTTNG_KERNEL_CONTEXT_MNT_NS:
299 return lttng_add_mnt_ns_to_ctx(ctx);
300 case LTTNG_KERNEL_CONTEXT_NET_NS:
301 return lttng_add_net_ns_to_ctx(ctx);
302 case LTTNG_KERNEL_CONTEXT_PID_NS:
303 return lttng_add_pid_ns_to_ctx(ctx);
304 case LTTNG_KERNEL_CONTEXT_USER_NS:
305 return lttng_add_user_ns_to_ctx(ctx);
306 case LTTNG_KERNEL_CONTEXT_UTS_NS:
307 return lttng_add_uts_ns_to_ctx(ctx);
308 case LTTNG_KERNEL_CONTEXT_UID:
309 return lttng_add_uid_to_ctx(ctx);
310 case LTTNG_KERNEL_CONTEXT_EUID:
311 return lttng_add_euid_to_ctx(ctx);
312 case LTTNG_KERNEL_CONTEXT_SUID:
313 return lttng_add_suid_to_ctx(ctx);
314 case LTTNG_KERNEL_CONTEXT_GID:
315 return lttng_add_gid_to_ctx(ctx);
316 case LTTNG_KERNEL_CONTEXT_EGID:
317 return lttng_add_egid_to_ctx(ctx);
318 case LTTNG_KERNEL_CONTEXT_SGID:
319 return lttng_add_sgid_to_ctx(ctx);
320 case LTTNG_KERNEL_CONTEXT_VUID:
321 return lttng_add_vuid_to_ctx(ctx);
322 case LTTNG_KERNEL_CONTEXT_VEUID:
323 return lttng_add_veuid_to_ctx(ctx);
324 case LTTNG_KERNEL_CONTEXT_VSUID:
325 return lttng_add_vsuid_to_ctx(ctx);
326 case LTTNG_KERNEL_CONTEXT_VGID:
327 return lttng_add_vgid_to_ctx(ctx);
328 case LTTNG_KERNEL_CONTEXT_VEGID:
329 return lttng_add_vegid_to_ctx(ctx);
330 case LTTNG_KERNEL_CONTEXT_VSGID:
331 return lttng_add_vsgid_to_ctx(ctx);
332 case LTTNG_KERNEL_CONTEXT_TIME_NS:
333 return lttng_add_time_ns_to_ctx(ctx);
334 default:
335 return -EINVAL;
336 }
337 }
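/*
 * Usage sketch: contexts are attached from userspace with the
 * LTTNG_KERNEL_CONTEXT ioctl on a channel file descriptor (see
 * lttng_channel_ioctl() below). A minimal, illustrative example, assuming the
 * ABI header is available to userspace; channel_fd is a descriptor returned
 * by LTTNG_KERNEL_CHANNEL:
 *
 *	struct lttng_kernel_context ctx = {
 *		.ctx = LTTNG_KERNEL_CONTEXT_PROCNAME,
 *	};
 *
 *	if (ioctl(channel_fd, LTTNG_KERNEL_CONTEXT, &ctx) < 0)
 *		return -1;	// e.g. -EPERM once the session has been active
 *
 * The perf counter context additionally requires filling
 * ctx.u.perf_counter.type, .config and .name before the call.
 */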
338
339 /**
340 * lttng_ioctl - lttng syscall through ioctl
341 *
342 * @file: the file
343 * @cmd: the command
344 * @arg: command arg
345 *
346 * This ioctl implements lttng commands:
347 * LTTNG_KERNEL_SESSION
348 * Returns a LTTng trace session file descriptor
349 * LTTNG_KERNEL_TRACER_VERSION
350 * Returns the LTTng kernel tracer version
351 * LTTNG_KERNEL_TRACEPOINT_LIST
352 * Returns a file descriptor listing available tracepoints
353 * LTTNG_KERNEL_WAIT_QUIESCENT
354 * Returns after all previously running probes have completed
355 * LTTNG_KERNEL_TRACER_ABI_VERSION
356 * Returns the LTTng kernel tracer ABI version
357 * LTTNG_KERNEL_EVENT_NOTIFIER_GROUP_CREATE
358 * Returns a LTTng event notifier group file descriptor
359 *
360 * The returned session will be deleted when its file descriptor is closed.
361 */
362 static
363 long lttng_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
364 {
365 switch (cmd) {
366 case LTTNG_KERNEL_OLD_SESSION:
367 case LTTNG_KERNEL_SESSION:
368 return lttng_abi_create_session();
369 case LTTNG_KERNEL_EVENT_NOTIFIER_GROUP_CREATE:
370 return lttng_abi_create_event_notifier_group();
371 case LTTNG_KERNEL_OLD_TRACER_VERSION:
372 {
373 struct lttng_kernel_tracer_version v;
374 struct lttng_kernel_old_tracer_version oldv;
375 struct lttng_kernel_old_tracer_version *uversion =
376 (struct lttng_kernel_old_tracer_version __user *) arg;
377
378 lttng_abi_tracer_version(&v);
379 oldv.major = v.major;
380 oldv.minor = v.minor;
381 oldv.patchlevel = v.patchlevel;
382
383 if (copy_to_user(uversion, &oldv, sizeof(oldv)))
384 return -EFAULT;
385 return 0;
386 }
387 case LTTNG_KERNEL_TRACER_VERSION:
388 {
389 struct lttng_kernel_tracer_version version;
390 struct lttng_kernel_tracer_version *uversion =
391 (struct lttng_kernel_tracer_version __user *) arg;
392
393 lttng_abi_tracer_version(&version);
394
395 if (copy_to_user(uversion, &version, sizeof(version)))
396 return -EFAULT;
397 return 0;
398 }
399 case LTTNG_KERNEL_TRACER_ABI_VERSION:
400 {
401 struct lttng_kernel_tracer_abi_version version;
402 struct lttng_kernel_tracer_abi_version *uversion =
403 (struct lttng_kernel_tracer_abi_version __user *) arg;
404
405 lttng_abi_tracer_abi_version(&version);
406
407 if (copy_to_user(uversion, &version, sizeof(version)))
408 return -EFAULT;
409 return 0;
410 }
411 case LTTNG_KERNEL_OLD_TRACEPOINT_LIST:
412 case LTTNG_KERNEL_TRACEPOINT_LIST:
413 return lttng_abi_tracepoint_list();
414 case LTTNG_KERNEL_SYSCALL_LIST:
415 return lttng_abi_syscall_list();
416 case LTTNG_KERNEL_OLD_WAIT_QUIESCENT:
417 case LTTNG_KERNEL_WAIT_QUIESCENT:
418 synchronize_trace();
419 return 0;
420 case LTTNG_KERNEL_OLD_CALIBRATE:
421 {
422 struct lttng_kernel_old_calibrate __user *ucalibrate =
423 (struct lttng_kernel_old_calibrate __user *) arg;
424 struct lttng_kernel_old_calibrate old_calibrate;
425 struct lttng_kernel_calibrate calibrate;
426 int ret;
427
428 if (copy_from_user(&old_calibrate, ucalibrate, sizeof(old_calibrate)))
429 return -EFAULT;
430 calibrate.type = old_calibrate.type;
431 ret = lttng_calibrate(&calibrate);
432 if (copy_to_user(ucalibrate, &old_calibrate, sizeof(old_calibrate)))
433 return -EFAULT;
434 return ret;
435 }
436 case LTTNG_KERNEL_CALIBRATE:
437 {
438 struct lttng_kernel_calibrate __user *ucalibrate =
439 (struct lttng_kernel_calibrate __user *) arg;
440 struct lttng_kernel_calibrate calibrate;
441 int ret;
442
443 if (copy_from_user(&calibrate, ucalibrate, sizeof(calibrate)))
444 return -EFAULT;
445 ret = lttng_calibrate(&calibrate);
446 if (copy_to_user(ucalibrate, &calibrate, sizeof(calibrate)))
447 return -EFAULT;
448 return ret;
449 }
450 default:
451 return -ENOIOCTLCMD;
452 }
453 }
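/*
 * Usage sketch: a minimal userspace caller for the commands handled above,
 * driving ioctl() on /proc/lttng. This is illustrative only; it assumes the
 * LTTNG_KERNEL_* request macros and ABI structures are visible to userspace
 * through an installed copy of lttng/abi.h (the exact header path on a given
 * system may differ).
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <lttng/abi.h>
 *
 *	int lttng_proc_demo(void)
 *	{
 *		struct lttng_kernel_tracer_version v;
 *		int lttng_fd, session_fd;
 *
 *		lttng_fd = open("/proc/lttng", O_RDONLY);
 *		if (lttng_fd < 0)
 *			return -1;
 *		if (ioctl(lttng_fd, LTTNG_KERNEL_TRACER_VERSION, &v) == 0)
 *			printf("tracer %u.%u.%u\n", (unsigned int) v.major,
 *				(unsigned int) v.minor,
 *				(unsigned int) v.patchlevel);
 *		session_fd = ioctl(lttng_fd, LTTNG_KERNEL_SESSION);
 *		return session_fd;	// fd of the new session, or negative error
 *	}
 */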
454
455 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
456 static const struct proc_ops lttng_proc_ops = {
457 .proc_ioctl = lttng_ioctl,
458 #ifdef CONFIG_COMPAT
459 .proc_compat_ioctl = lttng_ioctl,
460 #endif /* CONFIG_COMPAT */
461 };
462 #else
463 static const struct file_operations lttng_proc_ops = {
464 .owner = THIS_MODULE,
465 .unlocked_ioctl = lttng_ioctl,
466 #ifdef CONFIG_COMPAT
467 .compat_ioctl = lttng_ioctl,
468 #endif /* CONFIG_COMPAT */
469 };
470 #endif
471
472 static
473 int lttng_abi_create_channel(struct file *session_file,
474 struct lttng_kernel_channel *chan_param,
475 enum channel_type channel_type)
476 {
477 struct lttng_session *session = session_file->private_data;
478 const struct file_operations *fops = NULL;
479 const char *transport_name;
480 struct lttng_channel *chan;
481 struct file *chan_file;
482 int chan_fd;
483 int ret = 0;
484
485 chan_fd = lttng_get_unused_fd();
486 if (chan_fd < 0) {
487 ret = chan_fd;
488 goto fd_error;
489 }
490 switch (channel_type) {
491 case PER_CPU_CHANNEL:
492 fops = &lttng_channel_fops;
493 break;
494 case METADATA_CHANNEL:
495 fops = &lttng_metadata_fops;
496 break;
497 }
498
499 chan_file = anon_inode_getfile("[lttng_channel]",
500 fops,
501 NULL, O_RDWR);
502 if (IS_ERR(chan_file)) {
503 ret = PTR_ERR(chan_file);
504 goto file_error;
505 }
506 switch (channel_type) {
507 case PER_CPU_CHANNEL:
508 if (chan_param->output == LTTNG_KERNEL_SPLICE) {
509 transport_name = chan_param->overwrite ?
510 "relay-overwrite" : "relay-discard";
511 } else if (chan_param->output == LTTNG_KERNEL_MMAP) {
512 transport_name = chan_param->overwrite ?
513 "relay-overwrite-mmap" : "relay-discard-mmap";
514 } else {
515 return -EINVAL;
516 }
517 break;
518 case METADATA_CHANNEL:
519 if (chan_param->output == LTTNG_KERNEL_SPLICE)
520 transport_name = "relay-metadata";
521 else if (chan_param->output == LTTNG_KERNEL_MMAP)
522 transport_name = "relay-metadata-mmap";
523 else
524 return -EINVAL;
525 break;
526 default:
527 transport_name = "<unknown>";
528 break;
529 }
530 if (!atomic_long_add_unless(&session_file->f_count, 1, LONG_MAX)) {
531 ret = -EOVERFLOW;
532 goto refcount_error;
533 }
534 /*
535 * We tolerate no failure path after channel creation. It will stay
536 * invariant for the rest of the session.
537 */
538 chan = lttng_channel_create(session, transport_name, NULL,
539 chan_param->subbuf_size,
540 chan_param->num_subbuf,
541 chan_param->switch_timer_interval,
542 chan_param->read_timer_interval,
543 channel_type);
544 if (!chan) {
545 ret = -EINVAL;
546 goto chan_error;
547 }
548 chan->file = chan_file;
549 chan_file->private_data = chan;
550 fd_install(chan_fd, chan_file);
551
552 return chan_fd;
553
554 chan_error:
555 atomic_long_dec(&session_file->f_count);
556 refcount_error:
557 fput(chan_file);
558 file_error:
559 put_unused_fd(chan_fd);
560 fd_error:
561 return ret;
562 }
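/*
 * Note on the error paths above: this file consistently uses the reverse-order
 * goto-unwind idiom, where each acquired resource (fd reservation, anonymous
 * file, reference on the parent file, ...) gets a label and a failure jumps to
 * the label that releases everything acquired so far, while fd_install() is
 * only called once setup can no longer fail. A condensed, generic illustration
 * of the shape (the names are placeholders, not part of this ABI):
 *
 *	fd = lttng_get_unused_fd();
 *	if (fd < 0) { ret = fd; goto fd_error; }
 *	filp = anon_inode_getfile("[thing]", &thing_fops, NULL, O_RDWR);
 *	if (IS_ERR(filp)) { ret = PTR_ERR(filp); goto file_error; }
 *	obj = create_object();
 *	if (!obj) { ret = -EINVAL; goto obj_error; }
 *	fd_install(fd, filp);	// publish only after full setup
 *	return fd;
 *
 *	obj_error:
 *		fput(filp);
 *	file_error:
 *		put_unused_fd(fd);
 *	fd_error:
 *		return ret;
 */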
563
564 static
565 int lttng_abi_session_set_name(struct lttng_session *session,
566 struct lttng_kernel_session_name *name)
567 {
568 size_t len;
569
570 len = strnlen(name->name, LTTNG_KERNEL_SESSION_NAME_LEN);
571
572 if (len == LTTNG_KERNEL_SESSION_NAME_LEN) {
573 /* Name is too long/malformed */
574 return -EINVAL;
575 }
576
577 strcpy(session->name, name->name);
578 return 0;
579 }
580
581 static
582 int lttng_abi_session_set_creation_time(struct lttng_session *session,
583 struct lttng_kernel_session_creation_time *time)
584 {
585 size_t len;
586
587 len = strnlen(time->iso8601, LTTNG_KERNEL_SESSION_CREATION_TIME_ISO8601_LEN);
588
589 if (len == LTTNG_KERNEL_SESSION_CREATION_TIME_ISO8601_LEN) {
590 /* Time is too long/malformed */
591 return -EINVAL;
592 }
593
594 strcpy(session->creation_time, time->iso8601);
595 return 0;
596 }
597
598 static
599 enum tracker_type get_tracker_type(struct lttng_kernel_tracker_args *tracker)
600 {
601 switch (tracker->type) {
602 case LTTNG_KERNEL_TRACKER_PID:
603 return TRACKER_PID;
604 case LTTNG_KERNEL_TRACKER_VPID:
605 return TRACKER_VPID;
606 case LTTNG_KERNEL_TRACKER_UID:
607 return TRACKER_UID;
608 case LTTNG_KERNEL_TRACKER_VUID:
609 return TRACKER_VUID;
610 case LTTNG_KERNEL_TRACKER_GID:
611 return TRACKER_GID;
612 case LTTNG_KERNEL_TRACKER_VGID:
613 return TRACKER_VGID;
614 default:
615 return TRACKER_UNKNOWN;
616 }
617 }
618
619 /**
620 * lttng_session_ioctl - lttng session fd ioctl
621 *
622 * @file: the file
623 * @cmd: the command
624 * @arg: command arg
625 *
626 * This ioctl implements lttng commands:
627 * LTTNG_KERNEL_CHANNEL
628 * Returns a LTTng channel file descriptor
629 * LTTNG_KERNEL_ENABLE
630 * Enables tracing for a session (weak enable)
631 * LTTNG_KERNEL_DISABLE
632 * Disables tracing for a session (strong disable)
633 * LTTNG_KERNEL_METADATA
634 * Returns a LTTng metadata file descriptor
635 * LTTNG_KERNEL_SESSION_TRACK_PID
636 * Add PID to session PID tracker
637 * LTTNG_KERNEL_SESSION_UNTRACK_PID
638 * Remove PID from session PID tracker
639 * LTTNG_KERNEL_SESSION_TRACK_ID
640 * Add ID to tracker
641 * LTTNG_KERNEL_SESSION_UNTRACK_ID
642 * Remove ID from tracker
643 *
644 * The returned channel will be deleted when its file descriptor is closed.
645 */
646 static
647 long lttng_session_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
648 {
649 struct lttng_session *session = file->private_data;
650 struct lttng_kernel_channel chan_param;
651 struct lttng_kernel_old_channel old_chan_param;
652
653 switch (cmd) {
654 case LTTNG_KERNEL_OLD_CHANNEL:
655 {
656 if (copy_from_user(&old_chan_param,
657 (struct lttng_kernel_old_channel __user *) arg,
658 sizeof(struct lttng_kernel_old_channel)))
659 return -EFAULT;
660 chan_param.overwrite = old_chan_param.overwrite;
661 chan_param.subbuf_size = old_chan_param.subbuf_size;
662 chan_param.num_subbuf = old_chan_param.num_subbuf;
663 chan_param.switch_timer_interval = old_chan_param.switch_timer_interval;
664 chan_param.read_timer_interval = old_chan_param.read_timer_interval;
665 chan_param.output = old_chan_param.output;
666
667 return lttng_abi_create_channel(file, &chan_param,
668 PER_CPU_CHANNEL);
669 }
670 case LTTNG_KERNEL_CHANNEL:
671 {
672 if (copy_from_user(&chan_param,
673 (struct lttng_kernel_channel __user *) arg,
674 sizeof(struct lttng_kernel_channel)))
675 return -EFAULT;
676 return lttng_abi_create_channel(file, &chan_param,
677 PER_CPU_CHANNEL);
678 }
679 case LTTNG_KERNEL_OLD_SESSION_START:
680 case LTTNG_KERNEL_OLD_ENABLE:
681 case LTTNG_KERNEL_SESSION_START:
682 case LTTNG_KERNEL_ENABLE:
683 return lttng_session_enable(session);
684 case LTTNG_KERNEL_OLD_SESSION_STOP:
685 case LTTNG_KERNEL_OLD_DISABLE:
686 case LTTNG_KERNEL_SESSION_STOP:
687 case LTTNG_KERNEL_DISABLE:
688 return lttng_session_disable(session);
689 case LTTNG_KERNEL_OLD_METADATA:
690 {
691 if (copy_from_user(&old_chan_param,
692 (struct lttng_kernel_old_channel __user *) arg,
693 sizeof(struct lttng_kernel_old_channel)))
694 return -EFAULT;
695 chan_param.overwrite = old_chan_param.overwrite;
696 chan_param.subbuf_size = old_chan_param.subbuf_size;
697 chan_param.num_subbuf = old_chan_param.num_subbuf;
698 chan_param.switch_timer_interval = old_chan_param.switch_timer_interval;
699 chan_param.read_timer_interval = old_chan_param.read_timer_interval;
700 chan_param.output = old_chan_param.output;
701
702 return lttng_abi_create_channel(file, &chan_param,
703 METADATA_CHANNEL);
704 }
705 case LTTNG_KERNEL_METADATA:
706 {
707 if (copy_from_user(&chan_param,
708 (struct lttng_kernel_channel __user *) arg,
709 sizeof(struct lttng_kernel_channel)))
710 return -EFAULT;
711 return lttng_abi_create_channel(file, &chan_param,
712 METADATA_CHANNEL);
713 }
714 case LTTNG_KERNEL_SESSION_TRACK_PID:
715 return lttng_session_track_id(session, TRACKER_PID, (int) arg);
716 case LTTNG_KERNEL_SESSION_UNTRACK_PID:
717 return lttng_session_untrack_id(session, TRACKER_PID, (int) arg);
718 case LTTNG_KERNEL_SESSION_TRACK_ID:
719 {
720 struct lttng_kernel_tracker_args tracker;
721 enum tracker_type tracker_type;
722
723 if (copy_from_user(&tracker,
724 (struct lttng_kernel_tracker_args __user *) arg,
725 sizeof(struct lttng_kernel_tracker_args)))
726 return -EFAULT;
727 tracker_type = get_tracker_type(&tracker);
728 if (tracker_type == TRACKER_UNKNOWN)
729 return -EINVAL;
730 return lttng_session_track_id(session, tracker_type, tracker.id);
731 }
732 case LTTNG_KERNEL_SESSION_UNTRACK_ID:
733 {
734 struct lttng_kernel_tracker_args tracker;
735 enum tracker_type tracker_type;
736
737 if (copy_from_user(&tracker,
738 (struct lttng_kernel_tracker_args __user *) arg,
739 sizeof(struct lttng_kernel_tracker_args)))
740 return -EFAULT;
741 tracker_type = get_tracker_type(&tracker);
742 if (tracker_type == TRACKER_UNKNOWN)
743 return -EINVAL;
744 return lttng_session_untrack_id(session, tracker_type,
745 tracker.id);
746 }
747 case LTTNG_KERNEL_SESSION_LIST_TRACKER_PIDS:
748 return lttng_session_list_tracker_ids(session, TRACKER_PID);
749 case LTTNG_KERNEL_SESSION_LIST_TRACKER_IDS:
750 {
751 struct lttng_kernel_tracker_args tracker;
752 enum tracker_type tracker_type;
753
754 if (copy_from_user(&tracker,
755 (struct lttng_kernel_tracker_args __user *) arg,
756 sizeof(struct lttng_kernel_tracker_args)))
757 return -EFAULT;
758 tracker_type = get_tracker_type(&tracker);
759 if (tracker_type == TRACKER_UNKNOWN)
760 return -EINVAL;
761 return lttng_session_list_tracker_ids(session, tracker_type);
762 }
763 case LTTNG_KERNEL_SESSION_METADATA_REGEN:
764 return lttng_session_metadata_regenerate(session);
765 case LTTNG_KERNEL_SESSION_STATEDUMP:
766 return lttng_session_statedump(session);
767 case LTTNG_KERNEL_SESSION_SET_NAME:
768 {
769 struct lttng_kernel_session_name name;
770
771 if (copy_from_user(&name,
772 (struct lttng_kernel_session_name __user *) arg,
773 sizeof(struct lttng_kernel_session_name)))
774 return -EFAULT;
775 return lttng_abi_session_set_name(session, &name);
776 }
777 case LTTNG_KERNEL_SESSION_SET_CREATION_TIME:
778 {
779 struct lttng_kernel_session_creation_time time;
780
781 if (copy_from_user(&time,
782 (struct lttng_kernel_session_creation_time __user *) arg,
783 sizeof(struct lttng_kernel_session_creation_time)))
784 return -EFAULT;
785 return lttng_abi_session_set_creation_time(session, &time);
786 }
787 default:
788 return -ENOIOCTLCMD;
789 }
790 }
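/*
 * Usage sketch: creating a per-CPU channel and starting tracing from
 * userspace, given a session_fd returned by LTTNG_KERNEL_SESSION. The numeric
 * values are placeholders (subbuf_size and num_subbuf must be powers of two);
 * the structures and request macros come from the ABI header.
 *
 *	struct lttng_kernel_channel chan = {
 *		.overwrite = 0,
 *		.subbuf_size = 262144,
 *		.num_subbuf = 4,
 *		.switch_timer_interval = 0,
 *		.read_timer_interval = 200000,
 *		.output = LTTNG_KERNEL_MMAP,	// or LTTNG_KERNEL_SPLICE
 *	};
 *	int channel_fd, metadata_fd;
 *
 *	channel_fd = ioctl(session_fd, LTTNG_KERNEL_CHANNEL, &chan);
 *	metadata_fd = ioctl(session_fd, LTTNG_KERNEL_METADATA, &chan);
 *	// ... create events and open streams on channel_fd ...
 *	ioctl(session_fd, LTTNG_KERNEL_SESSION_START);
 */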
791
792 /*
793 * Called when the last file reference is dropped.
794 *
795 * Big fat note: channels and events are invariant for the whole session after
796 * their creation. So this session destruction also destroys all channel and
797 * event structures specific to this session (they are not destroyed when their
798 * individual file is released).
799 */
800 static
801 int lttng_session_release(struct inode *inode, struct file *file)
802 {
803 struct lttng_session *session = file->private_data;
804
805 if (session)
806 lttng_session_destroy(session);
807 return 0;
808 }
809
810 static const struct file_operations lttng_session_fops = {
811 .owner = THIS_MODULE,
812 .release = lttng_session_release,
813 .unlocked_ioctl = lttng_session_ioctl,
814 #ifdef CONFIG_COMPAT
815 .compat_ioctl = lttng_session_ioctl,
816 #endif
817 };
818
819 /*
820 * When encountering an empty buffer, flush the current sub-buffer if it is
821 * non-empty and retry (if new data is available to read after the flush).
822 */
823 static
824 ssize_t lttng_event_notifier_group_notif_read(struct file *filp, char __user *user_buf,
825 size_t count, loff_t *ppos)
826 {
827 struct lttng_event_notifier_group *event_notifier_group = filp->private_data;
828 struct channel *chan = event_notifier_group->chan;
829 struct lib_ring_buffer *buf = event_notifier_group->buf;
830 ssize_t read_count = 0, len;
831 size_t read_offset;
832
833 might_sleep();
834 if (!lttng_access_ok(VERIFY_WRITE, user_buf, count))
835 return -EFAULT;
836
837 /* Finish copy of previous record */
838 if (*ppos != 0) {
839 if (read_count < count) {
840 len = chan->iter.len_left;
841 read_offset = *ppos;
842 goto skip_get_next;
843 }
844 }
845
846 while (read_count < count) {
847 size_t copy_len, space_left;
848
849 len = lib_ring_buffer_get_next_record(chan, buf);
850 len_test:
851 if (len < 0) {
852 /*
853 * Check if buffer is finalized (end of file).
854 */
855 if (len == -ENODATA) {
856 /* A read_count of 0 signals end of file. */
857 goto nodata;
858 }
859 if (filp->f_flags & O_NONBLOCK) {
860 if (!read_count)
861 read_count = -EAGAIN;
862 goto nodata;
863 } else {
864 int error;
865
866 /*
867 * No data available at the moment, return what
868 * we got.
869 */
870 if (read_count)
871 goto nodata;
872
873 /*
874 * Wait for returned len to be >= 0 or -ENODATA.
875 */
876 error = wait_event_interruptible(
877 event_notifier_group->read_wait,
878 ((len = lib_ring_buffer_get_next_record(
879 chan, buf)), len != -EAGAIN));
880 CHAN_WARN_ON(chan, len == -EBUSY);
881 if (error) {
882 read_count = error;
883 goto nodata;
884 }
885 CHAN_WARN_ON(chan, len < 0 && len != -ENODATA);
886 goto len_test;
887 }
888 }
889 read_offset = buf->iter.read_offset;
890 skip_get_next:
891 space_left = count - read_count;
892 if (len <= space_left) {
893 copy_len = len;
894 chan->iter.len_left = 0;
895 *ppos = 0;
896 } else {
897 copy_len = space_left;
898 chan->iter.len_left = len - copy_len;
899 *ppos = read_offset + copy_len;
900 }
901 if (__lib_ring_buffer_copy_to_user(&buf->backend, read_offset,
902 &user_buf[read_count],
903 copy_len)) {
904 /*
905 * Leave the len_left and ppos values at their current
906 * state, as we currently have a valid event to read.
907 */
908 return -EFAULT;
909 }
910 read_count += copy_len;
911 }
912 return read_count;
913
914 nodata:
915 *ppos = 0;
916 chan->iter.len_left = 0;
917 return read_count;
918 }
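/*
 * Usage sketch: draining the notification stream from userspace, typically
 * after poll() reports data. notif_fd is the descriptor returned by
 * LTTNG_KERNEL_EVENT_NOTIFIER_GROUP_NOTIFICATION_FD; the layout of the records
 * returned by read() is defined by the notification serialization code, not by
 * this file.
 *
 *	char buf[4096];
 *	ssize_t len;
 *
 *	for (;;) {
 *		len = read(notif_fd, buf, sizeof(buf));
 *		if (len > 0) {
 *			// len bytes of notification records to decode
 *			continue;
 *		}
 *		if (len == 0)
 *			break;			// stream finalized (end of file)
 *		if (errno == EAGAIN)
 *			continue;		// O_NONBLOCK, nothing available yet
 *		break;				// other error
 *	}
 */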
919
920 /*
921 * If the ring buffer is non-empty (even just a partial subbuffer), return that
922 * there is data available. Perform a ring buffer flush if we encounter a
923 * non-empty ring buffer which does not have any consumable subbuffer available.
924 */
925 static
926 unsigned int lttng_event_notifier_group_notif_poll(struct file *filp,
927 poll_table *wait)
928 {
929 unsigned int mask = 0;
930 struct lttng_event_notifier_group *event_notifier_group = filp->private_data;
931 struct channel *chan = event_notifier_group->chan;
932 struct lib_ring_buffer *buf = event_notifier_group->buf;
933 const struct lib_ring_buffer_config *config = &chan->backend.config;
934 int finalized, disabled;
935 unsigned long consumed, offset;
936 size_t subbuffer_header_size = config->cb.subbuffer_header_size();
937
938 if (filp->f_mode & FMODE_READ) {
939 poll_wait_set_exclusive(wait);
940 poll_wait(filp, &event_notifier_group->read_wait, wait);
941
942 finalized = lib_ring_buffer_is_finalized(config, buf);
943 disabled = lib_ring_buffer_channel_is_disabled(chan);
944
945 /*
946 * lib_ring_buffer_is_finalized() contains a smp_rmb() ordering
947 * finalized load before offsets loads.
948 */
949 WARN_ON(atomic_long_read(&buf->active_readers) != 1);
950 retry:
951 if (disabled)
952 return POLLERR;
953
954 offset = lib_ring_buffer_get_offset(config, buf);
955 consumed = lib_ring_buffer_get_consumed(config, buf);
956
957 /*
958 * If there is no buffer available to consume.
959 */
960 if (subbuf_trunc(offset, chan) - subbuf_trunc(consumed, chan) == 0) {
961 /*
962 * If the subbuffer contains records (more than just its header), flush and try again.
963 */
964 if (subbuf_offset(offset, chan) > subbuffer_header_size) {
965 lib_ring_buffer_switch_remote(buf);
966 goto retry;
967 }
968
969 if (finalized)
970 return POLLHUP;
971 else {
972 /*
973 * The memory barriers
974 * __wait_event()/wake_up_interruptible() take
975 * care of "raw_spin_is_locked" memory ordering.
976 */
977 if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
978 goto retry;
979 else
980 return 0;
981 }
982 } else {
983 if (subbuf_trunc(offset, chan) - subbuf_trunc(consumed, chan)
984 >= chan->backend.buf_size)
985 return POLLPRI | POLLRDBAND;
986 else
987 return POLLIN | POLLRDNORM;
988 }
989 }
990
991 return mask;
992 }
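/*
 * Usage sketch: the intended userspace pattern is to poll() the notification
 * file descriptor and read it when data is signalled. POLLIN/POLLRDNORM means
 * records are available, POLLPRI/POLLRDBAND that the buffer is full, POLLHUP
 * that the stream is finalized, and POLLERR that the channel is disabled.
 * drain_notifications() is a placeholder for a read() loop such as the one
 * sketched after lttng_event_notifier_group_notif_read() above.
 *
 *	struct pollfd pfd = { .fd = notif_fd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) >= 0) {
 *		if (pfd.revents & (POLLERR | POLLHUP))
 *			break;
 *		if (pfd.revents & (POLLIN | POLLPRI))
 *			drain_notifications(notif_fd);
 *	}
 */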
993
994 /**
995 * lttng_event_notifier_group_notif_open - event_notifier ring buffer open file operation
996 * @inode: opened inode
997 * @file: opened file
998 *
999 * Open implementation. Makes sure only one open instance of a buffer is
1000 * done at a given moment.
1001 */
1002 static int lttng_event_notifier_group_notif_open(struct inode *inode, struct file *file)
1003 {
1004 struct lttng_event_notifier_group *event_notifier_group = inode->i_private;
1005 struct lib_ring_buffer *buf = event_notifier_group->buf;
1006
1007 file->private_data = event_notifier_group;
1008 return lib_ring_buffer_open(inode, file, buf);
1009 }
1010
1011 /**
1012 * lttng_event_notifier_group_notif_release - event_notifier ring buffer release file operation
1013 * @inode: opened inode
1014 * @file: opened file
1015 *
1016 * Release implementation.
1017 */
1018 static int lttng_event_notifier_group_notif_release(struct inode *inode, struct file *file)
1019 {
1020 struct lttng_event_notifier_group *event_notifier_group = file->private_data;
1021 struct lib_ring_buffer *buf = event_notifier_group->buf;
1022 int ret;
1023
1024 ret = lib_ring_buffer_release(inode, file, buf);
1025 if (ret)
1026 return ret;
1027 fput(event_notifier_group->file);
1028 return 0;
1029 }
1030
1031 static const struct file_operations lttng_event_notifier_group_notif_fops = {
1032 .owner = THIS_MODULE,
1033 .open = lttng_event_notifier_group_notif_open,
1034 .release = lttng_event_notifier_group_notif_release,
1035 .read = lttng_event_notifier_group_notif_read,
1036 .poll = lttng_event_notifier_group_notif_poll,
1037 };
1038
1039 /**
1040 * lttng_metadata_ring_buffer_poll - LTTng ring buffer poll file operation
1041 * @filp: the file
1042 * @wait: poll table
1043 *
1044 * Handles the poll operations for the metadata channels.
1045 */
1046 static
1047 unsigned int lttng_metadata_ring_buffer_poll(struct file *filp,
1048 poll_table *wait)
1049 {
1050 struct lttng_metadata_stream *stream = filp->private_data;
1051 struct lib_ring_buffer *buf = stream->priv;
1052 int finalized;
1053 unsigned int mask = 0;
1054
1055 if (filp->f_mode & FMODE_READ) {
1056 poll_wait_set_exclusive(wait);
1057 poll_wait(filp, &stream->read_wait, wait);
1058
1059 finalized = stream->finalized;
1060
1061 /*
1062 * lib_ring_buffer_is_finalized() contains a smp_rmb()
1063 * ordering finalized load before offsets loads.
1064 */
1065 WARN_ON(atomic_long_read(&buf->active_readers) != 1);
1066
1067 if (finalized)
1068 mask |= POLLHUP;
1069
1070 mutex_lock(&stream->metadata_cache->lock);
1071 if (stream->metadata_cache->metadata_written >
1072 stream->metadata_out)
1073 mask |= POLLIN;
1074 mutex_unlock(&stream->metadata_cache->lock);
1075 }
1076
1077 return mask;
1078 }
1079
1080 static
1081 void lttng_metadata_ring_buffer_ioctl_put_next_subbuf(struct file *filp,
1082 unsigned int cmd, unsigned long arg)
1083 {
1084 struct lttng_metadata_stream *stream = filp->private_data;
1085
1086 stream->metadata_out = stream->metadata_in;
1087 }
1088
1089 /*
1090 * Reset the counter of how much metadata has been consumed to 0. That way,
1091 * the consumer receives the content of the metadata cache unchanged. This is
1092 * different from the metadata_regenerate where the offset from epoch is
1093 * resampled, here we want the exact same content as the last time the metadata
1094 * was generated. This command is only possible if all the metadata written
1095 * in the cache has been output to the metadata stream to avoid corrupting the
1096 * metadata file.
1097 *
1098 * Return 0 on success, a negative value on error.
1099 */
1100 static
1101 int lttng_metadata_cache_dump(struct lttng_metadata_stream *stream)
1102 {
1103 int ret;
1104 struct lttng_metadata_cache *cache = stream->metadata_cache;
1105
1106 mutex_lock(&cache->lock);
1107 if (stream->metadata_out != cache->metadata_written) {
1108 ret = -EBUSY;
1109 goto end;
1110 }
1111 stream->metadata_out = 0;
1112 stream->metadata_in = 0;
1113 wake_up_interruptible(&stream->read_wait);
1114 ret = 0;
1115
1116 end:
1117 mutex_unlock(&cache->lock);
1118 return ret;
1119 }
1120
1121 static
1122 long lttng_metadata_ring_buffer_ioctl(struct file *filp,
1123 unsigned int cmd, unsigned long arg)
1124 {
1125 int ret;
1126 struct lttng_metadata_stream *stream = filp->private_data;
1127 struct lib_ring_buffer *buf = stream->priv;
1128 unsigned int rb_cmd;
1129 bool coherent;
1130
1131 if (cmd == RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK)
1132 rb_cmd = RING_BUFFER_GET_NEXT_SUBBUF;
1133 else
1134 rb_cmd = cmd;
1135
1136 switch (cmd) {
1137 case RING_BUFFER_GET_NEXT_SUBBUF:
1138 {
1139 struct lttng_metadata_stream *stream = filp->private_data;
1140 struct lib_ring_buffer *buf = stream->priv;
1141 struct channel *chan = buf->backend.chan;
1142
1143 ret = lttng_metadata_output_channel(stream, chan, NULL);
1144 if (ret > 0) {
1145 lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
1146 ret = 0;
1147 } else if (ret < 0)
1148 goto err;
1149 break;
1150 }
1151 case RING_BUFFER_GET_SUBBUF:
1152 {
1153 /*
1154 * Random access is not allowed for metadata channel.
1155 */
1156 return -ENOSYS;
1157 }
1158 case RING_BUFFER_FLUSH_EMPTY: /* Fall-through. */
1159 case RING_BUFFER_FLUSH:
1160 {
1161 struct lttng_metadata_stream *stream = filp->private_data;
1162 struct lib_ring_buffer *buf = stream->priv;
1163 struct channel *chan = buf->backend.chan;
1164
1165 /*
1166 * Before doing the actual ring buffer flush, write up to one
1167 * packet of metadata in the ring buffer.
1168 */
1169 ret = lttng_metadata_output_channel(stream, chan, NULL);
1170 if (ret < 0)
1171 goto err;
1172 break;
1173 }
1174 case RING_BUFFER_GET_METADATA_VERSION:
1175 {
1176 struct lttng_metadata_stream *stream = filp->private_data;
1177
1178 return put_u64(stream->version, arg);
1179 }
1180 case RING_BUFFER_METADATA_CACHE_DUMP:
1181 {
1182 struct lttng_metadata_stream *stream = filp->private_data;
1183
1184 return lttng_metadata_cache_dump(stream);
1185 }
1186 case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
1187 {
1188 struct lttng_metadata_stream *stream = filp->private_data;
1189 struct lib_ring_buffer *buf = stream->priv;
1190 struct channel *chan = buf->backend.chan;
1191
1192 ret = lttng_metadata_output_channel(stream, chan, &coherent);
1193 if (ret > 0) {
1194 lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
1195 ret = 0;
1196 } else if (ret < 0) {
1197 goto err;
1198 }
1199 break;
1200 }
1201 default:
1202 break;
1203 }
1204 /* PUT_SUBBUF is the one from lib ring buffer, unmodified. */
1205
1206 /* Performing lib ring buffer ioctl after our own. */
1207 ret = lib_ring_buffer_ioctl(filp, rb_cmd, arg, buf);
1208 if (ret < 0)
1209 goto err;
1210
1211 switch (cmd) {
1212 case RING_BUFFER_PUT_NEXT_SUBBUF:
1213 {
1214 lttng_metadata_ring_buffer_ioctl_put_next_subbuf(filp,
1215 cmd, arg);
1216 break;
1217 }
1218 case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
1219 {
1220 return put_u32(coherent, arg);
1221 }
1222 default:
1223 break;
1224 }
1225 err:
1226 return ret;
1227 }
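/*
 * Usage sketch: the consumer side drives the metadata stream fd with the ring
 * buffer ioctls handled above. consume_subbuffer() stands in for the actual
 * data transfer (splice() or mmap() read), which is handled by the generic
 * ring buffer file operations rather than by this function.
 *
 *	uint64_t version;
 *	uint32_t coherent;
 *
 *	ioctl(metadata_stream_fd, RING_BUFFER_GET_METADATA_VERSION, &version);
 *	while (ioctl(metadata_stream_fd,
 *			RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK,
 *			&coherent) == 0) {
 *		consume_subbuffer(metadata_stream_fd);	// splice()/mmap, not shown
 *		ioctl(metadata_stream_fd, RING_BUFFER_PUT_NEXT_SUBBUF);
 *		// 'coherent' reports whether the metadata emitted so far is
 *		// self-consistent.
 *	}
 */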
1228
1229 #ifdef CONFIG_COMPAT
1230 static
1231 long lttng_metadata_ring_buffer_compat_ioctl(struct file *filp,
1232 unsigned int cmd, unsigned long arg)
1233 {
1234 int ret;
1235 struct lttng_metadata_stream *stream = filp->private_data;
1236 struct lib_ring_buffer *buf = stream->priv;
1237 unsigned int rb_cmd;
1238 bool coherent;
1239
1240 if (cmd == RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK)
1241 rb_cmd = RING_BUFFER_GET_NEXT_SUBBUF;
1242 else
1243 rb_cmd = cmd;
1244
1245 switch (cmd) {
1246 case RING_BUFFER_GET_NEXT_SUBBUF:
1247 {
1248 struct lttng_metadata_stream *stream = filp->private_data;
1249 struct lib_ring_buffer *buf = stream->priv;
1250 struct channel *chan = buf->backend.chan;
1251
1252 ret = lttng_metadata_output_channel(stream, chan, NULL);
1253 if (ret > 0) {
1254 lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
1255 ret = 0;
1256 } else if (ret < 0)
1257 goto err;
1258 break;
1259 }
1260 case RING_BUFFER_GET_SUBBUF:
1261 {
1262 /*
1263 * Random access is not allowed for metadata channel.
1264 */
1265 return -ENOSYS;
1266 }
1267 case RING_BUFFER_FLUSH_EMPTY: /* Fall-through. */
1268 case RING_BUFFER_FLUSH:
1269 {
1270 struct lttng_metadata_stream *stream = filp->private_data;
1271 struct lib_ring_buffer *buf = stream->priv;
1272 struct channel *chan = buf->backend.chan;
1273
1274 /*
1275 * Before doing the actual ring buffer flush, write up to one
1276 * packet of metadata in the ring buffer.
1277 */
1278 ret = lttng_metadata_output_channel(stream, chan, NULL);
1279 if (ret < 0)
1280 goto err;
1281 break;
1282 }
1283 case RING_BUFFER_GET_METADATA_VERSION:
1284 {
1285 struct lttng_metadata_stream *stream = filp->private_data;
1286
1287 return put_u64(stream->version, arg);
1288 }
1289 case RING_BUFFER_METADATA_CACHE_DUMP:
1290 {
1291 struct lttng_metadata_stream *stream = filp->private_data;
1292
1293 return lttng_metadata_cache_dump(stream);
1294 }
1295 case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
1296 {
1297 struct lttng_metadata_stream *stream = filp->private_data;
1298 struct lib_ring_buffer *buf = stream->priv;
1299 struct channel *chan = buf->backend.chan;
1300
1301 ret = lttng_metadata_output_channel(stream, chan, &coherent);
1302 if (ret > 0) {
1303 lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
1304 ret = 0;
1305 } else if (ret < 0) {
1306 goto err;
1307 }
1308 break;
1309 }
1310 default:
1311 break;
1312 }
1313 /* PUT_SUBBUF is the one from lib ring buffer, unmodified. */
1314
1315 /* Performing lib ring buffer ioctl after our own. */
1316 ret = lib_ring_buffer_compat_ioctl(filp, rb_cmd, arg, buf);
1317 if (ret < 0)
1318 goto err;
1319
1320 switch (cmd) {
1321 case RING_BUFFER_PUT_NEXT_SUBBUF:
1322 {
1323 lttng_metadata_ring_buffer_ioctl_put_next_subbuf(filp,
1324 cmd, arg);
1325 break;
1326 }
1327 case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
1328 {
1329 return put_u32(coherent, arg);
1330 }
1331 default:
1332 break;
1333 }
1334 err:
1335 return ret;
1336 }
1337 #endif
1338
1339 /*
1340 * This is not used by anonymous file descriptors. This code is kept in case
1341 * we ever want to implement an inode with an open() operation.
1342 */
1343 static
1344 int lttng_metadata_ring_buffer_open(struct inode *inode, struct file *file)
1345 {
1346 struct lttng_metadata_stream *stream = inode->i_private;
1347 struct lib_ring_buffer *buf = stream->priv;
1348
1349 file->private_data = buf;
1350 /*
1351 * Since the lifetime of the metadata cache differs from that of the
1352 * session, we need to keep our own reference on the transport.
1353 */
1354 if (!try_module_get(stream->transport->owner)) {
1355 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
1356 return -EBUSY;
1357 }
1358 return lib_ring_buffer_open(inode, file, buf);
1359 }
1360
1361 static
1362 int lttng_metadata_ring_buffer_release(struct inode *inode, struct file *file)
1363 {
1364 struct lttng_metadata_stream *stream = file->private_data;
1365 struct lib_ring_buffer *buf = stream->priv;
1366
1367 mutex_lock(&stream->metadata_cache->lock);
1368 list_del(&stream->list);
1369 mutex_unlock(&stream->metadata_cache->lock);
1370 kref_put(&stream->metadata_cache->refcount, metadata_cache_destroy);
1371 module_put(stream->transport->owner);
1372 kfree(stream);
1373 return lib_ring_buffer_release(inode, file, buf);
1374 }
1375
1376 static
1377 ssize_t lttng_metadata_ring_buffer_splice_read(struct file *in, loff_t *ppos,
1378 struct pipe_inode_info *pipe, size_t len,
1379 unsigned int flags)
1380 {
1381 struct lttng_metadata_stream *stream = in->private_data;
1382 struct lib_ring_buffer *buf = stream->priv;
1383
1384 return lib_ring_buffer_splice_read(in, ppos, pipe, len,
1385 flags, buf);
1386 }
1387
1388 static
1389 int lttng_metadata_ring_buffer_mmap(struct file *filp,
1390 struct vm_area_struct *vma)
1391 {
1392 struct lttng_metadata_stream *stream = filp->private_data;
1393 struct lib_ring_buffer *buf = stream->priv;
1394
1395 return lib_ring_buffer_mmap(filp, vma, buf);
1396 }
1397
1398 static
1399 const struct file_operations lttng_metadata_ring_buffer_file_operations = {
1400 .owner = THIS_MODULE,
1401 .open = lttng_metadata_ring_buffer_open,
1402 .release = lttng_metadata_ring_buffer_release,
1403 .poll = lttng_metadata_ring_buffer_poll,
1404 .splice_read = lttng_metadata_ring_buffer_splice_read,
1405 .mmap = lttng_metadata_ring_buffer_mmap,
1406 .unlocked_ioctl = lttng_metadata_ring_buffer_ioctl,
1407 .llseek = vfs_lib_ring_buffer_no_llseek,
1408 #ifdef CONFIG_COMPAT
1409 .compat_ioctl = lttng_metadata_ring_buffer_compat_ioctl,
1410 #endif
1411 };
1412
1413 static
1414 int lttng_abi_create_stream_fd(struct file *channel_file, void *stream_priv,
1415 const struct file_operations *fops, const char *name)
1416 {
1417 int stream_fd, ret;
1418 struct file *stream_file;
1419
1420 stream_fd = lttng_get_unused_fd();
1421 if (stream_fd < 0) {
1422 ret = stream_fd;
1423 goto fd_error;
1424 }
1425 stream_file = anon_inode_getfile(name, fops, stream_priv, O_RDWR);
1426 if (IS_ERR(stream_file)) {
1427 ret = PTR_ERR(stream_file);
1428 goto file_error;
1429 }
1430 /*
1431 * OPEN_FMODE, called within anon_inode_getfile/alloc_file, doesn't honor
1432 * FMODE_LSEEK, FMODE_PREAD, or FMODE_PWRITE. We need to read from this
1433 * file descriptor, so we set FMODE_PREAD here.
1434 */
1435 stream_file->f_mode |= FMODE_PREAD;
1436 fd_install(stream_fd, stream_file);
1437 /*
1438 * The stream holds a reference to the channel within the generic ring
1439 * buffer library, so no need to hold a refcount on the channel and
1440 * session files here.
1441 */
1442 return stream_fd;
1443
1444 file_error:
1445 put_unused_fd(stream_fd);
1446 fd_error:
1447 return ret;
1448 }
1449
1450 static
1451 int lttng_abi_open_stream(struct file *channel_file)
1452 {
1453 struct lttng_channel *channel = channel_file->private_data;
1454 struct lib_ring_buffer *buf;
1455 int ret;
1456 void *stream_priv;
1457
1458 buf = channel->ops->buffer_read_open(channel->chan);
1459 if (!buf)
1460 return -ENOENT;
1461
1462 stream_priv = buf;
1463 ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
1464 &lttng_stream_ring_buffer_file_operations,
1465 "[lttng_stream]");
1466 if (ret < 0)
1467 goto fd_error;
1468
1469 return ret;
1470
1471 fd_error:
1472 channel->ops->buffer_read_close(buf);
1473 return ret;
1474 }
1475
1476 static
1477 int lttng_abi_open_metadata_stream(struct file *channel_file)
1478 {
1479 struct lttng_channel *channel = channel_file->private_data;
1480 struct lttng_session *session = channel->session;
1481 struct lib_ring_buffer *buf;
1482 int ret;
1483 struct lttng_metadata_stream *metadata_stream;
1484 void *stream_priv;
1485
1486 buf = channel->ops->buffer_read_open(channel->chan);
1487 if (!buf)
1488 return -ENOENT;
1489
1490 metadata_stream = kzalloc(sizeof(struct lttng_metadata_stream),
1491 GFP_KERNEL);
1492 if (!metadata_stream) {
1493 ret = -ENOMEM;
1494 goto nomem;
1495 }
1496 metadata_stream->metadata_cache = session->metadata_cache;
1497 init_waitqueue_head(&metadata_stream->read_wait);
1498 metadata_stream->priv = buf;
1499 stream_priv = metadata_stream;
1500 metadata_stream->transport = channel->transport;
1501 /* The initial state is empty metadata, considered incoherent. */
1502 metadata_stream->coherent = false;
1503
1504 /*
1505 * Since the lifetime of the metadata cache differs from that of the
1506 * session, we need to keep our own reference on the transport.
1507 */
1508 if (!try_module_get(metadata_stream->transport->owner)) {
1509 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
1510 ret = -EINVAL;
1511 goto notransport;
1512 }
1513
1514 if (!lttng_kref_get(&session->metadata_cache->refcount)) {
1515 ret = -EOVERFLOW;
1516 goto kref_error;
1517 }
1518
1519 ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
1520 &lttng_metadata_ring_buffer_file_operations,
1521 "[lttng_metadata_stream]");
1522 if (ret < 0)
1523 goto fd_error;
1524
1525 mutex_lock(&session->metadata_cache->lock);
1526 list_add(&metadata_stream->list,
1527 &session->metadata_cache->metadata_stream);
1528 mutex_unlock(&session->metadata_cache->lock);
1529 return ret;
1530
1531 fd_error:
1532 kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
1533 kref_error:
1534 module_put(metadata_stream->transport->owner);
1535 notransport:
1536 kfree(metadata_stream);
1537 nomem:
1538 channel->ops->buffer_read_close(buf);
1539 return ret;
1540 }
1541
1542 static
1543 int lttng_abi_open_event_notifier_group_stream(struct file *notif_file)
1544 {
1545 struct lttng_event_notifier_group *event_notifier_group = notif_file->private_data;
1546 struct channel *chan = event_notifier_group->chan;
1547 struct lib_ring_buffer *buf;
1548 int ret;
1549 void *stream_priv;
1550
1551 buf = event_notifier_group->ops->buffer_read_open(chan);
1552 if (!buf)
1553 return -ENOENT;
1554
1555 /* The event_notifier notification fd holds a reference on the event_notifier group */
1556 if (!atomic_long_add_unless(&notif_file->f_count, 1, LONG_MAX)) {
1557 ret = -EOVERFLOW;
1558 goto refcount_error;
1559 }
1560 event_notifier_group->buf = buf;
1561 stream_priv = event_notifier_group;
1562 ret = lttng_abi_create_stream_fd(notif_file, stream_priv,
1563 &lttng_event_notifier_group_notif_fops,
1564 "[lttng_event_notifier_stream]");
1565 if (ret < 0)
1566 goto fd_error;
1567
1568 return ret;
1569
1570 fd_error:
1571 atomic_long_dec(&notif_file->f_count);
1572 refcount_error:
1573 event_notifier_group->ops->buffer_read_close(buf);
1574 return ret;
1575 }
1576
1577 static
1578 int lttng_abi_validate_event_param(struct lttng_kernel_event *event_param)
1579 {
1580 /* Limit ABI to implemented features. */
1581 switch (event_param->instrumentation) {
1582 case LTTNG_KERNEL_SYSCALL:
1583 switch (event_param->u.syscall.entryexit) {
1584 case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
1585 break;
1586 default:
1587 return -EINVAL;
1588 }
1589 switch (event_param->u.syscall.abi) {
1590 case LTTNG_KERNEL_SYSCALL_ABI_ALL:
1591 break;
1592 default:
1593 return -EINVAL;
1594 }
1595 switch (event_param->u.syscall.match) {
1596 case LTTNG_SYSCALL_MATCH_NAME:
1597 break;
1598 default:
1599 return -EINVAL;
1600 }
1601 break;
1602
1603 case LTTNG_KERNEL_TRACEPOINT: /* Fallthrough */
1604 case LTTNG_KERNEL_KPROBE: /* Fallthrough */
1605 case LTTNG_KERNEL_KRETPROBE: /* Fallthrough */
1606 case LTTNG_KERNEL_NOOP: /* Fallthrough */
1607 case LTTNG_KERNEL_UPROBE:
1608 break;
1609
1610 case LTTNG_KERNEL_FUNCTION: /* Fallthrough */
1611 default:
1612 return -EINVAL;
1613 }
1614 return 0;
1615 }
1616
1617 static
1618 int lttng_abi_create_event(struct file *channel_file,
1619 struct lttng_kernel_event *event_param)
1620 {
1621 struct lttng_channel *channel = channel_file->private_data;
1622 int event_fd, ret;
1623 struct file *event_file;
1624 void *priv;
1625
1626 event_param->name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
1627 switch (event_param->instrumentation) {
1628 case LTTNG_KERNEL_KRETPROBE:
1629 event_param->u.kretprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
1630 break;
1631 case LTTNG_KERNEL_KPROBE:
1632 event_param->u.kprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
1633 break;
1634 case LTTNG_KERNEL_FUNCTION:
1635 WARN_ON_ONCE(1);
1636 /* Not implemented. */
1637 break;
1638 default:
1639 break;
1640 }
1641 event_fd = lttng_get_unused_fd();
1642 if (event_fd < 0) {
1643 ret = event_fd;
1644 goto fd_error;
1645 }
1646 event_file = anon_inode_getfile("[lttng_event]",
1647 &lttng_event_fops,
1648 NULL, O_RDWR);
1649 if (IS_ERR(event_file)) {
1650 ret = PTR_ERR(event_file);
1651 goto file_error;
1652 }
1653 /* The event holds a reference on the channel */
1654 if (!atomic_long_add_unless(&channel_file->f_count, 1, LONG_MAX)) {
1655 ret = -EOVERFLOW;
1656 goto refcount_error;
1657 }
1658 ret = lttng_abi_validate_event_param(event_param);
1659 if (ret)
1660 goto event_error;
1661 if (event_param->instrumentation == LTTNG_KERNEL_TRACEPOINT
1662 || event_param->instrumentation == LTTNG_KERNEL_SYSCALL) {
1663 struct lttng_event_enabler *event_enabler;
1664
1665 if (strutils_is_star_glob_pattern(event_param->name)) {
1666 /*
1667 * If the event name is a star globbing pattern,
1668 * we create the special star globbing enabler.
1669 */
1670 event_enabler = lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_STAR_GLOB,
1671 event_param, channel);
1672 } else {
1673 event_enabler = lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_NAME,
1674 event_param, channel);
1675 }
1676 priv = event_enabler;
1677 } else {
1678 struct lttng_event *event;
1679
1680 /*
1681 * We tolerate no failure path after event creation. It
1682 * will stay invariant for the rest of the session.
1683 */
1684 event = lttng_event_create(channel, event_param,
1685 NULL, NULL,
1686 event_param->instrumentation);
1687 WARN_ON_ONCE(!event);
1688 if (IS_ERR(event)) {
1689 ret = PTR_ERR(event);
1690 goto event_error;
1691 }
1692 priv = event;
1693 }
1694 event_file->private_data = priv;
1695 fd_install(event_fd, event_file);
1696 return event_fd;
1697
1698 event_error:
1699 atomic_long_dec(&channel_file->f_count);
1700 refcount_error:
1701 fput(event_file);
1702 file_error:
1703 put_unused_fd(event_fd);
1704 fd_error:
1705 return ret;
1706 }
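/*
 * Usage sketch: creating events from userspace with the LTTNG_KERNEL_EVENT
 * ioctl on a channel file descriptor (see lttng_channel_ioctl() below). The
 * event names are examples; a '*' glob in the name creates the star-globbing
 * enabler described above.
 *
 *	struct lttng_kernel_event tp = {
 *		.name = "sched_switch",
 *		.instrumentation = LTTNG_KERNEL_TRACEPOINT,
 *	};
 *	struct lttng_kernel_event sc = {
 *		.name = "openat",
 *		.instrumentation = LTTNG_KERNEL_SYSCALL,
 *		.u.syscall = {
 *			.entryexit = LTTNG_KERNEL_SYSCALL_ENTRYEXIT,
 *			.abi = LTTNG_KERNEL_SYSCALL_ABI_ALL,
 *			.match = LTTNG_SYSCALL_MATCH_NAME,
 *		},
 *	};
 *	int tp_fd = ioctl(channel_fd, LTTNG_KERNEL_EVENT, &tp);
 *	int sc_fd = ioctl(channel_fd, LTTNG_KERNEL_EVENT, &sc);
 */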
1707
1708 static
1709 long lttng_event_notifier_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1710 {
1711 struct lttng_event_notifier_enabler *event_notifier_enabler;
1712 enum lttng_event_type *evtype = file->private_data;
1713
1714 switch (cmd) {
1715 case LTTNG_KERNEL_ENABLE:
1716 switch (*evtype) {
1717 case LTTNG_TYPE_EVENT:
1718 return -EINVAL;
1719 case LTTNG_TYPE_ENABLER:
1720 event_notifier_enabler = file->private_data;
1721 return lttng_event_notifier_enabler_enable(event_notifier_enabler);
1722 default:
1723 WARN_ON_ONCE(1);
1724 return -ENOSYS;
1725 }
1726 case LTTNG_KERNEL_DISABLE:
1727 switch (*evtype) {
1728 case LTTNG_TYPE_EVENT:
1729 return -EINVAL;
1730 case LTTNG_TYPE_ENABLER:
1731 event_notifier_enabler = file->private_data;
1732 return lttng_event_notifier_enabler_disable(event_notifier_enabler);
1733 default:
1734 WARN_ON_ONCE(1);
1735 return -ENOSYS;
1736 }
1737 case LTTNG_KERNEL_FILTER:
1738 switch (*evtype) {
1739 case LTTNG_TYPE_EVENT:
1740 return -EINVAL;
1741 case LTTNG_TYPE_ENABLER:
1742 event_notifier_enabler = file->private_data;
1743 return lttng_event_notifier_enabler_attach_bytecode(event_notifier_enabler,
1744 (struct lttng_kernel_filter_bytecode __user *) arg);
1745 default:
1746 WARN_ON_ONCE(1);
1747 return -ENOSYS;
1748 }
1749 default:
1750 return -ENOIOCTLCMD;
1751 }
1752 }
1753
1754 static
1755 int lttng_event_notifier_release(struct inode *inode, struct file *file)
1756 {
1757 struct lttng_event_notifier *event_notifier;
1758 struct lttng_event_notifier_enabler *event_notifier_enabler;
1759 enum lttng_event_type *evtype = file->private_data;
1760
1761 if (!evtype)
1762 return 0;
1763
1764 switch (*evtype) {
1765 case LTTNG_TYPE_EVENT:
1766 event_notifier = file->private_data;
1767 if (event_notifier)
1768 fput(event_notifier->group->file);
1769 break;
1770 case LTTNG_TYPE_ENABLER:
1771 event_notifier_enabler = file->private_data;
1772 if (event_notifier_enabler)
1773 fput(event_notifier_enabler->group->file);
1774 break;
1775 default:
1776 WARN_ON_ONCE(1);
1777 break;
1778 }
1779
1780 return 0;
1781 }
1782
1783 static const struct file_operations lttng_event_notifier_fops = {
1784 .owner = THIS_MODULE,
1785 .release = lttng_event_notifier_release,
1786 .unlocked_ioctl = lttng_event_notifier_ioctl,
1787 #ifdef CONFIG_COMPAT
1788 .compat_ioctl = lttng_event_notifier_ioctl,
1789 #endif
1790 };
1791
1792 static
1793 int lttng_abi_create_event_notifier(struct file *event_notifier_group_file,
1794 struct lttng_kernel_event_notifier *event_notifier_param)
1795 {
1796 struct lttng_event_notifier_group *event_notifier_group =
1797 event_notifier_group_file->private_data;
1798 int event_notifier_fd, ret;
1799 struct file *event_notifier_file;
1800 void *priv;
1801
1802 switch (event_notifier_param->event.instrumentation) {
1803 case LTTNG_KERNEL_TRACEPOINT:
1804 case LTTNG_KERNEL_KPROBE:
1805 case LTTNG_KERNEL_UPROBE:
1806 case LTTNG_KERNEL_SYSCALL: break;
1807 case LTTNG_KERNEL_KRETPROBE: /* Not supported for event notifiers. */
1808 case LTTNG_KERNEL_FUNCTION:
1809 case LTTNG_KERNEL_NOOP:
1810 default:
1811 ret = -EINVAL;
1812 goto inval_instr;
1813 }
1814
1815 event_notifier_param->event.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
1816
1817 event_notifier_fd = lttng_get_unused_fd();
1818 if (event_notifier_fd < 0) {
1819 ret = event_notifier_fd;
1820 goto fd_error;
1821 }
1822
1823 event_notifier_file = anon_inode_getfile("[lttng_event_notifier]",
1824 &lttng_event_notifier_fops,
1825 NULL, O_RDWR);
1826 if (IS_ERR(event_notifier_file)) {
1827 ret = PTR_ERR(event_notifier_file);
1828 goto file_error;
1829 }
1830
1831 /* The event notifier holds a reference on the event notifier group. */
1832 if (!atomic_long_add_unless(&event_notifier_group_file->f_count, 1, LONG_MAX)) {
1833 ret = -EOVERFLOW;
1834 goto refcount_error;
1835 }
1836
1837 if (event_notifier_param->event.instrumentation == LTTNG_KERNEL_TRACEPOINT
1838 || event_notifier_param->event.instrumentation == LTTNG_KERNEL_SYSCALL) {
1839 struct lttng_event_notifier_enabler *enabler;
1840
1841 if (strutils_is_star_glob_pattern(event_notifier_param->event.name)) {
1842 /*
1843 * If the event name is a star globbing pattern,
1844 * we create the special star globbing enabler.
1845 */
1846 enabler = lttng_event_notifier_enabler_create(
1847 event_notifier_group,
1848 LTTNG_ENABLER_FORMAT_STAR_GLOB,
1849 event_notifier_param);
1850 } else {
1851 enabler = lttng_event_notifier_enabler_create(
1852 event_notifier_group,
1853 LTTNG_ENABLER_FORMAT_NAME,
1854 event_notifier_param);
1855 }
1856 priv = enabler;
1857 } else {
1858 struct lttng_event_notifier *event_notifier;
1859
1860 /*
1861 * We tolerate no failure path after event notifier creation.
1862 * It will stay invariant for the rest of the session.
1863 */
1864 event_notifier = lttng_event_notifier_create(NULL,
1865 event_notifier_param->event.token, event_notifier_group,
1866 event_notifier_param, NULL,
1867 event_notifier_param->event.instrumentation);
1868 WARN_ON_ONCE(!event_notifier);
1869 if (IS_ERR(event_notifier)) {
1870 ret = PTR_ERR(event_notifier);
1871 goto event_notifier_error;
1872 }
1873 priv = event_notifier;
1874 }
1875 event_notifier_file->private_data = priv;
1876 fd_install(event_notifier_fd, event_notifier_file);
1877 return event_notifier_fd;
1878
1879 event_notifier_error:
1880 atomic_long_dec(&event_notifier_group_file->f_count);
1881 refcount_error:
1882 fput(event_notifier_file);
1883 file_error:
1884 put_unused_fd(event_notifier_fd);
1885 fd_error:
1886 inval_instr:
1887 return ret;
1888 }
1889
1890 static
1891 long lttng_event_notifier_group_ioctl(struct file *file, unsigned int cmd,
1892 unsigned long arg)
1893 {
1894 switch (cmd) {
1895 case LTTNG_KERNEL_EVENT_NOTIFIER_GROUP_NOTIFICATION_FD:
1896 {
1897 return lttng_abi_open_event_notifier_group_stream(file);
1898 }
1899 case LTTNG_KERNEL_EVENT_NOTIFIER_CREATE:
1900 {
1901 struct lttng_kernel_event_notifier uevent_notifier_param;
1902
1903 if (copy_from_user(&uevent_notifier_param,
1904 (struct lttng_kernel_event_notifier __user *) arg,
1905 sizeof(uevent_notifier_param)))
1906 return -EFAULT;
1907 return lttng_abi_create_event_notifier(file, &uevent_notifier_param);
1908 }
1909 default:
1910 return -ENOIOCTLCMD;
1911 }
1912 return 0;
1913 }
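
/*
 * Illustrative userspace sketch (not part of this file): one plausible way to
 * drive the two commands handled above, assuming an event notifier group fd
 * has already been obtained and that the lttng/abi.h definitions are visible
 * to userspace. The fd names, the token value and the "sched_*" pattern are
 * hypothetical.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *
 *	int create_sched_notifier(int group_fd, uint64_t token)
 *	{
 *		struct lttng_kernel_event_notifier param;
 *		int notifier_fd, notif_stream_fd;
 *
 *		memset(&param, 0, sizeof(param));
 *		param.event.instrumentation = LTTNG_KERNEL_TRACEPOINT;
 *		param.event.token = token;
 *		strncpy(param.event.name, "sched_*",
 *			LTTNG_KERNEL_SYM_NAME_LEN - 1);
 *
 *		notifier_fd = ioctl(group_fd,
 *			LTTNG_KERNEL_EVENT_NOTIFIER_CREATE, &param);
 *		if (notifier_fd < 0)
 *			return notifier_fd;
 *
 *		notif_stream_fd = ioctl(group_fd,
 *			LTTNG_KERNEL_EVENT_NOTIFIER_GROUP_NOTIFICATION_FD);
 *		return notif_stream_fd;
 *	}
 *
 * Because "sched_*" is a star-glob pattern, lttng_abi_create_event_notifier()
 * creates the star-glob enabler rather than a single named notifier.
 */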
1914
1915 static
1916 int lttng_event_notifier_group_release(struct inode *inode, struct file *file)
1917 {
1918 struct lttng_event_notifier_group *event_notifier_group =
1919 file->private_data;
1920
1921 if (event_notifier_group)
1922 lttng_event_notifier_group_destroy(event_notifier_group);
1923 return 0;
1924 }
1925
1926 static const struct file_operations lttng_event_notifier_group_fops = {
1927 .owner = THIS_MODULE,
1928 .release = lttng_event_notifier_group_release,
1929 .unlocked_ioctl = lttng_event_notifier_group_ioctl,
1930 #ifdef CONFIG_COMPAT
1931 .compat_ioctl = lttng_event_notifier_group_ioctl,
1932 #endif
1933 };
1934
1935 /**
1936 * lttng_channel_ioctl - lttng syscall through ioctl
1937 *
1938 * @file: the file
1939 * @cmd: the command
1940 * @arg: command arg
1941 *
1942 * This ioctl implements lttng commands:
1943 * LTTNG_KERNEL_STREAM
1944 * Returns an event stream file descriptor or failure.
1945 * (typically, one event stream records events from one CPU)
1946 * LTTNG_KERNEL_EVENT
1947 * Returns an event file descriptor or failure.
1948 * LTTNG_KERNEL_CONTEXT
1949 * Prepend a context field to each event in the channel
1950 * LTTNG_KERNEL_ENABLE
1951 * Enable recording for events in this channel (weak enable)
1952 * LTTNG_KERNEL_DISABLE
1953 * Disable recording for events in this channel (strong disable)
* LTTNG_KERNEL_SYSCALL_MASK
* Returns the bitmask of enabled system calls for this channel
1954 *
1955 * Channel and event file descriptors also hold a reference on the session.
1956 */
1957 static
1958 long lttng_channel_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1959 {
1960 struct lttng_channel *channel = file->private_data;
1961
1962 switch (cmd) {
1963 case LTTNG_KERNEL_OLD_STREAM:
1964 case LTTNG_KERNEL_STREAM:
1965 return lttng_abi_open_stream(file);
1966 case LTTNG_KERNEL_OLD_EVENT:
1967 {
1968 struct lttng_kernel_event *uevent_param;
1969 struct lttng_kernel_old_event *old_uevent_param;
1970 int ret;
1971
1972 uevent_param = kmalloc(sizeof(struct lttng_kernel_event),
1973 GFP_KERNEL);
1974 if (!uevent_param) {
1975 ret = -ENOMEM;
1976 goto old_event_end;
1977 }
1978 old_uevent_param = kmalloc(
1979 sizeof(struct lttng_kernel_old_event),
1980 GFP_KERNEL);
1981 if (!old_uevent_param) {
1982 ret = -ENOMEM;
1983 goto old_event_error_free_param;
1984 }
1985 if (copy_from_user(old_uevent_param,
1986 (struct lttng_kernel_old_event __user *) arg,
1987 sizeof(struct lttng_kernel_old_event))) {
1988 ret = -EFAULT;
1989 goto old_event_error_free_old_param;
1990 }
1991
1992 memcpy(uevent_param->name, old_uevent_param->name,
1993 sizeof(uevent_param->name));
1994 uevent_param->instrumentation =
1995 old_uevent_param->instrumentation;
1996
1997 switch (old_uevent_param->instrumentation) {
1998 case LTTNG_KERNEL_KPROBE:
1999 uevent_param->u.kprobe.addr =
2000 old_uevent_param->u.kprobe.addr;
2001 uevent_param->u.kprobe.offset =
2002 old_uevent_param->u.kprobe.offset;
2003 memcpy(uevent_param->u.kprobe.symbol_name,
2004 old_uevent_param->u.kprobe.symbol_name,
2005 sizeof(uevent_param->u.kprobe.symbol_name));
2006 break;
2007 case LTTNG_KERNEL_KRETPROBE:
2008 uevent_param->u.kretprobe.addr =
2009 old_uevent_param->u.kretprobe.addr;
2010 uevent_param->u.kretprobe.offset =
2011 old_uevent_param->u.kretprobe.offset;
2012 memcpy(uevent_param->u.kretprobe.symbol_name,
2013 old_uevent_param->u.kretprobe.symbol_name,
2014 sizeof(uevent_param->u.kretprobe.symbol_name));
2015 break;
2016 case LTTNG_KERNEL_FUNCTION:
2017 WARN_ON_ONCE(1);
2018 /* Not implemented. */
2019 break;
2020 default:
2021 break;
2022 }
2023 ret = lttng_abi_create_event(file, uevent_param);
2024
2025 old_event_error_free_old_param:
2026 kfree(old_uevent_param);
2027 old_event_error_free_param:
2028 kfree(uevent_param);
2029 old_event_end:
2030 return ret;
2031 }
2032 case LTTNG_KERNEL_EVENT:
2033 {
2034 struct lttng_kernel_event uevent_param;
2035
2036 if (copy_from_user(&uevent_param,
2037 (struct lttng_kernel_event __user *) arg,
2038 sizeof(uevent_param)))
2039 return -EFAULT;
2040 return lttng_abi_create_event(file, &uevent_param);
2041 }
2042 case LTTNG_KERNEL_OLD_CONTEXT:
2043 {
2044 struct lttng_kernel_context *ucontext_param;
2045 struct lttng_kernel_old_context *old_ucontext_param;
2046 int ret;
2047
2048 ucontext_param = kmalloc(sizeof(struct lttng_kernel_context),
2049 GFP_KERNEL);
2050 if (!ucontext_param) {
2051 ret = -ENOMEM;
2052 goto old_ctx_end;
2053 }
2054 old_ucontext_param = kmalloc(sizeof(struct lttng_kernel_old_context),
2055 GFP_KERNEL);
2056 if (!old_ucontext_param) {
2057 ret = -ENOMEM;
2058 goto old_ctx_error_free_param;
2059 }
2060
2061 if (copy_from_user(old_ucontext_param,
2062 (struct lttng_kernel_old_context __user *) arg,
2063 sizeof(struct lttng_kernel_old_context))) {
2064 ret = -EFAULT;
2065 goto old_ctx_error_free_old_param;
2066 }
2067 ucontext_param->ctx = old_ucontext_param->ctx;
2068 memcpy(ucontext_param->padding, old_ucontext_param->padding,
2069 sizeof(ucontext_param->padding));
2070 /* only type that uses the union */
2071 if (old_ucontext_param->ctx == LTTNG_KERNEL_CONTEXT_PERF_COUNTER) {
2072 ucontext_param->u.perf_counter.type =
2073 old_ucontext_param->u.perf_counter.type;
2074 ucontext_param->u.perf_counter.config =
2075 old_ucontext_param->u.perf_counter.config;
2076 memcpy(ucontext_param->u.perf_counter.name,
2077 old_ucontext_param->u.perf_counter.name,
2078 sizeof(ucontext_param->u.perf_counter.name));
2079 }
2080
2081 ret = lttng_abi_add_context(file,
2082 ucontext_param,
2083 &channel->ctx, channel->session);
2084
2085 old_ctx_error_free_old_param:
2086 kfree(old_ucontext_param);
2087 old_ctx_error_free_param:
2088 kfree(ucontext_param);
2089 old_ctx_end:
2090 return ret;
2091 }
2092 case LTTNG_KERNEL_CONTEXT:
2093 {
2094 struct lttng_kernel_context ucontext_param;
2095
2096 if (copy_from_user(&ucontext_param,
2097 (struct lttng_kernel_context __user *) arg,
2098 sizeof(ucontext_param)))
2099 return -EFAULT;
2100 return lttng_abi_add_context(file,
2101 &ucontext_param,
2102 &channel->ctx, channel->session);
2103 }
2104 case LTTNG_KERNEL_OLD_ENABLE:
2105 case LTTNG_KERNEL_ENABLE:
2106 return lttng_channel_enable(channel);
2107 case LTTNG_KERNEL_OLD_DISABLE:
2108 case LTTNG_KERNEL_DISABLE:
2109 return lttng_channel_disable(channel);
2110 case LTTNG_KERNEL_SYSCALL_MASK:
2111 return lttng_channel_syscall_mask(channel,
2112 (struct lttng_kernel_syscall_mask __user *) arg);
2113 default:
2114 return -ENOIOCTLCMD;
2115 }
2116 }
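
/*
 * Illustrative userspace sketch (not part of this file): a minimal use of the
 * channel commands documented above, assuming chan_fd was obtained from a
 * session fd and that the lttng/abi.h definitions are visible to userspace.
 * chan_fd, event_fd, stream_fd and the "sched_switch" name are hypothetical.
 *
 *	struct lttng_kernel_event ev;
 *	int event_fd, stream_fd;
 *
 *	memset(&ev, 0, sizeof(ev));
 *	ev.instrumentation = LTTNG_KERNEL_TRACEPOINT;
 *	strncpy(ev.name, "sched_switch", LTTNG_KERNEL_SYM_NAME_LEN - 1);
 *	event_fd = ioctl(chan_fd, LTTNG_KERNEL_EVENT, &ev);
 *
 *	if (ioctl(chan_fd, LTTNG_KERNEL_ENABLE) < 0)
 *		perror("enable channel");
 *
 *	stream_fd = ioctl(chan_fd, LTTNG_KERNEL_STREAM);
 *
 * Each successful LTTNG_KERNEL_STREAM call returns one ring-buffer stream fd
 * (typically one per CPU); a consumer usually repeats the call to collect the
 * remaining streams.
 */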
2117
2118 /**
2119 * lttng_metadata_ioctl - lttng syscall through ioctl
2120 *
2121 * @file: the file
2122 * @cmd: the command
2123 * @arg: command arg
2124 *
2125 * This ioctl implements lttng commands:
2126 * LTTNG_KERNEL_STREAM
2127 * Returns an event stream file descriptor or failure.
2128 *
2129 * Channel and event file descriptors also hold a reference on the session.
2130 */
2131 static
2132 long lttng_metadata_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2133 {
2134 switch (cmd) {
2135 case LTTNG_KERNEL_OLD_STREAM:
2136 case LTTNG_KERNEL_STREAM:
2137 return lttng_abi_open_metadata_stream(file);
2138 default:
2139 return -ENOIOCTLCMD;
2140 }
2141 }
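
/*
 * Illustrative userspace sketch (not part of this file): the metadata channel
 * fd only exposes stream creation, so obtaining the metadata stream is a
 * single call. metadata_fd is hypothetical.
 *
 *	int metadata_stream_fd = ioctl(metadata_fd, LTTNG_KERNEL_STREAM);
 */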
2142
2143 /**
2144 * lttng_channel_poll - lttng stream addition/removal monitoring
2145 *
2146 * @file: the file
2147 * @wait: poll table
2148 */
2149 unsigned int lttng_channel_poll(struct file *file, poll_table *wait)
2150 {
2151 struct lttng_channel *channel = file->private_data;
2152 unsigned int mask = 0;
2153
2154 if (file->f_mode & FMODE_READ) {
2155 poll_wait_set_exclusive(wait);
2156 poll_wait(file, channel->ops->get_hp_wait_queue(channel->chan),
2157 wait);
2158
2159 if (channel->ops->is_disabled(channel->chan))
2160 return POLLERR;
2161 if (channel->ops->is_finalized(channel->chan))
2162 return POLLHUP;
2163 if (channel->ops->buffer_has_read_closed_stream(channel->chan))
2164 return POLLIN | POLLRDNORM;
2165 return 0;
2166 }
2167 return mask;
2168
2169 }
2170
2171 static
2172 int lttng_channel_release(struct inode *inode, struct file *file)
2173 {
2174 struct lttng_channel *channel = file->private_data;
2175
2176 if (channel)
2177 fput(channel->session->file);
2178 return 0;
2179 }
2180
2181 static
2182 int lttng_metadata_channel_release(struct inode *inode, struct file *file)
2183 {
2184 struct lttng_channel *channel = file->private_data;
2185
2186 if (channel) {
2187 fput(channel->session->file);
2188 lttng_metadata_channel_destroy(channel);
2189 }
2190
2191 return 0;
2192 }
2193
2194 static const struct file_operations lttng_channel_fops = {
2195 .owner = THIS_MODULE,
2196 .release = lttng_channel_release,
2197 .poll = lttng_channel_poll,
2198 .unlocked_ioctl = lttng_channel_ioctl,
2199 #ifdef CONFIG_COMPAT
2200 .compat_ioctl = lttng_channel_ioctl,
2201 #endif
2202 };
2203
2204 static const struct file_operations lttng_metadata_fops = {
2205 .owner = THIS_MODULE,
2206 .release = lttng_metadata_channel_release,
2207 .unlocked_ioctl = lttng_metadata_ioctl,
2208 #ifdef CONFIG_COMPAT
2209 .compat_ioctl = lttng_metadata_ioctl,
2210 #endif
2211 };
2212
2213 /**
2214 * lttng_event_ioctl - lttng syscall through ioctl
2215 *
2216 * @file: the file
2217 * @cmd: the command
2218 * @arg: command arg
2219 *
2220 * This ioctl implements lttng commands:
2221 * LTTNG_KERNEL_CONTEXT
2222 * Prepend a context field to each record of this event
2223 * LTTNG_KERNEL_ENABLE
2224 * Enable recording for this event (weak enable)
2225 * LTTNG_KERNEL_DISABLE
2226 * Disable recording for this event (strong disable)
* LTTNG_KERNEL_FILTER
* Attach a filter bytecode to an event enabler (enabler fds only)
* LTTNG_KERNEL_ADD_CALLSITE
* Add an instrumentation callsite to this event (event fds only)
2227 */
2228 static
2229 long lttng_event_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2230 {
2231 struct lttng_event *event;
2232 struct lttng_event_enabler *event_enabler;
2233 enum lttng_event_type *evtype = file->private_data;
2234
2235 switch (cmd) {
2236 case LTTNG_KERNEL_OLD_CONTEXT:
2237 {
2238 /* Not implemented */
2239 return -ENOSYS;
2240 }
2241 case LTTNG_KERNEL_CONTEXT:
2242 {
2243 /* Not implemented */
2244 return -ENOSYS;
2245 }
2246 case LTTNG_KERNEL_OLD_ENABLE:
2247 case LTTNG_KERNEL_ENABLE:
2248 switch (*evtype) {
2249 case LTTNG_TYPE_EVENT:
2250 event = file->private_data;
2251 return lttng_event_enable(event);
2252 case LTTNG_TYPE_ENABLER:
2253 event_enabler = file->private_data;
2254 return lttng_event_enabler_enable(event_enabler);
2255 default:
2256 WARN_ON_ONCE(1);
2257 return -ENOSYS;
2258 }
2259 case LTTNG_KERNEL_OLD_DISABLE:
2260 case LTTNG_KERNEL_DISABLE:
2261 switch (*evtype) {
2262 case LTTNG_TYPE_EVENT:
2263 event = file->private_data;
2264 return lttng_event_disable(event);
2265 case LTTNG_TYPE_ENABLER:
2266 event_enabler = file->private_data;
2267 return lttng_event_enabler_disable(event_enabler);
2268 default:
2269 WARN_ON_ONCE(1);
2270 return -ENOSYS;
2271 }
2272 case LTTNG_KERNEL_FILTER:
2273 switch (*evtype) {
2274 case LTTNG_TYPE_EVENT:
2275 return -EINVAL;
2276 case LTTNG_TYPE_ENABLER:
2277 {
2278 event_enabler = file->private_data;
2279 return lttng_event_enabler_attach_bytecode(event_enabler,
2280 (struct lttng_kernel_filter_bytecode __user *) arg);
2281 }
2282 default:
2283 WARN_ON_ONCE(1);
2284 return -ENOSYS;
2285 }
2286 case LTTNG_KERNEL_ADD_CALLSITE:
2287 switch (*evtype) {
2288 case LTTNG_TYPE_EVENT:
2289 event = file->private_data;
2290 return lttng_event_add_callsite(event,
2291 (struct lttng_kernel_event_callsite __user *) arg);
2292 case LTTNG_TYPE_ENABLER:
2293 return -EINVAL;
2294 default:
2295 WARN_ON_ONCE(1);
2296 return -ENOSYS;
2297 }
2298 default:
2299 return -ENOIOCTLCMD;
2300 }
2301 }
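
/*
 * Illustrative userspace sketch (not part of this file): toggling recording on
 * an event fd returned by LTTNG_KERNEL_EVENT, as documented above. event_fd is
 * hypothetical.
 *
 *	if (ioctl(event_fd, LTTNG_KERNEL_DISABLE) < 0)
 *		perror("disable event");
 *	if (ioctl(event_fd, LTTNG_KERNEL_ENABLE) < 0)
 *		perror("enable event");
 *
 * LTTNG_KERNEL_FILTER only applies to enabler fds; on a plain event fd it
 * returns -EINVAL, as handled in lttng_event_ioctl() above.
 */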
2302
2303 static
2304 int lttng_event_release(struct inode *inode, struct file *file)
2305 {
2306 struct lttng_event *event;
2307 struct lttng_event_enabler *event_enabler;
2308 enum lttng_event_type *evtype = file->private_data;
2309
2310 if (!evtype)
2311 return 0;
2312
2313 switch (*evtype) {
2314 case LTTNG_TYPE_EVENT:
2315 event = file->private_data;
2316 if (event)
2317 fput(event->chan->file);
2318 break;
2319 case LTTNG_TYPE_ENABLER:
2320 event_enabler = file->private_data;
2321 if (event_enabler)
2322 fput(event_enabler->chan->file);
2323 break;
2324 default:
2325 WARN_ON_ONCE(1);
2326 break;
2327 }
2328
2329 return 0;
2330 }
2331
2332 /* TODO: filter control ioctl */
2333 static const struct file_operations lttng_event_fops = {
2334 .owner = THIS_MODULE,
2335 .release = lttng_event_release,
2336 .unlocked_ioctl = lttng_event_ioctl,
2337 #ifdef CONFIG_COMPAT
2338 .compat_ioctl = lttng_event_ioctl,
2339 #endif
2340 };
2341
2342 static int put_u64(uint64_t val, unsigned long arg)
2343 {
2344 return put_user(val, (uint64_t __user *) arg);
2345 }
2346
2347 static int put_u32(uint32_t val, unsigned long arg)
2348 {
2349 return put_user(val, (uint32_t __user *) arg);
2350 }
2351
2352 static long lttng_stream_ring_buffer_ioctl(struct file *filp,
2353 unsigned int cmd, unsigned long arg)
2354 {
2355 struct lib_ring_buffer *buf = filp->private_data;
2356 struct channel *chan = buf->backend.chan;
2357 const struct lib_ring_buffer_config *config = &chan->backend.config;
2358 const struct lttng_channel_ops *ops = chan->backend.priv_ops;
2359 int ret;
2360
2361 if (atomic_read(&chan->record_disabled))
2362 return -EIO;
2363
2364 switch (cmd) {
2365 case LTTNG_RING_BUFFER_GET_TIMESTAMP_BEGIN:
2366 {
2367 uint64_t ts;
2368
2369 ret = ops->timestamp_begin(config, buf, &ts);
2370 if (ret < 0)
2371 goto error;
2372 return put_u64(ts, arg);
2373 }
2374 case LTTNG_RING_BUFFER_GET_TIMESTAMP_END:
2375 {
2376 uint64_t ts;
2377
2378 ret = ops->timestamp_end(config, buf, &ts);
2379 if (ret < 0)
2380 goto error;
2381 return put_u64(ts, arg);
2382 }
2383 case LTTNG_RING_BUFFER_GET_EVENTS_DISCARDED:
2384 {
2385 uint64_t ed;
2386
2387 ret = ops->events_discarded(config, buf, &ed);
2388 if (ret < 0)
2389 goto error;
2390 return put_u64(ed, arg);
2391 }
2392 case LTTNG_RING_BUFFER_GET_CONTENT_SIZE:
2393 {
2394 uint64_t cs;
2395
2396 ret = ops->content_size(config, buf, &cs);
2397 if (ret < 0)
2398 goto error;
2399 return put_u64(cs, arg);
2400 }
2401 case LTTNG_RING_BUFFER_GET_PACKET_SIZE:
2402 {
2403 uint64_t ps;
2404
2405 ret = ops->packet_size(config, buf, &ps);
2406 if (ret < 0)
2407 goto error;
2408 return put_u64(ps, arg);
2409 }
2410 case LTTNG_RING_BUFFER_GET_STREAM_ID:
2411 {
2412 uint64_t si;
2413
2414 ret = ops->stream_id(config, buf, &si);
2415 if (ret < 0)
2416 goto error;
2417 return put_u64(si, arg);
2418 }
2419 case LTTNG_RING_BUFFER_GET_CURRENT_TIMESTAMP:
2420 {
2421 uint64_t ts;
2422
2423 ret = ops->current_timestamp(config, buf, &ts);
2424 if (ret < 0)
2425 goto error;
2426 return put_u64(ts, arg);
2427 }
2428 case LTTNG_RING_BUFFER_GET_SEQ_NUM:
2429 {
2430 uint64_t seq;
2431
2432 ret = ops->sequence_number(config, buf, &seq);
2433 if (ret < 0)
2434 goto error;
2435 return put_u64(seq, arg);
2436 }
2437 case LTTNG_RING_BUFFER_INSTANCE_ID:
2438 {
2439 uint64_t id;
2440
2441 ret = ops->instance_id(config, buf, &id);
2442 if (ret < 0)
2443 goto error;
2444 return put_u64(id, arg);
2445 }
2446 default:
2447 return lib_ring_buffer_file_operations.unlocked_ioctl(filp,
2448 cmd, arg);
2449 }
2450
2451 error:
2452 return -ENOSYS;
2453 }
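
/*
 * Illustrative userspace sketch (not part of this file): querying packet
 * metadata on a stream fd, using the commands handled above. stream_fd is
 * hypothetical; each command fills the user-provided uint64_t through
 * put_u64() and the handler returns 0 on success, or -ENOSYS when the
 * operation is not supported by the channel type.
 *
 *	uint64_t ts_begin, ts_end, content_size, packet_size, discarded;
 *
 *	ioctl(stream_fd, LTTNG_RING_BUFFER_GET_TIMESTAMP_BEGIN, &ts_begin);
 *	ioctl(stream_fd, LTTNG_RING_BUFFER_GET_TIMESTAMP_END, &ts_end);
 *	ioctl(stream_fd, LTTNG_RING_BUFFER_GET_CONTENT_SIZE, &content_size);
 *	ioctl(stream_fd, LTTNG_RING_BUFFER_GET_PACKET_SIZE, &packet_size);
 *	ioctl(stream_fd, LTTNG_RING_BUFFER_GET_EVENTS_DISCARDED, &discarded);
 */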
2454
2455 #ifdef CONFIG_COMPAT
2456 static long lttng_stream_ring_buffer_compat_ioctl(struct file *filp,
2457 unsigned int cmd, unsigned long arg)
2458 {
2459 struct lib_ring_buffer *buf = filp->private_data;
2460 struct channel *chan = buf->backend.chan;
2461 const struct lib_ring_buffer_config *config = &chan->backend.config;
2462 const struct lttng_channel_ops *ops = chan->backend.priv_ops;
2463 int ret;
2464
2465 if (atomic_read(&chan->record_disabled))
2466 return -EIO;
2467
2468 switch (cmd) {
2469 case LTTNG_RING_BUFFER_COMPAT_GET_TIMESTAMP_BEGIN:
2470 {
2471 uint64_t ts;
2472
2473 ret = ops->timestamp_begin(config, buf, &ts);
2474 if (ret < 0)
2475 goto error;
2476 return put_u64(ts, arg);
2477 }
2478 case LTTNG_RING_BUFFER_COMPAT_GET_TIMESTAMP_END:
2479 {
2480 uint64_t ts;
2481
2482 ret = ops->timestamp_end(config, buf, &ts);
2483 if (ret < 0)
2484 goto error;
2485 return put_u64(ts, arg);
2486 }
2487 case LTTNG_RING_BUFFER_COMPAT_GET_EVENTS_DISCARDED:
2488 {
2489 uint64_t ed;
2490
2491 ret = ops->events_discarded(config, buf, &ed);
2492 if (ret < 0)
2493 goto error;
2494 return put_u64(ed, arg);
2495 }
2496 case LTTNG_RING_BUFFER_COMPAT_GET_CONTENT_SIZE:
2497 {
2498 uint64_t cs;
2499
2500 ret = ops->content_size(config, buf, &cs);
2501 if (ret < 0)
2502 goto error;
2503 return put_u64(cs, arg);
2504 }
2505 case LTTNG_RING_BUFFER_COMPAT_GET_PACKET_SIZE:
2506 {
2507 uint64_t ps;
2508
2509 ret = ops->packet_size(config, buf, &ps);
2510 if (ret < 0)
2511 goto error;
2512 return put_u64(ps, arg);
2513 }
2514 case LTTNG_RING_BUFFER_COMPAT_GET_STREAM_ID:
2515 {
2516 uint64_t si;
2517
2518 ret = ops->stream_id(config, buf, &si);
2519 if (ret < 0)
2520 goto error;
2521 return put_u64(si, arg);
2522 }
2523 case LTTNG_RING_BUFFER_GET_CURRENT_TIMESTAMP:
2524 {
2525 uint64_t ts;
2526
2527 ret = ops->current_timestamp(config, buf, &ts);
2528 if (ret < 0)
2529 goto error;
2530 return put_u64(ts, arg);
2531 }
2532 case LTTNG_RING_BUFFER_COMPAT_GET_SEQ_NUM:
2533 {
2534 uint64_t seq;
2535
2536 ret = ops->sequence_number(config, buf, &seq);
2537 if (ret < 0)
2538 goto error;
2539 return put_u64(seq, arg);
2540 }
2541 case LTTNG_RING_BUFFER_COMPAT_INSTANCE_ID:
2542 {
2543 uint64_t id;
2544
2545 ret = ops->instance_id(config, buf, &id);
2546 if (ret < 0)
2547 goto error;
2548 return put_u64(id, arg);
2549 }
2550 default:
2551 return lib_ring_buffer_file_operations.compat_ioctl(filp,
2552 cmd, arg);
2553 }
2554
2555 error:
2556 return -ENOSYS;
2557 }
2558 #endif /* CONFIG_COMPAT */
2559
2560 static void lttng_stream_override_ring_buffer_fops(void)
2561 {
2562 lttng_stream_ring_buffer_file_operations.owner = THIS_MODULE;
2563 lttng_stream_ring_buffer_file_operations.open =
2564 lib_ring_buffer_file_operations.open;
2565 lttng_stream_ring_buffer_file_operations.release =
2566 lib_ring_buffer_file_operations.release;
2567 lttng_stream_ring_buffer_file_operations.poll =
2568 lib_ring_buffer_file_operations.poll;
2569 lttng_stream_ring_buffer_file_operations.splice_read =
2570 lib_ring_buffer_file_operations.splice_read;
2571 lttng_stream_ring_buffer_file_operations.mmap =
2572 lib_ring_buffer_file_operations.mmap;
2573 lttng_stream_ring_buffer_file_operations.unlocked_ioctl =
2574 lttng_stream_ring_buffer_ioctl;
2575 lttng_stream_ring_buffer_file_operations.llseek =
2576 lib_ring_buffer_file_operations.llseek;
2577 #ifdef CONFIG_COMPAT
2578 lttng_stream_ring_buffer_file_operations.compat_ioctl =
2579 lttng_stream_ring_buffer_compat_ioctl;
2580 #endif
2581 }
2582
2583 int __init lttng_abi_init(void)
2584 {
2585 int ret = 0;
2586
2587 wrapper_vmalloc_sync_mappings();
2588 lttng_clock_ref();
2589
2590 ret = lttng_tp_mempool_init();
2591 if (ret) {
2592 goto error;
2593 }
2594
2595 lttng_proc_dentry = proc_create_data("lttng", S_IRUSR | S_IWUSR, NULL,
2596 &lttng_proc_ops, NULL);
2597
2598 if (!lttng_proc_dentry) {
2599 printk(KERN_ERR "LTTng: Error creating control file\n");
2600 ret = -ENOMEM;
2601 goto error;
2602 }
2603 lttng_stream_override_ring_buffer_fops();
2604 return 0;
2605
2606 error:
2607 lttng_tp_mempool_destroy();
2608 lttng_clock_unref();
2609 return ret;
2610 }
2611
2612 /* No __exit annotation because used by init error path too. */
2613 void lttng_abi_exit(void)
2614 {
2615 lttng_tp_mempool_destroy();
2616 lttng_clock_unref();
2617 if (lttng_proc_dentry)
2618 remove_proc_entry("lttng", NULL);
2619 }