/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-statedump.c
 *
 * Linux Trace Toolkit Next Generation Kernel State Dump
 *
 * Copyright 2005 Jean-Hugues Deschenes <jean-hugues.deschenes@polymtl.ca>
 * Copyright 2006-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Changes:
 *	Eric Clement:			Add listing of network IP interface
 *	2006, 2007 Mathieu Desnoyers	Fix kernel threads
 *					Various updates
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/inet.h>
#include <linux/ip.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/irqnr.h>
#include <linux/cpu.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/fdtable.h>
#include <linux/irq.h>
#include <linux/genhd.h>

#include <lttng/lttng-events.h>
#include <lttng/lttng-tracer.h>

/* Define the tracepoints, but do not build the probes */
#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH instrumentation/events/lttng-module
#define TRACE_INCLUDE_FILE lttng-statedump
#define LTTNG_INSTRUMENTATION
#include <instrumentation/events/lttng-module/lttng-statedump.h>

DEFINE_TRACE(lttng_statedump_block_device);
DEFINE_TRACE(lttng_statedump_end);
DEFINE_TRACE(lttng_statedump_interrupt);
DEFINE_TRACE(lttng_statedump_file_descriptor);
DEFINE_TRACE(lttng_statedump_start);
DEFINE_TRACE(lttng_statedump_process_state);
DEFINE_TRACE(lttng_statedump_process_pid_ns);
DEFINE_TRACE(lttng_statedump_process_cgroup_ns);
DEFINE_TRACE(lttng_statedump_process_ipc_ns);
#ifndef LTTNG_MNT_NS_MISSING_HEADER
DEFINE_TRACE(lttng_statedump_process_mnt_ns);
#endif
DEFINE_TRACE(lttng_statedump_process_net_ns);
DEFINE_TRACE(lttng_statedump_process_user_ns);
DEFINE_TRACE(lttng_statedump_process_uts_ns);
DEFINE_TRACE(lttng_statedump_network_interface);
#ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
DEFINE_TRACE(lttng_statedump_cpu_topology);
#endif

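/*
 * Context passed to the per-file-descriptor callback while walking a
 * task's fd table: a scratch page for d_path(), the target tracing
 * session, and the files_struct being enumerated.
 */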
struct lttng_fd_ctx {
	char *page;
	struct lttng_session *session;
	struct files_struct *files;
};

/*
 * Protected by the trace lock.
 */
static struct delayed_work cpu_work[NR_CPUS];
static DECLARE_WAIT_QUEUE_HEAD(statedump_wq);
static atomic_t kernel_threads_to_run;

enum lttng_thread_type {
	LTTNG_USER_THREAD = 0,
	LTTNG_KERNEL_THREAD = 1,
};

enum lttng_execution_mode {
	LTTNG_USER_MODE = 0,
	LTTNG_SYSCALL = 1,
	LTTNG_TRAP = 2,
	LTTNG_IRQ = 3,
	LTTNG_SOFTIRQ = 4,
	LTTNG_MODE_UNKNOWN = 5,
};

enum lttng_execution_submode {
	LTTNG_NONE = 0,
	LTTNG_UNKNOWN = 1,
};

enum lttng_process_status {
	LTTNG_UNNAMED = 0,
	LTTNG_WAIT_FORK = 1,
	LTTNG_WAIT_CPU = 2,
	LTTNG_EXIT = 3,
	LTTNG_ZOMBIE = 4,
	LTTNG_WAIT = 5,
	LTTNG_RUN = 6,
	LTTNG_DEAD = 7,
};

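/*
 * Walk every disk registered in the block class and emit one
 * lttng_statedump_block_device event per partition, including
 * partition 0 (the whole disk).
 */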
static
int lttng_enumerate_block_devices(struct lttng_session *session)
{
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, gendisk_block_class(), NULL,
			gendisk_device_type());
	while ((dev = class_dev_iter_next(&iter))) {
		struct disk_part_iter piter;
		struct gendisk *disk = dev_to_disk(dev);
		struct hd_struct *part;

		/*
		 * Don't show empty devices or things that have been
		 * suppressed
		 */
		if (get_capacity(disk) == 0 ||
		    (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
			continue;

		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter))) {
			char name_buf[BDEVNAME_SIZE];
			char *p;

			p = gendisk_name(disk, part->partno, name_buf);
			if (!p) {
				disk_part_iter_exit(&piter);
				class_dev_iter_exit(&iter);
				return -ENOSYS;
			}
			trace_lttng_statedump_block_device(session,
					part_devt(part), name_buf);
		}
		disk_part_iter_exit(&piter);
	}
	class_dev_iter_exit(&iter);
	return 0;
}

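/*
 * Dump the IPv4 addresses of every network interface that is up; an
 * interface that is down is reported with a NULL address so it still
 * shows up in the trace.
 */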
#ifdef CONFIG_INET

static
void lttng_enumerate_device(struct lttng_session *session,
		struct net_device *dev)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;

	if (dev->flags & IFF_UP) {
		in_dev = in_dev_get(dev);
		if (in_dev) {
			for (ifa = in_dev->ifa_list; ifa != NULL;
					ifa = ifa->ifa_next) {
				trace_lttng_statedump_network_interface(
					session, dev, ifa);
			}
			in_dev_put(in_dev);
		}
	} else {
		trace_lttng_statedump_network_interface(
			session, dev, NULL);
	}
}

static
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, dev)
		lttng_enumerate_device(session, dev);
	read_unlock(&dev_base_lock);

	return 0;
}
#else /* CONFIG_INET */
static inline
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	return 0;
}
#endif /* CONFIG_INET */

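/*
 * Callback invoked by iterate_fd() for each open file of a task: emit
 * one lttng_statedump_file_descriptor event carrying the resolved path
 * (or the dentry name when d_path() fails), the fd flags and the file
 * mode.
 */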
static
int lttng_dump_one_fd(const void *p, struct file *file, unsigned int fd)
{
	const struct lttng_fd_ctx *ctx = p;
	const char *s = d_path(&file->f_path, ctx->page, PAGE_SIZE);
	unsigned int flags = file->f_flags;
	struct fdtable *fdt;

	/*
	 * We don't expose kernel internal flags, only userspace-visible
	 * flags.
	 */
	flags &= ~FMODE_NONOTIFY;
	fdt = files_fdtable(ctx->files);
	/*
	 * We need to check here again whether fd is within the fdt
	 * max_fds range, because we might be seeing a different
	 * files_fdtable() than iterate_fd(), assuming only RCU is
	 * protecting the read. In reality, iterate_fd() holds
	 * file_lock, which should ensure the fdt does not change while
	 * the lock is taken, but we are not aware whether this is
	 * guaranteed or not, so play safe.
	 */
	if (fd < fdt->max_fds && close_on_exec(fd, fdt))
		flags |= O_CLOEXEC;
	if (IS_ERR(s)) {
		struct dentry *dentry = file->f_path.dentry;

		/* Make sure we give at least some info */
		spin_lock(&dentry->d_lock);
		trace_lttng_statedump_file_descriptor(ctx->session,
			ctx->files, fd, dentry->d_name.name, flags,
			file->f_mode);
		spin_unlock(&dentry->d_lock);
		goto end;
	}
	trace_lttng_statedump_file_descriptor(ctx->session,
		ctx->files, fd, s, flags, file->f_mode);
end:
	return 0;
}

/* Called with task lock held. */
static
void lttng_enumerate_files(struct lttng_session *session,
		struct files_struct *files,
		char *tmp)
{
	struct lttng_fd_ctx ctx = { .page = tmp, .session = session, .files = files, };

	iterate_fd(files, 0, lttng_dump_one_fd, &ctx);
}

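/*
 * Emit one lttng_statedump_cpu_topology event per possible CPU; on
 * kernels where this information is not exposed, fall back to a no-op
 * stub.
 */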
#ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
static
int lttng_enumerate_cpu_topology(struct lttng_session *session)
{
	int cpu;
	const cpumask_t *cpumask = cpu_possible_mask;

	for (cpu = cpumask_first(cpumask); cpu < nr_cpu_ids;
			cpu = cpumask_next(cpu, cpumask)) {
		trace_lttng_statedump_cpu_topology(session, &cpu_data(cpu));
	}

	return 0;
}
#else
static
int lttng_enumerate_cpu_topology(struct lttng_session *session)
{
	return 0;
}
#endif

#if 0
/*
 * FIXME: we cannot take a mmap_sem while in a RCU read-side critical section
 * (scheduling in atomic). Normally, the tasklist lock protects this kind of
 * iteration, but it is not exported to modules.
 */
static
void lttng_enumerate_task_vm_maps(struct lttng_session *session,
		struct task_struct *p)
{
	struct mm_struct *mm;
	struct vm_area_struct *map;
	unsigned long ino;

	/* get_task_mm does a task_lock... */
	mm = get_task_mm(p);
	if (!mm)
		return;

	map = mm->mmap;
	if (map) {
		down_read(&mm->mmap_sem);
		while (map) {
			if (map->vm_file)
				ino = map->vm_file->f_path.dentry->d_inode->i_ino;
			else
				ino = 0;
			trace_lttng_statedump_vm_map(session, p, map, ino);
			map = map->vm_next;
		}
		up_read(&mm->mmap_sem);
	}
	mmput(mm);
}

static
int lttng_enumerate_vm_maps(struct lttng_session *session)
{
	struct task_struct *p;

	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_vm_maps(session, p);
	rcu_read_unlock();
	return 0;
}
#endif

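/*
 * Walk the irq descriptors and emit one lttng_statedump_interrupt event
 * per registered irqaction, holding the descriptor lock with local
 * interrupts disabled so the action list cannot change underneath us.
 */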
static
int lttng_list_interrupts(struct lttng_session *session)
{
	unsigned int irq;
	unsigned long flags = 0;
	struct irq_desc *desc;

	/* needs irq_desc */
	for_each_irq_desc(irq, desc) {
		struct irqaction *action;
		const char *irq_chip_name =
			irq_desc_get_chip(desc)->name ? : "unnamed_irq_chip";

		local_irq_save(flags);
		raw_spin_lock(&desc->lock);
		for (action = desc->action; action; action = action->next) {
			trace_lttng_statedump_interrupt(session,
				irq, irq_chip_name, action);
		}
		raw_spin_unlock(&desc->lock);
		local_irq_restore(flags);
	}
	return 0;
}

/*
 * Statedump the task's namespaces using the proc filesystem inode number as
 * the unique identifier. The user and pid ns are nested and will be dumped
 * recursively.
 *
 * Called with task lock held.
 */
static
void lttng_statedump_process_ns(struct lttng_session *session,
		struct task_struct *p,
		enum lttng_thread_type type,
		enum lttng_execution_mode mode,
		enum lttng_execution_submode submode,
		enum lttng_process_status status)
{
	struct nsproxy *proxy;
	struct pid_namespace *pid_ns;
	struct user_namespace *user_ns;

	/*
	 * The pid and user namespaces are special, they are nested and
	 * accessed with specific functions instead of the nsproxy struct
	 * like the other namespaces.
	 */
	pid_ns = task_active_pid_ns(p);
	do {
		trace_lttng_statedump_process_pid_ns(session, p, pid_ns);
		pid_ns = pid_ns ? pid_ns->parent : NULL;
	} while (pid_ns);

	user_ns = task_cred_xxx(p, user_ns);
	do {
		trace_lttng_statedump_process_user_ns(session, p, user_ns);
		/*
		 * trace_lttng_statedump_process_user_ns() internally
		 * checks whether user_ns is NULL. While this does not
		 * appear to be a possible return value for
		 * task_cred_xxx(), err on the safe side and check
		 * for NULL here as well to be consistent with the
		 * paranoid behavior of
		 * trace_lttng_statedump_process_user_ns().
		 */
		user_ns = user_ns ? user_ns->parent : NULL;
	} while (user_ns);

	/*
	 * Back and forth on locking strategy within Linux upstream for nsproxy.
	 * See Linux upstream commit 728dba3a39c66b3d8ac889ddbe38b5b1c264aec3
	 * "namespaces: Use task_lock and not rcu to protect nsproxy"
	 * for details.
	 */
	proxy = p->nsproxy;
	if (proxy) {
		trace_lttng_statedump_process_cgroup_ns(session, p, proxy->cgroup_ns);
		trace_lttng_statedump_process_ipc_ns(session, p, proxy->ipc_ns);
#ifndef LTTNG_MNT_NS_MISSING_HEADER
		trace_lttng_statedump_process_mnt_ns(session, p, proxy->mnt_ns);
#endif
		trace_lttng_statedump_process_net_ns(session, p, proxy->net_ns);
		trace_lttng_statedump_process_uts_ns(session, p, proxy->uts_ns);
	}
}

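/*
 * Walk every thread of every process under RCU and, with the task lock
 * held, emit its state, its namespaces and, once per shared
 * files_struct, its open file descriptors.
 */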
static
int lttng_enumerate_process_states(struct lttng_session *session)
{
	struct task_struct *g, *p;
	char *tmp;

	tmp = (char *) __get_free_page(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	rcu_read_lock();
	for_each_process(g) {
		struct files_struct *prev_files = NULL;

		p = g;
		do {
			enum lttng_execution_mode mode =
				LTTNG_MODE_UNKNOWN;
			enum lttng_execution_submode submode =
				LTTNG_UNKNOWN;
			enum lttng_process_status status;
			enum lttng_thread_type type;
			struct files_struct *files;

			task_lock(p);
			if (p->exit_state == EXIT_ZOMBIE)
				status = LTTNG_ZOMBIE;
			else if (p->exit_state == EXIT_DEAD)
				status = LTTNG_DEAD;
			else if (p->state == TASK_RUNNING) {
				/* Is this a forked child that has not run yet? */
				if (list_empty(&p->rt.run_list))
					status = LTTNG_WAIT_FORK;
				else
					/*
					 * All tasks are considered as wait_cpu;
					 * the viewer will sort out if the task
					 * was really running at this time.
					 */
					status = LTTNG_WAIT_CPU;
			} else if (p->state &
					(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) {
				/* Task is waiting for something to complete */
				status = LTTNG_WAIT;
			} else
				status = LTTNG_UNNAMED;
			submode = LTTNG_NONE;

			/*
			 * Verification of t->mm is to filter out kernel
			 * threads; Viewer will further filter out if a
			 * user-space thread was in syscall mode or not.
			 */
			if (p->mm)
				type = LTTNG_USER_THREAD;
			else
				type = LTTNG_KERNEL_THREAD;
			files = p->files;

			trace_lttng_statedump_process_state(session,
				p, type, mode, submode, status, files);
			lttng_statedump_process_ns(session,
				p, type, mode, submode, status);
			/*
			 * As an optimisation for the common case, do not
			 * repeat information for the same files_struct in
			 * two consecutive threads. This is the common case
			 * for threads sharing the same fd table. RCU guarantees
			 * that the same files_struct pointer is not re-used
			 * throughout processes/threads iteration.
			 */
			if (files && files != prev_files) {
				lttng_enumerate_files(session, files, tmp);
				prev_files = files;
			}
			task_unlock(p);
		} while_each_thread(g, p);
	}
	rcu_read_unlock();

	free_page((unsigned long) tmp);

	return 0;
}

static
void lttng_statedump_work_func(struct work_struct *work)
{
	if (atomic_dec_and_test(&kernel_threads_to_run))
		/* If we are the last thread, wake up do_lttng_statedump */
		wake_up(&statedump_wq);
}

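/*
 * Run the complete state dump for a session: processes, interrupts,
 * network interfaces, block devices and CPU topology, then synchronize
 * with one delayed work item per online CPU before emitting the end
 * event.
 */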
static
int do_lttng_statedump(struct lttng_session *session)
{
	int cpu, ret;

	trace_lttng_statedump_start(session);
	ret = lttng_enumerate_process_states(session);
	if (ret)
		return ret;
	/*
	 * FIXME
	 * ret = lttng_enumerate_vm_maps(session);
	 * if (ret)
	 *	return ret;
	 */
	ret = lttng_list_interrupts(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_network_ip_interface(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_block_devices(session);
	switch (ret) {
	case 0:
		break;
	case -ENOSYS:
		printk(KERN_WARNING "LTTng: block device enumeration is not supported by kernel\n");
		break;
	default:
		return ret;
	}
	ret = lttng_enumerate_cpu_topology(session);
	if (ret)
		return ret;

	/* TODO lttng_dump_idt_table(session); */
	/* TODO lttng_dump_softirq_vec(session); */
	/* TODO lttng_list_modules(session); */
	/* TODO lttng_dump_swap_files(session); */

	/*
	 * Fire off a work queue on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where it was in
	 * syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
	 */
	get_online_cpus();
	atomic_set(&kernel_threads_to_run, num_online_cpus());
	for_each_online_cpu(cpu) {
		INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	/* Wait for all threads to run */
	__wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
	put_online_cpus();
	/* Our work is done */
	trace_lttng_statedump_end(session);
	return 0;
}

/*
 * Called with session mutex held.
 */
int lttng_statedump_start(struct lttng_session *session)
{
	return do_lttng_statedump(session);
}
EXPORT_SYMBOL_GPL(lttng_statedump_start);

static
int __init lttng_statedump_init(void)
{
	return 0;
}

module_init(lttng_statedump_init);

static
void __exit lttng_statedump_exit(void)
{
}

module_exit(lttng_statedump_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Jean-Hugues Deschenes");
MODULE_DESCRIPTION("LTTng statedump provider");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);