/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-statedump-impl.c
 *
 * Linux Trace Toolkit Next Generation Kernel State Dump
 *
 * Copyright 2005 Jean-Hugues Deschenes <jean-hugues.deschenes@polymtl.ca>
 * Copyright 2006-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Changes:
 *	Eric Clement:			Add listing of network IP interface
 *	2006, 2007 Mathieu Desnoyers	Fix kernel threads
 *					Various updates
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/inet.h>
#include <linux/ip.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/irqnr.h>
#include <linux/cpu.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/fdtable.h>
#include <linux/irq.h>
#include <linux/genhd.h>

#include <lttng/lttng-events.h>
#include <lttng/lttng-tracer.h>

/* Define the tracepoints, but do not build the probes */
#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH instrumentation/events/lttng-module
#define TRACE_INCLUDE_FILE lttng-statedump
#define LTTNG_INSTRUMENTATION
#include <instrumentation/events/lttng-module/lttng-statedump.h>

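/*
 * Define the tracepoint instances for each statedump event, so that
 * probe providers can register against them at runtime.
 */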
DEFINE_TRACE(lttng_statedump_block_device);
DEFINE_TRACE(lttng_statedump_end);
DEFINE_TRACE(lttng_statedump_interrupt);
DEFINE_TRACE(lttng_statedump_file_descriptor);
DEFINE_TRACE(lttng_statedump_start);
DEFINE_TRACE(lttng_statedump_process_state);
DEFINE_TRACE(lttng_statedump_process_pid_ns);
DEFINE_TRACE(lttng_statedump_process_cgroup_ns);
DEFINE_TRACE(lttng_statedump_process_ipc_ns);
#ifndef LTTNG_MNT_NS_MISSING_HEADER
DEFINE_TRACE(lttng_statedump_process_mnt_ns);
#endif
DEFINE_TRACE(lttng_statedump_process_net_ns);
DEFINE_TRACE(lttng_statedump_process_user_ns);
DEFINE_TRACE(lttng_statedump_process_uts_ns);
DEFINE_TRACE(lttng_statedump_network_interface);
#ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
DEFINE_TRACE(lttng_statedump_cpu_topology);
#endif

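/*
 * Context passed to the lttng_dump_one_fd() callback during file
 * descriptor iteration.
 */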
struct lttng_fd_ctx {
	char *page;
	struct lttng_session *session;
	struct files_struct *files;
};

/*
 * Protected by the trace lock.
 */
static struct delayed_work cpu_work[NR_CPUS];
static DECLARE_WAIT_QUEUE_HEAD(statedump_wq);
static atomic_t kernel_threads_to_run;

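/*
 * Enumerations recorded in the process state events; the viewer uses
 * these values when reconstructing per-task state.
 */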
enum lttng_thread_type {
	LTTNG_USER_THREAD = 0,
	LTTNG_KERNEL_THREAD = 1,
};

enum lttng_execution_mode {
	LTTNG_USER_MODE = 0,
	LTTNG_SYSCALL = 1,
	LTTNG_TRAP = 2,
	LTTNG_IRQ = 3,
	LTTNG_SOFTIRQ = 4,
	LTTNG_MODE_UNKNOWN = 5,
};

enum lttng_execution_submode {
	LTTNG_NONE = 0,
	LTTNG_UNKNOWN = 1,
};

enum lttng_process_status {
	LTTNG_UNNAMED = 0,
	LTTNG_WAIT_FORK = 1,
	LTTNG_WAIT_CPU = 2,
	LTTNG_EXIT = 3,
	LTTNG_ZOMBIE = 4,
	LTTNG_WAIT = 5,
	LTTNG_RUN = 6,
	LTTNG_DEAD = 7,
};

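/*
 * Emit one block device event per partition (including partition 0,
 * the whole disk) of every disk known to the block class.
 */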
static
int lttng_enumerate_block_devices(struct lttng_session *session)
{
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, gendisk_block_class(), NULL,
			gendisk_device_type());
	while ((dev = class_dev_iter_next(&iter))) {
		struct disk_part_iter piter;
		struct gendisk *disk = dev_to_disk(dev);
		struct hd_struct *part;

		/*
		 * Don't show empty devices or things that have been
		 * suppressed
		 */
		if (get_capacity(disk) == 0 ||
		    (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
			continue;

		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter))) {
			char name_buf[BDEVNAME_SIZE];
			char *p;

			p = gendisk_name(disk, part->partno, name_buf);
			if (!p) {
				disk_part_iter_exit(&piter);
				class_dev_iter_exit(&iter);
				return -ENOSYS;
			}
			trace_lttng_statedump_block_device(session,
					part_devt(part), name_buf);
		}
		disk_part_iter_exit(&piter);
	}
	class_dev_iter_exit(&iter);
	return 0;
}

#ifdef CONFIG_INET

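/*
 * Emit one network interface event per IPv4 address of an interface
 * that is up; an interface that is down is reported once with a NULL
 * address.
 */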
static
void lttng_enumerate_device(struct lttng_session *session,
		struct net_device *dev)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;

	if (dev->flags & IFF_UP) {
		in_dev = in_dev_get(dev);
		if (in_dev) {
			for (ifa = in_dev->ifa_list; ifa != NULL;
			     ifa = ifa->ifa_next) {
				trace_lttng_statedump_network_interface(
					session, dev, ifa);
			}
			in_dev_put(in_dev);
		}
	} else {
		trace_lttng_statedump_network_interface(
			session, dev, NULL);
	}
}

static
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, dev)
		lttng_enumerate_device(session, dev);
	read_unlock(&dev_base_lock);

	return 0;
}
#else /* CONFIG_INET */
static inline
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	return 0;
}
#endif /* CONFIG_INET */

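/*
 * Callback invoked by iterate_fd() for each open file descriptor of a
 * task: emits one event with the resolved path (falling back to the
 * dentry name when d_path() fails) and the file flags.
 */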
static
int lttng_dump_one_fd(const void *p, struct file *file, unsigned int fd)
{
	const struct lttng_fd_ctx *ctx = p;
	const char *s = d_path(&file->f_path, ctx->page, PAGE_SIZE);
	unsigned int flags = file->f_flags;
	struct fdtable *fdt;

	/*
	 * We don't expose kernel internal flags, only userspace-visible
	 * flags.
	 */
	flags &= ~FMODE_NONOTIFY;
	fdt = files_fdtable(ctx->files);
	/*
	 * We need to check here again whether fd is within the fdt
	 * max_fds range, because we might be seeing a different
	 * files_fdtable() than iterate_fd(), assuming only RCU is
	 * protecting the read. In reality, iterate_fd() holds
	 * file_lock, which should ensure the fdt does not change while
	 * the lock is taken, but we are not aware whether this is
	 * guaranteed or not, so play safe.
	 */
	if (fd < fdt->max_fds && close_on_exec(fd, fdt))
		flags |= O_CLOEXEC;
	if (IS_ERR(s)) {
		struct dentry *dentry = file->f_path.dentry;

		/* Make sure we give at least some info */
		spin_lock(&dentry->d_lock);
		trace_lttng_statedump_file_descriptor(ctx->session,
			ctx->files, fd, dentry->d_name.name, flags,
			file->f_mode);
		spin_unlock(&dentry->d_lock);
		goto end;
	}
	trace_lttng_statedump_file_descriptor(ctx->session,
		ctx->files, fd, s, flags, file->f_mode);
end:
	return 0;
}

/* Called with task lock held. */
static
void lttng_enumerate_files(struct lttng_session *session,
		struct files_struct *files,
		char *tmp)
{
	struct lttng_fd_ctx ctx = { .page = tmp, .session = session, .files = files, };

	iterate_fd(files, 0, lttng_dump_one_fd, &ctx);
}

#ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
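/*
 * Emit one CPU topology event per possible CPU, on architectures
 * where cpu_data() is available (guarded by
 * LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY).
 */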
static
int lttng_enumerate_cpu_topology(struct lttng_session *session)
{
	int cpu;
	const cpumask_t *cpumask = cpu_possible_mask;

	for (cpu = cpumask_first(cpumask); cpu < nr_cpu_ids;
			cpu = cpumask_next(cpu, cpumask)) {
		trace_lttng_statedump_cpu_topology(session, &cpu_data(cpu));
	}

	return 0;
}
#else
static
int lttng_enumerate_cpu_topology(struct lttng_session *session)
{
	return 0;
}
#endif

#if 0
/*
 * FIXME: we cannot take the mmap_sem while in an RCU read-side critical
 * section (scheduling in atomic). Normally, the tasklist lock protects this
 * kind of iteration, but it is not exported to modules.
 */
static
void lttng_enumerate_task_vm_maps(struct lttng_session *session,
		struct task_struct *p)
{
	struct mm_struct *mm;
	struct vm_area_struct *map;
	unsigned long ino;

	/* get_task_mm does a task_lock... */
	mm = get_task_mm(p);
	if (!mm)
		return;

	map = mm->mmap;
	if (map) {
		down_read(&mm->mmap_sem);
		while (map) {
			if (map->vm_file)
				ino = map->vm_file->f_path.dentry->d_inode->i_ino;
			else
				ino = 0;
			trace_lttng_statedump_vm_map(session, p, map, ino);
			map = map->vm_next;
		}
		up_read(&mm->mmap_sem);
	}
	mmput(mm);
}

static
int lttng_enumerate_vm_maps(struct lttng_session *session)
{
	struct task_struct *p;

	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_vm_maps(session, p);
	rcu_read_unlock();
	return 0;
}
#endif

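/*
 * Walk the interrupt descriptors and emit one event per registered
 * irqaction, under the descriptor lock with local interrupts
 * disabled.
 */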
static
int lttng_list_interrupts(struct lttng_session *session)
{
	unsigned int irq;
	unsigned long flags = 0;
	struct irq_desc *desc;

	/* needs irq_desc */
	for_each_irq_desc(irq, desc) {
		struct irqaction *action;
		const char *irq_chip_name =
			irq_desc_get_chip(desc)->name ? : "unnamed_irq_chip";

		local_irq_save(flags);
		raw_spin_lock(&desc->lock);
		for (action = desc->action; action; action = action->next) {
			trace_lttng_statedump_interrupt(session,
				irq, irq_chip_name, action);
		}
		raw_spin_unlock(&desc->lock);
		local_irq_restore(flags);
	}
	return 0;
}

/*
 * Statedump the task's namespaces using the proc filesystem inode number as
 * the unique identifier. The user and pid ns are nested and will be dumped
 * recursively.
 *
 * Called with task lock held.
 */
static
void lttng_statedump_process_ns(struct lttng_session *session,
		struct task_struct *p,
		enum lttng_thread_type type,
		enum lttng_execution_mode mode,
		enum lttng_execution_submode submode,
		enum lttng_process_status status)
{
	struct nsproxy *proxy;
	struct pid_namespace *pid_ns;
	struct user_namespace *user_ns;

	/*
	 * The pid and user namespaces are special, they are nested and
	 * accessed with specific functions instead of the nsproxy struct
	 * like the other namespaces.
	 */
	pid_ns = task_active_pid_ns(p);
	do {
		trace_lttng_statedump_process_pid_ns(session, p, pid_ns);
		pid_ns = pid_ns ? pid_ns->parent : NULL;
	} while (pid_ns);

	user_ns = task_cred_xxx(p, user_ns);
	do {
		trace_lttng_statedump_process_user_ns(session, p, user_ns);
		/*
		 * trace_lttng_statedump_process_user_ns() internally
		 * checks whether user_ns is NULL. While this does not
		 * appear to be a possible return value for
		 * task_cred_xxx(), err on the safe side and check
		 * for NULL here as well to be consistent with the
		 * paranoid behavior of
		 * trace_lttng_statedump_process_user_ns().
		 */
		user_ns = user_ns ? user_ns->parent : NULL;
	} while (user_ns);

	/*
	 * Back and forth on locking strategy within Linux upstream for nsproxy.
	 * See Linux upstream commit 728dba3a39c66b3d8ac889ddbe38b5b1c264aec3
	 * "namespaces: Use task_lock and not rcu to protect nsproxy"
	 * for details.
	 */
	proxy = p->nsproxy;
	if (proxy) {
		trace_lttng_statedump_process_cgroup_ns(session, p, proxy->cgroup_ns);
		trace_lttng_statedump_process_ipc_ns(session, p, proxy->ipc_ns);
#ifndef LTTNG_MNT_NS_MISSING_HEADER
		trace_lttng_statedump_process_mnt_ns(session, p, proxy->mnt_ns);
#endif
		trace_lttng_statedump_process_net_ns(session, p, proxy->net_ns);
		trace_lttng_statedump_process_uts_ns(session, p, proxy->uts_ns);
	}
}

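/*
 * Walk every thread of every process and emit its state, namespaces
 * and open file descriptors. Runs under rcu_read_lock(), taking the
 * task lock around each thread.
 */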
static
int lttng_enumerate_process_states(struct lttng_session *session)
{
	struct task_struct *g, *p;
	char *tmp;

	tmp = (char *) __get_free_page(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	rcu_read_lock();
	for_each_process(g) {
		struct files_struct *prev_files = NULL;

		p = g;
		do {
			enum lttng_execution_mode mode =
				LTTNG_MODE_UNKNOWN;
			enum lttng_execution_submode submode =
				LTTNG_UNKNOWN;
			enum lttng_process_status status;
			enum lttng_thread_type type;
			struct files_struct *files;

			task_lock(p);
			if (p->exit_state == EXIT_ZOMBIE)
				status = LTTNG_ZOMBIE;
			else if (p->exit_state == EXIT_DEAD)
				status = LTTNG_DEAD;
			else if (p->state == TASK_RUNNING) {
				/* Is this a forked child that has not run yet? */
				if (list_empty(&p->rt.run_list))
					status = LTTNG_WAIT_FORK;
				else
					/*
					 * All tasks are considered as wait_cpu;
					 * the viewer will sort out if the task
					 * was really running at this time.
					 */
					status = LTTNG_WAIT_CPU;
			} else if (p->state &
				(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) {
				/* Task is waiting for something to complete */
				status = LTTNG_WAIT;
			} else
				status = LTTNG_UNNAMED;
			submode = LTTNG_NONE;

			/*
			 * Checking p->mm filters out kernel threads;
			 * the viewer will further determine whether a
			 * user-space thread was in syscall mode or not.
			 */
			if (p->mm)
				type = LTTNG_USER_THREAD;
			else
				type = LTTNG_KERNEL_THREAD;
			files = p->files;

			trace_lttng_statedump_process_state(session,
				p, type, mode, submode, status, files);
			lttng_statedump_process_ns(session,
				p, type, mode, submode, status);
			/*
			 * As an optimisation for the common case, do not
			 * repeat information for the same files_struct in
			 * two consecutive threads. This is the common case
			 * for threads sharing the same fd table. RCU guarantees
			 * that the same files_struct pointer is not re-used
			 * throughout processes/threads iteration.
			 */
			if (files && files != prev_files) {
				lttng_enumerate_files(session, files, tmp);
				prev_files = files;
			}
			task_unlock(p);
		} while_each_thread(g, p);
	}
	rcu_read_unlock();

	free_page((unsigned long) tmp);

	return 0;
}

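/*
 * Per-CPU work function: decrement the pending-work counter and wake
 * up do_lttng_statedump() once the last CPU has run.
 */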
static
void lttng_statedump_work_func(struct work_struct *work)
{
	if (atomic_dec_and_test(&kernel_threads_to_run))
		/* If we are the last thread, wake up do_lttng_statedump */
		wake_up(&statedump_wq);
}

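/*
 * Perform the state dump proper: processes, interrupts, network
 * interfaces, block devices and CPU topology, bracketed by the
 * statedump start and end events.
 */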
static
int do_lttng_statedump(struct lttng_session *session)
{
	int cpu, ret;

	trace_lttng_statedump_start(session);
	ret = lttng_enumerate_process_states(session);
	if (ret)
		return ret;
	/*
	 * FIXME
	 * ret = lttng_enumerate_vm_maps(session);
	 * if (ret)
	 *	return ret;
	 */
	ret = lttng_list_interrupts(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_network_ip_interface(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_block_devices(session);
	switch (ret) {
	case 0:
		break;
	case -ENOSYS:
		printk(KERN_WARNING "LTTng: block device enumeration is not supported by kernel\n");
		break;
	default:
		return ret;
	}
	ret = lttng_enumerate_cpu_topology(session);
	if (ret)
		return ret;

	/* TODO lttng_dump_idt_table(session); */
	/* TODO lttng_dump_softirq_vec(session); */
	/* TODO lttng_list_modules(session); */
	/* TODO lttng_dump_swap_files(session); */

	/*
	 * Fire off a work item on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where it was
	 * in syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
	 */
	get_online_cpus();
	atomic_set(&kernel_threads_to_run, num_online_cpus());
	for_each_online_cpu(cpu) {
		INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	/* Wait for all threads to run */
	__wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
	put_online_cpus();
	/* Our work is done */
	trace_lttng_statedump_end(session);
	return 0;
}

/*
 * Called with session mutex held.
 */
int lttng_statedump_start(struct lttng_session *session)
{
	return do_lttng_statedump(session);
}
EXPORT_SYMBOL_GPL(lttng_statedump_start);

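/*
 * No module-scope state to set up or tear down: the state dump is
 * driven entirely through the exported lttng_statedump_start().
 */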
static
int __init lttng_statedump_init(void)
{
	return 0;
}

module_init(lttng_statedump_init);

static
void __exit lttng_statedump_exit(void)
{
}

module_exit(lttng_statedump_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Jean-Hugues Deschenes");
MODULE_DESCRIPTION("LTTng statedump provider");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);