4d7b2921f1bac52ba43a6a271f69ff3aa8865ca2
[lttng-modules.git] / src / lttng-statedump-impl.c
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-statedump.c
4 *
5 * Linux Trace Toolkit Next Generation Kernel State Dump
6 *
7 * Copyright 2005 Jean-Hugues Deschenes <jean-hugues.deschenes@polymtl.ca>
8 * Copyright 2006-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
9 *
10 * Changes:
11 * Eric Clement: Add listing of network IP interface
12 * 2006, 2007 Mathieu Desnoyers Fix kernel threads
13 * Various updates
14 */
15
16 #include <linux/init.h>
17 #include <linux/module.h>
18 #include <linux/netlink.h>
19 #include <linux/inet.h>
20 #include <linux/ip.h>
21 #include <linux/kthread.h>
22 #include <linux/proc_fs.h>
23 #include <linux/file.h>
24 #include <linux/interrupt.h>
25 #include <linux/irqnr.h>
26 #include <linux/netdevice.h>
27 #include <linux/inetdevice.h>
28 #include <linux/mm.h>
29 #include <linux/swap.h>
30 #include <linux/wait.h>
31 #include <linux/mutex.h>
32 #include <linux/device.h>
33
34 #include <linux/blkdev.h>
35
36 #include <lttng/events.h>
37 #include <lttng/tracer.h>
38 #include <wrapper/cpu.h>
39 #include <wrapper/irqdesc.h>
40 #include <wrapper/fdtable.h>
41 #include <wrapper/namespace.h>
42 #include <wrapper/irq.h>
43 #include <wrapper/tracepoint.h>
44 #include <wrapper/genhd.h>
45 #include <wrapper/file.h>
46 #include <wrapper/fdtable.h>
47 #include <wrapper/sched.h>
48
49 #ifdef CONFIG_LTTNG_HAS_LIST_IRQ
50 #include <linux/irq.h>
51 #endif
52
53 /* Define the tracepoints, but do not build the probes */
54 #define CREATE_TRACE_POINTS
55 #define TRACE_INCLUDE_PATH instrumentation/events
56 #define TRACE_INCLUDE_FILE lttng-statedump
57 #define LTTNG_INSTRUMENTATION
58 #include <instrumentation/events/lttng-statedump.h>
59
60 LTTNG_DEFINE_TRACE(lttng_statedump_block_device,
61 TP_PROTO(struct lttng_kernel_session *session,
62 dev_t dev, const char *diskname),
63 TP_ARGS(session, dev, diskname));
64
65 LTTNG_DEFINE_TRACE(lttng_statedump_end,
66 TP_PROTO(struct lttng_kernel_session *session),
67 TP_ARGS(session));
68
69 LTTNG_DEFINE_TRACE(lttng_statedump_interrupt,
70 TP_PROTO(struct lttng_kernel_session *session,
71 unsigned int irq, const char *chip_name,
72 struct irqaction *action),
73 TP_ARGS(session, irq, chip_name, action));
74
75 LTTNG_DEFINE_TRACE(lttng_statedump_file_descriptor,
76 TP_PROTO(struct lttng_kernel_session *session,
77 struct files_struct *files,
78 int fd, const char *filename,
79 unsigned int flags, fmode_t fmode),
80 TP_ARGS(session, files, fd, filename, flags, fmode));
81
82 LTTNG_DEFINE_TRACE(lttng_statedump_start,
83 TP_PROTO(struct lttng_kernel_session *session),
84 TP_ARGS(session));
85
86 LTTNG_DEFINE_TRACE(lttng_statedump_process_state,
87 TP_PROTO(struct lttng_kernel_session *session,
88 struct task_struct *p,
89 int type, int mode, int submode, int status,
90 struct files_struct *files),
91 TP_ARGS(session, p, type, mode, submode, status, files));
92
93 LTTNG_DEFINE_TRACE(lttng_statedump_process_pid_ns,
94 TP_PROTO(struct lttng_kernel_session *session,
95 struct task_struct *p,
96 struct pid_namespace *pid_ns),
97 TP_ARGS(session, p, pid_ns));
98
99 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,6,0))
100 LTTNG_DEFINE_TRACE(lttng_statedump_process_cgroup_ns,
101 TP_PROTO(struct lttng_kernel_session *session,
102 struct task_struct *p,
103 struct cgroup_namespace *cgroup_ns),
104 TP_ARGS(session, p, cgroup_ns));
105 #endif
106
107 LTTNG_DEFINE_TRACE(lttng_statedump_process_ipc_ns,
108 TP_PROTO(struct lttng_kernel_session *session,
109 struct task_struct *p,
110 struct ipc_namespace *ipc_ns),
111 TP_ARGS(session, p, ipc_ns));
112
113 #ifndef LTTNG_MNT_NS_MISSING_HEADER
114 LTTNG_DEFINE_TRACE(lttng_statedump_process_mnt_ns,
115 TP_PROTO(struct lttng_kernel_session *session,
116 struct task_struct *p,
117 struct mnt_namespace *mnt_ns),
118 TP_ARGS(session, p, mnt_ns));
119 #endif
120
121 LTTNG_DEFINE_TRACE(lttng_statedump_process_net_ns,
122 TP_PROTO(struct lttng_kernel_session *session,
123 struct task_struct *p,
124 struct net *net_ns),
125 TP_ARGS(session, p, net_ns));
126
127 LTTNG_DEFINE_TRACE(lttng_statedump_process_user_ns,
128 TP_PROTO(struct lttng_kernel_session *session,
129 struct task_struct *p,
130 struct user_namespace *user_ns),
131 TP_ARGS(session, p, user_ns));
132
133 LTTNG_DEFINE_TRACE(lttng_statedump_process_uts_ns,
134 TP_PROTO(struct lttng_kernel_session *session,
135 struct task_struct *p,
136 struct uts_namespace *uts_ns),
137 TP_ARGS(session, p, uts_ns));
138
139 LTTNG_DEFINE_TRACE(lttng_statedump_process_time_ns,
140 TP_PROTO(struct lttng_kernel_session *session,
141 struct task_struct *p,
142 struct time_namespace *time_ns),
143 TP_ARGS(session, p, time_ns));
144
145 LTTNG_DEFINE_TRACE(lttng_statedump_network_interface,
146 TP_PROTO(struct lttng_kernel_session *session,
147 struct net_device *dev, struct in_ifaddr *ifa),
148 TP_ARGS(session, dev, ifa));
149
150 #ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
151 LTTNG_DEFINE_TRACE(lttng_statedump_cpu_topology,
152 TP_PROTO(struct lttng_kernel_session *session, struct cpuinfo_x86 *c),
153 TP_ARGS(session, c));
154 #endif
155
156 struct lttng_fd_ctx {
157 char *page;
158 struct lttng_kernel_session *session;
159 struct files_struct *files;
160 };
161
162 /*
163 * Protected by the trace lock.
164 */
165 static struct delayed_work cpu_work[NR_CPUS];
166 static DECLARE_WAIT_QUEUE_HEAD(statedump_wq);
167 static atomic_t kernel_threads_to_run;
168
169 enum lttng_thread_type {
170 LTTNG_USER_THREAD = 0,
171 LTTNG_KERNEL_THREAD = 1,
172 };
173
174 enum lttng_execution_mode {
175 LTTNG_USER_MODE = 0,
176 LTTNG_SYSCALL = 1,
177 LTTNG_TRAP = 2,
178 LTTNG_IRQ = 3,
179 LTTNG_SOFTIRQ = 4,
180 LTTNG_MODE_UNKNOWN = 5,
181 };
182
183 enum lttng_execution_submode {
184 LTTNG_NONE = 0,
185 LTTNG_UNKNOWN = 1,
186 };
187
188 enum lttng_process_status {
189 LTTNG_UNNAMED = 0,
190 LTTNG_WAIT_FORK = 1,
191 LTTNG_WAIT_CPU = 2,
192 LTTNG_EXIT = 3,
193 LTTNG_ZOMBIE = 4,
194 LTTNG_WAIT = 5,
195 LTTNG_RUN = 6,
196 LTTNG_DEAD = 7,
197 };
198
199
200 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,11,0))
201
202 #define LTTNG_PART_STRUCT_TYPE struct block_device
203
204 static
205 int lttng_get_part_name(struct gendisk *disk, struct block_device *part, char *name_buf)
206 {
207 const char *p;
208
209 p = bdevname(part, name_buf);
210 if (!p)
211 return -ENOSYS;
212
213 return 0;
214 }
215
/* Return the dev_t of a partition (>= 5.11: stored directly in bd_dev). */
static
dev_t lttng_get_part_devt(struct block_device *part)
{
	return part->bd_dev;
}
221
222 #else
223
224 #define LTTNG_PART_STRUCT_TYPE struct hd_struct
225
226 static
227 int lttng_get_part_name(struct gendisk *disk, struct hd_struct *part, char *name_buf)
228 {
229 const char *p;
230 struct block_device bdev;
231
232 /*
233 * Create a partial 'struct blockdevice' to use
234 * 'bdevname()' which is a simple wrapper over
235 * 'disk_name()' but has the honor to be EXPORT_SYMBOL.
236 */
237 bdev.bd_disk = disk;
238 bdev.bd_part = part;
239
240 p = bdevname(&bdev, name_buf);
241 if (!p)
242 return -ENOSYS;
243
244 return 0;
245 }
246
/* Return the dev_t of a partition (pre-5.11: via the hd_struct helper). */
static
dev_t lttng_get_part_devt(struct hd_struct *part)
{
	return part_devt(part);
}
252 #endif
253
254 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,12,0))
/*
 * Emit one lttng_statedump_block_device event per partition of 'disk',
 * including partition 0 (the whole-disk device). Kernels >= 5.12 keep
 * partitions in an xarray on the gendisk, walked under RCU.
 * Returns 0 on success, -ENOSYS if a partition name cannot be resolved.
 */
static
int lttng_statedump_each_block_device(struct lttng_kernel_session *session, struct gendisk *disk)
{
	struct block_device *part;
	unsigned long idx;
	int ret = 0;

	/* Include partition 0 */
	idx = 0;

	rcu_read_lock();
	xa_for_each(&disk->part_tbl, idx, part) {
		char name_buf[BDEVNAME_SIZE];

		/* Exclude non-partitions bdev and empty partitions. */
		if (bdev_is_partition(part) && !bdev_nr_sectors(part))
			continue;

		if (lttng_get_part_name(disk, part, name_buf) == -ENOSYS) {
			ret = -ENOSYS;
			goto end;
		}
		trace_lttng_statedump_block_device(session, lttng_get_part_devt(part),
				name_buf);
	}
end:
	rcu_read_unlock();
	return ret;
}
284 #else
/*
 * Emit one lttng_statedump_block_device event per partition of 'disk',
 * including partition 0, using the pre-5.12 disk_part_iter API.
 * Returns 0 on success, -ENOSYS if a partition name cannot be resolved.
 */
static
int lttng_statedump_each_block_device(struct lttng_kernel_session *session, struct gendisk *disk)
{
	struct disk_part_iter piter;
	LTTNG_PART_STRUCT_TYPE *part;

	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);

	while ((part = disk_part_iter_next(&piter))) {
		char name_buf[BDEVNAME_SIZE];

		if (lttng_get_part_name(disk, part, name_buf) == -ENOSYS) {
			/* Release the iterator before bailing out. */
			disk_part_iter_exit(&piter);
			return -ENOSYS;
		}
		trace_lttng_statedump_block_device(session, lttng_get_part_devt(part),
				name_buf);
	}
	disk_part_iter_exit(&piter);

	return 0;
}
307 #endif
308
/*
 * Iterate all devices of the block class that have the disk device
 * type, and statedump each disk's partitions.
 * Returns -ENOSYS when the block class or disk type symbols are
 * unavailable; otherwise returns the status of the last disk dumped.
 */
static
int lttng_enumerate_block_devices(struct lttng_kernel_session *session)
{
	struct class *ptr_block_class;
	struct device_type *ptr_disk_type;
	struct class_dev_iter iter;
	struct device *dev;
	int ret = 0;

	ptr_block_class = wrapper_get_block_class();
	if (!ptr_block_class) {
		ret = -ENOSYS;
		goto end;
	}
	ptr_disk_type = wrapper_get_disk_type();
	if (!ptr_disk_type) {
		ret = -ENOSYS;
		goto end;
	}
	class_dev_iter_init(&iter, ptr_block_class, NULL, ptr_disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct gendisk *disk = dev_to_disk(dev);

		/*
		 * Don't show empty devices or things that have been
		 * suppressed
		 */
		if (get_capacity(disk) == 0 ||
		    (disk->flags & LTTNG_GENHD_FL_HIDDEN))
			continue;

		/*
		 * NOTE(review): 'ret' is overwritten each iteration, so an
		 * error on an earlier disk is masked by a later success.
		 * Looks like deliberate best-effort enumeration — confirm.
		 */
		ret = lttng_statedump_each_block_device(session, disk);
	}
	class_dev_iter_exit(&iter);
end:
	return ret;
}
346
347 #ifdef CONFIG_INET
348
/*
 * Statedump one network device: when the interface is up, emit one
 * lttng_statedump_network_interface event per IPv4 address; otherwise
 * emit a single event with a NULL address.
 */
static
void lttng_enumerate_device(struct lttng_kernel_session *session,
		struct net_device *dev)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;

	if (dev->flags & IFF_UP) {
		/* in_dev_get() takes a reference on the IPv4 device config. */
		in_dev = in_dev_get(dev);
		if (in_dev) {
			for (ifa = in_dev->ifa_list; ifa != NULL;
					ifa = ifa->ifa_next) {
				trace_lttng_statedump_network_interface(
					session, dev, ifa);
			}
			in_dev_put(in_dev);
		}
	} else {
		trace_lttng_statedump_network_interface(
			session, dev, NULL);
	}
}
371
/*
 * Statedump every network device of the initial network namespace.
 * Always returns 0.
 */
static
int lttng_enumerate_network_ip_interface(struct lttng_kernel_session *session)
{
	struct net_device *dev;

	/* dev_base_lock protects the netdev list traversal. */
	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, dev)
		lttng_enumerate_device(session, dev);
	read_unlock(&dev_base_lock);

	return 0;
}
384 #else /* CONFIG_INET */
/* No-op fallback when the kernel is built without CONFIG_INET. */
static inline
int lttng_enumerate_network_ip_interface(struct lttng_kernel_session *session)
{
	return 0;
}
390 #endif /* CONFIG_INET */
391
/*
 * lttng_iterate_fd() callback: emit one lttng_statedump_file_descriptor
 * event for file descriptor 'fd'. 'p' is the struct lttng_fd_ctx whose
 * 'page' member is a scratch page for d_path().
 * Always returns 0 so the iteration continues over every fd.
 */
static
int lttng_dump_one_fd(const void *p, struct file *file, unsigned int fd)
{
	const struct lttng_fd_ctx *ctx = p;
	const char *s = d_path(&file->f_path, ctx->page, PAGE_SIZE);
	unsigned int flags = file->f_flags;
	struct fdtable *fdt;

	/*
	 * We don't expose kernel internal flags, only userspace-visible
	 * flags.
	 */
	flags &= ~FMODE_NONOTIFY;
	fdt = files_fdtable(ctx->files);
	/*
	 * We need to check here again whether fd is within the fdt
	 * max_fds range, because we might be seeing a different
	 * files_fdtable() than iterate_fd(), assuming only RCU is
	 * protecting the read. In reality, iterate_fd() holds
	 * file_lock, which should ensure the fdt does not change while
	 * the lock is taken, but we are not aware whether this is
	 * guaranteed or not, so play safe.
	 */
	if (fd < fdt->max_fds && lttng_close_on_exec(fd, fdt))
		flags |= O_CLOEXEC;	/* close-on-exec lives in the fdtable, not f_flags */
	if (IS_ERR(s)) {
		struct dentry *dentry = file->f_path.dentry;

		/* Make sure we give at least some info */
		/* d_lock stabilizes d_name while the tracepoint reads it. */
		spin_lock(&dentry->d_lock);
		trace_lttng_statedump_file_descriptor(ctx->session,
			ctx->files, fd, dentry->d_name.name, flags,
			file->f_mode);
		spin_unlock(&dentry->d_lock);
		goto end;
	}
	trace_lttng_statedump_file_descriptor(ctx->session,
		ctx->files, fd, s, flags, file->f_mode);
end:
	return 0;
}
433
434 /* Called with task lock held. */
435 static
436 void lttng_enumerate_files(struct lttng_kernel_session *session,
437 struct files_struct *files,
438 char *tmp)
439 {
440 struct lttng_fd_ctx ctx = { .page = tmp, .session = session, .files = files, };
441
442 lttng_iterate_fd(files, 0, lttng_dump_one_fd, &ctx);
443 }
444
445 #ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
/*
 * Emit one lttng_statedump_cpu_topology event per possible CPU.
 * Always returns 0.
 */
static
int lttng_enumerate_cpu_topology(struct lttng_kernel_session *session)
{
	int cpu;
	const cpumask_t *cpumask = cpu_possible_mask;

	for (cpu = cpumask_first(cpumask); cpu < nr_cpu_ids;
			cpu = cpumask_next(cpu, cpumask)) {
		trace_lttng_statedump_cpu_topology(session, &cpu_data(cpu));
	}

	return 0;
}
459 #else
/* No-op fallback when CPU topology statedump is unsupported here. */
static
int lttng_enumerate_cpu_topology(struct lttng_kernel_session *session)
{
	return 0;
}
465 #endif
466
467 #if 0
468 /*
469 * FIXME: we cannot take a mmap_sem while in a RCU read-side critical section
470 * (scheduling in atomic). Normally, the tasklist lock protects this kind of
471 * iteration, but it is not exported to modules.
472 */
/*
 * Disabled code (#if 0): would statedump every VM mapping of task 'p'.
 * Kept for reference; see the FIXME above about taking mmap_sem inside
 * an RCU read-side critical section.
 */
static
void lttng_enumerate_task_vm_maps(struct lttng_kernel_session *session,
		struct task_struct *p)
{
	struct mm_struct *mm;
	struct vm_area_struct *map;
	unsigned long ino;

	/* get_task_mm does a task_lock... */
	mm = get_task_mm(p);
	if (!mm)
		return;

	map = mm->mmap;
	if (map) {
		down_read(&mm->mmap_sem);
		while (map) {
			if (map->vm_file)
				ino = map->vm_file->lttng_f_dentry->d_inode->i_ino;
			else
				ino = 0;	/* anonymous mapping: no backing inode */
			trace_lttng_statedump_vm_map(session, p, map, ino);
			map = map->vm_next;
		}
		up_read(&mm->mmap_sem);
	}
	mmput(mm);
}
501
/*
 * Disabled code (#if 0): iterate all processes and dump their VM maps.
 * Cannot be enabled as-is: see the FIXME above this block.
 */
static
int lttng_enumerate_vm_maps(struct lttng_kernel_session *session)
{
	struct task_struct *p;

	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_vm_maps(session, p);
	rcu_read_unlock();
	return 0;
}
513 #endif
514
515 #ifdef CONFIG_LTTNG_HAS_LIST_IRQ
516
/*
 * Emit one lttng_statedump_interrupt event per registered irqaction of
 * every IRQ descriptor on the system. Always returns 0.
 */
static
int lttng_list_interrupts(struct lttng_kernel_session *session)
{
	unsigned int irq;
	unsigned long flags = 0;
	struct irq_desc *desc;

	/* for_each_irq_desc() expands to irq_to_desc(); route it to our wrapper. */
#define irq_to_desc wrapper_irq_to_desc
	/* needs irq_desc */
	for_each_irq_desc(irq, desc) {
		struct irqaction *action;
		const char *irq_chip_name =
			irq_desc_get_chip(desc)->name ? : "unnamed_irq_chip";

		/* desc->lock (irqs off) protects the action list walk. */
		local_irq_save(flags);
		raw_spin_lock(&desc->lock);
		for (action = desc->action; action; action = action->next) {
			trace_lttng_statedump_interrupt(session,
				irq, irq_chip_name, action);
		}
		raw_spin_unlock(&desc->lock);
		local_irq_restore(flags);
	}
	return 0;
#undef irq_to_desc
}
543 #else
/* No-op fallback when the kernel cannot list IRQ descriptors. */
static inline
int lttng_list_interrupts(struct lttng_kernel_session *session)
{
	return 0;
}
549 #endif
550
551 /*
552 * Statedump the task's namespaces using the proc filesystem inode number as
553 * the unique identifier. The user and pid ns are nested and will be dumped
554 * recursively.
555 *
556 * Called with task lock held.
557 */
static
void lttng_statedump_process_ns(struct lttng_kernel_session *session,
		struct task_struct *p,
		enum lttng_thread_type type,
		enum lttng_execution_mode mode,
		enum lttng_execution_submode submode,
		enum lttng_process_status status)
{
	struct nsproxy *proxy;
	struct pid_namespace *pid_ns;
	struct user_namespace *user_ns;

	/*
	 * The pid and user namespaces are special, they are nested and
	 * accessed with specific functions instead of the nsproxy struct
	 * like the other namespaces.
	 */
	pid_ns = task_active_pid_ns(p);
	do {
		trace_lttng_statedump_process_pid_ns(session, p, pid_ns);
		/* Walk up to the root pid namespace (parent of root is NULL). */
		pid_ns = pid_ns ? pid_ns->parent : NULL;
	} while (pid_ns);


	user_ns = task_cred_xxx(p, user_ns);
	do {
		trace_lttng_statedump_process_user_ns(session, p, user_ns);
		/*
		 * trace_lttng_statedump_process_user_ns() internally
		 * checks whether user_ns is NULL. While this does not
		 * appear to be a possible return value for
		 * task_cred_xxx(), err on the safe side and check
		 * for NULL here as well to be consistent with the
		 * paranoid behavior of
		 * trace_lttng_statedump_process_user_ns().
		 */
		user_ns = user_ns ? user_ns->lttng_user_ns_parent : NULL;
	} while (user_ns);

	/*
	 * Back and forth on locking strategy within Linux upstream for nsproxy.
	 * See Linux upstream commit 728dba3a39c66b3d8ac889ddbe38b5b1c264aec3
	 * "namespaces: Use task_lock and not rcu to protect nsproxy"
	 * for details.
	 */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,17,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,36, 3,14,0,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,16,1,11, 3,17,0,0) || \
		LTTNG_RHEL_KERNEL_RANGE(3,10,0,229,13,0, 3,11,0,0,0,0))
	/* Task lock (held by our caller) protects p->nsproxy here. */
	proxy = p->nsproxy;
#else
	/* Older kernels: nsproxy is RCU-protected instead. */
	rcu_read_lock();
	proxy = task_nsproxy(p);
#endif
	if (proxy) {
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,6,0))
		trace_lttng_statedump_process_cgroup_ns(session, p, proxy->cgroup_ns);
#endif
		trace_lttng_statedump_process_ipc_ns(session, p, proxy->ipc_ns);
#ifndef LTTNG_MNT_NS_MISSING_HEADER
		trace_lttng_statedump_process_mnt_ns(session, p, proxy->mnt_ns);
#endif
		trace_lttng_statedump_process_net_ns(session, p, proxy->net_ns);
		trace_lttng_statedump_process_uts_ns(session, p, proxy->uts_ns);
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0) || \
		LTTNG_RHEL_KERNEL_RANGE(4,18,0,305,0,0, 4,19,0,0,0,0))
		trace_lttng_statedump_process_time_ns(session, p, proxy->time_ns);
#endif
	}
	/* Unlock only on the kernel range that took rcu_read_lock() above. */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,17,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,36, 3,14,0,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,16,1,11, 3,17,0,0) || \
		LTTNG_RHEL_KERNEL_RANGE(3,10,0,229,13,0, 3,11,0,0,0,0))
	/* (nothing) */
#else
	rcu_read_unlock();
#endif
}
636
/*
 * Walk every process and thread on the system and emit, for each: its
 * process state, its namespaces, and (once per distinct files_struct)
 * its open file descriptors.
 * Returns 0 on success, -ENOMEM if the scratch page cannot be allocated.
 */
static
int lttng_enumerate_process_states(struct lttng_kernel_session *session)
{
	struct task_struct *g, *p;
	char *tmp;

	/* Scratch page for d_path() during fd enumeration. */
	tmp = (char *) __get_free_page(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	rcu_read_lock();
	for_each_process(g) {
		struct files_struct *prev_files = NULL;

		p = g;
		do {
			enum lttng_execution_mode mode =
				LTTNG_MODE_UNKNOWN;
			enum lttng_execution_submode submode =
				LTTNG_UNKNOWN;
			enum lttng_process_status status;
			enum lttng_thread_type type;
			struct files_struct *files;

			task_lock(p);
			if (p->exit_state == EXIT_ZOMBIE)
				status = LTTNG_ZOMBIE;
			else if (p->exit_state == EXIT_DEAD)
				status = LTTNG_DEAD;
			else if (lttng_task_is_running(p)) {
				/* Is this a forked child that has not run yet? */
				if (list_empty(&p->rt.run_list))
					status = LTTNG_WAIT_FORK;
				else
					/*
					 * All tasks are considered as wait_cpu;
					 * the viewer will sort out if the task
					 * was really running at this time.
					 */
					status = LTTNG_WAIT_CPU;
			} else if (lttng_get_task_state(p) &
				(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) {
				/* Task is waiting for something to complete */
				status = LTTNG_WAIT;
			} else
				status = LTTNG_UNNAMED;
			submode = LTTNG_NONE;

			/*
			 * Verification of t->mm is to filter out kernel
			 * threads; Viewer will further filter out if a
			 * user-space thread was in syscall mode or not.
			 */
			if (p->mm)
				type = LTTNG_USER_THREAD;
			else
				type = LTTNG_KERNEL_THREAD;
			files = p->files;

			trace_lttng_statedump_process_state(session,
				p, type, mode, submode, status, files);
			/* Task lock is still held, as required by the callee. */
			lttng_statedump_process_ns(session,
				p, type, mode, submode, status);
			/*
			 * As an optimisation for the common case, do not
			 * repeat information for the same files_struct in
			 * two consecutive threads. This is the common case
			 * for threads sharing the same fd table. RCU guarantees
			 * that the same files_struct pointer is not re-used
			 * throughout processes/threads iteration.
			 */
			if (files && files != prev_files) {
				lttng_enumerate_files(session, files, tmp);
				prev_files = files;
			}
			task_unlock(p);
		} while_each_thread(g, p);
	}
	rcu_read_unlock();

	free_page((unsigned long) tmp);

	return 0;
}
721
722 static
723 void lttng_statedump_work_func(struct work_struct *work)
724 {
725 if (atomic_dec_and_test(&kernel_threads_to_run))
726 /* If we are the last thread, wake up do_lttng_statedump */
727 wake_up(&statedump_wq);
728 }
729
/*
 * Run the complete state dump for 'session': processes, interrupts,
 * network interfaces, block devices and CPU topology, bracketed by
 * lttng_statedump_start/end events.
 * Returns 0 on success or a negative errno; -ENOSYS from block device
 * enumeration is only warned about, not treated as fatal.
 */
static
int do_lttng_statedump(struct lttng_kernel_session *session)
{
	int cpu, ret;

	trace_lttng_statedump_start(session);
	ret = lttng_enumerate_process_states(session);
	if (ret)
		return ret;
	/*
	 * FIXME
	 * ret = lttng_enumerate_vm_maps(session);
	 * if (ret)
	 *	return ret;
	 */
	ret = lttng_list_interrupts(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_network_ip_interface(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_block_devices(session);
	switch (ret) {
	case 0:
		break;
	case -ENOSYS:
		/* Best-effort: missing kernel support is not fatal. */
		printk(KERN_WARNING "LTTng: block device enumeration is not supported by kernel\n");
		break;
	default:
		return ret;
	}
	ret = lttng_enumerate_cpu_topology(session);
	if (ret)
		return ret;

	/* TODO lttng_dump_idt_table(session); */
	/* TODO lttng_dump_softirq_vec(session); */
	/* TODO lttng_list_modules(session); */
	/* TODO lttng_dump_swap_files(session); */

	/*
	 * Fire off a work queue on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where is was in
	 * syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
	 */
	/* Hold CPU hotplug read lock so the online set stays stable. */
	lttng_cpus_read_lock();
	atomic_set(&kernel_threads_to_run, num_online_cpus());
	for_each_online_cpu(cpu) {
		INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	/* Wait for all threads to run */
	__wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
	lttng_cpus_read_unlock();
	/* Our work is done */
	trace_lttng_statedump_end(session);
	return 0;
}
788
/*
 * Public entry point: perform a full state dump for 'session'.
 * Called with session mutex held.
 */
int lttng_statedump_start(struct lttng_kernel_session *session)
{
	return do_lttng_statedump(session);
}
796 EXPORT_SYMBOL_GPL(lttng_statedump_start);
797
/* Module init: best-effort tracepoint signature fixup; never fails. */
static
int __init lttng_statedump_init(void)
{
	/*
	 * Allow module to load even if the fixup cannot be done. This
	 * will allow seemless transition when the underlying issue fix
	 * is merged into the Linux kernel, and when tracepoint.c
	 * "tracepoint_module_notify" is turned into a static function.
	 */
	(void) wrapper_lttng_fixup_sig(THIS_MODULE);
	return 0;
}
810
811 module_init(lttng_statedump_init);
812
/* Module exit: nothing to tear down. */
static
void __exit lttng_statedump_exit(void)
{
}
817
818 module_exit(lttng_statedump_exit);
819
820 MODULE_LICENSE("GPL and additional rights");
821 MODULE_AUTHOR("Jean-Hugues Deschenes");
822 MODULE_DESCRIPTION("LTTng statedump provider");
823 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
824 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
825 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
826 LTTNG_MODULES_EXTRAVERSION);
This page took 0.043794 seconds and 3 git commands to generate.