/*
 * lttng-statedump.c
 *
 * Linux Trace Toolkit Next Generation Kernel State Dump
 *
 * Copyright 2005 Jean-Hugues Deschenes <jean-hugues.deschenes@polymtl.ca>
 * Copyright 2006-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Changes:
 *	Eric Clement:			Add listing of network IP interface
 *	2006, 2007 Mathieu Desnoyers	Fix kernel threads
 *					Various updates
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/inet.h>
#include <linux/ip.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/irqnr.h>
#include <linux/cpu.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fdtable.h>
#include <linux/swap.h>
#include <linux/wait.h>
#include <linux/mutex.h>

#include "lttng-events.h"
#include "wrapper/irqdesc.h"
#include "wrapper/spinlock.h"
#include "wrapper/fdtable.h"

#ifdef CONFIG_GENERIC_HARDIRQS
#include <linux/irq.h>
#endif

/* Define the tracepoints, but do not build the probes */
#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
#define TRACE_INCLUDE_FILE lttng-statedump
#include "instrumentation/events/lttng-module/lttng-statedump.h"

struct lttng_fd_ctx {
	char *page;
	struct lttng_session *session;
	struct task_struct *p;
};

/*
 * Protected by the trace lock.
 */
static struct delayed_work cpu_work[NR_CPUS];
static DECLARE_WAIT_QUEUE_HEAD(statedump_wq);
static atomic_t kernel_threads_to_run;

enum lttng_thread_type {
	LTTNG_USER_THREAD = 0,
	LTTNG_KERNEL_THREAD = 1,
};

enum lttng_execution_mode {
	LTTNG_USER_MODE = 0,
	LTTNG_SYSCALL = 1,
	LTTNG_TRAP = 2,
	LTTNG_IRQ = 3,
	LTTNG_SOFTIRQ = 4,
	LTTNG_MODE_UNKNOWN = 5,
};

enum lttng_execution_submode {
	LTTNG_NONE = 0,
	LTTNG_UNKNOWN = 1,
};

enum lttng_process_status {
	LTTNG_UNNAMED = 0,
	LTTNG_WAIT_FORK = 1,
	LTTNG_WAIT_CPU = 2,
	LTTNG_EXIT = 3,
	LTTNG_ZOMBIE = 4,
	LTTNG_WAIT = 5,
	LTTNG_RUN = 6,
	LTTNG_DEAD = 7,
};

#ifdef CONFIG_INET
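/*
 * Emit one network_interface event per IPv4 address configured on each
 * interface that is up; interfaces that are down get a single event
 * with a NULL address.
 */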
static
void lttng_enumerate_device(struct lttng_session *session,
		struct net_device *dev)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;

	if (dev->flags & IFF_UP) {
		in_dev = in_dev_get(dev);
		if (in_dev) {
			for (ifa = in_dev->ifa_list; ifa != NULL;
			     ifa = ifa->ifa_next) {
				trace_lttng_statedump_network_interface(
					session, dev, ifa);
			}
			in_dev_put(in_dev);
		}
	} else {
		trace_lttng_statedump_network_interface(
			session, dev, NULL);
	}
}

static
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, dev)
		lttng_enumerate_device(session, dev);
	read_unlock(&dev_base_lock);

	return 0;
}
#else /* CONFIG_INET */
static inline
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	return 0;
}
#endif /* CONFIG_INET */

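/*
 * Callback invoked by lttng_iterate_fd() for each open file descriptor
 * of a task: resolve the path into the shared scratch page and emit a
 * file_descriptor event, falling back to the raw dentry name when
 * d_path() fails.
 */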
static
int lttng_dump_one_fd(const void *p, struct file *file, unsigned int fd)
{
	const struct lttng_fd_ctx *ctx = p;
	const char *s = d_path(&file->f_path, ctx->page, PAGE_SIZE);

	if (IS_ERR(s)) {
		struct dentry *dentry = file->f_path.dentry;

		/* Make sure we give at least some info */
		spin_lock(&dentry->d_lock);
		trace_lttng_statedump_file_descriptor(ctx->session, ctx->p, fd,
			dentry->d_name.name);
		spin_unlock(&dentry->d_lock);
		goto end;
	}
	trace_lttng_statedump_file_descriptor(ctx->session, ctx->p, fd, s);
end:
	return 0;
}

static
void lttng_enumerate_task_fd(struct lttng_session *session,
		struct task_struct *p, char *tmp)
{
	struct lttng_fd_ctx ctx = { .page = tmp, .session = session, .p = p };

	task_lock(p);
	lttng_iterate_fd(p->files, 0, lttng_dump_one_fd, &ctx);
	task_unlock(p);
}

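/*
 * Walk every process under RCU and dump its open file descriptors,
 * sharing a single scratch page for path resolution.
 */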
static
int lttng_enumerate_file_descriptors(struct lttng_session *session)
{
	struct task_struct *p;
	char *tmp = (char *) __get_free_page(GFP_KERNEL);

	/* Enumerate active file descriptors */
	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_fd(session, p, tmp);
	rcu_read_unlock();
	free_page((unsigned long) tmp);
	return 0;
}

#if 0
/*
 * FIXME: we cannot take mmap_sem while in an RCU read-side critical section
 * (scheduling in atomic). Normally, the tasklist lock protects this kind of
 * iteration, but it is not exported to modules.
 */
static
void lttng_enumerate_task_vm_maps(struct lttng_session *session,
		struct task_struct *p)
{
	struct mm_struct *mm;
	struct vm_area_struct *map;
	unsigned long ino;

	/* get_task_mm does a task_lock... */
	mm = get_task_mm(p);
	if (!mm)
		return;

	map = mm->mmap;
	if (map) {
		down_read(&mm->mmap_sem);
		while (map) {
			if (map->vm_file)
				ino = map->vm_file->f_dentry->d_inode->i_ino;
			else
				ino = 0;
			trace_lttng_statedump_vm_map(session, p, map, ino);
			map = map->vm_next;
		}
		up_read(&mm->mmap_sem);
	}
	mmput(mm);
}

static
int lttng_enumerate_vm_maps(struct lttng_session *session)
{
	struct task_struct *p;

	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_vm_maps(session, p);
	rcu_read_unlock();
	return 0;
}
#endif

#ifdef CONFIG_GENERIC_HARDIRQS

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
#define irq_desc_get_chip(desc) get_irq_desc_chip(desc)
#endif

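/*
 * Walk every allocated IRQ descriptor and emit one interrupt event per
 * registered irqaction, holding the descriptor lock with local
 * interrupts disabled.
 */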
static
void lttng_list_interrupts(struct lttng_session *session)
{
	unsigned int irq;
	unsigned long flags = 0;
	struct irq_desc *desc;

#define irq_to_desc wrapper_irq_to_desc
	/* needs irq_desc */
	for_each_irq_desc(irq, desc) {
		struct irqaction *action;
		const char *irq_chip_name =
			irq_desc_get_chip(desc)->name ? : "unnamed_irq_chip";

		local_irq_save(flags);
		wrapper_desc_spin_lock(&desc->lock);
		for (action = desc->action; action; action = action->next) {
			trace_lttng_statedump_interrupt(session,
				irq, irq_chip_name, action);
		}
		wrapper_desc_spin_unlock(&desc->lock);
		local_irq_restore(flags);
	}
#undef irq_to_desc
}
#else
static inline
void lttng_list_interrupts(struct lttng_session *session)
{
}
#endif

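/*
 * Emit a process_state event for the task at each level of its PID
 * namespace hierarchy, from the task's own namespace up to the initial
 * namespace.
 */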
static
void lttng_statedump_process_ns(struct lttng_session *session,
		struct task_struct *p,
		enum lttng_thread_type type,
		enum lttng_execution_mode mode,
		enum lttng_execution_submode submode,
		enum lttng_process_status status)
{
	struct nsproxy *proxy;
	struct pid_namespace *pid_ns;

	rcu_read_lock();
	proxy = task_nsproxy(p);
	if (proxy) {
		pid_ns = proxy->pid_ns;
		do {
			trace_lttng_statedump_process_state(session,
				p, type, mode, submode, status, pid_ns);
			pid_ns = pid_ns->parent;
		} while (pid_ns);
	} else {
		trace_lttng_statedump_process_state(session,
			p, type, mode, submode, status, NULL);
	}
	rcu_read_unlock();
}

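/*
 * Walk every thread of every process under RCU and dump its thread
 * type, execution mode and scheduling status, sampled under the task
 * lock.
 */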
static
int lttng_enumerate_process_states(struct lttng_session *session)
{
	struct task_struct *g, *p;

	rcu_read_lock();
	for_each_process(g) {
		p = g;
		do {
			enum lttng_execution_mode mode =
				LTTNG_MODE_UNKNOWN;
			enum lttng_execution_submode submode =
				LTTNG_UNKNOWN;
			enum lttng_process_status status;
			enum lttng_thread_type type;

			task_lock(p);
			if (p->exit_state == EXIT_ZOMBIE)
				status = LTTNG_ZOMBIE;
			else if (p->exit_state == EXIT_DEAD)
				status = LTTNG_DEAD;
			else if (p->state == TASK_RUNNING) {
				/* Is this a forked child that has not run yet? */
				if (list_empty(&p->rt.run_list))
					status = LTTNG_WAIT_FORK;
				else
					/*
					 * All tasks are considered as wait_cpu;
					 * the viewer will sort out if the task
					 * was really running at this time.
					 */
					status = LTTNG_WAIT_CPU;
			} else if (p->state &
				(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) {
				/* Task is waiting for something to complete */
				status = LTTNG_WAIT;
			} else
				status = LTTNG_UNNAMED;
			submode = LTTNG_NONE;

			/*
			 * Checking p->mm filters out kernel threads;
			 * the viewer will further sort out whether a
			 * user-space thread was in syscall mode or not.
			 */
			if (p->mm)
				type = LTTNG_USER_THREAD;
			else
				type = LTTNG_KERNEL_THREAD;
			lttng_statedump_process_ns(session,
				p, type, mode, submode, status);
			task_unlock(p);
		} while_each_thread(g, p);
	}
	rcu_read_unlock();

	return 0;
}

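/*
 * Per-CPU work function: the last worker to complete wakes up
 * do_lttng_statedump().
 */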
static
void lttng_statedump_work_func(struct work_struct *work)
{
	if (atomic_dec_and_test(&kernel_threads_to_run))
		/* If we are the last thread, wake up do_lttng_statedump */
		wake_up(&statedump_wq);
}

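/*
 * Perform the state dump proper: enumerate processes, file descriptors,
 * interrupts and network interfaces, then schedule a work item on each
 * online CPU so that every CPU is known to have passed through syscall
 * mode before the dump completes.
 */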
static
int do_lttng_statedump(struct lttng_session *session)
{
	int cpu;

	printk(KERN_DEBUG "LTT state dump thread start\n");
	trace_lttng_statedump_start(session);
	lttng_enumerate_process_states(session);
	lttng_enumerate_file_descriptors(session);
	/* FIXME lttng_enumerate_vm_maps(session); */
	lttng_list_interrupts(session);
	lttng_enumerate_network_ip_interface(session);

	/* TODO lttng_dump_idt_table(session); */
	/* TODO lttng_dump_softirq_vec(session); */
	/* TODO lttng_list_modules(session); */
	/* TODO lttng_dump_swap_files(session); */

	/*
	 * Fire off a work item on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where it was
	 * in syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
	 */
	get_online_cpus();
	atomic_set(&kernel_threads_to_run, num_online_cpus());
	for_each_online_cpu(cpu) {
		INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	/* Wait for all threads to run */
	__wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
	put_online_cpus();
	/* Our work is done */
	printk(KERN_DEBUG "LTT state dump end\n");
	trace_lttng_statedump_end(session);
	return 0;
}

/*
 * Called with session mutex held.
 */
int lttng_statedump_start(struct lttng_session *session)
{
	printk(KERN_DEBUG "LTTng: state dump begin\n");
	return do_lttng_statedump(session);
}
EXPORT_SYMBOL_GPL(lttng_statedump_start);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Jean-Hugues Deschenes");
MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Statedump");