lttng-modules.git / lttng-statedump-impl.c
/*
 * Linux Trace Toolkit Next Generation Kernel State Dump
 *
 * Copyright 2005 Jean-Hugues Deschenes <jean-hugues.deschenes@polymtl.ca>
 * Copyright 2006-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Changes:
 *	Eric Clement:			Add listing of network IP interface
 *	2006, 2007 Mathieu Desnoyers	Fix kernel threads
 *					Various updates
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/netlink.h>
#include <linux/inet.h>
#include <linux/ip.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/irqnr.h>
#include <linux/cpu.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fdtable.h>
#include <linux/swap.h>
#include <linux/wait.h>
#include <linux/mutex.h>

#include "lttng-events.h"
#include "wrapper/irqdesc.h"

#ifdef CONFIG_GENERIC_HARDIRQS
#include <linux/irq.h>
#endif

/* Define the tracepoints, but do not build the probes */
#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
#define TRACE_INCLUDE_FILE lttng-statedump
#include "instrumentation/events/lttng-module/lttng-statedump.h"

/*
 * Protected by the trace lock.
 */
static struct delayed_work cpu_work[NR_CPUS];
static DECLARE_WAIT_QUEUE_HEAD(statedump_wq);
static atomic_t kernel_threads_to_run;

enum lttng_thread_type {
	LTTNG_USER_THREAD = 0,
	LTTNG_KERNEL_THREAD = 1,
};

enum lttng_execution_mode {
	LTTNG_USER_MODE = 0,
	LTTNG_SYSCALL = 1,
	LTTNG_TRAP = 2,
	LTTNG_IRQ = 3,
	LTTNG_SOFTIRQ = 4,
	LTTNG_MODE_UNKNOWN = 5,
};

enum lttng_execution_submode {
	LTTNG_NONE = 0,
	LTTNG_UNKNOWN = 1,
};

enum lttng_process_status {
	LTTNG_UNNAMED = 0,
	LTTNG_WAIT_FORK = 1,
	LTTNG_WAIT_CPU = 2,
	LTTNG_EXIT = 3,
	LTTNG_ZOMBIE = 4,
	LTTNG_WAIT = 5,
	LTTNG_RUN = 6,
	LTTNG_DEAD = 7,
};

#ifdef CONFIG_INET
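/*
 * Emit one network interface statedump event per IPv4 address configured
 * on an interface that is up, or a single event with a NULL address when
 * the interface is down.
 */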
static
void lttng_enumerate_device(struct lttng_session *session,
		struct net_device *dev)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;

	if (dev->flags & IFF_UP) {
		in_dev = in_dev_get(dev);
		if (in_dev) {
			for (ifa = in_dev->ifa_list; ifa != NULL;
			     ifa = ifa->ifa_next) {
				trace_lttng_statedump_network_interface(
					session, dev, ifa);
			}
			in_dev_put(in_dev);
		}
	} else {
		trace_lttng_statedump_network_interface(
			session, dev, NULL);
	}
}

static
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, dev)
		lttng_enumerate_device(session, dev);
	read_unlock(&dev_base_lock);

	return 0;
}
#else /* CONFIG_INET */
static inline
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	return 0;
}
#endif /* CONFIG_INET */


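/*
 * Dump the open file descriptors of one task. "tmp" is a scratch page used
 * by d_path() to resolve each file's path; when path resolution fails, the
 * dentry name is emitted instead.
 */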
static
void lttng_enumerate_task_fd(struct lttng_session *session,
		struct task_struct *p, char *tmp)
{
	struct fdtable *fdt;
	struct file *filp;
	unsigned int i;
	const unsigned char *path;

	task_lock(p);
	if (!p->files)
		goto unlock_task;
	spin_lock(&p->files->file_lock);
	fdt = files_fdtable(p->files);
	for (i = 0; i < fdt->max_fds; i++) {
		filp = fcheck_files(p->files, i);
		if (!filp)
			continue;
		path = d_path(&filp->f_path, tmp, PAGE_SIZE);
		/* Make sure we give at least some info */
		trace_lttng_statedump_file_descriptor(session, p, i,
			IS_ERR(path) ?
				filp->f_dentry->d_name.name :
				path);
	}
	spin_unlock(&p->files->file_lock);
unlock_task:
	task_unlock(p);
}

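/*
 * Iterate on every process under the RCU read lock and dump its file
 * descriptor table, reusing a single scratch page for path resolution.
 */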
static
int lttng_enumerate_file_descriptors(struct lttng_session *session)
{
	struct task_struct *p;
	char *tmp = (char *) __get_free_page(GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;

	/* Enumerate active file descriptors */
	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_fd(session, p, tmp);
	rcu_read_unlock();
	free_page((unsigned long) tmp);
	return 0;
}

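/*
 * Dump the memory mappings of one task: one event per VMA, carrying the
 * backing file's inode number when the mapping is file-backed.
 */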
static
void lttng_enumerate_task_vm_maps(struct lttng_session *session,
		struct task_struct *p)
{
	struct mm_struct *mm;
	struct vm_area_struct *map;
	unsigned long ino;

	/* get_task_mm does a task_lock... */
	mm = get_task_mm(p);
	if (!mm)
		return;

	map = mm->mmap;
	if (map) {
		down_read(&mm->mmap_sem);
		while (map) {
			if (map->vm_file)
				ino = map->vm_file->f_dentry->d_inode->i_ino;
			else
				ino = 0;
			trace_lttng_statedump_vm_map(session, p, map, ino);
			map = map->vm_next;
		}
		up_read(&mm->mmap_sem);
	}
	mmput(mm);
}

static
int lttng_enumerate_vm_maps(struct lttng_session *session)
{
	struct task_struct *p;

	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_vm_maps(session, p);
	rcu_read_unlock();
	return 0;
}

#ifdef CONFIG_GENERIC_HARDIRQS

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
#define irq_desc_get_chip(desc) get_irq_desc_chip(desc)
#endif

static
void lttng_list_interrupts(struct lttng_session *session)
{
	unsigned int irq;
	unsigned long flags = 0;
	struct irq_desc *desc;

#define irq_to_desc wrapper_irq_to_desc
	/* needs irq_desc */
	for_each_irq_desc(irq, desc) {
		struct irqaction *action;
		const char *irq_chip_name =
			irq_desc_get_chip(desc)->name ? : "unnamed_irq_chip";

		local_irq_save(flags);
		raw_spin_lock(&desc->lock);
		for (action = desc->action; action; action = action->next) {
			trace_lttng_statedump_interrupt(session,
				irq, irq_chip_name, action);
		}
		raw_spin_unlock(&desc->lock);
		local_irq_restore(flags);
	}
#undef irq_to_desc
}
#else
static inline
void lttng_list_interrupts(struct lttng_session *session)
{
}
#endif

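/*
 * Walk every thread of every process and emit a process state event
 * describing its thread type, execution mode/submode and scheduling
 * status.
 */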
static
int lttng_enumerate_process_states(struct lttng_session *session)
{
	struct task_struct *g, *p;

	rcu_read_lock();
	for_each_process(g) {
		p = g;
		do {
			enum lttng_execution_mode mode =
				LTTNG_MODE_UNKNOWN;
			enum lttng_execution_submode submode =
				LTTNG_UNKNOWN;
			enum lttng_process_status status;
			enum lttng_thread_type type;

			task_lock(p);
			if (p->exit_state == EXIT_ZOMBIE)
				status = LTTNG_ZOMBIE;
			else if (p->exit_state == EXIT_DEAD)
				status = LTTNG_DEAD;
			else if (p->state == TASK_RUNNING) {
				/* Is this a forked child that has not run yet? */
				if (list_empty(&p->rt.run_list))
					status = LTTNG_WAIT_FORK;
				else
					/*
					 * All tasks are considered as wait_cpu;
					 * the viewer will sort out if the task
					 * was really running at this time.
					 */
					status = LTTNG_WAIT_CPU;
			} else if (p->state &
				(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) {
				/* Task is waiting for something to complete */
				status = LTTNG_WAIT;
			} else
				status = LTTNG_UNNAMED;
			submode = LTTNG_NONE;

			/*
			 * Verification of p->mm is to filter out kernel
			 * threads; the viewer will further filter out if a
			 * user-space thread was in syscall mode or not.
			 */
			if (p->mm)
				type = LTTNG_USER_THREAD;
			else
				type = LTTNG_KERNEL_THREAD;
			trace_lttng_statedump_process_state(session,
				p, type, mode, submode, status);
			task_unlock(p);
		} while_each_thread(g, p);
	}
	rcu_read_unlock();

	return 0;
}

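/*
 * Per-CPU work function: decrement the count of pending work items and
 * wake up do_lttng_statedump() once the last CPU has run it.
 */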
static
void lttng_statedump_work_func(struct work_struct *work)
{
	if (atomic_dec_and_test(&kernel_threads_to_run))
		/* If we are the last thread, wake up do_lttng_statedump */
		wake_up(&statedump_wq);
}

static
int do_lttng_statedump(struct lttng_session *session)
{
	int cpu;

	printk(KERN_DEBUG "LTT state dump thread start\n");
	trace_lttng_statedump_start(session);
	lttng_enumerate_process_states(session);
	lttng_enumerate_file_descriptors(session);
	lttng_enumerate_vm_maps(session);
	lttng_list_interrupts(session);
	lttng_enumerate_network_ip_interface(session);

	/* TODO lttng_dump_idt_table(session); */
	/* TODO lttng_dump_softirq_vec(session); */
	/* TODO lttng_list_modules(session); */
	/* TODO lttng_dump_swap_files(session); */

	/*
	 * Fire off a work queue on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where it was in
	 * syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
	 */
	get_online_cpus();
	atomic_set(&kernel_threads_to_run, num_online_cpus());
	for_each_online_cpu(cpu) {
		INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	/* Wait for all threads to run */
	__wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
	put_online_cpus();
	/* Our work is done */
	printk(KERN_DEBUG "LTT state dump end\n");
	trace_lttng_statedump_end(session);
	return 0;
}

/*
 * Called with session mutex held.
 */
int lttng_statedump_start(struct lttng_session *session)
{
	printk(KERN_DEBUG "LTTng: state dump begin\n");
	return do_lttng_statedump(session);
}
EXPORT_SYMBOL_GPL(lttng_statedump_start);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Jean-Hugues Deschenes");
MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Statedump");