/*
 * lttng-statedump-impl.c
 *
 * Linux Trace Toolkit Next Generation Kernel State Dump
 *
 * Copyright 2005 Jean-Hugues Deschenes <jean-hugues.deschenes@polymtl.ca>
 * Copyright 2006-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Changes:
 *	Eric Clement: Add listing of network IP interface
 *	2006, 2007 Mathieu Desnoyers	Fix kernel threads
 *					Various updates
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/inet.h>
#include <linux/ip.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/irqnr.h>
#include <linux/cpu.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fdtable.h>
#include <linux/swap.h>
#include <linux/wait.h>
#include <linux/mutex.h>

#include "lttng-events.h"
#include "lttng-tracer.h"
#include "wrapper/irqdesc.h"
#include "wrapper/spinlock.h"
#include "wrapper/fdtable.h"
#include "wrapper/nsproxy.h"
#include "wrapper/irq.h"
#include "wrapper/tracepoint.h"

#ifdef CONFIG_LTTNG_HAS_LIST_IRQ
#include <linux/irq.h>
#endif

/* Define the tracepoints, but do not build the probes */
#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module
#define TRACE_INCLUDE_FILE lttng-statedump
#include "instrumentation/events/lttng-module/lttng-statedump.h"

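/*
 * Context passed to lttng_dump_one_fd() while iterating over a task's
 * file descriptor table: a scratch page for d_path(), the target
 * session, and the task being inspected.
 */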
struct lttng_fd_ctx {
	char *page;
	struct lttng_session *session;
	struct task_struct *p;
};

/*
 * Protected by the trace lock.
 */
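/*
 * do_lttng_statedump() schedules one delayed work item per online CPU;
 * each work item decrements kernel_threads_to_run and the last one to
 * finish wakes up statedump_wq, so the state dump can wait until every
 * CPU has passed through syscall (non trap/IRQ/softirq) context.
 */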
static struct delayed_work cpu_work[NR_CPUS];
static DECLARE_WAIT_QUEUE_HEAD(statedump_wq);
static atomic_t kernel_threads_to_run;

enum lttng_thread_type {
	LTTNG_USER_THREAD = 0,
	LTTNG_KERNEL_THREAD = 1,
};

enum lttng_execution_mode {
	LTTNG_USER_MODE = 0,
	LTTNG_SYSCALL = 1,
	LTTNG_TRAP = 2,
	LTTNG_IRQ = 3,
	LTTNG_SOFTIRQ = 4,
	LTTNG_MODE_UNKNOWN = 5,
};

enum lttng_execution_submode {
	LTTNG_NONE = 0,
	LTTNG_UNKNOWN = 1,
};

enum lttng_process_status {
	LTTNG_UNNAMED = 0,
	LTTNG_WAIT_FORK = 1,
	LTTNG_WAIT_CPU = 2,
	LTTNG_EXIT = 3,
	LTTNG_ZOMBIE = 4,
	LTTNG_WAIT = 5,
	LTTNG_RUN = 6,
	LTTNG_DEAD = 7,
};

#ifdef CONFIG_INET
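/*
 * Emit one network_interface statedump event per IPv4 address for each
 * interface that is up; interfaces that are down are reported once with
 * a NULL address.
 */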
static
void lttng_enumerate_device(struct lttng_session *session,
		struct net_device *dev)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;

	if (dev->flags & IFF_UP) {
		in_dev = in_dev_get(dev);
		if (in_dev) {
			for (ifa = in_dev->ifa_list; ifa != NULL;
			     ifa = ifa->ifa_next) {
				trace_lttng_statedump_network_interface(
					session, dev, ifa);
			}
			in_dev_put(in_dev);
		}
	} else {
		trace_lttng_statedump_network_interface(
			session, dev, NULL);
	}
}

static
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, dev)
		lttng_enumerate_device(session, dev);
	read_unlock(&dev_base_lock);

	return 0;
}
#else /* CONFIG_INET */
static inline
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	return 0;
}
#endif /* CONFIG_INET */

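/*
 * Called once per open file descriptor by lttng_iterate_fd(). When
 * d_path() fails (e.g. for files without a reachable path), fall back
 * to the dentry name so the event still carries some information.
 */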
static
int lttng_dump_one_fd(const void *p, struct file *file, unsigned int fd)
{
	const struct lttng_fd_ctx *ctx = p;
	const char *s = d_path(&file->f_path, ctx->page, PAGE_SIZE);

	if (IS_ERR(s)) {
		struct dentry *dentry = file->f_path.dentry;

		/* Make sure we give at least some info */
		spin_lock(&dentry->d_lock);
		trace_lttng_statedump_file_descriptor(ctx->session, ctx->p, fd,
			dentry->d_name.name);
		spin_unlock(&dentry->d_lock);
		goto end;
	}
	trace_lttng_statedump_file_descriptor(ctx->session, ctx->p, fd, s);
end:
	return 0;
}

static
void lttng_enumerate_task_fd(struct lttng_session *session,
		struct task_struct *p, char *tmp)
{
	struct lttng_fd_ctx ctx = { .page = tmp, .session = session, .p = p };

	task_lock(p);
	lttng_iterate_fd(p->files, 0, lttng_dump_one_fd, &ctx);
	task_unlock(p);
}

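/*
 * Allocate a single page, shared by all tasks, as the d_path() scratch
 * buffer, then walk every process's file descriptor table under RCU.
 */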
static
int lttng_enumerate_file_descriptors(struct lttng_session *session)
{
	struct task_struct *p;
	char *tmp = (char *) __get_free_page(GFP_KERNEL);

	/* Enumerate active file descriptors */
	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_fd(session, p, tmp);
	rcu_read_unlock();
	free_page((unsigned long) tmp);
	return 0;
}

#if 0
/*
 * FIXME: we cannot take the mmap_sem while in an RCU read-side critical
 * section (scheduling in atomic). Normally, the tasklist lock protects
 * this kind of iteration, but it is not exported to modules.
 */
static
void lttng_enumerate_task_vm_maps(struct lttng_session *session,
		struct task_struct *p)
{
	struct mm_struct *mm;
	struct vm_area_struct *map;
	unsigned long ino;

	/* get_task_mm does a task_lock... */
	mm = get_task_mm(p);
	if (!mm)
		return;

	map = mm->mmap;
	if (map) {
		down_read(&mm->mmap_sem);
		while (map) {
			if (map->vm_file)
				ino = map->vm_file->f_dentry->d_inode->i_ino;
			else
				ino = 0;
			trace_lttng_statedump_vm_map(session, p, map, ino);
			map = map->vm_next;
		}
		up_read(&mm->mmap_sem);
	}
	mmput(mm);
}

static
int lttng_enumerate_vm_maps(struct lttng_session *session)
{
	struct task_struct *p;

	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_vm_maps(session, p);
	rcu_read_unlock();
	return 0;
}
#endif

#ifdef CONFIG_LTTNG_HAS_LIST_IRQ

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
#define irq_desc_get_chip(desc)	get_irq_desc_chip(desc)
#endif

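/*
 * for_each_irq_desc() expands to calls to irq_to_desc(). Remapping
 * irq_to_desc to wrapper_irq_to_desc around the loop below makes the
 * iteration go through the module's own lookup (provided by
 * wrapper/irqdesc.h) rather than relying on the kernel's irq_to_desc()
 * being usable from a module.
 */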
static
void lttng_list_interrupts(struct lttng_session *session)
{
	unsigned int irq;
	unsigned long flags = 0;
	struct irq_desc *desc;

#define irq_to_desc wrapper_irq_to_desc
	/* needs irq_desc */
	for_each_irq_desc(irq, desc) {
		struct irqaction *action;
		const char *irq_chip_name =
			irq_desc_get_chip(desc)->name ? : "unnamed_irq_chip";

		local_irq_save(flags);
		wrapper_desc_spin_lock(&desc->lock);
		for (action = desc->action; action; action = action->next) {
			trace_lttng_statedump_interrupt(session,
				irq, irq_chip_name, action);
		}
		wrapper_desc_spin_unlock(&desc->lock);
		local_irq_restore(flags);
	}
#undef irq_to_desc
}
#else
static inline
void lttng_list_interrupts(struct lttng_session *session)
{
}
#endif

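/*
 * Emit one process_state event per PID namespace level, walking from the
 * task's own namespace up through its parents.
 */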
static
void lttng_statedump_process_ns(struct lttng_session *session,
		struct task_struct *p,
		enum lttng_thread_type type,
		enum lttng_execution_mode mode,
		enum lttng_execution_submode submode,
		enum lttng_process_status status)
{
	struct nsproxy *proxy;
	struct pid_namespace *pid_ns;

	rcu_read_lock();
	proxy = task_nsproxy(p);
	if (proxy) {
		pid_ns = lttng_get_proxy_pid_ns(proxy);
		do {
			trace_lttng_statedump_process_state(session,
				p, type, mode, submode, status, pid_ns);
			pid_ns = pid_ns->parent;
		} while (pid_ns);
	} else {
		trace_lttng_statedump_process_state(session,
			p, type, mode, submode, status, NULL);
	}
	rcu_read_unlock();
}

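/*
 * Walk every thread of every process under RCU and record a best-effort
 * snapshot of its scheduling state; each task is locked only long enough
 * to read the fields needed for the event.
 */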
static
int lttng_enumerate_process_states(struct lttng_session *session)
{
	struct task_struct *g, *p;

	rcu_read_lock();
	for_each_process(g) {
		p = g;
		do {
			enum lttng_execution_mode mode =
				LTTNG_MODE_UNKNOWN;
			enum lttng_execution_submode submode =
				LTTNG_UNKNOWN;
			enum lttng_process_status status;
			enum lttng_thread_type type;

			task_lock(p);
			if (p->exit_state == EXIT_ZOMBIE)
				status = LTTNG_ZOMBIE;
			else if (p->exit_state == EXIT_DEAD)
				status = LTTNG_DEAD;
			else if (p->state == TASK_RUNNING) {
				/* Is this a forked child that has not run yet? */
				if (list_empty(&p->rt.run_list))
					status = LTTNG_WAIT_FORK;
				else
					/*
					 * All tasks are considered as wait_cpu;
					 * the viewer will sort out if the task
					 * was really running at this time.
					 */
					status = LTTNG_WAIT_CPU;
			} else if (p->state &
				(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) {
				/* Task is waiting for something to complete */
				status = LTTNG_WAIT;
			} else
				status = LTTNG_UNNAMED;
			submode = LTTNG_NONE;

			/*
			 * Checking p->mm filters out kernel threads; the
			 * viewer will further sort out whether a user-space
			 * thread was in syscall mode or not.
			 */
			if (p->mm)
				type = LTTNG_USER_THREAD;
			else
				type = LTTNG_KERNEL_THREAD;
			lttng_statedump_process_ns(session,
				p, type, mode, submode, status);
			task_unlock(p);
		} while_each_thread(g, p);
	}
	rcu_read_unlock();

	return 0;
}

static
void lttng_statedump_work_func(struct work_struct *work)
{
	if (atomic_dec_and_test(&kernel_threads_to_run))
		/* If we are the last thread, wake up do_lttng_statedump */
		wake_up(&statedump_wq);
}

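/*
 * Run the complete state dump for a session: emit the start event, walk
 * processes, file descriptors, interrupts and network interfaces, then
 * synchronize with every online CPU before emitting the end event.
 */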
static
int do_lttng_statedump(struct lttng_session *session)
{
	int cpu;

	trace_lttng_statedump_start(session);
	lttng_enumerate_process_states(session);
	lttng_enumerate_file_descriptors(session);
	/* FIXME lttng_enumerate_vm_maps(session); */
	lttng_list_interrupts(session);
	lttng_enumerate_network_ip_interface(session);

	/* TODO lttng_dump_idt_table(session); */
	/* TODO lttng_dump_softirq_vec(session); */
	/* TODO lttng_list_modules(session); */
	/* TODO lttng_dump_swap_files(session); */

	/*
	 * Fire off a delayed work item on each CPU. Their sole purpose in
	 * life is to guarantee that each CPU has been in a state where it
	 * was in syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
	 */
	get_online_cpus();
	atomic_set(&kernel_threads_to_run, num_online_cpus());
	for_each_online_cpu(cpu) {
		INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	/* Wait for all threads to run */
	__wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
	put_online_cpus();
	/* Our work is done */
	trace_lttng_statedump_end(session);
	return 0;
}

/*
 * Called with session mutex held.
 */
int lttng_statedump_start(struct lttng_session *session)
{
	return do_lttng_statedump(session);
}
EXPORT_SYMBOL_GPL(lttng_statedump_start);
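/*
 * Illustrative sketch only (not part of this module): callers are
 * expected to hold the session mutex around the state dump, roughly:
 *
 *	mutex_lock(&sessions_mutex);	(mutex name assumed for illustration)
 *	ret = lttng_statedump_start(session);
 *	mutex_unlock(&sessions_mutex);
 */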

static
int __init lttng_statedump_init(void)
{
	/*
	 * Allow the module to load even if the fixup cannot be done. This
	 * will allow a seamless transition when the fix for the underlying
	 * issue is merged into the Linux kernel, and when tracepoint.c
	 * "tracepoint_module_notify" is turned into a static function.
	 */
	(void) wrapper_lttng_fixup_sig(THIS_MODULE);
	return 0;
}

module_init(lttng_statedump_init);

static
void __exit lttng_statedump_exit(void)
{
}

module_exit(lttng_statedump_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Jean-Hugues Deschenes");
MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Statedump");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);