/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-statedump-impl.c
 *
 * Linux Trace Toolkit Next Generation Kernel State Dump
 *
 * Copyright 2005 Jean-Hugues Deschenes <jean-hugues.deschenes@polymtl.ca>
 * Copyright 2006-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Changes:
 *	Eric Clement:			Add listing of network IP interface
 *	2006, 2007 Mathieu Desnoyers	Fix kernel threads
 *					Various updates
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/inet.h>
#include <linux/ip.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/irqnr.h>
#include <linux/cpu.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/device.h>

#include <lttng/events.h>
#include <lttng/tracer.h>
#include <wrapper/irqdesc.h>
#include <wrapper/fdtable.h>
#include <wrapper/namespace.h>
#include <wrapper/irq.h>
#include <wrapper/tracepoint.h>
#include <wrapper/genhd.h>
#include <wrapper/file.h>

#ifdef CONFIG_LTTNG_HAS_LIST_IRQ
#include <linux/irq.h>
#endif

/* Define the tracepoints, but do not build the probes */
#define CREATE_TRACE_POINTS
#define TRACE_INCLUDE_PATH instrumentation/events
#define TRACE_INCLUDE_FILE lttng-statedump
#define LTTNG_INSTRUMENTATION
#include <instrumentation/events/lttng-statedump.h>

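/*
 * The statedump tracepoints are declared by the instrumentation header
 * included above; define each of them here so this module can fire them
 * directly.
 */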
LTTNG_DEFINE_TRACE(lttng_statedump_block_device,
	TP_PROTO(struct lttng_session *session,
		dev_t dev, const char *diskname),
	TP_ARGS(session, dev, diskname));

LTTNG_DEFINE_TRACE(lttng_statedump_end,
	TP_PROTO(struct lttng_session *session),
	TP_ARGS(session));

LTTNG_DEFINE_TRACE(lttng_statedump_interrupt,
	TP_PROTO(struct lttng_session *session,
		unsigned int irq, const char *chip_name,
		struct irqaction *action),
	TP_ARGS(session, irq, chip_name, action));

LTTNG_DEFINE_TRACE(lttng_statedump_file_descriptor,
	TP_PROTO(struct lttng_session *session,
		struct files_struct *files,
		int fd, const char *filename,
		unsigned int flags, fmode_t fmode),
	TP_ARGS(session, files, fd, filename, flags, fmode));

LTTNG_DEFINE_TRACE(lttng_statedump_start,
	TP_PROTO(struct lttng_session *session),
	TP_ARGS(session));

LTTNG_DEFINE_TRACE(lttng_statedump_process_state,
	TP_PROTO(struct lttng_session *session,
		struct task_struct *p,
		int type, int mode, int submode, int status,
		struct files_struct *files),
	TP_ARGS(session, p, type, mode, submode, status, files));

LTTNG_DEFINE_TRACE(lttng_statedump_process_pid_ns,
	TP_PROTO(struct lttng_session *session,
		struct task_struct *p,
		struct pid_namespace *pid_ns),
	TP_ARGS(session, p, pid_ns));

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0))
LTTNG_DEFINE_TRACE(lttng_statedump_process_cgroup_ns,
	TP_PROTO(struct lttng_session *session,
		struct task_struct *p,
		struct cgroup_namespace *cgroup_ns),
	TP_ARGS(session, p, cgroup_ns));
#endif

LTTNG_DEFINE_TRACE(lttng_statedump_process_ipc_ns,
	TP_PROTO(struct lttng_session *session,
		struct task_struct *p,
		struct ipc_namespace *ipc_ns),
	TP_ARGS(session, p, ipc_ns));

#ifndef LTTNG_MNT_NS_MISSING_HEADER
LTTNG_DEFINE_TRACE(lttng_statedump_process_mnt_ns,
	TP_PROTO(struct lttng_session *session,
		struct task_struct *p,
		struct mnt_namespace *mnt_ns),
	TP_ARGS(session, p, mnt_ns));
#endif

LTTNG_DEFINE_TRACE(lttng_statedump_process_net_ns,
	TP_PROTO(struct lttng_session *session,
		struct task_struct *p,
		struct net *net_ns),
	TP_ARGS(session, p, net_ns));

LTTNG_DEFINE_TRACE(lttng_statedump_process_user_ns,
	TP_PROTO(struct lttng_session *session,
		struct task_struct *p,
		struct user_namespace *user_ns),
	TP_ARGS(session, p, user_ns));

LTTNG_DEFINE_TRACE(lttng_statedump_process_uts_ns,
	TP_PROTO(struct lttng_session *session,
		struct task_struct *p,
		struct uts_namespace *uts_ns),
	TP_ARGS(session, p, uts_ns));

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
LTTNG_DEFINE_TRACE(lttng_statedump_process_time_ns,
	TP_PROTO(struct lttng_session *session,
		struct task_struct *p,
		struct time_namespace *time_ns),
	TP_ARGS(session, p, time_ns));
#endif

LTTNG_DEFINE_TRACE(lttng_statedump_network_interface,
	TP_PROTO(struct lttng_session *session,
		struct net_device *dev, struct in_ifaddr *ifa),
	TP_ARGS(session, dev, ifa));

#ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
LTTNG_DEFINE_TRACE(lttng_statedump_cpu_topology,
	TP_PROTO(struct lttng_session *session, struct cpuinfo_x86 *c),
	TP_ARGS(session, c));
#endif

struct lttng_fd_ctx {
	char *page;
	struct lttng_session *session;
	struct files_struct *files;
};

/*
 * Protected by the trace lock.
 */
static struct delayed_work cpu_work[NR_CPUS];
static DECLARE_WAIT_QUEUE_HEAD(statedump_wq);
static atomic_t kernel_threads_to_run;

enum lttng_thread_type {
	LTTNG_USER_THREAD = 0,
	LTTNG_KERNEL_THREAD = 1,
};

enum lttng_execution_mode {
	LTTNG_USER_MODE = 0,
	LTTNG_SYSCALL = 1,
	LTTNG_TRAP = 2,
	LTTNG_IRQ = 3,
	LTTNG_SOFTIRQ = 4,
	LTTNG_MODE_UNKNOWN = 5,
};

enum lttng_execution_submode {
	LTTNG_NONE = 0,
	LTTNG_UNKNOWN = 1,
};

enum lttng_process_status {
	LTTNG_UNNAMED = 0,
	LTTNG_WAIT_FORK = 1,
	LTTNG_WAIT_CPU = 2,
	LTTNG_EXIT = 3,
	LTTNG_ZOMBIE = 4,
	LTTNG_WAIT = 5,
	LTTNG_RUN = 6,
	LTTNG_DEAD = 7,
};

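/*
 * Walk all devices of the block class and fire one
 * lttng_statedump_block_device event per disk and partition. Returns
 * -ENOSYS when the kernel does not expose the required block class or
 * disk type symbols, or when a device name cannot be resolved.
 */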
static
int lttng_enumerate_block_devices(struct lttng_session *session)
{
	struct class *ptr_block_class;
	struct device_type *ptr_disk_type;
	struct class_dev_iter iter;
	struct device *dev;

	ptr_block_class = wrapper_get_block_class();
	if (!ptr_block_class)
		return -ENOSYS;
	ptr_disk_type = wrapper_get_disk_type();
	if (!ptr_disk_type)
		return -ENOSYS;
	class_dev_iter_init(&iter, ptr_block_class, NULL, ptr_disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct disk_part_iter piter;
		struct gendisk *disk = dev_to_disk(dev);
		struct hd_struct *part;

		/*
		 * Don't show empty devices or things that have been
		 * suppressed.
		 */
		if (get_capacity(disk) == 0 ||
		    (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
			continue;

		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter))) {
			struct block_device bdev;
			char name_buf[BDEVNAME_SIZE];
			const char *p;

			/*
			 * Create a partial 'struct block_device' to use
			 * 'bdevname()', which is a simple wrapper over
			 * 'disk_name()' but has the advantage of being
			 * EXPORT_SYMBOL.
			 */
			bdev.bd_disk = disk;
			bdev.bd_part = part;

			p = bdevname(&bdev, name_buf);
			if (!p) {
				disk_part_iter_exit(&piter);
				class_dev_iter_exit(&iter);
				return -ENOSYS;
			}
			trace_lttng_statedump_block_device(session,
					part_devt(part), name_buf);
		}
		disk_part_iter_exit(&piter);
	}
	class_dev_iter_exit(&iter);
	return 0;
}

#ifdef CONFIG_INET

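/*
 * Fire one lttng_statedump_network_interface event per IPv4 address of
 * an interface that is up, or a single event with a NULL address for an
 * interface that is down.
 */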
static
void lttng_enumerate_device(struct lttng_session *session,
		struct net_device *dev)
{
	struct in_device *in_dev;
	struct in_ifaddr *ifa;

	if (dev->flags & IFF_UP) {
		in_dev = in_dev_get(dev);
		if (in_dev) {
			for (ifa = in_dev->ifa_list; ifa != NULL;
			     ifa = ifa->ifa_next) {
				trace_lttng_statedump_network_interface(
					session, dev, ifa);
			}
			in_dev_put(in_dev);
		}
	} else {
		trace_lttng_statedump_network_interface(
			session, dev, NULL);
	}
}

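/*
 * Walk the device list of the initial network namespace under
 * dev_base_lock and dump each interface.
 */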
static
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, dev)
		lttng_enumerate_device(session, dev);
	read_unlock(&dev_base_lock);

	return 0;
}
#else /* CONFIG_INET */
static inline
int lttng_enumerate_network_ip_interface(struct lttng_session *session)
{
	return 0;
}
#endif /* CONFIG_INET */

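/*
 * Callback invoked by lttng_iterate_fd() for each open file descriptor.
 * Resolves the file path with d_path() and falls back to the dentry
 * name when d_path() fails.
 */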
static
int lttng_dump_one_fd(const void *p, struct file *file, unsigned int fd)
{
	const struct lttng_fd_ctx *ctx = p;
	const char *s = d_path(&file->f_path, ctx->page, PAGE_SIZE);
	unsigned int flags = file->f_flags;
	struct fdtable *fdt;

	/*
	 * We don't expose kernel internal flags, only userspace-visible
	 * flags.
	 */
	flags &= ~FMODE_NONOTIFY;
	fdt = files_fdtable(ctx->files);
	/*
	 * We need to check here again whether fd is within the fdt
	 * max_fds range, because we might be seeing a different
	 * files_fdtable() than iterate_fd(), assuming only RCU is
	 * protecting the read. In reality, iterate_fd() holds
	 * file_lock, which should ensure the fdt does not change while
	 * the lock is taken, but we are not aware whether this is
	 * guaranteed or not, so play safe.
	 */
	if (fd < fdt->max_fds && lttng_close_on_exec(fd, fdt))
		flags |= O_CLOEXEC;
	if (IS_ERR(s)) {
		struct dentry *dentry = file->f_path.dentry;

		/* Make sure we give at least some info */
		spin_lock(&dentry->d_lock);
		trace_lttng_statedump_file_descriptor(ctx->session,
			ctx->files, fd, dentry->d_name.name, flags,
			file->f_mode);
		spin_unlock(&dentry->d_lock);
		goto end;
	}
	trace_lttng_statedump_file_descriptor(ctx->session,
		ctx->files, fd, s, flags, file->f_mode);
end:
	return 0;
}

/* Called with task lock held. */
static
void lttng_enumerate_files(struct lttng_session *session,
		struct files_struct *files,
		char *tmp)
{
	struct lttng_fd_ctx ctx = { .page = tmp, .session = session, .files = files, };

	lttng_iterate_fd(files, 0, lttng_dump_one_fd, &ctx);
}

#ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
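/* Fire one lttng_statedump_cpu_topology event per possible CPU. */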
static
int lttng_enumerate_cpu_topology(struct lttng_session *session)
{
	int cpu;
	const cpumask_t *cpumask = cpu_possible_mask;

	for (cpu = cpumask_first(cpumask); cpu < nr_cpu_ids;
			cpu = cpumask_next(cpu, cpumask)) {
		trace_lttng_statedump_cpu_topology(session, &cpu_data(cpu));
	}

	return 0;
}
#else
static
int lttng_enumerate_cpu_topology(struct lttng_session *session)
{
	return 0;
}
#endif

#if 0
/*
 * FIXME: we cannot take a mmap_sem while in an RCU read-side critical section
 * (scheduling in atomic). Normally, the tasklist lock protects this kind of
 * iteration, but it is not exported to modules.
 */
static
void lttng_enumerate_task_vm_maps(struct lttng_session *session,
		struct task_struct *p)
{
	struct mm_struct *mm;
	struct vm_area_struct *map;
	unsigned long ino;

	/* get_task_mm does a task_lock... */
	mm = get_task_mm(p);
	if (!mm)
		return;

	map = mm->mmap;
	if (map) {
		down_read(&mm->mmap_sem);
		while (map) {
			if (map->vm_file)
				ino = map->vm_file->lttng_f_dentry->d_inode->i_ino;
			else
				ino = 0;
			trace_lttng_statedump_vm_map(session, p, map, ino);
			map = map->vm_next;
		}
		up_read(&mm->mmap_sem);
	}
	mmput(mm);
}

static
int lttng_enumerate_vm_maps(struct lttng_session *session)
{
	struct task_struct *p;

	rcu_read_lock();
	for_each_process(p)
		lttng_enumerate_task_vm_maps(session, p);
	rcu_read_unlock();
	return 0;
}
#endif

#ifdef CONFIG_LTTNG_HAS_LIST_IRQ

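/*
 * Walk every allocated IRQ descriptor and fire one
 * lttng_statedump_interrupt event per registered irqaction, with the
 * descriptor lock held and local interrupts disabled.
 */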
static
int lttng_list_interrupts(struct lttng_session *session)
{
	unsigned int irq;
	unsigned long flags = 0;
	struct irq_desc *desc;

#define irq_to_desc wrapper_irq_to_desc
	/* needs irq_desc */
	for_each_irq_desc(irq, desc) {
		struct irqaction *action;
		const char *irq_chip_name =
			irq_desc_get_chip(desc)->name ? : "unnamed_irq_chip";

		local_irq_save(flags);
		raw_spin_lock(&desc->lock);
		for (action = desc->action; action; action = action->next) {
			trace_lttng_statedump_interrupt(session,
				irq, irq_chip_name, action);
		}
		raw_spin_unlock(&desc->lock);
		local_irq_restore(flags);
	}
	return 0;
#undef irq_to_desc
}
#else
static inline
int lttng_list_interrupts(struct lttng_session *session)
{
	return 0;
}
#endif

/*
 * Statedump the task's namespaces using the proc filesystem inode number as
 * the unique identifier. The user and pid ns are nested and will be dumped
 * recursively.
 *
 * Called with task lock held.
 */
static
void lttng_statedump_process_ns(struct lttng_session *session,
		struct task_struct *p,
		enum lttng_thread_type type,
		enum lttng_execution_mode mode,
		enum lttng_execution_submode submode,
		enum lttng_process_status status)
{
	struct nsproxy *proxy;
	struct pid_namespace *pid_ns;
	struct user_namespace *user_ns;

	/*
	 * The pid and user namespaces are special, they are nested and
	 * accessed with specific functions instead of the nsproxy struct
	 * like the other namespaces.
	 */
	pid_ns = task_active_pid_ns(p);
	do {
		trace_lttng_statedump_process_pid_ns(session, p, pid_ns);
		pid_ns = pid_ns ? pid_ns->parent : NULL;
	} while (pid_ns);

	user_ns = task_cred_xxx(p, user_ns);
	do {
		trace_lttng_statedump_process_user_ns(session, p, user_ns);
		/*
		 * trace_lttng_statedump_process_user_ns() internally
		 * checks whether user_ns is NULL. While this does not
		 * appear to be a possible return value for
		 * task_cred_xxx(), err on the safe side and check
		 * for NULL here as well to be consistent with the
		 * paranoid behavior of
		 * trace_lttng_statedump_process_user_ns().
		 */
		user_ns = user_ns ? user_ns->lttng_user_ns_parent : NULL;
	} while (user_ns);

	/*
	 * Back and forth on locking strategy within Linux upstream for nsproxy.
	 * See Linux upstream commit 728dba3a39c66b3d8ac889ddbe38b5b1c264aec3
	 * "namespaces: Use task_lock and not rcu to protect nsproxy"
	 * for details.
	 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,36, 3,14,0,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,16,1,11, 3,17,0,0) || \
		LTTNG_RHEL_KERNEL_RANGE(3,10,0,229,13,0, 3,11,0,0,0,0))
	proxy = p->nsproxy;
#else
	rcu_read_lock();
	proxy = task_nsproxy(p);
#endif
	if (proxy) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0))
		trace_lttng_statedump_process_cgroup_ns(session, p, proxy->cgroup_ns);
#endif
		trace_lttng_statedump_process_ipc_ns(session, p, proxy->ipc_ns);
#ifndef LTTNG_MNT_NS_MISSING_HEADER
		trace_lttng_statedump_process_mnt_ns(session, p, proxy->mnt_ns);
#endif
		trace_lttng_statedump_process_net_ns(session, p, proxy->net_ns);
		trace_lttng_statedump_process_uts_ns(session, p, proxy->uts_ns);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
		trace_lttng_statedump_process_time_ns(session, p, proxy->time_ns);
#endif
	}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,36, 3,14,0,0) || \
		LTTNG_UBUNTU_KERNEL_RANGE(3,16,1,11, 3,17,0,0) || \
		LTTNG_RHEL_KERNEL_RANGE(3,10,0,229,13,0, 3,11,0,0,0,0))
	/* (nothing) */
#else
	rcu_read_unlock();
#endif
}

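/*
 * Walk every thread of every process under RCU, taking the task lock
 * around each thread, and dump its state, its namespaces and (once per
 * shared files_struct) its open file descriptors.
 */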
static
int lttng_enumerate_process_states(struct lttng_session *session)
{
	struct task_struct *g, *p;
	char *tmp;

	tmp = (char *) __get_free_page(GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	rcu_read_lock();
	for_each_process(g) {
		struct files_struct *prev_files = NULL;

		p = g;
		do {
			enum lttng_execution_mode mode =
				LTTNG_MODE_UNKNOWN;
			enum lttng_execution_submode submode =
				LTTNG_UNKNOWN;
			enum lttng_process_status status;
			enum lttng_thread_type type;
			struct files_struct *files;

			task_lock(p);
			if (p->exit_state == EXIT_ZOMBIE)
				status = LTTNG_ZOMBIE;
			else if (p->exit_state == EXIT_DEAD)
				status = LTTNG_DEAD;
			else if (p->state == TASK_RUNNING) {
				/* Is this a forked child that has not run yet? */
				if (list_empty(&p->rt.run_list))
					status = LTTNG_WAIT_FORK;
				else
					/*
					 * All tasks are considered as wait_cpu;
					 * the viewer will sort out if the task
					 * was really running at this time.
					 */
					status = LTTNG_WAIT_CPU;
			} else if (p->state &
					(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) {
				/* Task is waiting for something to complete */
				status = LTTNG_WAIT;
			} else
				status = LTTNG_UNNAMED;
			submode = LTTNG_NONE;

			/*
			 * Verification of p->mm is to filter out kernel
			 * threads; the viewer will further filter out if a
			 * user-space thread was in syscall mode or not.
			 */
			if (p->mm)
				type = LTTNG_USER_THREAD;
			else
				type = LTTNG_KERNEL_THREAD;
			files = p->files;

			trace_lttng_statedump_process_state(session,
				p, type, mode, submode, status, files);
			lttng_statedump_process_ns(session,
				p, type, mode, submode, status);
			/*
			 * As an optimisation for the common case, do not
			 * repeat information for the same files_struct in
			 * two consecutive threads. This is the common case
			 * for threads sharing the same fd table. RCU guarantees
			 * that the same files_struct pointer is not re-used
			 * throughout processes/threads iteration.
			 */
			if (files && files != prev_files) {
				lttng_enumerate_files(session, files, tmp);
				prev_files = files;
			}
			task_unlock(p);
		} while_each_thread(g, p);
	}
	rcu_read_unlock();

	free_page((unsigned long) tmp);

	return 0;
}

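/*
 * Per-CPU work item scheduled by do_lttng_statedump(); the last one to
 * complete wakes up the waiter on statedump_wq.
 */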
static
void lttng_statedump_work_func(struct work_struct *work)
{
	if (atomic_dec_and_test(&kernel_threads_to_run))
		/* If we are the last thread, wake up do_lttng_statedump */
		wake_up(&statedump_wq);
}

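/*
 * Perform the complete state dump for a session: processes, interrupts,
 * network interfaces, block devices and CPU topology, bracketed by the
 * statedump start and end events.
 */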
static
int do_lttng_statedump(struct lttng_session *session)
{
	int cpu, ret;

	trace_lttng_statedump_start(session);
	ret = lttng_enumerate_process_states(session);
	if (ret)
		return ret;
	/*
	 * FIXME
	 * ret = lttng_enumerate_vm_maps(session);
	 * if (ret)
	 *	return ret;
	 */
	ret = lttng_list_interrupts(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_network_ip_interface(session);
	if (ret)
		return ret;
	ret = lttng_enumerate_block_devices(session);
	switch (ret) {
	case 0:
		break;
	case -ENOSYS:
		printk(KERN_WARNING "LTTng: block device enumeration is not supported by kernel\n");
		break;
	default:
		return ret;
	}
	ret = lttng_enumerate_cpu_topology(session);
	if (ret)
		return ret;

	/* TODO lttng_dump_idt_table(session); */
	/* TODO lttng_dump_softirq_vec(session); */
	/* TODO lttng_list_modules(session); */
	/* TODO lttng_dump_swap_files(session); */

	/*
	 * Fire off a work queue on each CPU. Their sole purpose in life
	 * is to guarantee that each CPU has been in a state where it was
	 * in syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
	 */
	get_online_cpus();
	atomic_set(&kernel_threads_to_run, num_online_cpus());
	for_each_online_cpu(cpu) {
		INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
		schedule_delayed_work_on(cpu, &cpu_work[cpu], 0);
	}
	/* Wait for all threads to run */
	__wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
	put_online_cpus();
	/* Our work is done */
	trace_lttng_statedump_end(session);
	return 0;
}

/*
 * Called with session mutex held.
 */
int lttng_statedump_start(struct lttng_session *session)
{
	return do_lttng_statedump(session);
}
EXPORT_SYMBOL_GPL(lttng_statedump_start);

static
int __init lttng_statedump_init(void)
{
	/*
	 * Allow the module to load even if the fixup cannot be done. This
	 * will allow a seamless transition when the underlying issue fix
	 * is merged into the Linux kernel, and when tracepoint.c
	 * "tracepoint_module_notify" is turned into a static function.
	 */
	(void) wrapper_lttng_fixup_sig(THIS_MODULE);
	return 0;
}

module_init(lttng_statedump_init);

static
void __exit lttng_statedump_exit(void)
{
}

module_exit(lttng_statedump_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Jean-Hugues Deschenes");
MODULE_DESCRIPTION("LTTng statedump provider");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);