#include <linux/proc_fs.h>
#include <linux/file.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/irqnr.h>
-#include <linux/cpu.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
-#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/device.h>
+#include <linux/blkdev.h>
+
#include <lttng/events.h>
#include <lttng/tracer.h>
+#include <wrapper/cpu.h>
#include <wrapper/irqdesc.h>
#include <wrapper/fdtable.h>
-#include <wrapper/namespace.h>
-#include <wrapper/irq.h>
#include <wrapper/tracepoint.h>
-#include <wrapper/genhd.h>
-#include <wrapper/file.h>
+#include <wrapper/blkdev.h>
#include <wrapper/fdtable.h>
-
-#ifdef CONFIG_LTTNG_HAS_LIST_IRQ
-#include <linux/irq.h>
-#endif
+#include <wrapper/sched.h>
/* Define the tracepoints, but do not build the probes */
#define CREATE_TRACE_POINTS
};
-#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,11,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(6,0,0))
+
+#define LTTNG_PART_STRUCT_TYPE struct block_device
+
+/*
+ * lttng_get_part_name - format the partition's device name into name_buf.
+ *
+ * Uses the kernel's "%pg" vsprintf specifier, which prints the block
+ * device name of @part (e.g. "sda1"); @disk is unused in this >= 6.0
+ * variant but kept so the signature matches the older #elif branches.
+ * @name_buf is assumed to be at least BDEVNAME_SIZE bytes
+ * (NOTE(review): caller-side guarantee — confirm at call sites).
+ *
+ * Returns 0 on success, -ENOSYS if snprintf failed or truncated.
+ */
+static
+int lttng_get_part_name(struct gendisk *disk, struct block_device *part, char *name_buf)
+{
+ int ret;
+
+ ret = snprintf(name_buf, BDEVNAME_SIZE, "%pg", part);
+ /* Treat both snprintf error and truncation as "name unavailable". */
+ if (ret < 0 || ret >= BDEVNAME_SIZE)
+ return -ENOSYS;
+
+ return 0;
+}
+
+/*
+ * lttng_get_part_devt - return the dev_t (major:minor) of @part.
+ *
+ * On kernels >= 6.0 a partition is represented directly by a
+ * struct block_device, so its device number is simply bd_dev.
+ */
+static
+dev_t lttng_get_part_devt(struct block_device *part)
+{
+ return part->bd_dev;
+}
+
+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,11,0))
#define LTTNG_PART_STRUCT_TYPE struct block_device
* suppressed
*/
if (get_capacity(disk) == 0 ||
- (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
+ (disk->flags & LTTNG_GENHD_FL_HIDDEN))
continue;
ret = lttng_statedump_each_block_device(session, disk);
* the lock is taken, but we are not aware whether this is
* guaranteed or not, so play safe.
*/
- if (fd < fdt->max_fds && lttng_close_on_exec(fd, fdt))
+ if (fd < fdt->max_fds && close_on_exec(fd, fdt))
flags |= O_CLOEXEC;
if (IS_ERR(s)) {
struct dentry *dentry = file->f_path.dentry;
{
struct lttng_fd_ctx ctx = { .page = tmp, .session = session, .files = files, };
- lttng_iterate_fd(files, 0, lttng_dump_one_fd, &ctx);
+ iterate_fd(files, 0, lttng_dump_one_fd, &ctx);
}
#ifdef LTTNG_HAVE_STATEDUMP_CPU_TOPOLOGY
down_read(&mm->mmap_sem);
while (map) {
if (map->vm_file)
- ino = map->vm_file->lttng_f_dentry->d_inode->i_ino;
+ ino = map->vm_file->f_path.dentry->d_inode->i_ino;
else
ino = 0;
trace_lttng_statedump_vm_map(session, p, map, ino);
}
#endif
-#ifdef CONFIG_LTTNG_HAS_LIST_IRQ
-
static
int lttng_list_interrupts(struct lttng_kernel_session *session)
{
return 0;
#undef irq_to_desc
}
-#else
-static inline
-int lttng_list_interrupts(struct lttng_kernel_session *session)
-{
- return 0;
-}
-#endif
/*
* Statedump the task's namespaces using the proc filesystem inode number as
* paranoid behavior of
* trace_lttng_statedump_process_user_ns().
*/
- user_ns = user_ns ? user_ns->lttng_user_ns_parent : NULL;
+ user_ns = user_ns ? user_ns->parent : NULL;
} while (user_ns);
/*
#endif
trace_lttng_statedump_process_net_ns(session, p, proxy->net_ns);
trace_lttng_statedump_process_uts_ns(session, p, proxy->uts_ns);
-#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0) || \
+ LTTNG_RHEL_KERNEL_RANGE(4,18,0,305,0,0, 4,19,0,0,0,0))
trace_lttng_statedump_process_time_ns(session, p, proxy->time_ns);
#endif
}
status = LTTNG_ZOMBIE;
else if (p->exit_state == EXIT_DEAD)
status = LTTNG_DEAD;
- else if (p->state == TASK_RUNNING) {
+ else if (lttng_task_is_running(p)) {
/* Is this a forked child that has not run yet? */
if (list_empty(&p->rt.run_list))
status = LTTNG_WAIT_FORK;
* was really running at this time.
*/
status = LTTNG_WAIT_CPU;
- } else if (p->state &
+ } else if (lttng_get_task_state(p) &
(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)) {
/* Task is waiting for something to complete */
status = LTTNG_WAIT;
* is to guarantee that each CPU has been in a state where is was in
* syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
*/
- get_online_cpus();
+ lttng_cpus_read_lock();
atomic_set(&kernel_threads_to_run, num_online_cpus());
for_each_online_cpu(cpu) {
INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
}
/* Wait for all threads to run */
__wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
- put_online_cpus();
+ lttng_cpus_read_unlock();
/* Our work is done */
trace_lttng_statedump_end(session);
return 0;
static
int __init lttng_statedump_init(void)
{
- /*
- * Allow module to load even if the fixup cannot be done. This
- * will allow seemless transition when the underlying issue fix
- * is merged into the Linux kernel, and when tracepoint.c
- * "tracepoint_module_notify" is turned into a static function.
- */
- (void) wrapper_lttng_fixup_sig(THIS_MODULE);
return 0;
}