1 #if !defined(LTTNG_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
2 #define LTTNG_TRACE_KVMMMU_H
4 #include "../../../../../../probes/lttng-tracepoint-event.h"
5 #include <linux/version.h>
7 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
8 #include <linux/trace_events.h>
9 #else /* if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) */
10 #include <linux/ftrace_event.h>
11 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) */
14 #define TRACE_SYSTEM kvmmmu
16 #undef KVM_MMU_PAGE_FIELDS
17 #undef KVM_MMU_PAGE_ASSIGN
19 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
21 #define KVM_MMU_PAGE_FIELDS \
22 __field(unsigned long, mmu_valid_gen) \
24 __field(__u32, role) \
25 __field(__u32, root_count) \
28 #define KVM_MMU_PAGE_ASSIGN(sp) \
29 tp_assign(mmu_valid_gen, sp->mmu_valid_gen) \
30 tp_assign(gfn, sp->gfn) \
31 tp_assign(role, sp->role.word) \
32 tp_assign(root_count, sp->root_count) \
33 tp_assign(unsync, sp->unsync)
35 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
37 #define KVM_MMU_PAGE_FIELDS \
39 __field(__u32, role) \
40 __field(__u32, root_count) \
43 #define KVM_MMU_PAGE_ASSIGN(sp) \
44 tp_assign(gfn, sp->gfn) \
45 tp_assign(role, sp->role.word) \
46 tp_assign(root_count, sp->root_count) \
47 tp_assign(unsync, sp->unsync)
49 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
/*
 * x86 page-fault error-code bits, pretty-printed as flag letters by the
 * __print_flags() calls in the TP_printk strings below.
 */
#define kvm_mmu_trace_pferr_flags \
	{ PFERR_PRESENT_MASK, "P" }, \
	{ PFERR_WRITE_MASK, "W" }, \
	{ PFERR_USER_MASK, "U" }, \
	{ PFERR_RSVD_MASK, "RSVD" }, \
	{ PFERR_FETCH_MASK, "F" }
58 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
60 * A pagetable walk has started
62 LTTNG_TRACEPOINT_EVENT(
63 kvm_mmu_pagetable_walk
,
64 TP_PROTO(u64 addr
, u32 pferr
),
74 tp_assign(pferr
, pferr
)
77 TP_printk("addr %llx pferr %x %s", __entry
->addr
, __entry
->pferr
,
78 __print_flags(__entry
->pferr
, "|", kvm_mmu_trace_pferr_flags
))
80 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)) */
82 * A pagetable walk has started
84 LTTNG_TRACEPOINT_EVENT(
85 kvm_mmu_pagetable_walk
,
86 TP_PROTO(u64 addr
, int write_fault
, int user_fault
, int fetch_fault
),
87 TP_ARGS(addr
, write_fault
, user_fault
, fetch_fault
),
97 (!!write_fault
<< 1) | (!!user_fault
<< 2)
98 | (!!fetch_fault
<< 4))
101 TP_printk("addr %llx pferr %x %s", __entry
->addr
, __entry
->pferr
,
102 __print_flags(__entry
->pferr
, "|", kvm_mmu_trace_pferr_flags
))
104 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)) */
107 /* We just walked a paging element */
108 LTTNG_TRACEPOINT_EVENT(
109 kvm_mmu_paging_element
,
110 TP_PROTO(u64 pte
, int level
),
115 __field(__u32
, level
)
120 tp_assign(level
, level
)
123 TP_printk("pte %llx level %u", __entry
->pte
, __entry
->level
)
126 LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_set_bit_class
,
128 TP_PROTO(unsigned long table_gfn
, unsigned index
, unsigned size
),
130 TP_ARGS(table_gfn
, index
, size
),
137 tp_assign(gpa
, ((u64
)table_gfn
<< PAGE_SHIFT
)
141 TP_printk("gpa %llx", __entry
->gpa
)
144 /* We set a pte accessed bit */
145 LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class
, kvm_mmu_set_accessed_bit
,
147 TP_PROTO(unsigned long table_gfn
, unsigned index
, unsigned size
),
149 TP_ARGS(table_gfn
, index
, size
)
152 /* We set a pte dirty bit */
153 LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class
, kvm_mmu_set_dirty_bit
,
155 TP_PROTO(unsigned long table_gfn
, unsigned index
, unsigned size
),
157 TP_ARGS(table_gfn
, index
, size
)
160 LTTNG_TRACEPOINT_EVENT(
161 kvm_mmu_walker_error
,
166 __field(__u32
, pferr
)
170 tp_assign(pferr
, pferr
)
173 TP_printk("pferr %x %s", __entry
->pferr
,
174 __print_flags(__entry
->pferr
, "|", kvm_mmu_trace_pferr_flags
))
177 LTTNG_TRACEPOINT_EVENT(
179 TP_PROTO(struct kvm_mmu_page
*sp
, bool created
),
180 TP_ARGS(sp
, created
),
184 __field(bool, created
)
188 KVM_MMU_PAGE_ASSIGN(sp
)
189 tp_assign(created
, created
)
195 LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_page_class
,
197 TP_PROTO(struct kvm_mmu_page
*sp
),
205 KVM_MMU_PAGE_ASSIGN(sp
)
211 LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class
, kvm_mmu_sync_page
,
212 TP_PROTO(struct kvm_mmu_page
*sp
),
217 LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class
, kvm_mmu_unsync_page
,
218 TP_PROTO(struct kvm_mmu_page
*sp
),
223 LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class
, kvm_mmu_prepare_zap_page
,
224 TP_PROTO(struct kvm_mmu_page
*sp
),
229 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
231 LTTNG_TRACEPOINT_EVENT(
233 TP_PROTO(u64
*sptep
, gfn_t gfn
, unsigned access
, unsigned int gen
),
234 TP_ARGS(sptep
, gfn
, access
, gen
),
237 __field(void *, sptep
)
239 __field(unsigned, access
)
240 __field(unsigned int, gen
)
244 tp_assign(sptep
, sptep
)
246 tp_assign(access
, access
)
250 TP_printk("sptep:%p gfn %llx access %x", __entry
->sptep
, __entry
->gfn
,
254 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
256 LTTNG_TRACEPOINT_EVENT(
258 TP_PROTO(u64
*sptep
, gfn_t gfn
, unsigned access
),
259 TP_ARGS(sptep
, gfn
, access
),
262 __field(void *, sptep
)
264 __field(unsigned, access
)
268 tp_assign(sptep
, sptep
)
270 tp_assign(access
, access
)
273 TP_printk("sptep:%p gfn %llx access %x", __entry
->sptep
, __entry
->gfn
,
277 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
279 LTTNG_TRACEPOINT_EVENT(
280 handle_mmio_page_fault
,
281 TP_PROTO(u64 addr
, gfn_t gfn
, unsigned access
),
282 TP_ARGS(addr
, gfn
, access
),
287 __field(unsigned, access
)
291 tp_assign(addr
, addr
)
293 tp_assign(access
, access
)
296 TP_printk("addr:%llx gfn %llx access %x", __entry
->addr
, __entry
->gfn
,
/*
 * Used by fast_page_fault's TP_printk below: true when the fault was
 * retried and the recorded spte snapshot (__spte = old_spte or new_spte)
 * is writable.
 */
#define __spte_satisfied(__spte) \
	(__entry->retry && is_writable_pte(__entry->__spte))
303 LTTNG_TRACEPOINT_EVENT(
305 TP_PROTO(struct kvm_vcpu
*vcpu
, gva_t gva
, u32 error_code
,
306 u64
*sptep
, u64 old_spte
, bool retry
),
307 TP_ARGS(vcpu
, gva
, error_code
, sptep
, old_spte
, retry
),
310 __field(int, vcpu_id
)
312 __field(u32
, error_code
)
313 __field(u64
*, sptep
)
314 __field(u64
, old_spte
)
315 __field(u64
, new_spte
)
320 tp_assign(vcpu_id
, vcpu
->vcpu_id
)
322 tp_assign(error_code
, error_code
)
323 tp_assign(sptep
, sptep
)
324 tp_assign(old_spte
, old_spte
)
325 tp_assign(new_spte
, *sptep
)
326 tp_assign(retry
, retry
)
329 TP_printk("vcpu %d gva %lx error_code %s sptep %p old %#llx"
330 " new %llx spurious %d fixed %d", __entry
->vcpu_id
,
331 __entry
->gva
, __print_flags(__entry
->error_code
, "|",
332 kvm_mmu_trace_pferr_flags
), __entry
->sptep
,
333 __entry
->old_spte
, __entry
->new_spte
,
334 __spte_satisfied(old_spte
), __spte_satisfied(new_spte
)
#endif /* LTTNG_TRACE_KVMMMU_H */

/* Tell define_trace.h where to re-read this header from (path is relative
 * to the probes directory). */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module/arch/x86/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace

/* This part must be outside protection */
#include "../../../../../../probes/define_trace.h"