1 #if !defined(LTTNG_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
2 #define LTTNG_TRACE_KVMMMU_H
4 #include "../../../../../../probes/lttng-tracepoint-event.h"
5 #include <linux/ftrace_event.h>
6 #include <linux/version.h>
9 #define TRACE_SYSTEM kvmmmu
11 #undef KVM_MMU_PAGE_FIELDS
12 #undef KVM_MMU_PAGE_ASSIGN
14 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
16 #define KVM_MMU_PAGE_FIELDS \
17 __field(unsigned long, mmu_valid_gen) \
19 __field(__u32, role) \
20 __field(__u32, root_count) \
23 #define KVM_MMU_PAGE_ASSIGN(sp) \
24 tp_assign(mmu_valid_gen, sp->mmu_valid_gen) \
25 tp_assign(gfn, sp->gfn) \
26 tp_assign(role, sp->role.word) \
27 tp_assign(root_count, sp->root_count) \
28 tp_assign(unsync, sp->unsync)
30 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
32 #define KVM_MMU_PAGE_FIELDS \
34 __field(__u32, role) \
35 __field(__u32, root_count) \
38 #define KVM_MMU_PAGE_ASSIGN(sp) \
39 tp_assign(gfn, sp->gfn) \
40 tp_assign(role, sp->role.word) \
41 tp_assign(root_count, sp->root_count) \
42 tp_assign(unsync, sp->unsync)
44 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
/* Page-fault error-code bits, decoded symbolically via __print_flags(). */
#define kvm_mmu_trace_pferr_flags \
	{ PFERR_PRESENT_MASK, "P" }, \
	{ PFERR_WRITE_MASK, "W" }, \
	{ PFERR_USER_MASK, "U" }, \
	{ PFERR_RSVD_MASK, "RSVD" }, \
	{ PFERR_FETCH_MASK, "F" }
54 * A pagetable walk has started
56 LTTNG_TRACEPOINT_EVENT(
57 kvm_mmu_pagetable_walk
,
58 TP_PROTO(u64 addr
, u32 pferr
),
68 tp_assign(pferr
, pferr
)
71 TP_printk("addr %llx pferr %x %s", __entry
->addr
, __entry
->pferr
,
72 __print_flags(__entry
->pferr
, "|", kvm_mmu_trace_pferr_flags
))
76 /* We just walked a paging element */
77 LTTNG_TRACEPOINT_EVENT(
78 kvm_mmu_paging_element
,
79 TP_PROTO(u64 pte
, int level
),
89 tp_assign(level
, level
)
92 TP_printk("pte %llx level %u", __entry
->pte
, __entry
->level
)
95 LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_set_bit_class
,
97 TP_PROTO(unsigned long table_gfn
, unsigned index
, unsigned size
),
99 TP_ARGS(table_gfn
, index
, size
),
106 tp_assign(gpa
, ((u64
)table_gfn
<< PAGE_SHIFT
)
110 TP_printk("gpa %llx", __entry
->gpa
)
113 /* We set a pte accessed bit */
114 LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class
, kvm_mmu_set_accessed_bit
,
116 TP_PROTO(unsigned long table_gfn
, unsigned index
, unsigned size
),
118 TP_ARGS(table_gfn
, index
, size
)
121 /* We set a pte dirty bit */
122 LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class
, kvm_mmu_set_dirty_bit
,
124 TP_PROTO(unsigned long table_gfn
, unsigned index
, unsigned size
),
126 TP_ARGS(table_gfn
, index
, size
)
129 LTTNG_TRACEPOINT_EVENT(
130 kvm_mmu_walker_error
,
135 __field(__u32
, pferr
)
139 tp_assign(pferr
, pferr
)
142 TP_printk("pferr %x %s", __entry
->pferr
,
143 __print_flags(__entry
->pferr
, "|", kvm_mmu_trace_pferr_flags
))
146 LTTNG_TRACEPOINT_EVENT(
148 TP_PROTO(struct kvm_mmu_page
*sp
, bool created
),
149 TP_ARGS(sp
, created
),
153 __field(bool, created
)
157 KVM_MMU_PAGE_ASSIGN(sp
)
158 tp_assign(created
, created
)
164 LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_page_class
,
166 TP_PROTO(struct kvm_mmu_page
*sp
),
174 KVM_MMU_PAGE_ASSIGN(sp
)
180 LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class
, kvm_mmu_sync_page
,
181 TP_PROTO(struct kvm_mmu_page
*sp
),
186 LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class
, kvm_mmu_unsync_page
,
187 TP_PROTO(struct kvm_mmu_page
*sp
),
192 LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class
, kvm_mmu_prepare_zap_page
,
193 TP_PROTO(struct kvm_mmu_page
*sp
),
198 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))
200 LTTNG_TRACEPOINT_EVENT(
202 TP_PROTO(u64
*sptep
, gfn_t gfn
, unsigned access
, unsigned int gen
),
203 TP_ARGS(sptep
, gfn
, access
, gen
),
206 __field(void *, sptep
)
208 __field(unsigned, access
)
209 __field(unsigned int, gen
)
213 tp_assign(sptep
, sptep
)
215 tp_assign(access
, access
)
219 TP_printk("sptep:%p gfn %llx access %x", __entry
->sptep
, __entry
->gfn
,
223 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
225 LTTNG_TRACEPOINT_EVENT(
227 TP_PROTO(u64
*sptep
, gfn_t gfn
, unsigned access
),
228 TP_ARGS(sptep
, gfn
, access
),
231 __field(void *, sptep
)
233 __field(unsigned, access
)
237 tp_assign(sptep
, sptep
)
239 tp_assign(access
, access
)
242 TP_printk("sptep:%p gfn %llx access %x", __entry
->sptep
, __entry
->gfn
,
246 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
248 LTTNG_TRACEPOINT_EVENT(
249 handle_mmio_page_fault
,
250 TP_PROTO(u64 addr
, gfn_t gfn
, unsigned access
),
251 TP_ARGS(addr
, gfn
, access
),
256 __field(unsigned, access
)
260 tp_assign(addr
, addr
)
262 tp_assign(access
, access
)
265 TP_printk("addr:%llx gfn %llx access %x", __entry
->addr
, __entry
->gfn
,
/* True when the retried fault left the given spte writable (fault fixed). */
#define __spte_satisfied(__spte) \
	(__entry->retry && is_writable_pte(__entry->__spte))
272 LTTNG_TRACEPOINT_EVENT(
274 TP_PROTO(struct kvm_vcpu
*vcpu
, gva_t gva
, u32 error_code
,
275 u64
*sptep
, u64 old_spte
, bool retry
),
276 TP_ARGS(vcpu
, gva
, error_code
, sptep
, old_spte
, retry
),
279 __field(int, vcpu_id
)
281 __field(u32
, error_code
)
282 __field(u64
*, sptep
)
283 __field(u64
, old_spte
)
284 __field(u64
, new_spte
)
289 tp_assign(vcpu_id
, vcpu
->vcpu_id
)
291 tp_assign(error_code
, error_code
)
292 tp_assign(sptep
, sptep
)
293 tp_assign(old_spte
, old_spte
)
294 tp_assign(new_spte
, *sptep
)
295 tp_assign(retry
, retry
)
298 TP_printk("vcpu %d gva %lx error_code %s sptep %p old %#llx"
299 " new %llx spurious %d fixed %d", __entry
->vcpu_id
,
300 __entry
->gva
, __print_flags(__entry
->error_code
, "|",
301 kvm_mmu_trace_pferr_flags
), __entry
->sptep
,
302 __entry
->old_spte
, __entry
->new_spte
,
303 __spte_satisfied(old_spte
), __spte_satisfied(new_spte
)
306 #endif /* LTTNG_TRACE_KVMMMU_H */
308 #undef TRACE_INCLUDE_PATH
309 #define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module/arch/x86/kvm
310 #undef TRACE_INCLUDE_FILE
311 #define TRACE_INCLUDE_FILE mmutrace
313 /* This part must be outside protection */
314 #include "../../../../../../probes/define_trace.h"