Fix: x86 kvm mmutrace instrumentation for kernel < 3.6
lttng-modules.git: instrumentation/events/lttng-module/arch/x86/kvm/mmutrace.h
#if !defined(LTTNG_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_KVMMMU_H

#include "../../../../../../probes/lttng-tracepoint-event.h"
#include <linux/version.h>

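/*
 * Linux 4.2 renamed linux/ftrace_event.h to linux/trace_events.h, so
 * pick the header matching the kernel being built against.
 */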
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
#include <linux/trace_events.h>
#else /* if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) */
#include <linux/ftrace_event.h>
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvmmmu

#undef KVM_MMU_PAGE_FIELDS
#undef KVM_MMU_PAGE_ASSIGN

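/*
 * Kernels 3.11 and later track a per-VM generation number
 * (mmu_valid_gen) on shadow pages to zap all pages cheaply, so the
 * recorded fields differ across that boundary.
 */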
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))

#define KVM_MMU_PAGE_FIELDS \
	__field(unsigned long, mmu_valid_gen) \
	__field(__u64, gfn) \
	__field(__u32, role) \
	__field(__u32, root_count) \
	__field(bool, unsync)

#define KVM_MMU_PAGE_ASSIGN(sp) \
	tp_assign(mmu_valid_gen, sp->mmu_valid_gen) \
	tp_assign(gfn, sp->gfn) \
	tp_assign(role, sp->role.word) \
	tp_assign(root_count, sp->root_count) \
	tp_assign(unsync, sp->unsync)

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */

#define KVM_MMU_PAGE_FIELDS \
	__field(__u64, gfn) \
	__field(__u32, role) \
	__field(__u32, root_count) \
	__field(bool, unsync)

#define KVM_MMU_PAGE_ASSIGN(sp) \
	tp_assign(gfn, sp->gfn) \
	tp_assign(role, sp->role.word) \
	tp_assign(root_count, sp->root_count) \
	tp_assign(unsync, sp->unsync)

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */

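/*
 * Map the x86 page fault error code bits (present, write, user,
 * reserved, fetch) to short flag names for pretty-printing.
 */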
#define kvm_mmu_trace_pferr_flags	\
	{ PFERR_PRESENT_MASK, "P" },	\
	{ PFERR_WRITE_MASK, "W" },	\
	{ PFERR_USER_MASK, "U" },	\
	{ PFERR_RSVD_MASK, "RSVD" },	\
	{ PFERR_FETCH_MASK, "F" }

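/*
 * Since Linux 3.6, kvm_mmu_pagetable_walk receives the page fault
 * error code directly.  Older kernels pass separate write/user/fetch
 * flags, which the pre-3.6 variant below folds back into a pferr
 * value at the matching PFERR bit positions (bits 1, 2 and 4).
 */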
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
/*
 * A pagetable walk has started
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, u32 pferr),
	TP_ARGS(addr, pferr),

	TP_STRUCT__entry(
		__field(__u64, addr)
		__field(__u32, pferr)
	),

	TP_fast_assign(
		tp_assign(addr, addr)
		tp_assign(pferr, pferr)
	),

	TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)) */
/*
 * A pagetable walk has started
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, int write_fault, int user_fault, int fetch_fault),
	TP_ARGS(addr, write_fault, user_fault, fetch_fault),

	TP_STRUCT__entry(
		__field(__u64, addr)
		__field(__u32, pferr)
	),

	TP_fast_assign(
		tp_assign(addr, addr)
		tp_assign(pferr,
			(!!write_fault << 1) | (!!user_fault << 2)
			| (!!fetch_fault << 4))
	),

	TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)) */

/* We just walked a paging element */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_paging_element,
	TP_PROTO(u64 pte, int level),
	TP_ARGS(pte, level),

	TP_STRUCT__entry(
		__field(__u64, pte)
		__field(__u32, level)
	),

	TP_fast_assign(
		tp_assign(pte, pte)
		tp_assign(level, level)
	),

	TP_printk("pte %llx level %u", __entry->pte, __entry->level)
)

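/*
 * Both set-bit events record the guest physical address of the
 * updated page table entry: table_gfn << PAGE_SHIFT is the base of
 * the page table page, and index * size is the byte offset of the
 * entry within it.
 */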
LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_set_bit_class,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size),

	TP_STRUCT__entry(
		__field(__u64, gpa)
	),

	TP_fast_assign(
		tp_assign(gpa, ((u64)table_gfn << PAGE_SHIFT)
				+ index * size)
	),

	TP_printk("gpa %llx", __entry->gpa)
)

/* We set a pte accessed bit */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
)

/* We set a pte dirty bit */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
)

LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_walker_error,
	TP_PROTO(u32 pferr),
	TP_ARGS(pferr),

	TP_STRUCT__entry(
		__field(__u32, pferr)
	),

	TP_fast_assign(
		tp_assign(pferr, pferr)
	),

	TP_printk("pferr %x %s", __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
)

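/*
 * The following events record the shadow page fields selected by
 * KVM_MMU_PAGE_FIELDS above.  LTTng serializes the fields in binary
 * form and does not use the printk format string, so TP_printk() is
 * left empty here.
 */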
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_get_page,
	TP_PROTO(struct kvm_mmu_page *sp, bool created),
	TP_ARGS(sp, created),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
		__field(bool, created)
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
		tp_assign(created, created)
	),

	TP_printk()
)

LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_page_class,

	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
	),

	TP_printk()
)

LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_sync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)

LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_unsync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)

LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)

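/*
 * Linux 3.11 added a generation number to MMIO sptes (used to
 * invalidate cached MMIO sptes cheaply), so mark_mmio_spte gained a
 * matching "gen" argument.
 */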
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))

LTTNG_TRACEPOINT_EVENT(
	mark_mmio_spte,
	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
	TP_ARGS(sptep, gfn, access, gen),

	TP_STRUCT__entry(
		__field(void *, sptep)
		__field(gfn_t, gfn)
		__field(unsigned, access)
		__field(unsigned int, gen)
	),

	TP_fast_assign(
		tp_assign(sptep, sptep)
		tp_assign(gfn, gfn)
		tp_assign(access, access)
		tp_assign(gen, gen)
	),

	TP_printk("sptep:%p gfn %llx access %x", __entry->sptep, __entry->gfn,
		  __entry->access)
)

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */

LTTNG_TRACEPOINT_EVENT(
	mark_mmio_spte,
	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access),
	TP_ARGS(sptep, gfn, access),

	TP_STRUCT__entry(
		__field(void *, sptep)
		__field(gfn_t, gfn)
		__field(unsigned, access)
	),

	TP_fast_assign(
		tp_assign(sptep, sptep)
		tp_assign(gfn, gfn)
		tp_assign(access, access)
	),

	TP_printk("sptep:%p gfn %llx access %x", __entry->sptep, __entry->gfn,
		  __entry->access)
)

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */

LTTNG_TRACEPOINT_EVENT(
	handle_mmio_page_fault,
	TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
	TP_ARGS(addr, gfn, access),

	TP_STRUCT__entry(
		__field(u64, addr)
		__field(gfn_t, gfn)
		__field(unsigned, access)
	),

	TP_fast_assign(
		tp_assign(addr, addr)
		tp_assign(gfn, gfn)
		tp_assign(access, access)
	),

	TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
		  __entry->access)
)

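/*
 * Used in the TP_printk below: __spte_satisfied(old_spte) expands to
 * a check on __entry->old_spte, so "spurious" means the fast path
 * succeeded with the old spte already writable, while "fixed" means
 * the new spte was made writable.
 */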
#define __spte_satisfied(__spte) \
	(__entry->retry && is_writable_pte(__entry->__spte))

LTTNG_TRACEPOINT_EVENT(
	fast_page_fault,
	TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
		 u64 *sptep, u64 old_spte, bool retry),
	TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(gva_t, gva)
		__field(u32, error_code)
		__field(u64 *, sptep)
		__field(u64, old_spte)
		__field(u64, new_spte)
		__field(bool, retry)
	),

	TP_fast_assign(
		tp_assign(vcpu_id, vcpu->vcpu_id)
		tp_assign(gva, gva)
		tp_assign(error_code, error_code)
		tp_assign(sptep, sptep)
		tp_assign(old_spte, old_spte)
		tp_assign(new_spte, *sptep)
		tp_assign(retry, retry)
	),

	TP_printk("vcpu %d gva %lx error_code %s sptep %p old %#llx"
		  " new %llx spurious %d fixed %d", __entry->vcpu_id,
		  __entry->gva, __print_flags(__entry->error_code, "|",
		  kvm_mmu_trace_pferr_flags), __entry->sptep,
		  __entry->old_spte, __entry->new_spte,
		  __spte_satisfied(old_spte), __spte_satisfied(new_spte)
	)
)
#endif /* LTTNG_TRACE_KVMMMU_H */

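/*
 * TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE tell define_trace.h where to
 * re-read this header (with TRACE_HEADER_MULTI_READ defined) so the
 * event definitions above can be expanded into probe code.
 */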
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module/arch/x86/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace

/* This part must be outside protection */
#include "../../../../../../probes/define_trace.h"