Commit | Line | Data |
---|---|---|
b3c40230 MG |
/*
 * LTTng instrumentation header for KVM MMU tracepoints (x86).
 *
 * TRACE_HEADER_MULTI_READ allows define_trace.h to re-include this file
 * several times with different macro definitions, so the guard must
 * permit re-entry in that case.
 */
#if !defined(_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVMMMU_H

#include <linux/tracepoint.h>
#include <linux/ftrace_event.h>
#include <linux/version.h>  /* LINUX_VERSION_CODE / KERNEL_VERSION checks below */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvmmmu
#undef KVM_MMU_PAGE_FIELDS
#undef KVM_MMU_PAGE_ASSIGN

/*
 * struct kvm_mmu_page gained the mmu_valid_gen generation counter in
 * kernel 3.11, so the shadow-page field/assign helpers come in two
 * versions selected on the target kernel.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))

/* Event fields common to all shadow-page tracepoints (3.11+). */
#define KVM_MMU_PAGE_FIELDS \
	__field(unsigned long, mmu_valid_gen) \
	__field(__u64, gfn) \
	__field(__u32, role) \
	__field(__u32, root_count) \
	__field(bool, unsync)

/* Populate the fields above from a struct kvm_mmu_page (3.11+). */
#define KVM_MMU_PAGE_ASSIGN(sp) \
	tp_assign(mmu_valid_gen, sp->mmu_valid_gen) \
	tp_assign(gfn, sp->gfn) \
	tp_assign(role, sp->role.word) \
	tp_assign(root_count, sp->root_count) \
	tp_assign(unsync, sp->unsync)

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */

/* Pre-3.11 variant: no mmu_valid_gen member on struct kvm_mmu_page. */
#define KVM_MMU_PAGE_FIELDS \
	__field(__u64, gfn) \
	__field(__u32, role) \
	__field(__u32, root_count) \
	__field(bool, unsync)

#define KVM_MMU_PAGE_ASSIGN(sp) \
	tp_assign(gfn, sp->gfn) \
	tp_assign(role, sp->role.word) \
	tp_assign(root_count, sp->root_count) \
	tp_assign(unsync, sp->unsync)

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
b3c40230 MG |
45 | |
/*
 * Symbolic names for page-fault error-code bits, used with
 * __print_flags() in the TP_printk format strings below.
 */
#define kvm_mmu_trace_pferr_flags       \
	{ PFERR_PRESENT_MASK, "P" },	\
	{ PFERR_WRITE_MASK, "W" },	\
	{ PFERR_USER_MASK, "U" },	\
	{ PFERR_RSVD_MASK, "RSVD" },	\
	{ PFERR_FETCH_MASK, "F" }
52 | ||
/*
 * A pagetable walk has started.
 * Records the guest virtual address being translated and the page-fault
 * error code that triggered the walk.
 */
TRACE_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, u32 pferr),
	TP_ARGS(addr, pferr),

	TP_STRUCT__entry(
		__field(__u64, addr)
		__field(__u32, pferr)
	),

	TP_fast_assign(
		tp_assign(addr, addr)
		tp_assign(pferr, pferr)
	),

	TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
)
74 | ||
75 | ||
/* We just walked a paging element: record the pte value and its level. */
TRACE_EVENT(
	kvm_mmu_paging_element,
	TP_PROTO(u64 pte, int level),
	TP_ARGS(pte, level),

	TP_STRUCT__entry(
		__field(__u64, pte)
		__field(__u32, level)
	),

	TP_fast_assign(
		tp_assign(pte, pte)
		tp_assign(level, level)
	),

	TP_printk("pte %llx level %u", __entry->pte, __entry->level)
)
94 | ||
/*
 * Event class for tracepoints that set a bit in a guest pte.
 * The guest physical address of the pte is reconstructed from the page
 * frame of the table (table_gfn), the entry index, and the entry size.
 */
DECLARE_EVENT_CLASS(kvm_mmu_set_bit_class,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size),

	TP_STRUCT__entry(
		__field(__u64, gpa)
	),

	TP_fast_assign(
		/* gpa = page base of the table + byte offset of the entry */
		tp_assign(gpa, ((u64)table_gfn << PAGE_SHIFT)
				+ index * size)
	),

	TP_printk("gpa %llx", __entry->gpa)
)
112 | ||
/* We set a pte accessed bit */
DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
)

/* We set a pte dirty bit */
DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
)
128 | ||
/* A pagetable walk terminated with an error; record the fault code. */
TRACE_EVENT(
	kvm_mmu_walker_error,
	TP_PROTO(u32 pferr),
	TP_ARGS(pferr),

	TP_STRUCT__entry(
		__field(__u32, pferr)
	),

	TP_fast_assign(
		tp_assign(pferr, pferr)
	),

	TP_printk("pferr %x %s", __entry->pferr,
		  __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
)
145 | ||
/*
 * A shadow page was looked up (or created, when 'created' is true).
 * Field layout depends on the kernel version via KVM_MMU_PAGE_FIELDS.
 */
TRACE_EVENT(
	kvm_mmu_get_page,
	TP_PROTO(struct kvm_mmu_page *sp, bool created),
	TP_ARGS(sp, created),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
		__field(bool, created)
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
		tp_assign(created, created)
	),

	/*
	 * NOTE(review): TP_printk() intentionally left empty here —
	 * presumably the format string is unused by this tracer and the
	 * field set varies by kernel version; confirm against the
	 * surrounding project's conventions.
	 */
	TP_printk()
)
163 | ||
/*
 * Event class for tracepoints that record only the state of one shadow
 * page (fields supplied by the version-dependent KVM_MMU_PAGE_FIELDS).
 */
DECLARE_EVENT_CLASS(kvm_mmu_page_class,

	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_STRUCT__entry(
		KVM_MMU_PAGE_FIELDS
	),

	TP_fast_assign(
		KVM_MMU_PAGE_ASSIGN(sp)
	),

	/* NOTE(review): empty TP_printk(), same rationale as kvm_mmu_get_page. */
	TP_printk()
)
179 | ||
/* A shadow page is being synchronized with the guest page table. */
DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_sync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)

/* A shadow page is being marked unsync. */
DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_unsync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)

/* A shadow page is being prepared for zapping. */
DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)
197 | ||
/*
 * mark_mmio_spte: an MMIO spte was installed.
 * Kernel 3.11 added an MMIO generation number argument to this
 * tracepoint, hence the two prototypes below.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))

TRACE_EVENT(
	mark_mmio_spte,
	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
	TP_ARGS(sptep, gfn, access, gen),

	TP_STRUCT__entry(
		__field(void *, sptep)
		__field(gfn_t, gfn)
		__field(unsigned, access)
		__field(unsigned int, gen)
	),

	TP_fast_assign(
		tp_assign(sptep, sptep)
		tp_assign(gfn, gfn)
		tp_assign(access, access)
		tp_assign(gen, gen)
	),

	/* NOTE(review): 'gen' is recorded but not part of this format string. */
	TP_printk("sptep:%p gfn %llx access %x", __entry->sptep, __entry->gfn,
		  __entry->access)
)

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */

/* Pre-3.11 variant: no MMIO generation argument. */
TRACE_EVENT(
	mark_mmio_spte,
	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access),
	TP_ARGS(sptep, gfn, access),

	TP_STRUCT__entry(
		__field(void *, sptep)
		__field(gfn_t, gfn)
		__field(unsigned, access)
	),

	TP_fast_assign(
		tp_assign(sptep, sptep)
		tp_assign(gfn, gfn)
		tp_assign(access, access)
	),

	TP_printk("sptep:%p gfn %llx access %x", __entry->sptep, __entry->gfn,
		  __entry->access)
)

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
/* An MMIO page fault is being handled for the given guest address. */
TRACE_EVENT(
	handle_mmio_page_fault,
	TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
	TP_ARGS(addr, gfn, access),

	TP_STRUCT__entry(
		__field(u64, addr)
		__field(gfn_t, gfn)
		__field(unsigned, access)
	),

	TP_fast_assign(
		tp_assign(addr, addr)
		tp_assign(gfn, gfn)
		tp_assign(access, access)
	),

	TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
		  __entry->access)
)
268 | ||
/*
 * Helper for fast_page_fault's TP_printk: true when the fault was
 * retried and the named spte field ended up writable.  Only valid
 * inside a TP_printk expansion (relies on __entry).
 */
#define __spte_satisfied(__spte)				\
	(__entry->retry && is_writable_pte(__entry->__spte))
271 | ||
/*
 * The fast (lockless) page-fault path ran.
 * Captures the pte before and after the attempt; new_spte re-reads
 * *sptep after the handler, so it reflects the post-fault value.
 */
TRACE_EVENT(
	fast_page_fault,
	TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
		 u64 *sptep, u64 old_spte, bool retry),
	TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(gva_t, gva)
		__field(u32, error_code)
		__field(u64 *, sptep)
		__field(u64, old_spte)
		__field(u64, new_spte)
		__field(bool, retry)
	),

	TP_fast_assign(
		tp_assign(vcpu_id, vcpu->vcpu_id)
		tp_assign(gva, gva)
		tp_assign(error_code, error_code)
		tp_assign(sptep, sptep)
		tp_assign(old_spte, old_spte)
		/* re-read after the handler ran: the post-fault pte value */
		tp_assign(new_spte, *sptep)
		tp_assign(retry, retry)
	),

	TP_printk("vcpu %d gva %lx error_code %s sptep %p old %#llx"
		  " new %llx spurious %d fixed %d", __entry->vcpu_id,
		  __entry->gva, __print_flags(__entry->error_code, "|",
		  kvm_mmu_trace_pferr_flags), __entry->sptep,
		  __entry->old_spte, __entry->new_spte,
		  __spte_satisfied(old_spte), __spte_satisfied(new_spte)
	)
)
#endif /* _TRACE_KVMMMU_H */

/* Tell define_trace.h where to find this header on re-inclusion. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module/arch/x86/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace

/* This part must be outside protection */
#include "../../../../../../probes/define_trace.h"