/*
 * instrumentation/events/lttng-module/arch/x86/kvm/mmutrace.h (lttng-modules)
 *
 * LTTng instrumentation of the x86 KVM MMU tracepoints. The v5.1 field
 * list reflects the upstream revert of "KVM: MMU: show mmu_valid_gen
 * in shadow page related tracepoints".
 */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #if !defined(LTTNG_TRACE_KVM_MMU_H) || defined(TRACE_HEADER_MULTI_READ)
3 #define LTTNG_TRACE_KVM_MMU_H
4
5 #include <probes/lttng-tracepoint-event.h>
6 #include <linux/version.h>
7
8 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
9 #include <linux/trace_events.h>
10 #else /* if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) */
11 #include <linux/ftrace_event.h>
12 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) */
13
14 #undef TRACE_SYSTEM
15 #define TRACE_SYSTEM kvm_mmu
16
/*
 * LTTNG_KVM_MMU_PAGE_FIELDS - common CTF field list for tracepoints that
 * take a struct kvm_mmu_page *. It is expanded inside TP_FIELDS() and
 * expects the page pointer argument to be named `sp`.
 *
 * Three variants, selected by kernel version:
 *  - >= 5.1: no mmu_valid_gen field (upstream reverted the commit that
 *    exposed it in these tracepoints for v5.1).
 *  - 3.11 .. 5.0: includes sp->mmu_valid_gen.
 *  - < 3.11: same as >= 5.1 (field did not exist yet).
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))

#define LTTNG_KVM_MMU_PAGE_FIELDS \
	ctf_integer(__u64, gfn, (sp)->gfn) \
	ctf_integer(__u32, role, (sp)->role.word) \
	ctf_integer(__u32, root_count, (sp)->root_count) \
	ctf_integer(bool, unsync, (sp)->unsync)

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))

#define LTTNG_KVM_MMU_PAGE_FIELDS \
	ctf_integer(unsigned long, mmu_valid_gen, (sp)->mmu_valid_gen) \
	ctf_integer(__u64, gfn, (sp)->gfn) \
	ctf_integer(__u32, role, (sp)->role.word) \
	ctf_integer(__u32, root_count, (sp)->root_count) \
	ctf_integer(bool, unsync, (sp)->unsync)

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */

#define LTTNG_KVM_MMU_PAGE_FIELDS \
	ctf_integer(__u64, gfn, (sp)->gfn) \
	ctf_integer(__u32, role, (sp)->role.word) \
	ctf_integer(__u32, root_count, (sp)->root_count) \
	ctf_integer(bool, unsync, (sp)->unsync)

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
43
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
/*
 * A pagetable walk has started.
 *
 * >= 3.6: the kernel tracepoint passes the page-fault error code
 * (pferr) directly; record it verbatim.
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, u32 pferr),
	TP_ARGS(addr, pferr),

	TP_FIELDS(
		ctf_integer(__u64, addr, addr)
		ctf_integer(__u32, pferr, pferr)
	)
)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)) */
/*
 * A pagetable walk has started.
 *
 * < 3.6: the tracepoint passes individual fault flags; synthesize a
 * pferr value with the same bit layout as the hardware error code
 * (bit 1 = write, bit 2 = user, bit 4 = fetch) so both variants emit
 * a compatible `pferr` field.
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, int write_fault, int user_fault, int fetch_fault),
	TP_ARGS(addr, write_fault, user_fault, fetch_fault),

	TP_FIELDS(
		ctf_integer(__u64, addr, addr)
		ctf_integer(__u32, pferr,
			(!!write_fault << 1) | (!!user_fault << 2)
			| (!!fetch_fault << 4))
	)
)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)) */
75
/*
 * We just walked a paging element: record the raw PTE value and the
 * paging level it was read from.
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_paging_element,
	TP_PROTO(u64 pte, int level),
	TP_ARGS(pte, level),

	TP_FIELDS(
		ctf_integer(__u64, pte, pte)
		ctf_integer(__u32, level, level)
	)
)
87
/*
 * Event class shared by the accessed/dirty bit-setting tracepoints.
 * Records only the guest physical address of the entry being touched:
 * table_gfn shifted to a byte address plus the in-table byte offset
 * (index * size of one entry).
 */
LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_set_bit_class,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size),

	TP_FIELDS(
		ctf_integer(__u64, gpa,
			((u64)table_gfn << PAGE_SHIFT) + index * size)
	)
)
99
/* We set a pte accessed bit (instance of kvm_mmu_set_bit_class). */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
)
107
/* We set a pte dirty bit (instance of kvm_mmu_set_bit_class). */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
)
115
/*
 * A pagetable walk terminated with an error; pferr is the page-fault
 * error code describing the failing access.
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_walker_error,
	TP_PROTO(u32 pferr),
	TP_ARGS(pferr),

	TP_FIELDS(
		ctf_integer(__u32, pferr, pferr)
	)
)
125
/*
 * A shadow page was looked up; `created` is true when a new page had to
 * be allocated rather than found in the hash. Emits the common
 * kvm_mmu_page field set plus the created flag.
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_get_page,
	TP_PROTO(struct kvm_mmu_page *sp, bool created),
	TP_ARGS(sp, created),

	TP_FIELDS(
		LTTNG_KVM_MMU_PAGE_FIELDS
		ctf_integer(bool, created, created)
	)
)
136
/*
 * Event class for tracepoints that record nothing beyond the common
 * kvm_mmu_page fields (sync/unsync/zap instances below).
 */
LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_page_class,

	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_FIELDS(
		LTTNG_KVM_MMU_PAGE_FIELDS
	)
)
146
/* A shadow page is being synchronized (instance of kvm_mmu_page_class). */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_sync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)
152
/* A shadow page is being marked unsync (instance of kvm_mmu_page_class). */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_unsync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)
158
/* A shadow page is being prepared for zapping (instance of kvm_mmu_page_class). */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)
164
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))

/*
 * An MMIO SPTE is being installed. Mapped to the LTTng event name
 * kvm_mmu_mark_mmio_spte.
 *
 * >= 3.11: the kernel tracepoint carries an additional MMIO generation
 * number (`gen`), recorded alongside the sptep/gfn/access fields.
 */
LTTNG_TRACEPOINT_EVENT_MAP(
	mark_mmio_spte,

	kvm_mmu_mark_mmio_spte,

	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
	TP_ARGS(sptep, gfn, access, gen),

	TP_FIELDS(
		ctf_integer_hex(void *, sptep, sptep)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
		ctf_integer(unsigned int, gen, gen)
	)
)

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */

/*
 * < 3.11 variant of the same event: no generation number in the
 * tracepoint prototype, so no `gen` field is emitted.
 */
LTTNG_TRACEPOINT_EVENT_MAP(
	mark_mmio_spte,

	kvm_mmu_mark_mmio_spte,

	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access),
	TP_ARGS(sptep, gfn, access),

	TP_FIELDS(
		ctf_integer_hex(void *, sptep, sptep)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
	)
)

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
201
/*
 * An MMIO page fault is being handled. Mapped to the LTTng event name
 * kvm_mmu_handle_mmio_page_fault; records the faulting address plus the
 * gfn/access decoded from the MMIO SPTE.
 */
LTTNG_TRACEPOINT_EVENT_MAP(
	handle_mmio_page_fault,

	kvm_mmu_handle_mmio_page_fault,

	TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
	TP_ARGS(addr, gfn, access),

	TP_FIELDS(
		ctf_integer(u64, addr, addr)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
	)
)
216
/*
 * Fast (lockless) page fault handling. Mapped to the LTTng event name
 * kvm_mmu_fast_page_fault. Records the vcpu, faulting GVA, error code,
 * the SPTE slot and its old value, and whether the fixup must be
 * retried. Note: new_spte dereferences *sptep at trace time, i.e. it
 * captures the SPTE value as seen after the fast-path update.
 */
LTTNG_TRACEPOINT_EVENT_MAP(
	fast_page_fault,

	kvm_mmu_fast_page_fault,

	TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
		u64 *sptep, u64 old_spte, bool retry),
	TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),

	TP_FIELDS(
		ctf_integer(int, vcpu_id, vcpu->vcpu_id)
		ctf_integer(gva_t, gva, gva)
		ctf_integer(u32, error_code, error_code)
		ctf_integer_hex(u64 *, sptep, sptep)
		ctf_integer(u64, old_spte, old_spte)
		ctf_integer(u64, new_spte, *sptep)
		ctf_integer(bool, retry, retry)
	)
)
#endif /* LTTNG_TRACE_KVM_MMU_H */
237
/*
 * Tell the tracepoint machinery where to find this header when it is
 * re-read by define_trace.h (which expands the events defined above).
 * This must stay outside the multi-read include guard.
 */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH instrumentation/events/lttng-module/arch/x86/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace

/* This part must be outside protection */
#include <probes/define_trace.h>