#if !defined(LTTNG_TRACE_KVM_MMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_KVM_MMU_H

#include "../../../../../../probes/lttng-tracepoint-event.h"
#include <linux/version.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
#include <linux/trace_events.h>
#else /* if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) */
#include <linux/ftrace_event.h>
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) */
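/*
 * Note: <linux/ftrace_event.h> was renamed to <linux/trace_events.h> in
 * kernel 4.2, hence the version check above selecting which header to pull
 * in.
 */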

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_mmu

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))

#define LTTNG_KVM_MMU_PAGE_FIELDS \
	ctf_integer(unsigned long, mmu_valid_gen, (sp)->mmu_valid_gen) \
	ctf_integer(__u64, gfn, (sp)->gfn) \
	ctf_integer(__u32, role, (sp)->role.word) \
	ctf_integer(__u32, root_count, (sp)->root_count) \
	ctf_integer(bool, unsync, (sp)->unsync)

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */

#define LTTNG_KVM_MMU_PAGE_FIELDS \
	ctf_integer(__u64, gfn, (sp)->gfn) \
	ctf_integer(__u32, role, (sp)->role.word) \
	ctf_integer(__u32, root_count, (sp)->root_count) \
	ctf_integer(bool, unsync, (sp)->unsync)

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
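/*
 * LTTNG_KVM_MMU_PAGE_FIELDS expands to the common CTF fields recorded for a
 * struct kvm_mmu_page (referenced through the local name "sp" in the events
 * below). The mmu_valid_gen field is only recorded on kernels >= 3.11, where
 * struct kvm_mmu_page carries that generation counter.
 */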

/*
 * A pagetable walk has started
 */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_pagetable_walk,
	TP_PROTO(u64 addr, u32 pferr),
	TP_ARGS(addr, pferr),

	TP_FIELDS(
		ctf_integer(__u64, addr, addr)
		ctf_integer(__u32, pferr, pferr)
	)
)
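/*
 * Rough sketch of how such an event is produced (assuming the kernel
 * tracepoint of the same name is compiled in): LTTNG_TRACEPOINT_EVENT hooks
 * the existing kvm_mmu_pagetable_walk kernel tracepoint, TP_PROTO/TP_ARGS
 * mirror the kernel-side prototype, and each ctf_integer() entry serializes
 * one integer field into the CTF trace. An illustrative (hypothetical)
 * call site in arch/x86/kvm such as:
 *
 *	trace_kvm_mmu_pagetable_walk(addr, pferr);
 *
 * would therefore yield an event carrying "addr" and "pferr" fields.
 */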


/* We just walked a paging element */
LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_paging_element,
	TP_PROTO(u64 pte, int level),
	TP_ARGS(pte, level),

	TP_FIELDS(
		ctf_integer(__u64, pte, pte)
		ctf_integer(__u32, level, level)
	)
)

LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_set_bit_class,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size),

	TP_FIELDS(
		ctf_integer(__u64, gpa,
			((u64)table_gfn << PAGE_SHIFT) + index * size)
	)
)
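/*
 * The recorded gpa is the guest-physical address of the touched page-table
 * entry: table_gfn shifted left by PAGE_SHIFT gives the base of the
 * page-table page, and index * size is the byte offset of the entry within
 * that page.
 */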

/* We set a pte accessed bit */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
)

/* We set a pte dirty bit */
LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit,

	TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),

	TP_ARGS(table_gfn, index, size)
)

LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_walker_error,
	TP_PROTO(u32 pferr),
	TP_ARGS(pferr),

	TP_FIELDS(
		ctf_integer(__u32, pferr, pferr)
	)
)

LTTNG_TRACEPOINT_EVENT(
	kvm_mmu_get_page,
	TP_PROTO(struct kvm_mmu_page *sp, bool created),
	TP_ARGS(sp, created),

	TP_FIELDS(
		LTTNG_KVM_MMU_PAGE_FIELDS
		ctf_integer(bool, created, created)
	)
)

LTTNG_TRACEPOINT_EVENT_CLASS(kvm_mmu_page_class,

	TP_PROTO(struct kvm_mmu_page *sp),
	TP_ARGS(sp),

	TP_FIELDS(
		LTTNG_KVM_MMU_PAGE_FIELDS
	)
)
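/*
 * Each LTTNG_TRACEPOINT_EVENT_INSTANCE below reuses the field layout of its
 * class (here, the LTTNG_KVM_MMU_PAGE_FIELDS of kvm_mmu_page_class), so only
 * the prototype and argument list are repeated per event.
 */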

LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_sync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)

LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_unsync_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)

LTTNG_TRACEPOINT_EVENT_INSTANCE(kvm_mmu_page_class, kvm_mmu_prepare_zap_page,
	TP_PROTO(struct kvm_mmu_page *sp),

	TP_ARGS(sp)
)

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0))

LTTNG_TRACEPOINT_EVENT_MAP(
	mark_mmio_spte,

	kvm_mmu_mark_mmio_spte,

	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
	TP_ARGS(sptep, gfn, access, gen),

	TP_FIELDS(
		ctf_integer(void *, sptep, sptep)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
		ctf_integer(unsigned int, gen, gen)
	)
)

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */

LTTNG_TRACEPOINT_EVENT_MAP(
	mark_mmio_spte,

	kvm_mmu_mark_mmio_spte,

	TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access),
	TP_ARGS(sptep, gfn, access),

	TP_FIELDS(
		ctf_integer(void *, sptep, sptep)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
	)
)

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,11,0)) */
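/*
 * LTTNG_TRACEPOINT_EVENT_MAP instruments the kernel tracepoint named by its
 * first argument (mark_mmio_spte) but records it under the LTTng event name
 * given as the second argument (kvm_mmu_mark_mmio_spte), keeping the event
 * within the kvm_mmu namespace. The two variants above follow the
 * kernel-side prototype, which gained a generation-number argument in 3.11.
 */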

LTTNG_TRACEPOINT_EVENT_MAP(
	handle_mmio_page_fault,

	kvm_mmu_handle_mmio_page_fault,

	TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
	TP_ARGS(addr, gfn, access),

	TP_FIELDS(
		ctf_integer(u64, addr, addr)
		ctf_integer(gfn_t, gfn, gfn)
		ctf_integer(unsigned, access, access)
	)
)

LTTNG_TRACEPOINT_EVENT_MAP(
	fast_page_fault,

	kvm_mmu_fast_page_fault,

	TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
		 u64 *sptep, u64 old_spte, bool retry),
	TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),

	TP_FIELDS(
		ctf_integer(int, vcpu_id, vcpu->vcpu_id)
		ctf_integer(gva_t, gva, gva)
		ctf_integer(u32, error_code, error_code)
		ctf_integer(u64 *, sptep, sptep)
		ctf_integer(u64, old_spte, old_spte)
		ctf_integer(u64, new_spte, *sptep)
		ctf_integer(bool, retry, retry)
	)
)
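/*
 * new_spte is obtained by dereferencing sptep at the time the event fires,
 * so it records the SPTE value after the fast page fault path has (possibly)
 * updated it, alongside the old_spte value passed in by the caller.
 */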
#endif /* LTTNG_TRACE_KVM_MMU_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../instrumentation/events/lttng-module/arch/x86/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mmutrace
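/*
 * TRACE_INCLUDE_PATH and TRACE_INCLUDE_FILE tell define_trace.h where to find
 * this header when it re-includes it (with TRACE_HEADER_MULTI_READ defined)
 * to expand the event descriptions above into probe code; this is why the
 * guard at the top also admits TRACE_HEADER_MULTI_READ.
 */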

/* This part must be outside protection */
#include "../../../../../../probes/define_trace.h"