tracepoint event instrumentation: use system headers
[lttng-modules.git] instrumentation/events/lttng-module/block.h
#undef TRACE_SYSTEM
#define TRACE_SYSTEM block

#if !defined(LTTNG_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_BLOCK_H

#include <probes/lttng-tracepoint-event.h>
#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <linux/trace_seq.h>
#include <linux/version.h>

#ifndef _TRACE_BLOCK_DEF_
#define _TRACE_BLOCK_DEF_

enum {
	RWBS_FLAG_WRITE		= (1 << 0),
	RWBS_FLAG_DISCARD	= (1 << 1),
	RWBS_FLAG_READ		= (1 << 2),
	RWBS_FLAG_RAHEAD	= (1 << 3),
	RWBS_FLAG_BARRIER	= (1 << 4),
	RWBS_FLAG_SYNC		= (1 << 5),
	RWBS_FLAG_META		= (1 << 6),
	RWBS_FLAG_SECURE	= (1 << 7),
	RWBS_FLAG_FLUSH		= (1 << 8),
	RWBS_FLAG_FUA		= (1 << 9),
};

#endif /* _TRACE_BLOCK_DEF_ */
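
/*
 * Illustration: the "rwbs" field emitted by the events below is a bitmask
 * built from the RWBS_FLAG_* values above, mirroring blktrace's rwbs
 * string.  For example, a synchronous write is recorded as
 * RWBS_FLAG_WRITE | RWBS_FLAG_SYNC = 0x21, and a plain read of a non-zero
 * byte count as RWBS_FLAG_READ = 0x4.  (Illustrative decoding only; which
 * flags can appear depends on the kernel version handled by the
 * blk_rwbs_ctf_integer() variants below.)
 */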

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))

#define blk_rwbs_ctf_integer(type, rwbs, rw, bytes)			\
	ctf_integer(type, rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE :	\
		( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD :		\
		( (bytes) ? RWBS_FLAG_READ :				\
		( 0 ))))						\
		| ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0)		\
		| ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0)		\
		| ((rw) & REQ_META ? RWBS_FLAG_META : 0)		\
		| ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0)		\
		| ((rw) & REQ_FLUSH ? RWBS_FLAG_FLUSH : 0)		\
		| ((rw) & REQ_FUA ? RWBS_FLAG_FUA : 0))

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))

#define blk_rwbs_ctf_integer(type, rwbs, rw, bytes)			\
	ctf_integer(type, rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE :	\
		( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD :		\
		( (bytes) ? RWBS_FLAG_READ :				\
		( 0 ))))						\
		| ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0)		\
		| ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0)		\
		| ((rw) & REQ_META ? RWBS_FLAG_META : 0)		\
		| ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0))

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))

#define blk_rwbs_ctf_integer(type, rwbs, rw, bytes)			\
	ctf_integer(type, rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE :	\
		( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD :		\
		( (bytes) ? RWBS_FLAG_READ :				\
		( 0 ))))						\
		| ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0)		\
		| ((rw) & REQ_HARDBARRIER ? RWBS_FLAG_BARRIER : 0)	\
		| ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0)		\
		| ((rw) & REQ_META ? RWBS_FLAG_META : 0)		\
		| ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0))

#else

#define blk_rwbs_ctf_integer(type, rwbs, rw, bytes)			\
	ctf_integer(type, rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE :	\
		( (rw) & (1 << BIO_RW_DISCARD) ? RWBS_FLAG_DISCARD :	\
		( (bytes) ? RWBS_FLAG_READ :				\
		( 0 ))))						\
		| ((rw) & (1 << BIO_RW_AHEAD) ? RWBS_FLAG_RAHEAD : 0)	\
		| ((rw) & (1 << BIO_RW_SYNCIO) ? RWBS_FLAG_SYNC : 0)	\
		| ((rw) & (1 << BIO_RW_META) ? RWBS_FLAG_META : 0)	\
		| ((rw) & (1 << BIO_RW_BARRIER) ? RWBS_FLAG_BARRIER : 0))

#endif
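
/*
 * Usage illustration: inside a TP_FIELDS() block, the variant selected
 * above expands to a single ctf_integer() field.  For instance, on a
 * >= 3.1 kernel,
 *
 *	blk_rwbs_ctf_integer(unsigned int, rwbs,
 *		rq->cmd_flags, blk_rq_bytes(rq))
 *
 * records an "rwbs" integer whose bits are the RWBS_FLAG_* values
 * matching the request flags, with the write/discard/read decision
 * taken from cmd_flags and the byte count.
 */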

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
LTTNG_TRACEPOINT_EVENT_CLASS(block_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh),

	TP_FIELDS(
		ctf_integer(dev_t, dev, bh->b_bdev->bd_dev)
		ctf_integer(sector_t, sector, bh->b_blocknr)
		ctf_integer(size_t, size, bh->b_size)
	)
)

/**
 * block_touch_buffer - mark a buffer accessed
 * @bh: buffer_head being touched
 *
 * Called from touch_buffer().
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_buffer, block_touch_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh)
)

/**
 * block_dirty_buffer - mark a buffer dirty
 * @bh: buffer_head being dirtied
 *
 * Called from mark_buffer_dirty().
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_buffer, block_dirty_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh)
)
#endif

LTTNG_TRACEPOINT_EVENT_CLASS_CODE(block_rq_with_error,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_locvar(
		sector_t sector;
		unsigned int nr_sector;
		unsigned char *cmd;
		size_t cmd_len;
	),

	TP_code_pre(
		if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
			tp_locvar->sector = 0;
			tp_locvar->nr_sector = 0;
			tp_locvar->cmd = rq->cmd;
			tp_locvar->cmd_len = rq->cmd_len;
		} else {
			tp_locvar->sector = blk_rq_pos(rq);
			tp_locvar->nr_sector = blk_rq_sectors(rq);
			tp_locvar->cmd = NULL;
			tp_locvar->cmd_len = 0;
		}
	),

	TP_FIELDS(
		ctf_integer(dev_t, dev,
			rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
		ctf_integer(sector_t, sector, tp_locvar->sector)
		ctf_integer(unsigned int, nr_sector, tp_locvar->nr_sector)
		ctf_integer(int, errors, rq->errors)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			rq->cmd_flags, blk_rq_bytes(rq))
		ctf_sequence_hex(unsigned char, cmd,
			tp_locvar->cmd, size_t, tp_locvar->cmd_len)
	),

	TP_code_post()
)
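
/*
 * Pattern illustration: in the _CODE event classes, TP_locvar() declares
 * per-event temporaries, TP_code_pre() fills them in before the fields
 * are serialized (here: zeroing sector/nr_sector and capturing the SCSI
 * cmd buffer for REQ_TYPE_BLOCK_PC requests), and TP_FIELDS() then reads
 * them back through tp_locvar->...  TP_code_post() runs after
 * serialization and is empty for these events.
 */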

/**
 * block_rq_abort - abort block operation request
 * @q: queue containing the block operation request
 * @rq: block IO operation request
 *
 * Called immediately after pending block IO operation request @rq in
 * queue @q is aborted. The fields in the operation request @rq
 * can be examined to determine which device and sectors the pending
 * operation would access.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_rq_with_error, block_rq_abort,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
)

/**
 * block_rq_requeue - place block IO request back on a queue
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * The block operation request @rq is being placed back into queue
 * @q. For some reason the request was not completed and needs to be
 * put back in the queue.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_rq_with_error, block_rq_requeue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
)

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,5)		\
	|| LTTNG_KERNEL_RANGE(3,12,21, 3,13,0)			\
	|| LTTNG_KERNEL_RANGE(3,10,41, 3,11,0)			\
	|| LTTNG_KERNEL_RANGE(3,4,91, 3,5,0)			\
	|| LTTNG_KERNEL_RANGE(3,2,58, 3,3,0)			\
	|| LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,28, 3,14,0,0)	\
	|| LTTNG_RHEL_KERNEL_RANGE(3,10,0,7,1, 3,11,0,0,0))

/**
 * block_rq_complete - block IO operation completed by device driver
 * @q: queue containing the block operation request
 * @rq: block operations request
 * @nr_bytes: number of completed bytes
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of the operation request has been completed by the device driver.
 * If @rq->bio is %NULL, then there is absolutely no additional work to
 * do for the request. If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
LTTNG_TRACEPOINT_EVENT_CODE(block_rq_complete,

	TP_PROTO(struct request_queue *q, struct request *rq,
		unsigned int nr_bytes),

	TP_ARGS(q, rq, nr_bytes),

	TP_locvar(
		unsigned char *cmd;
		size_t cmd_len;
	),

	TP_code_pre(
		if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
			tp_locvar->cmd = rq->cmd;
			tp_locvar->cmd_len = rq->cmd_len;
		} else {
			tp_locvar->cmd = NULL;
			tp_locvar->cmd_len = 0;
		}
	),

	TP_FIELDS(
		ctf_integer(dev_t, dev,
			rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
		ctf_integer(sector_t, sector, blk_rq_pos(rq))
		ctf_integer(unsigned int, nr_sector, nr_bytes >> 9)
		ctf_integer(int, errors, rq->errors)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			rq->cmd_flags, nr_bytes)
		ctf_sequence_hex(unsigned char, cmd,
			tp_locvar->cmd, size_t, tp_locvar->cmd_len)
	),

	TP_code_post()
)

#else	/* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,5) || ...) */

/**
 * block_rq_complete - block IO operation completed by device driver
 * @q: queue containing the block operation request
 * @rq: block operations request
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of the operation request has been completed by the device driver.
 * If @rq->bio is %NULL, then there is absolutely no additional work to
 * do for the request. If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_rq_with_error, block_rq_complete,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
)

#endif	/* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,5) || ...) */

LTTNG_TRACEPOINT_EVENT_CLASS_CODE(block_rq,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_locvar(
		sector_t sector;
		unsigned int nr_sector;
		unsigned int bytes;
		unsigned char *cmd;
		size_t cmd_len;
	),

	TP_code_pre(
		if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
			tp_locvar->sector = 0;
			tp_locvar->nr_sector = 0;
			tp_locvar->bytes = blk_rq_bytes(rq);
			tp_locvar->cmd = rq->cmd;
			tp_locvar->cmd_len = rq->cmd_len;
		} else {
			tp_locvar->sector = blk_rq_pos(rq);
			tp_locvar->nr_sector = blk_rq_sectors(rq);
			tp_locvar->bytes = 0;
			tp_locvar->cmd = NULL;
			tp_locvar->cmd_len = 0;
		}
	),

	TP_FIELDS(
		ctf_integer(dev_t, dev,
			rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
		ctf_integer(sector_t, sector, tp_locvar->sector)
		ctf_integer(unsigned int, nr_sector, tp_locvar->nr_sector)
		ctf_integer(unsigned int, bytes, tp_locvar->bytes)
		ctf_integer(pid_t, tid, current->pid)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			rq->cmd_flags, blk_rq_bytes(rq))
		ctf_sequence_hex(unsigned char, cmd,
			tp_locvar->cmd, size_t, tp_locvar->cmd_len)
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
	),

	TP_code_post()
)

/**
 * block_rq_insert - insert block operation request into queue
 * @q: target queue
 * @rq: block IO operation request
 *
 * Called immediately before block operation request @rq is inserted
 * into queue @q. The fields in the operation request @rq struct can
 * be examined to determine which device and sectors the pending
 * operation would access.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_rq, block_rq_insert,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
)

/**
 * block_rq_issue - issue pending block IO request operation to device driver
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * Called when block operation request @rq from queue @q is sent to a
 * device driver for processing.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_rq, block_rq_issue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
)

/**
 * block_bio_bounce - used bounce buffer when processing block operation
 * @q: queue holding the block operation
 * @bio: block operation
 *
 * A bounce buffer was used to handle the block operation @bio in @q.
 * This occurs when hardware limitations prevent a direct transfer of
 * data between the @bio data memory area and the IO device. Use of a
 * bounce buffer requires extra copying of data and decreases
 * performance.
 */
LTTNG_TRACEPOINT_EVENT(block_bio_bounce,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_FIELDS(
		ctf_integer(dev_t, dev, bio->bi_bdev ? bio->bi_bdev->bd_dev : 0)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
		ctf_integer(sector_t, sector, bio->bi_iter.bi_sector)
		ctf_integer(unsigned int, nr_sector, bio_sectors(bio))
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			bio->bi_rw, bio->bi_iter.bi_size)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(sector_t, sector, bio->bi_sector)
		ctf_integer(unsigned int, nr_sector, bio->bi_size >> 9)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			bio->bi_rw, bio->bi_size)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(pid_t, tid, current->pid)
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
	)
)
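
/*
 * Note on the 3.14 split used throughout this file: starting with
 * Linux 3.14, struct bio keeps its current sector and remaining size in
 * bio->bi_iter (bi_iter.bi_sector / bi_iter.bi_size) rather than directly
 * in bio->bi_sector / bio->bi_size, hence the paired
 * LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) branches in the
 * TP_FIELDS() blocks above and below.
 */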

/**
 * block_bio_complete - completed all work on the block operation
 * @q: queue holding the block operation
 * @bio: block operation completed
 * @error: io error value
 *
 * This tracepoint indicates there is no further work to do on this
 * block IO operation @bio.
 */
LTTNG_TRACEPOINT_EVENT(block_bio_complete,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
	TP_PROTO(struct request_queue *q, struct bio *bio, int error),

	TP_ARGS(q, bio, error),
#else
	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),
#endif

	TP_FIELDS(
		ctf_integer(dev_t, dev, bio->bi_bdev->bd_dev)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
		ctf_integer(sector_t, sector, bio->bi_iter.bi_sector)
		ctf_integer(unsigned int, nr_sector, bio_sectors(bio))
		ctf_integer(int, error, error)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			bio->bi_rw, bio->bi_iter.bi_size)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(sector_t, sector, bio->bi_sector)
		ctf_integer(unsigned int, nr_sector, bio->bi_size >> 9)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
		ctf_integer(int, error, error)
#else
		ctf_integer(int, error, 0)
#endif
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			bio->bi_rw, bio->bi_size)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
	)
)

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
LTTNG_TRACEPOINT_EVENT_CLASS(block_bio_merge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio),

	TP_FIELDS(
		ctf_integer(dev_t, dev, bio->bi_bdev->bd_dev)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
		ctf_integer(sector_t, sector, bio->bi_iter.bi_sector)
		ctf_integer(unsigned int, nr_sector, bio_sectors(bio))
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			bio->bi_rw, bio->bi_iter.bi_size)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(sector_t, sector, bio->bi_sector)
		ctf_integer(unsigned int, nr_sector, bio->bi_size >> 9)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			bio->bi_rw, bio->bi_size)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(pid_t, tid, current->pid)
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
	)
)

/**
 * block_bio_backmerge - merging block operation to the end of an existing operation
 * @q: queue holding operation
 * @bio: new block operation to merge
 *
 * Merging block request @bio to the end of an existing block request
 * in queue @q.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_bio_merge, block_bio_backmerge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio)
)

/**
 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
 * @q: queue holding operation
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the beginning of an existing block
 * operation in queue @q.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_bio_merge, block_bio_frontmerge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio)
)

/**
 * block_bio_queue - putting new block IO operation in queue
 * @q: queue holding operation
 * @bio: new block operation
 *
 * About to place the block IO operation @bio into queue @q.
 */
LTTNG_TRACEPOINT_EVENT(block_bio_queue,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_FIELDS(
		ctf_integer(dev_t, dev, bio->bi_bdev->bd_dev)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
		ctf_integer(sector_t, sector, bio->bi_iter.bi_sector)
		ctf_integer(unsigned int, nr_sector, bio_sectors(bio))
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			bio->bi_rw, bio->bi_iter.bi_size)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(sector_t, sector, bio->bi_sector)
		ctf_integer(unsigned int, nr_sector, bio->bi_size >> 9)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			bio->bi_rw, bio->bi_size)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(pid_t, tid, current->pid)
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
	)
)
#else /* if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) */
LTTNG_TRACEPOINT_EVENT_CLASS(block_bio,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_FIELDS(
		ctf_integer(dev_t, dev, bio->bi_bdev ? bio->bi_bdev->bd_dev : 0)
		ctf_integer(sector_t, sector, bio->bi_sector)
		ctf_integer(unsigned int, nr_sector, bio->bi_size >> 9)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			bio->bi_rw, bio->bi_size)
		ctf_integer(pid_t, tid, current->pid)
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
	)
)

/**
 * block_bio_backmerge - merging block operation to the end of an existing operation
 * @q: queue holding operation
 * @bio: new block operation to merge
 *
 * Merging block request @bio to the end of an existing block request
 * in queue @q.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_bio, block_bio_backmerge,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio)
)

/**
 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
 * @q: queue holding operation
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the beginning of an existing block
 * operation in queue @q.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_bio, block_bio_frontmerge,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio)
)

/**
 * block_bio_queue - putting new block IO operation in queue
 * @q: queue holding operation
 * @bio: new block operation
 *
 * About to place the block IO operation @bio into queue @q.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_bio, block_bio_queue,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio)
)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) */

LTTNG_TRACEPOINT_EVENT_CLASS(block_get_rq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw),

	TP_FIELDS(
		ctf_integer(dev_t, dev, bio ? bio->bi_bdev->bd_dev : 0)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
		ctf_integer(sector_t, sector, bio ? bio->bi_iter.bi_sector : 0)
		ctf_integer(unsigned int, nr_sector,
			bio ? bio_sectors(bio) : 0)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			bio ? bio->bi_rw : 0,
			bio ? bio->bi_iter.bi_size : 0)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(sector_t, sector, bio ? bio->bi_sector : 0)
		ctf_integer(unsigned int, nr_sector,
			bio ? bio->bi_size >> 9 : 0)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			bio ? bio->bi_rw : 0,
			bio ? bio->bi_size : 0)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(pid_t, tid, current->pid)
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
	)
)

/**
 * block_getrq - get a free request entry in queue for block IO operations
 * @q: queue for operations
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * A request struct for queue @q has been allocated to handle the
 * block IO operation @bio.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_get_rq, block_getrq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
)

/**
 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
 * @q: queue for operation
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * In the case where a request struct cannot be provided for queue @q,
 * the process needs to wait for a request struct to become
 * available. This tracepoint event is generated each time the
 * process goes to sleep waiting for a request struct to become
 * available.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_get_rq, block_sleeprq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
)

/**
 * block_plug - keep operations requests in request queue
 * @q: request queue to plug
 *
 * Plug the request queue @q. Do not allow block operation requests
 * to be sent to the device driver. Instead, accumulate requests in
 * the queue to improve throughput performance of the block device.
 */
LTTNG_TRACEPOINT_EVENT(block_plug,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q),

	TP_FIELDS(
		ctf_integer(pid_t, tid, current->pid)
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
	)
)

LTTNG_TRACEPOINT_EVENT_CLASS(block_unplug,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit),
#else
	TP_PROTO(struct request_queue *q),

	TP_ARGS(q),
#endif

	TP_FIELDS(
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
		ctf_integer(int, nr_rq, depth)
#else
		ctf_integer(int, nr_rq, q->rq.count[READ] + q->rq.count[WRITE])
#endif
		ctf_integer(pid_t, tid, current->pid)
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
	)
)

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
/**
 * block_unplug_timer - timed release of operations requests in queue to device driver
 * @q: request queue to unplug
 *
 * Unplug the request queue @q because a timer expired and allow block
 * operation requests to be sent to the device driver.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_unplug, block_unplug_timer,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q)
)
#endif

/**
 * block_unplug - release of operations requests in request queue
 * @q: request queue to unplug
 * @depth: number of requests just added to the queue
 * @explicit: whether this was an explicit unplug, or one from schedule()
 *
 * Unplug request queue @q because device driver is scheduled to work
 * on elements in the request queue.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_unplug, block_unplug,
#else
LTTNG_TRACEPOINT_EVENT_INSTANCE(block_unplug, block_unplug_io,
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit)
#else
	TP_PROTO(struct request_queue *q),

	TP_ARGS(q)
#endif
)
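
/*
 * Background note: Linux 2.6.39 reworked queue plugging around per-task
 * plug lists, which is why the unplug class above takes an explicit
 * depth (and an "explicit" flag) on newer kernels, while older kernels
 * expose separate block_unplug_timer/block_unplug_io events and derive
 * nr_rq from the queue's read/write request counts.
 */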

/**
 * block_split - split a single bio struct into two bio structs
 * @q: queue containing the bio
 * @bio: block operation being split
 * @new_sector: The starting sector for the new bio
 *
 * The bio request @bio in request queue @q needs to be split into two
 * bio requests. The newly created @bio request starts at
 * @new_sector. This split may be required due to hardware limitations
 * such as operations crossing device boundaries in a RAID system.
 */
LTTNG_TRACEPOINT_EVENT(block_split,

	TP_PROTO(struct request_queue *q, struct bio *bio,
		unsigned int new_sector),

	TP_ARGS(q, bio, new_sector),

	TP_FIELDS(
		ctf_integer(dev_t, dev, bio->bi_bdev->bd_dev)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
		ctf_integer(sector_t, sector, bio->bi_iter.bi_sector)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			bio->bi_rw, bio->bi_iter.bi_size)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(sector_t, sector, bio->bi_sector)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			bio->bi_rw, bio->bi_size)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(sector_t, new_sector, new_sector)
		ctf_integer(pid_t, tid, current->pid)
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
	)
)

/**
 * block_bio_remap - map request for a logical device to the raw device
 * @q: queue holding the operation
 * @bio: revised operation
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * An operation for a logical device has been mapped to the
 * raw block device.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
LTTNG_TRACEPOINT_EVENT(block_bio_remap,
#else
LTTNG_TRACEPOINT_EVENT(block_remap,
#endif

	TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
		sector_t from),

	TP_ARGS(q, bio, dev, from),

	TP_FIELDS(
		ctf_integer(dev_t, dev, bio->bi_bdev->bd_dev)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
		ctf_integer(sector_t, sector, bio->bi_iter.bi_sector)
		ctf_integer(unsigned int, nr_sector, bio_sectors(bio))
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			bio->bi_rw, bio->bi_iter.bi_size)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(sector_t, sector, bio->bi_sector)
		ctf_integer(unsigned int, nr_sector, bio->bi_size >> 9)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			bio->bi_rw, bio->bi_size)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		ctf_integer(dev_t, old_dev, dev)
		ctf_integer(sector_t, old_sector, from)
	)
)

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
/**
 * block_rq_remap - map request for a block operation request
 * @q: queue holding the operation
 * @rq: block IO operation request
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * The block operation request @rq in @q has been remapped. The block
 * operation request @rq holds the current information and @from holds
 * the original sector.
 */
LTTNG_TRACEPOINT_EVENT(block_rq_remap,

	TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
		sector_t from),

	TP_ARGS(q, rq, dev, from),

	TP_FIELDS(
		ctf_integer(dev_t, dev, disk_devt(rq->rq_disk))
		ctf_integer(sector_t, sector, blk_rq_pos(rq))
		ctf_integer(unsigned int, nr_sector, blk_rq_sectors(rq))
		ctf_integer(dev_t, old_dev, dev)
		ctf_integer(sector_t, old_sector, from)
		blk_rwbs_ctf_integer(unsigned int, rwbs,
			rq->cmd_flags, blk_rq_bytes(rq))
	)
)
#endif

#undef __print_rwbs_flags
#undef blk_fill_rwbs

#endif /* LTTNG_TRACE_BLOCK_H */

/* This part must be outside protection */
#include <probes/define_trace.h>
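
/*
 * Consumption sketch (assumed layout, following the usual lttng-modules
 * probe convention rather than anything stated in this header): a probe
 * source such as probes/lttng-probe-block.c would instantiate these
 * events roughly like so:
 *
 *	#include <linux/module.h>
 *	#include <linux/blkdev.h>
 *	#include <lttng-tracer.h>
 *
 *	#define LTTNG_PACKAGE_BUILD
 *	#define CREATE_TRACE_POINTS
 *	#define TRACE_INCLUDE_PATH instrumentation/events/lttng-module
 *
 *	#include <instrumentation/events/lttng-module/block.h>
 *
 * TRACE_HEADER_MULTI_READ (tested at the top of this file) lets
 * <probes/define_trace.h> re-include the header several times to
 * generate the probe callbacks and event descriptions.
 */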