Fix: blktrace instrumentation for backported branches
[lttng-modules.git] / instrumentation / events / lttng-module / block.h
#undef TRACE_SYSTEM
#define TRACE_SYSTEM block

#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BLOCK_H

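/*
 * Like the upstream tracepoint headers, this file is re-read with
 * TRACE_HEADER_MULTI_READ defined: define_trace.h (included at the bottom)
 * expands the TRACE_EVENT()/DECLARE_EVENT_CLASS() macros below several
 * times to generate the event declarations, probes and metadata.
 */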
#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
#include <linux/version.h>
#include "../../../lttng-kernel-version.h"	/* LTTNG_KERNEL_RANGE() */

#define RWBS_LEN	8

#ifndef _TRACE_BLOCK_DEF_
#define _TRACE_BLOCK_DEF_

#define __blk_dump_cmd(cmd, len)	"<unknown>"

enum {
	RWBS_FLAG_WRITE		= (1 << 0),
	RWBS_FLAG_DISCARD	= (1 << 1),
	RWBS_FLAG_READ		= (1 << 2),
	RWBS_FLAG_RAHEAD	= (1 << 3),
	RWBS_FLAG_BARRIER	= (1 << 4),
	RWBS_FLAG_SYNC		= (1 << 5),
	RWBS_FLAG_META		= (1 << 6),
	RWBS_FLAG_SECURE	= (1 << 7),
	RWBS_FLAG_FLUSH		= (1 << 8),
	RWBS_FLAG_FUA		= (1 << 9),
};

#endif /* _TRACE_BLOCK_DEF_ */

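/*
 * The request/bio flags are recorded in the event as this compact
 * RWBS_FLAG_* bitmask and are only turned into the familiar blktrace
 * letter string when the trace is read back, via __print_rwbs_flags()
 * below. For example, a synchronous write with FUA set is recorded as
 * RWBS_FLAG_WRITE | RWBS_FLAG_FUA | RWBS_FLAG_SYNC and prints as "WFS".
 */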
#define __print_rwbs_flags(rwbs)	\
	__print_flags(rwbs, "",		\
		{ RWBS_FLAG_FLUSH, "F" },	\
		{ RWBS_FLAG_WRITE, "W" },	\
		{ RWBS_FLAG_DISCARD, "D" },	\
		{ RWBS_FLAG_READ, "R" },	\
		{ RWBS_FLAG_FUA, "F" },		\
		{ RWBS_FLAG_RAHEAD, "A" },	\
		{ RWBS_FLAG_BARRIER, "B" },	\
		{ RWBS_FLAG_SYNC, "S" },	\
		{ RWBS_FLAG_META, "M" },	\
		{ RWBS_FLAG_SECURE, "E" })

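/*
 * blk_fill_rwbs() fills the rwbs field at trace time. The request flag
 * names changed several times upstream, so the definition is selected by
 * kernel version: the 3.1+ variant also records REQ_FLUSH/REQ_FUA,
 * 2.6.36 still has REQ_HARDBARRIER, and kernels older than 2.6.36 use the
 * BIO_RW_* bit numbers instead of REQ_* flags.
 */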
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))

#define blk_fill_rwbs(rwbs, rw, bytes)					\
	tp_assign(rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE :		\
		( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD :		\
		( (bytes) ? RWBS_FLAG_READ :				\
		( 0 ))))						\
		| ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0)		\
		| ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0)		\
		| ((rw) & REQ_META ? RWBS_FLAG_META : 0)		\
		| ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0)		\
		| ((rw) & REQ_FLUSH ? RWBS_FLAG_FLUSH : 0)		\
		| ((rw) & REQ_FUA ? RWBS_FLAG_FUA : 0))

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))

#define blk_fill_rwbs(rwbs, rw, bytes)					\
	tp_assign(rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE :		\
		( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD :		\
		( (bytes) ? RWBS_FLAG_READ :				\
		( 0 ))))						\
		| ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0)		\
		| ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0)		\
		| ((rw) & REQ_META ? RWBS_FLAG_META : 0)		\
		| ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0))

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))

#define blk_fill_rwbs(rwbs, rw, bytes)					\
	tp_assign(rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE :		\
		( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD :		\
		( (bytes) ? RWBS_FLAG_READ :				\
		( 0 ))))						\
		| ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0)		\
		| ((rw) & REQ_HARDBARRIER ? RWBS_FLAG_BARRIER : 0)	\
		| ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0)		\
		| ((rw) & REQ_META ? RWBS_FLAG_META : 0)		\
		| ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0))

#else

#define blk_fill_rwbs(rwbs, rw, bytes)					\
	tp_assign(rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE :		\
		( (rw) & (1 << BIO_RW_DISCARD) ? RWBS_FLAG_DISCARD :	\
		( (bytes) ? RWBS_FLAG_READ :				\
		( 0 ))))						\
		| ((rw) & (1 << BIO_RW_AHEAD) ? RWBS_FLAG_RAHEAD : 0)	\
		| ((rw) & (1 << BIO_RW_SYNCIO) ? RWBS_FLAG_SYNC : 0)	\
		| ((rw) & (1 << BIO_RW_META) ? RWBS_FLAG_META : 0)	\
		| ((rw) & (1 << BIO_RW_BARRIER) ? RWBS_FLAG_BARRIER : 0))

#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
DECLARE_EVENT_CLASS(block_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( size_t, size )
	),

	TP_fast_assign(
		tp_assign(dev, bh->b_bdev->bd_dev)
		tp_assign(sector, bh->b_blocknr)
		tp_assign(size, bh->b_size)
	),

	TP_printk("%d,%d sector=%llu size=%zu",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		(unsigned long long)__entry->sector, __entry->size
	)
)

/**
 * block_touch_buffer - mark a buffer accessed
 * @bh: buffer_head being touched
 *
 * Called from touch_buffer().
 */
DEFINE_EVENT(block_buffer, block_touch_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh)
)

/**
 * block_dirty_buffer - mark a buffer dirty
 * @bh: buffer_head being dirtied
 *
 * Called from mark_buffer_dirty().
 */
DEFINE_EVENT(block_buffer, block_dirty_buffer,

	TP_PROTO(struct buffer_head *bh),

	TP_ARGS(bh)
)
#endif

DECLARE_EVENT_CLASS(block_rq_with_error,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( int, errors )
		__field( unsigned int, rwbs )
		__dynamic_array_hex( unsigned char, cmd,
			(rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
				rq->cmd_len : 0)
	),

	TP_fast_assign(
		tp_assign(dev, rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
		tp_assign(sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_pos(rq))
		tp_assign(nr_sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_sectors(rq))
		tp_assign(errors, rq->errors)
		blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq))
		tp_memcpy_dyn(cmd, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					rq->cmd : NULL)
	),

	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__print_rwbs_flags(__entry->rwbs),
		__blk_dump_cmd(__get_dynamic_array(cmd),
			__get_dynamic_array_len(cmd)),
		(unsigned long long)__entry->sector,
		__entry->nr_sector, __entry->errors)
)

/**
 * block_rq_abort - abort block operation request
 * @q: queue containing the block operation request
 * @rq: block IO operation request
 *
 * Called immediately after a pending block IO operation request @rq in
 * queue @q is aborted. The fields in the operation request @rq
 * can be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_abort,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
)

/**
 * block_rq_requeue - place block IO request back on a queue
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * The block operation request @rq is being placed back into queue
 * @q. For some reason the request was not completed and needs to be
 * put back in the queue.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_requeue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
)

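/*
 * The nr_bytes argument was added to the block_rq_complete tracepoint in
 * 3.15 and backported to several stable branches; the check below matches
 * 3.14.5+ as well as the stable ranges that carry the backport.
 */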
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,5)	\
	|| LTTNG_KERNEL_RANGE(3,12,21, 3,13,0)		\
	|| LTTNG_KERNEL_RANGE(3,10,41, 3,11,0)		\
	|| LTTNG_KERNEL_RANGE(3,4,91, 3,5,1)		\
	|| LTTNG_KERNEL_RANGE(3,2,58, 3,3,0))

/**
 * block_rq_complete - block IO operation completed by device driver
 * @q: queue containing the block operation request
 * @rq: block operations request
 * @nr_bytes: number of completed bytes
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of the operation request has been completed by the device driver. If
 * @rq->bio is %NULL, then there is absolutely no additional work to
 * do for the request. If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
TRACE_EVENT(block_rq_complete,

	TP_PROTO(struct request_queue *q, struct request *rq,
		unsigned int nr_bytes),

	TP_ARGS(q, rq, nr_bytes),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( int, errors )
		__field( unsigned int, rwbs )
		__dynamic_array_hex( unsigned char, cmd,
			(rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
				rq->cmd_len : 0)
	),

	TP_fast_assign(
		tp_assign(dev, rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
		tp_assign(sector, blk_rq_pos(rq))
		tp_assign(nr_sector, nr_bytes >> 9)
		tp_assign(errors, rq->errors)
		blk_fill_rwbs(rwbs, rq->cmd_flags, nr_bytes)
		tp_memcpy_dyn(cmd, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					rq->cmd : NULL)
	),

	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__print_rwbs_flags(__entry->rwbs),
		__blk_dump_cmd(__get_dynamic_array(cmd),
			__get_dynamic_array_len(cmd)),
		(unsigned long long)__entry->sector,
		__entry->nr_sector, __entry->errors)
)

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,5) || backported ranges) */

/**
 * block_rq_complete - block IO operation completed by device driver
 * @q: queue containing the block operation request
 * @rq: block operations request
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of the operation request has been completed by the device driver. If
 * @rq->bio is %NULL, then there is absolutely no additional work to
 * do for the request. If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_complete,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
)

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,5) || backported ranges) */

DECLARE_EVENT_CLASS(block_rq,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( unsigned int, bytes )
		__field( unsigned int, rwbs )
		__array_text( char, comm, TASK_COMM_LEN )
		__dynamic_array_hex( unsigned char, cmd,
			(rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
				rq->cmd_len : 0)
	),

	TP_fast_assign(
		tp_assign(dev, rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
		tp_assign(sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_pos(rq))
		tp_assign(nr_sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_sectors(rq))
		tp_assign(bytes, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					blk_rq_bytes(rq) : 0)
		blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq))
		tp_memcpy_dyn(cmd, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					rq->cmd : NULL)
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
	),

	TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__print_rwbs_flags(__entry->rwbs),
		__entry->bytes,
		__blk_dump_cmd(__get_dynamic_array(cmd),
			__get_dynamic_array_len(cmd)),
		(unsigned long long)__entry->sector,
		__entry->nr_sector, __entry->comm)
)

/**
 * block_rq_insert - insert block operation request into queue
 * @q: target queue
 * @rq: block IO operation request
 *
 * Called immediately before block operation request @rq is inserted
 * into queue @q. The fields in the operation request @rq struct can
 * be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq, block_rq_insert,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
)

/**
 * block_rq_issue - issue pending block IO request operation to device driver
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * Called when block operation request @rq from queue @q is sent to a
 * device driver for processing.
 */
DEFINE_EVENT(block_rq, block_rq_issue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
)

/**
 * block_bio_bounce - used bounce buffer when processing block operation
 * @q: queue holding the block operation
 * @bio: block operation
 *
 * A bounce buffer was used to handle the block operation @bio in @q.
 * This occurs when hardware limitations prevent a direct transfer of
 * data between the @bio data memory area and the IO device. Use of a
 * bounce buffer requires extra copying of data and decreases
 * performance.
 */
TRACE_EVENT(block_bio_bounce,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( unsigned int, rwbs )
		__array_text( char, comm, TASK_COMM_LEN )
	),

	TP_fast_assign(
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
		tp_assign(dev, bio->bi_bdev ?
					bio->bi_bdev->bd_dev : 0)
		tp_assign(sector, bio->bi_iter.bi_sector)
		tp_assign(nr_sector, bio_sectors(bio))
		blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_iter.bi_size)
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		tp_assign(dev, bio->bi_bdev ?
					bio->bi_bdev->bd_dev : 0)
		tp_assign(sector, bio->bi_sector)
		tp_assign(nr_sector, bio->bi_size >> 9)
		blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__print_rwbs_flags(__entry->rwbs),
		(unsigned long long)__entry->sector,
		__entry->nr_sector, __entry->comm)
)

/**
 * block_bio_complete - completed all work on the block operation
 * @q: queue holding the block operation
 * @bio: block operation completed
 * @error: io error value
 *
 * This tracepoint indicates there is no further work to do on this
 * block IO operation @bio.
 */
TRACE_EVENT(block_bio_complete,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
	TP_PROTO(struct request_queue *q, struct bio *bio, int error),

	TP_ARGS(q, bio, error),
#else
	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),
#endif

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned, nr_sector )
		__field( int, error )
		__field( unsigned int, rwbs )
	),

	TP_fast_assign(
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
		tp_assign(dev, bio->bi_bdev->bd_dev)
		tp_assign(sector, bio->bi_iter.bi_sector)
		tp_assign(nr_sector, bio_sectors(bio))
		tp_assign(error, error)
		blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_iter.bi_size)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		tp_assign(dev, bio->bi_bdev->bd_dev)
		tp_assign(sector, bio->bi_sector)
		tp_assign(nr_sector, bio->bi_size >> 9)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
		tp_assign(error, error)
#else
		tp_assign(error, 0)
#endif
		blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
	),

	TP_printk("%d,%d %s %llu + %u [%d]",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__print_rwbs_flags(__entry->rwbs),
		(unsigned long long)__entry->sector,
		__entry->nr_sector, __entry->error)
)

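/*
 * Since 3.9 the bio merge tracepoints also receive the request being
 * merged into, so a dedicated block_bio_merge class is used there;
 * older kernels share the plain block_bio class in the #else branch.
 */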
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
DECLARE_EVENT_CLASS(block_bio_merge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( unsigned int, rwbs )
		__array_text( char, comm, TASK_COMM_LEN )
	),

	TP_fast_assign(
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
		tp_assign(dev, bio->bi_bdev->bd_dev)
		tp_assign(sector, bio->bi_iter.bi_sector)
		tp_assign(nr_sector, bio_sectors(bio))
		blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_iter.bi_size)
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		tp_assign(dev, bio->bi_bdev->bd_dev)
		tp_assign(sector, bio->bi_sector)
		tp_assign(nr_sector, bio->bi_size >> 9)
		blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__print_rwbs_flags(__entry->rwbs),
		(unsigned long long)__entry->sector,
		__entry->nr_sector, __entry->comm)
)

/**
 * block_bio_backmerge - merging block operation to the end of an existing operation
 * @q: queue holding operation
 * @bio: new block operation to merge
 *
 * Merging block request @bio to the end of an existing block request
 * in queue @q.
 */
DEFINE_EVENT(block_bio_merge, block_bio_backmerge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio)
)

/**
 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
 * @q: queue holding operation
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the beginning of an existing block
 * operation in queue @q.
 */
DEFINE_EVENT(block_bio_merge, block_bio_frontmerge,

	TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),

	TP_ARGS(q, rq, bio)
)

/**
 * block_bio_queue - putting new block IO operation in queue
 * @q: queue holding operation
 * @bio: new block operation
 *
 * About to place the block IO operation @bio into queue @q.
 */
TRACE_EVENT(block_bio_queue,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( unsigned int, rwbs )
		__array_text( char, comm, TASK_COMM_LEN )
	),

	TP_fast_assign(
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
		tp_assign(dev, bio->bi_bdev->bd_dev)
		tp_assign(sector, bio->bi_iter.bi_sector)
		tp_assign(nr_sector, bio_sectors(bio))
		blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_iter.bi_size)
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		tp_assign(dev, bio->bi_bdev->bd_dev)
		tp_assign(sector, bio->bi_sector)
		tp_assign(nr_sector, bio->bi_size >> 9)
		blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__print_rwbs_flags(__entry->rwbs),
		(unsigned long long)__entry->sector,
		__entry->nr_sector, __entry->comm)
)
#else
DECLARE_EVENT_CLASS(block_bio,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( unsigned int, rwbs )
		__array_text( char, comm, TASK_COMM_LEN )
	),

	TP_fast_assign(
		tp_assign(dev, bio->bi_bdev ? bio->bi_bdev->bd_dev : 0)
		tp_assign(sector, bio->bi_sector)
		tp_assign(nr_sector, bio->bi_size >> 9)
		blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__print_rwbs_flags(__entry->rwbs),
		(unsigned long long)__entry->sector,
		__entry->nr_sector, __entry->comm)
)

/**
 * block_bio_backmerge - merging block operation to the end of an existing operation
 * @q: queue holding operation
 * @bio: new block operation to merge
 *
 * Merging block request @bio to the end of an existing block request
 * in queue @q.
 */
DEFINE_EVENT(block_bio, block_bio_backmerge,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio)
)

/**
 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
 * @q: queue holding operation
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the beginning of an existing block
 * operation in queue @q.
 */
DEFINE_EVENT(block_bio, block_bio_frontmerge,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio)
)

/**
 * block_bio_queue - putting new block IO operation in queue
 * @q: queue holding operation
 * @bio: new block operation
 *
 * About to place the block IO operation @bio into queue @q.
 */
DEFINE_EVENT(block_bio, block_bio_queue,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio)
)
#endif

DECLARE_EVENT_CLASS(block_get_rq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( unsigned int, rwbs )
		__array_text( char, comm, TASK_COMM_LEN )
	),

	TP_fast_assign(
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
		tp_assign(dev, bio ? bio->bi_bdev->bd_dev : 0)
		tp_assign(sector, bio ? bio->bi_iter.bi_sector : 0)
		tp_assign(nr_sector, bio ? bio_sectors(bio) : 0)
		blk_fill_rwbs(rwbs, bio ? bio->bi_rw : 0,
			bio ? bio_sectors(bio) : 0)
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		tp_assign(dev, bio ? bio->bi_bdev->bd_dev : 0)
		tp_assign(sector, bio ? bio->bi_sector : 0)
		tp_assign(nr_sector, bio ? bio->bi_size >> 9 : 0)
		blk_fill_rwbs(rwbs, bio ? bio->bi_rw : 0,
			bio ? bio->bi_size >> 9 : 0)
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__print_rwbs_flags(__entry->rwbs),
		(unsigned long long)__entry->sector,
		__entry->nr_sector, __entry->comm)
)

/**
 * block_getrq - get a free request entry in queue for block IO operations
 * @q: queue for operations
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * A request struct for queue @q has been allocated to handle the
 * block IO operation @bio.
 */
DEFINE_EVENT(block_get_rq, block_getrq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
)

/**
 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
 * @q: queue for operation
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * In the case where a request struct cannot be provided for queue @q
 * the process needs to wait for a request struct to become
 * available. This tracepoint event is generated each time the
 * process goes to sleep waiting for a request struct to become available.
 */
DEFINE_EVENT(block_get_rq, block_sleeprq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
)

/**
 * block_plug - keep operation requests in the request queue
 * @q: request queue to plug
 *
 * Plug the request queue @q. Do not allow block operation requests
 * to be sent to the device driver. Instead, accumulate requests in
 * the queue to improve throughput performance of the block device.
 */
TRACE_EVENT(block_plug,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
	),

	TP_fast_assign(
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
	),

	TP_printk("[%s]", __entry->comm)
)

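/*
 * Before 2.6.39 the kernel had separate block_unplug_timer and
 * block_unplug_io tracepoints and did not pass the queue depth, so the
 * older prototype below reads the pending request count directly from
 * the queue; 2.6.39+ passes the depth and whether the unplug was explicit.
 */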
DECLARE_EVENT_CLASS(block_unplug,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit),
#else
	TP_PROTO(struct request_queue *q),

	TP_ARGS(q),
#endif

	TP_STRUCT__entry(
		__field( int, nr_rq )
		__array_text( char, comm, TASK_COMM_LEN )
	),

	TP_fast_assign(
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
		tp_assign(nr_rq, depth)
#else
		tp_assign(nr_rq, q->rq.count[READ] + q->rq.count[WRITE])
#endif
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
	),

	TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
)

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
/**
 * block_unplug_timer - timed release of operations requests in queue to device driver
 * @q: request queue to unplug
 *
 * Unplug the request queue @q because a timer expired and allow block
 * operation requests to be sent to the device driver.
 */
DEFINE_EVENT(block_unplug, block_unplug_timer,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q)
)
#endif

/**
 * block_unplug - release of operations requests in request queue
 * @q: request queue to unplug
 * @depth: number of requests just added to the queue
 * @explicit: whether this was an explicit unplug, or one from schedule()
 *
 * Unplug request queue @q because the device driver is scheduled to work
 * on elements in the request queue.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
DEFINE_EVENT(block_unplug, block_unplug,
#else
DEFINE_EVENT(block_unplug, block_unplug_io,
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit)
#else
	TP_PROTO(struct request_queue *q),

	TP_ARGS(q)
#endif
)

/**
 * block_split - split a single bio struct into two bio structs
 * @q: queue containing the bio
 * @bio: block operation being split
 * @new_sector: The starting sector for the new bio
 *
 * The bio request @bio in request queue @q needs to be split into two
 * bio requests. The newly created @bio request starts at
 * @new_sector. This split may be required due to hardware limitations
 * such as operations crossing device boundaries in a RAID system.
 */
TRACE_EVENT(block_split,

	TP_PROTO(struct request_queue *q, struct bio *bio,
		unsigned int new_sector),

	TP_ARGS(q, bio, new_sector),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( sector_t, new_sector )
		__field( unsigned int, rwbs )
		__array_text( char, comm, TASK_COMM_LEN )
	),

	TP_fast_assign(
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
		tp_assign(dev, bio->bi_bdev->bd_dev)
		tp_assign(sector, bio->bi_iter.bi_sector)
		tp_assign(new_sector, new_sector)
		blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_iter.bi_size)
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		tp_assign(dev, bio->bi_bdev->bd_dev)
		tp_assign(sector, bio->bi_sector)
		tp_assign(new_sector, new_sector)
		blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
	),

	TP_printk("%d,%d %s %llu / %llu [%s]",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__print_rwbs_flags(__entry->rwbs),
		(unsigned long long)__entry->sector,
		(unsigned long long)__entry->new_sector,
		__entry->comm)
)

/**
 * block_bio_remap - map request for a logical device to the raw device
 * @q: queue holding the operation
 * @bio: revised operation
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * An operation for a logical device has been mapped to the
 * raw block device.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
TRACE_EVENT(block_bio_remap,
#else
TRACE_EVENT(block_remap,
#endif

	TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
		sector_t from),

	TP_ARGS(q, bio, dev, from),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( dev_t, old_dev )
		__field( sector_t, old_sector )
		__field( unsigned int, rwbs )
	),

	TP_fast_assign(
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
		tp_assign(dev, bio->bi_bdev->bd_dev)
		tp_assign(sector, bio->bi_iter.bi_sector)
		tp_assign(nr_sector, bio_sectors(bio))
		tp_assign(old_dev, dev)
		tp_assign(old_sector, from)
		blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_iter.bi_size)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
		tp_assign(dev, bio->bi_bdev->bd_dev)
		tp_assign(sector, bio->bi_sector)
		tp_assign(nr_sector, bio->bi_size >> 9)
		tp_assign(old_dev, dev)
		tp_assign(old_sector, from)
		blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__print_rwbs_flags(__entry->rwbs),
		(unsigned long long)__entry->sector,
		__entry->nr_sector,
		MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		(unsigned long long)__entry->old_sector)
)

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
/**
 * block_rq_remap - map request for a block operation request
 * @q: queue holding the operation
 * @rq: block IO operation request
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * The block operation request @rq in @q has been remapped. The block
 * operation request @rq holds the current information and @from holds
 * the original sector.
 */
TRACE_EVENT(block_rq_remap,

	TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
		sector_t from),

	TP_ARGS(q, rq, dev, from),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( dev_t, old_dev )
		__field( sector_t, old_sector )
		__field( unsigned int, rwbs )
	),

	TP_fast_assign(
		tp_assign(dev, disk_devt(rq->rq_disk))
		tp_assign(sector, blk_rq_pos(rq))
		tp_assign(nr_sector, blk_rq_sectors(rq))
		tp_assign(old_dev, dev)
		tp_assign(old_sector, from)
		blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq))
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
		MAJOR(__entry->dev), MINOR(__entry->dev),
		__print_rwbs_flags(__entry->rwbs),
		(unsigned long long)__entry->sector,
		__entry->nr_sector,
		MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		(unsigned long long)__entry->old_sector)
)
#endif

#undef __print_rwbs_flags
#undef blk_fill_rwbs

#endif /* _TRACE_BLOCK_H */

/* This part must be outside protection */
#include "../../../probes/define_trace.h"