X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=instrumentation%2Fevents%2Flttng-module%2Fblock.h;h=f3b8bffd6f0853bc46044059de63183ed756d4c7;hb=1aa93517990af3295adafb7c866c410a76de31f1;hp=1d5531c974fae249b0ecd382c7fc7c745667323e;hpb=41b59694ba5b85708c9b6a68743347388665a5a3;p=lttng-modules.git diff --git a/instrumentation/events/lttng-module/block.h b/instrumentation/events/lttng-module/block.h index 1d5531c9..f3b8bffd 100644 --- a/instrumentation/events/lttng-module/block.h +++ b/instrumentation/events/lttng-module/block.h @@ -8,6 +8,9 @@ #include #include #include +#include + +#define RWBS_LEN 8 #ifndef _TRACE_BLOCK_DEF_ #define _TRACE_BLOCK_DEF_ @@ -19,23 +22,45 @@ enum { RWBS_FLAG_DISCARD = (1 << 1), RWBS_FLAG_READ = (1 << 2), RWBS_FLAG_RAHEAD = (1 << 3), - RWBS_FLAG_SYNC = (1 << 4), - RWBS_FLAG_META = (1 << 5), - RWBS_FLAG_SECURE = (1 << 6), + RWBS_FLAG_BARRIER = (1 << 4), + RWBS_FLAG_SYNC = (1 << 5), + RWBS_FLAG_META = (1 << 6), + RWBS_FLAG_SECURE = (1 << 7), + RWBS_FLAG_FLUSH = (1 << 8), + RWBS_FLAG_FUA = (1 << 9), }; #endif /* _TRACE_BLOCK_DEF_ */ #define __print_rwbs_flags(rwbs) \ __print_flags(rwbs, "", \ + { RWBS_FLAG_FLUSH, "F" }, \ { RWBS_FLAG_WRITE, "W" }, \ { RWBS_FLAG_DISCARD, "D" }, \ { RWBS_FLAG_READ, "R" }, \ + { RWBS_FLAG_FUA, "F" }, \ { RWBS_FLAG_RAHEAD, "A" }, \ + { RWBS_FLAG_BARRIER, "B" }, \ { RWBS_FLAG_SYNC, "S" }, \ { RWBS_FLAG_META, "M" }, \ { RWBS_FLAG_SECURE, "E" }) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)) + +#define blk_fill_rwbs(rwbs, rw, bytes) \ + tp_assign(rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE : \ + ( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD : \ + ( (bytes) ? RWBS_FLAG_READ : \ + ( 0 )))) \ + | ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0) \ + | ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0) \ + | ((rw) & REQ_META ? RWBS_FLAG_META : 0) \ + | ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0) \ + | ((rw) & REQ_FLUSH ? RWBS_FLAG_FLUSH : 0) \ + | ((rw) & REQ_FUA ? RWBS_FLAG_FUA : 0)) + +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) + #define blk_fill_rwbs(rwbs, rw, bytes) \ tp_assign(rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE : \ ( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD : \ @@ -46,6 +71,85 @@ enum { | ((rw) & REQ_META ? RWBS_FLAG_META : 0) \ | ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0)) +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)) + +#define blk_fill_rwbs(rwbs, rw, bytes) \ + tp_assign(rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE : \ + ( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD : \ + ( (bytes) ? RWBS_FLAG_READ : \ + ( 0 )))) \ + | ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0) \ + | ((rw) & REQ_HARDBARRIER ? RWBS_FLAG_BARRIER : 0) \ + | ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0) \ + | ((rw) & REQ_META ? RWBS_FLAG_META : 0) \ + | ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0)) + +#else + +#define blk_fill_rwbs(rwbs, rw, bytes) \ + tp_assign(rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE : \ + ( (rw) & (1 << BIO_RW_DISCARD) ? RWBS_FLAG_DISCARD : \ + ( (bytes) ? RWBS_FLAG_READ : \ + ( 0 )))) \ + | ((rw) & (1 << BIO_RW_AHEAD) ? RWBS_FLAG_RAHEAD : 0) \ + | ((rw) & (1 << BIO_RW_SYNCIO) ? RWBS_FLAG_SYNC : 0) \ + | ((rw) & (1 << BIO_RW_META) ? RWBS_FLAG_META : 0) \ + | ((rw) & (1 << BIO_RW_BARRIER) ? 
RWBS_FLAG_BARRIER : 0)) + +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) +DECLARE_EVENT_CLASS(block_buffer, + + TP_PROTO(struct buffer_head *bh), + + TP_ARGS(bh), + + TP_STRUCT__entry ( + __field( dev_t, dev ) + __field( sector_t, sector ) + __field( size_t, size ) + ), + + TP_fast_assign( + tp_assign(dev, bh->b_bdev->bd_dev) + tp_assign(sector, bh->b_blocknr) + tp_assign(size, bh->b_size) + ), + + TP_printk("%d,%d sector=%llu size=%zu", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long long)__entry->sector, __entry->size + ) +) + +/** + * block_touch_buffer - mark a buffer accessed + * @bh: buffer_head being touched + * + * Called from touch_buffer(). + */ +DEFINE_EVENT(block_buffer, block_touch_buffer, + + TP_PROTO(struct buffer_head *bh), + + TP_ARGS(bh) +) + +/** + * block_dirty_buffer - mark a buffer dirty + * @bh: buffer_head being dirtied + * + * Called from mark_buffer_dirty(). + */ +DEFINE_EVENT(block_buffer, block_dirty_buffer, + + TP_PROTO(struct buffer_head *bh), + + TP_ARGS(bh) +) +#endif + DECLARE_EVENT_CLASS(block_rq_with_error, TP_PROTO(struct request_queue *q, struct request *rq), @@ -58,7 +162,9 @@ DECLARE_EVENT_CLASS(block_rq_with_error, __field( unsigned int, nr_sector ) __field( int, errors ) __field( unsigned int, rwbs ) - __dynamic_array_text( char, cmd, blk_cmd_buf_len(rq) ) + __dynamic_array_hex( unsigned char, cmd, + (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + rq->cmd_len : 0) ), TP_fast_assign( @@ -69,7 +175,8 @@ DECLARE_EVENT_CLASS(block_rq_with_error, 0 : blk_rq_sectors(rq)) tp_assign(errors, rq->errors) blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq)) - tp_memcpy_dyn(cmd, rq->cmd) + tp_memcpy_dyn(cmd, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + rq->cmd : NULL) ), TP_printk("%d,%d %s (%s) %llu + %u [%d]", @@ -145,7 +252,9 @@ DECLARE_EVENT_CLASS(block_rq, __field( unsigned int, bytes ) __field( unsigned int, rwbs ) __array_text( char, comm, TASK_COMM_LEN ) - __dynamic_array_text( char, cmd, blk_cmd_buf_len(rq) ) + __dynamic_array_hex( unsigned char, cmd, + (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? + rq->cmd_len : 0) ), TP_fast_assign( @@ -157,7 +266,8 @@ DECLARE_EVENT_CLASS(block_rq, tp_assign(bytes, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? blk_rq_bytes(rq) : 0) blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq)) - tp_memcpy_dyn(cmd, rq->cmd) + tp_memcpy_dyn(cmd, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 
+ rq->cmd : NULL) tp_memcpy(comm, current->comm, TASK_COMM_LEN) ), @@ -255,9 +365,15 @@ TRACE_EVENT(block_bio_bounce, */ TRACE_EVENT(block_bio_complete, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)) TP_PROTO(struct request_queue *q, struct bio *bio, int error), TP_ARGS(q, bio, error), +#else + TP_PROTO(struct request_queue *q, struct bio *bio), + + TP_ARGS(q, bio), +#endif TP_STRUCT__entry( __field( dev_t, dev ) @@ -271,7 +387,11 @@ TRACE_EVENT(block_bio_complete, tp_assign(dev, bio->bi_bdev->bd_dev) tp_assign(sector, bio->bi_sector) tp_assign(nr_sector, bio->bi_size >> 9) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)) tp_assign(error, error) +#else + tp_assign(error, 0) +#endif blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size) ), @@ -282,6 +402,101 @@ TRACE_EVENT(block_bio_complete, __entry->nr_sector, __entry->error) ) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) +DECLARE_EVENT_CLASS(block_bio_merge, + + TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio), + + TP_ARGS(q, rq, bio), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( sector_t, sector ) + __field( unsigned int, nr_sector ) + __field( unsigned int, rwbs ) + __array_text( char, comm, TASK_COMM_LEN ) + ), + + TP_fast_assign( + tp_assign(dev, bio->bi_bdev->bd_dev) + tp_assign(sector, bio->bi_sector) + tp_assign(nr_sector, bio->bi_size >> 9) + blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size) + tp_memcpy(comm, current->comm, TASK_COMM_LEN) + ), + + TP_printk("%d,%d %s %llu + %u [%s]", + MAJOR(__entry->dev), MINOR(__entry->dev), + __print_rwbs_flags(__entry->rwbs), + (unsigned long long)__entry->sector, + __entry->nr_sector, __entry->comm) +) + +/** + * block_bio_backmerge - merging block operation to the end of an existing operation + * @q: queue holding operation + * @bio: new block operation to merge + * + * Merging block request @bio to the end of an existing block request + * in queue @q. + */ +DEFINE_EVENT(block_bio_merge, block_bio_backmerge, + + TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio), + + TP_ARGS(q, rq, bio) +) + +/** + * block_bio_frontmerge - merging block operation to the beginning of an existing operation + * @q: queue holding operation + * @bio: new block operation to merge + * + * Merging block IO operation @bio to the beginning of an existing block + * operation in queue @q. + */ +DEFINE_EVENT(block_bio_merge, block_bio_frontmerge, + + TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio), + + TP_ARGS(q, rq, bio) +) + +/** + * block_bio_queue - putting new block IO operation in queue + * @q: queue holding operation + * @bio: new block operation + * + * About to place the block IO operation @bio into queue @q. 
+ */ +TRACE_EVENT(block_bio_queue, + + TP_PROTO(struct request_queue *q, struct bio *bio), + + TP_ARGS(q, bio), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( sector_t, sector ) + __field( unsigned int, nr_sector ) + __array( char, rwbs, RWBS_LEN ) + __array( char, comm, TASK_COMM_LEN ) + ), + + TP_fast_assign( + tp_assign(dev, bio->bi_bdev->bd_dev) + tp_assign(sector, bio->bi_sector) + tp_assign(nr_sector, bio->bi_size >> 9) + blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size) + tp_memcpy(comm, current->comm, TASK_COMM_LEN) + ), + + TP_printk("%d,%d %s %llu + %u [%s]", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, + (unsigned long long)__entry->sector, + __entry->nr_sector, __entry->comm) +) +#else DECLARE_EVENT_CLASS(block_bio, TP_PROTO(struct request_queue *q, struct bio *bio), @@ -297,7 +512,7 @@ DECLARE_EVENT_CLASS(block_bio, ), TP_fast_assign( - tp_assign(dev, bio->bi_bdev->bd_dev) + tp_assign(dev, bio->bi_bdev ? bio->bi_bdev->bd_dev : 0) tp_assign(sector, bio->bi_sector) tp_assign(nr_sector, bio->bi_size >> 9) blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size) @@ -354,6 +569,7 @@ DEFINE_EVENT(block_bio, block_bio_queue, TP_ARGS(q, bio) ) +#endif DECLARE_EVENT_CLASS(block_get_rq, @@ -446,9 +662,15 @@ TRACE_EVENT(block_plug, DECLARE_EVENT_CLASS(block_unplug, +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit), TP_ARGS(q, depth, explicit), +#else + TP_PROTO(struct request_queue *q), + + TP_ARGS(q), +#endif TP_STRUCT__entry( __field( int, nr_rq ) @@ -456,13 +678,33 @@ DECLARE_EVENT_CLASS(block_unplug, ), TP_fast_assign( +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) tp_assign(nr_rq, depth) +#else + tp_assign(nr_rq, q->rq.count[READ] + q->rq.count[WRITE]) +#endif tp_memcpy(comm, current->comm, TASK_COMM_LEN) ), TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) ) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39)) +/** + * block_unplug_timer - timed release of operations requests in queue to device driver + * @q: request queue to unplug + * + * Unplug the request queue @q because a timer expired and allow block + * operation requests to be sent to the device driver. + */ +DEFINE_EVENT(block_unplug, block_unplug_timer, + + TP_PROTO(struct request_queue *q), + + TP_ARGS(q) +) +#endif + /** * block_unplug - release of operations requests in request queue * @q: request queue to unplug @@ -472,11 +714,21 @@ DECLARE_EVENT_CLASS(block_unplug, * Unplug request queue @q because device driver is scheduled to work * on elements in the request queue. */ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) DEFINE_EVENT(block_unplug, block_unplug, +#else +DEFINE_EVENT(block_unplug, block_unplug_io, +#endif +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit), TP_ARGS(q, depth, explicit) +#else + TP_PROTO(struct request_queue *q), + + TP_ARGS(q) +#endif ) /** @@ -531,7 +783,11 @@ TRACE_EVENT(block_split, * An operation for a logical device has been mapped to the * raw block device. 
*/ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)) TRACE_EVENT(block_bio_remap, +#else +TRACE_EVENT(block_remap, +#endif TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev, sector_t from), @@ -565,6 +821,7 @@ TRACE_EVENT(block_bio_remap, (unsigned long long)__entry->old_sector) ) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) /** * block_rq_remap - map request for a block operation request * @q: queue holding the operation @@ -609,6 +866,7 @@ TRACE_EVENT(block_rq_remap, MAJOR(__entry->old_dev), MINOR(__entry->old_dev), (unsigned long long)__entry->old_sector) ) +#endif #undef __print_rwbs_flags #undef blk_fill_rwbs
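
The central change this patch makes to the rwbs handling is that blk_fill_rwbs() now records an integer bitmask of RWBS_FLAG_* values per event instead of a preformatted string, and __print_rwbs_flags() maps that bitmask back to the familiar blktrace-style letter string at read-out time. The stand-alone user-space sketch below illustrates that decoding step only; it is not part of the patch. The flag values mirror the enum in this header (RWBS_FLAG_WRITE is assumed to be bit 0, sitting just above the context shown in the hunk), and the helper name rwbs_to_string() is hypothetical.

#include <stdio.h>

/* Mirror of the RWBS_FLAG_* enum in this header (WRITE assumed at bit 0). */
enum {
	RWBS_FLAG_WRITE		= (1 << 0),
	RWBS_FLAG_DISCARD	= (1 << 1),
	RWBS_FLAG_READ		= (1 << 2),
	RWBS_FLAG_RAHEAD	= (1 << 3),
	RWBS_FLAG_BARRIER	= (1 << 4),
	RWBS_FLAG_SYNC		= (1 << 5),
	RWBS_FLAG_META		= (1 << 6),
	RWBS_FLAG_SECURE	= (1 << 7),
	RWBS_FLAG_FLUSH		= (1 << 8),
	RWBS_FLAG_FUA		= (1 << 9),
};

/* Same flag order and letters as __print_rwbs_flags() in the patch. */
static void rwbs_to_string(unsigned int rwbs, char *buf)
{
	static const struct { unsigned int flag; char c; } map[] = {
		{ RWBS_FLAG_FLUSH,	'F' },
		{ RWBS_FLAG_WRITE,	'W' },
		{ RWBS_FLAG_DISCARD,	'D' },
		{ RWBS_FLAG_READ,	'R' },
		{ RWBS_FLAG_FUA,	'F' },
		{ RWBS_FLAG_RAHEAD,	'A' },
		{ RWBS_FLAG_BARRIER,	'B' },
		{ RWBS_FLAG_SYNC,	'S' },
		{ RWBS_FLAG_META,	'M' },
		{ RWBS_FLAG_SECURE,	'E' },
	};
	size_t i, n = 0;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (rwbs & map[i].flag)
			buf[n++] = map[i].c;
	buf[n] = '\0';
}

int main(void)
{
	char buf[16];

	/* A synchronous FUA write decodes to "WFS". */
	rwbs_to_string(RWBS_FLAG_WRITE | RWBS_FLAG_SYNC | RWBS_FLAG_FUA, buf);
	printf("%s\n", buf);
	return 0;
}

Note that two distinct flags, FLUSH and FUA, both render as 'F'; their position in the string (leading for FLUSH, after the R/W/D letter for FUA) is what tells them apart, following the ordering used by __print_rwbs_flags().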