X-Git-Url: https://git.lttng.org/?a=blobdiff_plain;f=instrumentation%2Fevents%2Flttng-module%2Fkmem.h;h=7607fc0c71b7f78aa6d6546d499e9c66ab83365d;hb=9cf29d3eaffa56199259919c90108de84724b4fb;hp=04f668b0ac75e7db7d8180a3d010c9808e529a26;hpb=b283666ff19841a28b0448c6a867beb2f809f11a;p=lttng-modules.git

diff --git a/instrumentation/events/lttng-module/kmem.h b/instrumentation/events/lttng-module/kmem.h
index 04f668b0..7607fc0c 100644
--- a/instrumentation/events/lttng-module/kmem.h
+++ b/instrumentation/events/lttng-module/kmem.h
@@ -4,6 +4,13 @@
 #if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_KMEM_H
 
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
+#include <trace/events/gfpflags.h>
+#endif
+
 DECLARE_EVENT_CLASS(kmem_alloc,
 
 	TP_PROTO(unsigned long call_site,
@@ -15,19 +22,19 @@ DECLARE_EVENT_CLASS(kmem_alloc,
 	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
 
 	TP_STRUCT__entry(
-		__field( unsigned long, call_site )
-		__field( const void *, ptr )
+		__field_hex( unsigned long, call_site )
+		__field_hex( const void *, ptr )
 		__field( size_t, bytes_req )
 		__field( size_t, bytes_alloc )
 		__field( gfp_t, gfp_flags )
 	),
 
 	TP_fast_assign(
-		tp_assign(call_site, call_site);
-		tp_assign(ptr, ptr);
-		tp_assign(bytes_req, bytes_req);
-		tp_assign(bytes_alloc, bytes_alloc);
-		tp_assign(gfp_flags, gfp_flags);
+		tp_assign(call_site, call_site)
+		tp_assign(ptr, ptr)
+		tp_assign(bytes_req, bytes_req)
+		tp_assign(bytes_alloc, bytes_alloc)
+		tp_assign(gfp_flags, gfp_flags)
 	),
 
 	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
@@ -38,7 +45,9 @@ DECLARE_EVENT_CLASS(kmem_alloc,
 		show_gfp_flags(__entry->gfp_flags))
 )
 
-DEFINE_EVENT(kmem_alloc, kmalloc,
+DEFINE_EVENT_MAP(kmem_alloc, kmalloc,
+
+	kmem_kmalloc,
 
 	TP_PROTO(unsigned long call_site, const void *ptr,
 		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
@@ -66,8 +75,8 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
 	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
 
 	TP_STRUCT__entry(
-		__field( unsigned long, call_site )
-		__field( const void *, ptr )
+		__field_hex( unsigned long, call_site )
+		__field_hex( const void *, ptr )
 		__field( size_t, bytes_req )
 		__field( size_t, bytes_alloc )
 		__field( gfp_t, gfp_flags )
@@ -75,12 +84,12 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
 	),
 
 	TP_fast_assign(
-		tp_assign(call_site, call_site);
-		tp_assign(ptr, ptr);
-		tp_assign(bytes_req, bytes_req);
-		tp_assign(bytes_alloc, bytes_alloc);
-		tp_assign(gfp_flags, gfp_flags);
-		tp_assign(node, node);
+		tp_assign(call_site, call_site)
+		tp_assign(ptr, ptr)
+		tp_assign(bytes_req, bytes_req)
+		tp_assign(bytes_alloc, bytes_alloc)
+		tp_assign(gfp_flags, gfp_flags)
+		tp_assign(node, node)
 	),
 
 	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
@@ -92,7 +101,9 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
 		__entry->node)
 )
 
-DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
+DEFINE_EVENT_MAP(kmem_alloc_node, kmalloc_node,
+
+	kmem_kmalloc_node,
 
 	TP_PROTO(unsigned long call_site, const void *ptr,
 		 size_t bytes_req, size_t bytes_alloc,
@@ -117,19 +128,21 @@ DECLARE_EVENT_CLASS(kmem_free,
 	TP_ARGS(call_site, ptr),
 
 	TP_STRUCT__entry(
-		__field( unsigned long, call_site )
-		__field( const void *, ptr )
+		__field_hex( unsigned long, call_site )
+		__field_hex( const void *, ptr )
 	),
 
 	TP_fast_assign(
-		tp_assign(call_site, call_site);
-		tp_assign(ptr, ptr);
+		tp_assign(call_site, call_site)
+		tp_assign(ptr, ptr)
 	),
 
 	TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
 )
 
-DEFINE_EVENT(kmem_free, kfree,
+DEFINE_EVENT_MAP(kmem_free, kfree,
+
+	kmem_kfree,
 
 	TP_PROTO(unsigned long call_site, const void *ptr),
 
@@ -143,20 +156,25 @@ DEFINE_EVENT(kmem_free, kmem_cache_free,
 	TP_ARGS(call_site, ptr)
 )
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+TRACE_EVENT(mm_page_free,
+#else
 TRACE_EVENT(mm_page_free_direct,
+#endif
 
 	TP_PROTO(struct page *page, unsigned int order),
 
 	TP_ARGS(page, order),
 
 	TP_STRUCT__entry(
-		__field( struct page *, page )
+		__field_hex( struct page *, page )
 		__field( unsigned int, order )
 	),
 
 	TP_fast_assign(
-		tp_assign(page, page);
-		tp_assign(order, order);
+		tp_assign(page, page)
+		tp_assign(order, order)
 	),
 
 	TP_printk("page=%p pfn=%lu order=%d",
@@ -165,20 +183,24 @@ TRACE_EVENT(mm_page_free_direct,
 		__entry->order)
 )
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
+TRACE_EVENT(mm_page_free_batched,
+#else
 TRACE_EVENT(mm_pagevec_free,
+#endif
 
 	TP_PROTO(struct page *page, int cold),
 
 	TP_ARGS(page, cold),
 
 	TP_STRUCT__entry(
-		__field( struct page *, page )
+		__field_hex( struct page *, page )
 		__field( int, cold )
 	),
 
 	TP_fast_assign(
-		tp_assign(page, page);
-		tp_assign(cold, cold);
+		tp_assign(page, page)
+		tp_assign(cold, cold)
 	),
 
 	TP_printk("page=%p pfn=%lu order=0 cold=%d",
@@ -195,22 +217,22 @@ TRACE_EVENT(mm_page_alloc,
 	TP_ARGS(page, order, gfp_flags, migratetype),
 
 	TP_STRUCT__entry(
-		__field( struct page *, page )
+		__field_hex( struct page *, page )
 		__field( unsigned int, order )
 		__field( gfp_t, gfp_flags )
 		__field( int, migratetype )
 	),
 
 	TP_fast_assign(
-		tp_assign(page, page);
-		tp_assign(order, order);
-		tp_assign(gfp_flags, gfp_flags);
-		tp_assign(migratetype, migratetype);
+		tp_assign(page, page)
+		tp_assign(order, order)
+		tp_assign(gfp_flags, gfp_flags)
+		tp_assign(migratetype, migratetype)
 	),
 
 	TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
 		__entry->page,
-		page_to_pfn(__entry->page),
+		__entry->page ? page_to_pfn(__entry->page) : 0,
 		__entry->order,
 		__entry->migratetype,
 		show_gfp_flags(__entry->gfp_flags))
@@ -223,20 +245,20 @@ DECLARE_EVENT_CLASS(mm_page,
 	TP_ARGS(page, order, migratetype),
 
 	TP_STRUCT__entry(
-		__field( struct page *, page )
+		__field_hex( struct page *, page )
 		__field( unsigned int, order )
 		__field( int, migratetype )
 	),
 
 	TP_fast_assign(
-		tp_assign(page, page);
-		tp_assign(order, order);
-		tp_assign(migratetype, migratetype);
+		tp_assign(page, page)
+		tp_assign(order, order)
+		tp_assign(migratetype, migratetype)
 	),
 
 	TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
 		__entry->page,
-		page_to_pfn(__entry->page),
+		__entry->page ? page_to_pfn(__entry->page) : 0,
 		__entry->order,
 		__entry->migratetype,
 		__entry->order == 0)
@@ -251,7 +273,11 @@ DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
 
 DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33))
 	TP_PROTO(struct page *page, unsigned int order, int migratetype),
+#else
+	TP_PROTO(struct page *page, int order, int migratetype),
+#endif
 
 	TP_ARGS(page, order, migratetype),
 
@@ -271,7 +297,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
 		alloc_migratetype, fallback_migratetype),
 
 	TP_STRUCT__entry(
-		__field( struct page *, page )
+		__field_hex( struct page *, page )
 		__field( int, alloc_order )
 		__field( int, fallback_order )
 		__field( int, alloc_migratetype )
@@ -279,11 +305,11 @@ TRACE_EVENT(mm_page_alloc_extfrag,
 	),
 
 	TP_fast_assign(
-		tp_assign(page, page);
-		tp_assign(alloc_order, alloc_order);
-		tp_assign(fallback_order, fallback_order);
-		tp_assign(alloc_migratetype, alloc_migratetype);
-		tp_assign(fallback_migratetype, fallback_migratetype);
+		tp_assign(page, page)
+		tp_assign(alloc_order, alloc_order)
+		tp_assign(fallback_order, fallback_order)
+		tp_assign(alloc_migratetype, alloc_migratetype)
+		tp_assign(fallback_migratetype, fallback_migratetype)
 	),
 
 	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
@@ -297,6 +323,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
 		__entry->fallback_order < pageblock_order,
 		__entry->alloc_migratetype == __entry->fallback_migratetype)
 )
+#endif
 
 #endif /* _TRACE_KMEM_H */
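Note on the DEFINE_EVENT_MAP usage introduced above (illustrative sketch, not part of the diff): as the hunks show, DEFINE_EVENT_MAP takes the event class, the kernel tracepoint name, and an LTTng event name (kmem_kmalloc, kmem_kmalloc_node, kmem_kfree), so the event recorded in the trace carries the kmem_ prefix while the probe still attaches to the unprefixed kernel tracepoint. A further event would follow the same shape; the names my_alloc and kmem_my_alloc below are invented for illustration only and exist neither in the kernel nor in lttng-modules.

/*
 * Illustrative sketch only, mirroring the pattern used above for kmalloc.
 * "my_alloc" and "kmem_my_alloc" are hypothetical names; a kernel tracepoint
 * named my_alloc with this prototype would have to exist for the probe to
 * build and register.
 */
DEFINE_EVENT_MAP(kmem_alloc, my_alloc,

	kmem_my_alloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
)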