From: Pierre-Marc Fournier
Date: Fri, 16 Apr 2010 14:27:47 +0000 (-0400)
Subject: add custom probes support and update tracepoints
X-Git-Tag: v0.5~32
X-Git-Url: https://git.lttng.org/?p=ust.git;a=commitdiff_plain;h=12e81b07455a1aef2e2bcc73004f14a7b73596fa

add custom probes support and update tracepoints
---

diff --git a/include/Makefile.am b/include/Makefile.am
index 7249bf2..5a8b075 100644
--- a/include/Makefile.am
+++ b/include/Makefile.am
@@ -7,6 +7,8 @@ nobase_include_HEADERS = \
 	ust/probe.h \
 	ust/ust.h \
 	ust/tracectl.h \
+	ust/core.h \
+	ust/type-serializer.h \
 	ust/kcompat/kcompat.h \
 	ust/kcompat/compiler.h \
 	ust/kcompat/disable.h \
diff --git a/include/ust/core.h b/include/ust/core.h
new file mode 100644
index 0000000..27c7e6d
--- /dev/null
+++ b/include/ust/core.h
@@ -0,0 +1,39 @@
+#ifndef UST_CORE_H
+#define UST_CORE_H
+
+#if defined(CONFIG_LTT) && defined(CONFIG_LTT_ALIGNMENT)
+
+/*
+ * Calculate the offset needed to align the type.
+ * size_of_type must be non-zero.
+ */
+static inline unsigned int ltt_align(size_t align_drift, size_t size_of_type)
+{
+	size_t alignment = min(sizeof(void *), size_of_type);
+	return (alignment - align_drift) & (alignment - 1);
+}
+/* Default arch alignment */
+#define LTT_ALIGN
+
+static inline int ltt_get_alignment(void)
+{
+	return sizeof(void *);
+}
+
+#else
+
+static inline unsigned int ltt_align(size_t align_drift,
+		size_t size_of_type)
+{
+	return 0;
+}
+
+#define LTT_ALIGN __attribute__((packed))
+
+static inline int ltt_get_alignment(void)
+{
+	return 0;
+}
+#endif /* defined(CONFIG_LTT) && defined(CONFIG_LTT_ALIGNMENT) */
+
+#endif /* UST_CORE_H */
diff --git a/include/ust/type-serializer.h b/include/ust/type-serializer.h
new file mode 100644
index 0000000..8457a47
--- /dev/null
+++ b/include/ust/type-serializer.h
@@ -0,0 +1,181 @@
+#ifndef _LTT_TYPE_SERIALIZER_H
+#define _LTT_TYPE_SERIALIZER_H
+
+//ust// #include "tracer.h"
+#include 
+#include 
+
+/*
+ * largest_align must be non-zero, equal to the minimum between the largest type
+ * and sizeof(void *).
+ */
+extern void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
+		void *serialize_private, unsigned int data_size,
+		unsigned int largest_align);
+
+/*
+ * Statically check that 0 < largest_align < sizeof(void *) to make sure it is
+ * dumb-proof. It will make sure 0 is changed into 1 and unsigned long long is
+ * changed into sizeof(void *) on 32-bit architectures.
+ */
+static inline void ltt_specialized_trace(const struct marker *mdata,
+		void *probe_data,
+		void *serialize_private, unsigned int data_size,
+		unsigned int largest_align)
+{
+	largest_align = min_t(unsigned int, largest_align, sizeof(void *));
+	largest_align = max_t(unsigned int, largest_align, 1);
+	_ltt_specialized_trace(mdata, probe_data, serialize_private, data_size,
+			largest_align);
+}
+
+/*
+ * Type serializer definitions.
+ */
+
+/*
+ * Return size of structure without end-of-structure padding.
+ */
+#define serialize_sizeof(type)	offsetof(typeof(type), end_field)
+
+struct serialize_long_int {
+	unsigned long f1;
+	unsigned int f2;
+	unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_int_int_long {
+	unsigned int f1;
+	unsigned int f2;
+	unsigned long f3;
+	unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_int_int_short {
+	unsigned int f1;
+	unsigned int f2;
+	unsigned short f3;
+	unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_long_long {
+	unsigned long f1;
+	unsigned long f2;
+	unsigned long f3;
+	unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_long_int {
+	unsigned long f1;
+	unsigned long f2;
+	unsigned int f3;
+	unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_long_short_char {
+	unsigned long f1;
+	unsigned long f2;
+	unsigned short f3;
+	unsigned char f4;
+	unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_long_short {
+	unsigned long f1;
+	unsigned long f2;
+	unsigned short f3;
+	unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_short_char {
+	unsigned long f1;
+	unsigned short f2;
+	unsigned char f3;
+	unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_short {
+	unsigned long f1;
+	unsigned short f2;
+	unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_char {
+	unsigned long f1;
+	unsigned char f2;
+	unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_sizet_int {
+	size_t f1;
+	unsigned int f2;
+	unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_long_sizet_int {
+	unsigned long f1;
+	unsigned long f2;
+	size_t f3;
+	unsigned int f4;
+	unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_long_long_sizet_int_int {
+	unsigned long f1;
+	unsigned long f2;
+	size_t f3;
+	unsigned int f4;
+	unsigned int f5;
+	unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_l4421224411111 {
+	unsigned long f1;
+	uint32_t f2;
+	uint32_t f3;
+	uint16_t f4;
+	uint8_t f5;
+	uint16_t f6;
+	uint16_t f7;
+	uint32_t f8;
+	uint32_t f9;
+	uint8_t f10;
+	uint8_t f11;
+	uint8_t f12;
+	uint8_t f13;
+	uint8_t f14;
+	unsigned char end_field[0];
+} LTT_ALIGN;
+
+struct serialize_l214421224411111 {
+	unsigned long f1;
+	uint16_t f2;
+	uint8_t f3;
+	uint32_t f4;
+	uint32_t f5;
+	uint16_t f6;
+	uint8_t f7;
+	uint16_t f8;
+	uint16_t f9;
+	uint32_t f10;
+	uint32_t f11;
+	uint8_t f12;
+	uint8_t f13;
+	uint8_t f14;
+	uint8_t f15;
+	uint8_t f16;
+	uint8_t end_field[0];
+} LTT_ALIGN;
+
+struct serialize_l4412228 {
+	unsigned long f1;
+	uint32_t f2;
+	uint32_t f3;
+	uint8_t f4;
+	uint16_t f5;
+	uint16_t f6;
+	uint16_t f7;
+	uint64_t f8;
+	unsigned char end_field[0];
+} LTT_ALIGN;
+#endif /* _LTT_TYPE_SERIALIZER_H */
diff --git a/include/ust/ust.h b/include/ust/ust.h
index aa66b06..47e5b6e 100644
--- a/include/ust/ust.h
+++ b/include/ust/ust.h
@@ -24,5 +24,7 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 
 #endif /* UST_H */
diff --git a/libust-initializer.c b/libust-initializer.c
index 1bf709e..ec39932 100644
--- a/libust-initializer.c
+++ b/libust-initializer.c
@@ -18,7 +18,7 @@
  * avoid this.
  */
 
-DECLARE_TRACE(ust_dummytp, TPPROTO(void), TPARGS());
+DECLARE_TRACE(ust_dummytp, TP_PROTO(void), TP_ARGS());
 DEFINE_TRACE(ust_dummytp);
 
 void dummy_libust_initializer_func(void)
diff --git a/libust/Makefile.am b/libust/Makefile.am
index 529d4b0..2de5857 100644
--- a/libust/Makefile.am
+++ b/libust/Makefile.am
@@ -20,7 +20,8 @@ libust_la_SOURCES = \
 	tracectl.c \
 	$(top_builddir)/libustcomm/multipoll.c \
 	tracerconst.h \
-	header-inline.h
+	header-inline.h \
+	type-serializer.c
 
 libust_la_LDFLAGS = -no-undefined -version-info 0:0:0
 
diff --git a/libust/buffers.c b/libust/buffers.c
index b7002de..ef62a30 100644
--- a/libust/buffers.c
+++ b/libust/buffers.c
@@ -1213,12 +1213,14 @@ static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffe
  * Return : -ENOSPC if not enough space, else returns 0.
  * It will take care of sub-buffer switching.
  */
-int ltt_reserve_slot_lockless_slow(struct ust_trace *trace,
-		struct ust_channel *chan, void **transport_data,
-		size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
-		unsigned int *rflags, int largest_align, int cpu)
+int ltt_reserve_slot_lockless_slow(struct ust_channel *chan,
+		struct ust_trace *trace, size_t data_size,
+		int largest_align, int cpu,
+		struct ust_buffer **ret_buf,
+		size_t *slot_size, long *buf_offset,
+		u64 *tsc, unsigned int *rflags)
 {
-	struct ust_buffer *buf = chan->buf[cpu];
+	struct ust_buffer *buf = *ret_buf = chan->buf[cpu];
 	struct ltt_reserve_switch_offsets offsets;
 
 	offsets.size = 0;
@@ -1296,8 +1298,7 @@ static void __attribute__((destructor)) ust_buffers_exit(void)
 	ltt_transport_unregister(&ust_relay_transport);
 }
 
-size_t ltt_write_event_header_slow(struct ust_trace *trace,
-		struct ust_channel *channel,
+size_t ltt_write_event_header_slow(struct ust_channel *channel,
 		struct ust_buffer *buf, long buf_offset,
 		u16 eID, u32 event_size,
 		u64 tsc, unsigned int rflags)
diff --git a/libust/buffers.h b/libust/buffers.h
index d3267e1..ca78796 100644
--- a/libust/buffers.h
+++ b/libust/buffers.h
@@ -104,10 +104,12 @@ struct ust_buffer {
  */
 enum force_switch_mode { FORCE_ACTIVE, FORCE_FLUSH };
 
-extern int ltt_reserve_slot_lockless_slow(struct ust_trace *trace,
-		struct ust_channel *ltt_channel, void **transport_data,
-		size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
-		unsigned int *rflags, int largest_align, int cpu);
+extern int ltt_reserve_slot_lockless_slow(struct ust_channel *chan,
+		struct ust_trace *trace, size_t data_size,
+		int largest_align, int cpu,
+		struct ust_buffer **ret_buf,
+		size_t *slot_size, long *buf_offset,
+		u64 *tsc, unsigned int *rflags);
 
 extern void ltt_force_switch_lockless_slow(struct ust_buffer *buf,
 		enum force_switch_mode mode);
@@ -351,12 +353,14 @@ static __inline__ int ltt_relay_try_reserve(
 	return 0;
 }
 
-static __inline__ int ltt_reserve_slot(struct ust_trace *trace,
-		struct ust_channel *chan, void **transport_data,
-		size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
-		unsigned int *rflags, int largest_align, int cpu)
+static __inline__ int ltt_reserve_slot(struct ust_channel *chan,
+		struct ust_trace *trace, size_t data_size,
+		int largest_align, int cpu,
+		struct ust_buffer **ret_buf,
+		size_t *slot_size, long *buf_offset, u64 *tsc,
+		unsigned int *rflags)
 {
-	struct ust_buffer *buf = chan->buf[cpu];
+	struct ust_buffer *buf = *ret_buf = chan->buf[cpu];
 	long o_begin, o_end, o_old;
 	size_t before_hdr_pad;
 
@@ -400,9 +404,10 @@ static __inline__ int ltt_reserve_slot(struct ust_trace *trace,
 	*buf_offset = o_begin + before_hdr_pad;
 	return 0;
 slow_path:
-	return ltt_reserve_slot_lockless_slow(trace, chan,
-		transport_data, data_size, slot_size, buf_offset, tsc,
-		rflags, largest_align, cpu);
+	return ltt_reserve_slot_lockless_slow(chan, trace, data_size,
+			largest_align, cpu, ret_buf,
+			slot_size, buf_offset, tsc,
+			rflags);
 }
 
 /*
diff --git a/libust/header-inline.h b/libust/header-inline.h
index c22a1c3..04abaa9 100644
--- a/libust/header-inline.h
+++ b/libust/header-inline.h
@@ -19,7 +19,7 @@
 #ifndef UST_HEADER_INLINE_H
 #define UST_HEADER_INLINE_H
 
-#include "tracercore.h"
+#include 
 
 /*
  * ust_get_header_size
diff --git a/libust/marker.c b/libust/marker.c
index d2fee49..fd16860 100644
--- a/libust/marker.c
+++ b/libust/marker.c
@@ -577,36 +577,36 @@ static int set_marker(struct marker_entry *entry, struct marker *elem,
 	smp_wmb();
 	elem->ptype = entry->ptype;
 
-//ust//	if (elem->tp_name && (active ^ _imv_read(elem->state))) {
-//ust//		WARN_ON(!elem->tp_cb);
-//ust//		/*
-//ust//		 * It is ok to directly call the probe registration because type
-//ust//		 * checking has been done in the __trace_mark_tp() macro.
-//ust//		 */
-//ust//
-//ust//		if (active) {
-//ust//			/*
-//ust//			 * try_module_get should always succeed because we hold
-//ust//			 * markers_mutex to get the tp_cb address.
-//ust//			 */
+	if (elem->tp_name && (active ^ _imv_read(elem->state))) {
+		WARN_ON(!elem->tp_cb);
+		/*
+		 * It is ok to directly call the probe registration because type
+		 * checking has been done in the __trace_mark_tp() macro.
+		 */
+
+		if (active) {
+			/*
+			 * try_module_get should always succeed because we hold
+			 * markers_mutex to get the tp_cb address.
+			 */
 //ust//			ret = try_module_get(__module_text_address(
 //ust//				(unsigned long)elem->tp_cb));
 //ust//			BUG_ON(!ret);
-//ust//			ret = tracepoint_probe_register_noupdate(
-//ust//				elem->tp_name,
-//ust//				elem->tp_cb);
-//ust//		} else {
-//ust//			ret = tracepoint_probe_unregister_noupdate(
-//ust//				elem->tp_name,
-//ust//				elem->tp_cb);
-//ust//			/*
-//ust//			 * tracepoint_probe_update_all() must be called
-//ust//			 * before the module containing tp_cb is unloaded.
-//ust//			 */
+			ret = tracepoint_probe_register_noupdate(
+				elem->tp_name,
+				elem->tp_cb);
+		} else {
+			ret = tracepoint_probe_unregister_noupdate(
+				elem->tp_name,
+				elem->tp_cb);
+			/*
+			 * tracepoint_probe_update_all() must be called
+			 * before the module containing tp_cb is unloaded.
+			 */
 //ust//			module_put(__module_text_address(
 //ust//				(unsigned long)elem->tp_cb));
-//ust//		}
-//ust//	}
+		}
+	}
 	elem->state__imv = active;
 
 	return ret;
@@ -620,24 +620,24 @@ static int set_marker(struct marker_entry *entry, struct marker *elem,
  */
 static void disable_marker(struct marker *elem)
 {
-//ust//	int ret;
-//ust//
-//ust//	/* leave "call" as is. It is known statically. */
-//ust//	if (elem->tp_name && _imv_read(elem->state)) {
-//ust//		WARN_ON(!elem->tp_cb);
-//ust//		/*
-//ust//		 * It is ok to directly call the probe registration because type
-//ust//		 * checking has been done in the __trace_mark_tp() macro.
-//ust//		 */
-//ust//		ret = tracepoint_probe_unregister_noupdate(elem->tp_name,
-//ust//			elem->tp_cb);
-//ust//		WARN_ON(ret);
-//ust//		/*
-//ust//		 * tracepoint_probe_update_all() must be called
-//ust//		 * before the module containing tp_cb is unloaded.
-//ust//		 */
+	int ret;
+
+	/* leave "call" as is. It is known statically. */
+	if (elem->tp_name && _imv_read(elem->state)) {
+		WARN_ON(!elem->tp_cb);
+		/*
+		 * It is ok to directly call the probe registration because type
+		 * checking has been done in the __trace_mark_tp() macro.
+		 */
+		ret = tracepoint_probe_unregister_noupdate(elem->tp_name,
+			elem->tp_cb);
+		WARN_ON(ret);
+		/*
+		 * tracepoint_probe_update_all() must be called
+		 * before the module containing tp_cb is unloaded.
+		 */
 //ust//		module_put(__module_text_address((unsigned long)elem->tp_cb));
-//ust//	}
+	}
 	elem->state__imv = 0;
 	elem->single.func = __mark_empty_function;
 	/* Update the function before setting the ptype */
@@ -746,7 +746,7 @@ static void marker_update_probes(void)
 	/* Markers in modules. */
 //ust//	module_update_markers();
 	lib_update_markers();
-//ust//	tracepoint_probe_update_all();
+	tracepoint_probe_update_all();
 	/* Update immediate values */
 	core_imv_update();
 //ust//	module_imv_update(); /* FIXME: need to port for libs? */
diff --git a/libust/serialize.c b/libust/serialize.c
index 4c23e8d..d3c8cdb 100644
--- a/libust/serialize.c
+++ b/libust/serialize.c
@@ -32,11 +32,12 @@
 #include 
 #include 
 
-#include 
 #define _LGPL_SOURCE
 #include 
 #include 
+#include 
+#include 
 
 #include "buffers.h"
 #include "tracer.h"
 //#include "list.h"
@@ -50,11 +51,6 @@ enum ltt_type {
 	LTT_TYPE_NONE,
 };
 
-static int ust_get_cpu(void)
-{
-	return sched_getcpu();
-}
-
 #define LTT_ATTRIBUTE_NETWORK_BYTE_ORDER (1<<1)
 
 /*
@@ -689,10 +685,9 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
 		}
 
 		/* reserve space : header and data */
-		ret = ltt_reserve_slot(trace, channel, &transport_data,
-					data_size, &slot_size, &buf_offset,
-					&tsc, &rflags,
-					largest_align, cpu);
+		ret = ltt_reserve_slot(channel, trace, data_size, largest_align,
+					cpu, &buf, &slot_size, &buf_offset,
+					&tsc, &rflags);
 		if (unlikely(ret < 0))
 			continue; /* buffer full */
 
@@ -701,8 +696,7 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
 //ust//		buf = ((struct rchan *)channel->trans_channel_data)->buf[cpu];
 		buf = channel->buf[cpu];
 		/* Out-of-order write : header and data */
-		buf_offset = ltt_write_event_header(trace,
-			channel, buf, buf_offset,
+		buf_offset = ltt_write_event_header(channel, buf, buf_offset,
 			eID, data_size, tsc, rflags);
 		ltt_write_event_data(buf, buf_offset, &closure,
 			serialize_private,
diff --git a/libust/tracer.h b/libust/tracer.h
index cc3974e..dc2d62f 100644
--- a/libust/tracer.h
+++ b/libust/tracer.h
@@ -241,8 +241,7 @@ static __inline__ size_t ltt_subbuffer_header_size(void)
 	return offsetof(struct ltt_subbuffer_header, header_end);
 }
 
-extern size_t ltt_write_event_header_slow(struct ust_trace *trace,
-		struct ust_channel *channel,
+extern size_t ltt_write_event_header_slow(struct ust_channel *channel,
 		struct ust_buffer *buf, long buf_offset,
 		u16 eID, u32 event_size,
 		u64 tsc, unsigned int rflags);
@@ -264,8 +263,7 @@ extern size_t ltt_write_event_header_slow(struct ust_trace *trace,
 *
 * returns : offset where the event data must be written.
 */
-static __inline__ size_t ltt_write_event_header(struct ust_trace *trace,
-		struct ust_channel *chan,
+static __inline__ size_t ltt_write_event_header(struct ust_channel *chan,
 		struct ust_buffer *buf, long buf_offset,
 		u16 eID, u32 event_size,
 		u64 tsc, unsigned int rflags)
@@ -283,7 +281,7 @@ static __inline__ size_t ltt_write_event_header(struct ust_trace *trace,
 	return buf_offset;
 
 slow_path:
-	return ltt_write_event_header_slow(trace, chan, buf, buf_offset,
+	return ltt_write_event_header_slow(chan, buf, buf_offset,
 		eID, event_size, tsc, rflags);
 }
 
@@ -338,6 +336,21 @@ static __inline__ void ltt_write_trace_header(struct ust_trace *trace,
 	header->freq_scale = trace->freq_scale;
 }
 
+static __inline__ int ust_get_cpu(void)
+{
+#ifndef UST_VALGRIND
+	return sched_getcpu();
+#else
+	/* Valgrind does not support the sched_getcpu() vsyscall.
+	 * It causes it to detect a segfault in the program and stop it.
+	 * So if we want to check libust with valgrind, we have to refrain
+	 * from using this call. TODO: it would probably be better to return
+	 * other values too, to better test it.
+	 */
+	return 0;
+#endif
+}
+
 
 /*
  * Size reserved for high priority events (interrupts, NMI, BH) at the end of a
diff --git a/libust/tracercore.h b/libust/tracercore.h
index 2f3d7c4..52f75ec 100644
--- a/libust/tracercore.h
+++ b/libust/tracercore.h
@@ -52,39 +52,4 @@ extern ltt_run_filter_functor ltt_run_filter;
 extern void ltt_filter_register(ltt_run_filter_functor func);
 extern void ltt_filter_unregister(void);
 
-#if defined(CONFIG_LTT) && defined(CONFIG_LTT_ALIGNMENT)
-
-/*
- * Calculate the offset needed to align the type.
- * size_of_type must be non-zero.
- */
-static inline unsigned int ltt_align(size_t align_drift, size_t size_of_type)
-{
-	size_t alignment = min(sizeof(void *), size_of_type);
-	return (alignment - align_drift) & (alignment - 1);
-}
-/* Default arch alignment */
-#define LTT_ALIGN
-
-static inline int ltt_get_alignment(void)
-{
-	return sizeof(void *);
-}
-
-#else
-
-static inline unsigned int ltt_align(size_t align_drift,
-		size_t size_of_type)
-{
-	return 0;
-}
-
-#define LTT_ALIGN __attribute__((packed))
-
-static inline int ltt_get_alignment(void)
-{
-	return 0;
-}
-#endif /* defined(CONFIG_LTT) && defined(CONFIG_LTT_ALIGNMENT) */
-
 #endif /* UST_TRACERCORE_H */
diff --git a/libust/type-serializer.c b/libust/type-serializer.c
new file mode 100644
index 0000000..3ee54eb
--- /dev/null
+++ b/libust/type-serializer.c
@@ -0,0 +1,111 @@
+/**
+ * ltt-type-serializer.c
+ *
+ * LTTng specialized type serializer.
+ *
+ * Copyright Mathieu Desnoyers, 2008.
+ *
+ * Dual LGPL v2.1/GPL v2 license.
+ */
+#include 
+#include 
+#include 
+#include "tracer.h"
+
+notrace
+void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
+		void *serialize_private, unsigned int data_size,
+		unsigned int largest_align)
+{
+	int ret;
+	uint16_t eID;
+	size_t slot_size;
+	unsigned int chan_index;
+	struct ust_buffer *buf;
+	struct ust_channel *chan;
+	struct ust_trace *trace;
+	u64 tsc;
+	long buf_offset;
+	int cpu;
+	unsigned int rflags;
+
+	/*
+	 * If we get here, it's probably because we have useful work to do.
+	 */
+	if (unlikely(ltt_traces.num_active_traces == 0))
+		return;
+
+	rcu_read_lock();
+	cpu = ust_get_cpu();
+
+	/* Force volatile access. */
+	STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) + 1);
+
+	/*
+	 * asm volatile and "memory" clobber prevent the compiler from moving
+	 * instructions out of the ltt nesting count. This is required to ensure
+	 * that probe side-effects which can cause recursion (e.g. unforeseen
+	 * traps, divisions by 0, ...) are triggered within the incremented
+	 * nesting count section.
+	 */
+	barrier();
+	eID = mdata->event_id;
+	chan_index = mdata->channel_id;
+
+	/*
+	 * Iterate on each trace, typically small number of active traces,
+	 * list iteration with prefetch is usually slower.
+	 */
+	list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
+		if (unlikely(!trace->active))
+			continue;
+//ust//		if (unlikely(!ltt_run_filter(trace, eID)))
+//ust//			continue;
+#ifdef CONFIG_LTT_DEBUG_EVENT_SIZE
+		rflags = LTT_RFLAG_ID_SIZE;
+#else
+		if (unlikely(eID >= LTT_FREE_EVENTS))
+			rflags = LTT_RFLAG_ID;
+		else
+			rflags = 0;
+#endif
+		/*
+		 * Skip channels added after trace creation.
+		 */
+		if (unlikely(chan_index >= trace->nr_channels))
+			continue;
+		chan = &trace->channels[chan_index];
+		if (!chan->active)
+			continue;
+
+		/* reserve space : header and data */
+		ret = ltt_reserve_slot(chan, trace, data_size, largest_align,
+					cpu, &buf, &slot_size, &buf_offset, &tsc,
+					&rflags);
+		if (unlikely(ret < 0))
+			continue; /* buffer full */
+
+		/* Out-of-order write : header and data */
+		buf_offset = ltt_write_event_header(chan, buf,
+					buf_offset, eID, data_size,
+					tsc, rflags);
+		if (data_size) {
+			buf_offset += ltt_align(buf_offset, largest_align);
+			ust_buffers_write(buf, buf_offset,
+					serialize_private, data_size);
+			buf_offset += data_size;
+		}
+		/* Out-of-order commit */
+		ltt_commit_slot(chan, buf, buf_offset, data_size, slot_size);
+	}
+	/*
+	 * asm volatile and "memory" clobber prevent the compiler from moving
+	 * instructions out of the ltt nesting count. This is required to ensure
+	 * that probe side-effects which can cause recursion (e.g. unforeseen
+	 * traps, divisions by 0, ...) are triggered within the incremented
+	 * nesting count section.
+	 */
+	barrier();
+	STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) - 1);
+	rcu_read_unlock();
+}
diff --git a/tests/hello/tp.h b/tests/hello/tp.h
index f7d07ae..f34f305 100644
--- a/tests/hello/tp.h
+++ b/tests/hello/tp.h
@@ -18,5 +18,5 @@
 #include 
 
 DECLARE_TRACE(hello_tptest,
-	TPPROTO(int anint),
-	TPARGS(anint));
+	TP_PROTO(int anint),
+	TP_ARGS(anint));
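
Usage note (not part of the patch itself): the type-serializer API added above is what makes "custom probes" possible. Instead of going through the generic format-string serializer in libust/serialize.c, a probe fills one of the fixed-layout serialize_* structures and hands it to ltt_specialized_trace(). The sketch below is a minimal illustration under assumptions: the probe name, its arguments, and the way the struct marker handle and probe_data pointer are obtained are invented for the example; in practice they are supplied by the marker infrastructure the probe is attached to.

	#include <ust/core.h>
	#include <ust/type-serializer.h>

	/* Hypothetical custom probe: records a (long, int) payload. */
	static void probe_task_switch(const struct marker *mdata, void *probe_data,
				      unsigned long pid, unsigned int prio)
	{
		struct serialize_long_int payload;

		payload.f1 = pid;	/* unsigned long field */
		payload.f2 = prio;	/* unsigned int field */

		/*
		 * serialize_sizeof() excludes end-of-structure padding; the
		 * largest field is an unsigned long, so pass sizeof(long) as
		 * largest_align (ltt_specialized_trace() clamps it to the
		 * range [1, sizeof(void *)] anyway).
		 */
		ltt_specialized_trace(mdata, probe_data, &payload,
				      serialize_sizeof(payload), sizeof(long));
	}

Inside _ltt_specialized_trace(), the payload is copied into the ring buffer after padding the write offset with ltt_align(buf_offset, largest_align). With CONFIG_LTT and CONFIG_LTT_ALIGNMENT defined, ltt_align(13, 8) returns 3, for example, so an 8-byte field that would start at offset 13 is written at offset 16; without those defines, ltt_align() returns 0 and event payloads are packed.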
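
The tracepoint side of the commit is a rename of the declaration helpers: TPPROTO()/TPARGS() become TP_PROTO()/TP_ARGS(), as the libust-initializer.c and tests/hello/tp.h hunks show. For reference, a declaration written against the renamed macros looks like the sketch below; the tracepoint name, its argument, the <ust/tracepoint.h> header name and the trace_<name>() call convention (inherited from the kernel-style tracepoint macros these headers derive from) are assumptions for illustration, not something this commit adds.

	#include <ust/tracepoint.h>

	DECLARE_TRACE(myapp_event,
		      TP_PROTO(int value),
		      TP_ARGS(value));

	DEFINE_TRACE(myapp_event);

	void myapp_do_work(int value)
	{
		/* Fires any probes currently registered on this tracepoint. */
		trace_myapp_event(value);
	}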