From dff06a58d6f9b7b6bf9cbebe455b7403096eb78d Mon Sep 17 00:00:00 2001
From: Pierre-Marc Fournier
Date: Thu, 11 Mar 2010 18:51:17 -0500
Subject: [PATCH] remove old unused code

---
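[Reviewer notes below the fold: git am ignores everything between the "---"
marker and the first diff, so none of this text ends up in history.]

Most of what goes away in libust/buffers.c appears to be the old
//ust//-commented copy of the LTTng kernel fast path that had been kept
around for reference during the port. The heart of it, in the deleted
ltt_relay_reserve_slot(), is lock-free space reservation: each writer
computes where its record would end, publishes the new write offset with a
compare-and-swap, and simply retries if another writer moved the offset
first. The fragment below is a minimal self-contained sketch of that
pattern only, written with C11 atomics instead of UST's uatomic_* wrappers;
the names (struct ring, ring_reserve) are invented for illustration, and
sub-buffer switching, headers and alignment are deliberately left out.

	#include <stdatomic.h>
	#include <stddef.h>

	struct ring {
		atomic_ulong offset;	/* next free byte in the buffer */
		size_t size;		/* total buffer size */
	};

	/*
	 * Reserve len bytes; returns the start offset of the reserved
	 * slot, or (size_t)-1 when the record does not fit (where the
	 * real code would switch to the next sub-buffer instead).
	 */
	static size_t ring_reserve(struct ring *r, size_t len)
	{
		unsigned long old, new;

		do {
			old = atomic_load(&r->offset);
			if (old + len > r->size)
				return (size_t)-1;
			new = old + len;
			/* CAS failed: another writer won the race; retry. */
		} while (!atomic_compare_exchange_weak(&r->offset, &old, new));

		return old;	/* bytes [old, old + len) belong to this writer */
	}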
 libust/buffers.c     | 467 ------------------------------------------
 libust/tracectl.c    |  49 -----
 libustcomm/ustcomm.c |  16 --
 3 files changed, 532 deletions(-)
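The deleted ltt_reserve_push_reader() is the other recurring pattern: in
overwrite ("flight recorder") mode, a writer that has wrapped far enough to
catch the reader pushes the consumed count ahead one sub-buffer, again via a
cmpxchg retry loop, so that of many concurrent writers only the one at the
farthest write position wins and the reader is never pushed past it. Another
hedged sketch with invented names (same stdatomic.h include as above),
assuming a power-of-two sub-buffer size:

	/*
	 * Illustration only -- not the UST API.  write_end is the end
	 * offset the writer just reserved; consumed is the reader position.
	 */
	static void push_reader(atomic_ulong *consumed, unsigned long write_end,
				unsigned long buf_size, unsigned long subbuf_size)
	{
		unsigned long old, new;

		do {
			old = atomic_load(consumed);
			if (write_end - old < buf_size)
				return;	/* reader still ahead: nothing to do */
			/* Skip the reader to the next sub-buffer boundary. */
			new = (old + subbuf_size) & ~(subbuf_size - 1);
		} while (!atomic_compare_exchange_weak(consumed, &old, new));
	}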
diff --git a/libust/buffers.c b/libust/buffers.c
index 776823e..88ec5d3 100644
--- a/libust/buffers.c
+++ b/libust/buffers.c
@@ -477,93 +477,6 @@ int ust_buffers_put_subbuf(struct ust_buffer *buf, unsigned long uconsumed_old)
 	return 0;
 }
 
-//ust// static void switch_buffer(unsigned long data)
-//ust// {
-//ust// 	struct ltt_channel_buf_struct *ltt_buf =
-//ust// 		(struct ltt_channel_buf_struct *)data;
-//ust// 	struct rchan_buf *buf = ltt_buf->rbuf;
-//ust//
-//ust// 	if (buf)
-//ust// 		ltt_force_switch(buf, FORCE_ACTIVE);
-//ust//
-//ust// 	ltt_buf->switch_timer.expires += ltt_buf->switch_timer_interval;
-//ust// 	add_timer_on(&ltt_buf->switch_timer, smp_processor_id());
-//ust// }
-//ust//
-//ust// static void start_switch_timer(struct ltt_channel_struct *ltt_channel)
-//ust// {
-//ust// 	struct rchan *rchan = ltt_channel->trans_channel_data;
-//ust// 	int cpu;
-//ust//
-//ust// 	if (!ltt_channel->switch_timer_interval)
-//ust// 		return;
-//ust//
-//ust// 	// TODO : hotplug
-//ust// 	for_each_online_cpu(cpu) {
-//ust// 		struct ltt_channel_buf_struct *ltt_buf;
-//ust// 		struct rchan_buf *buf;
-//ust//
-//ust// 		buf = rchan->buf[cpu];
-//ust// 		ltt_buf = buf->chan_private;
-//ust// 		buf->random_access = 1;
-//ust// 		ltt_buf->switch_timer_interval =
-//ust// 			ltt_channel->switch_timer_interval;
-//ust// 		init_timer(&ltt_buf->switch_timer);
-//ust// 		ltt_buf->switch_timer.function = switch_buffer;
-//ust// 		ltt_buf->switch_timer.expires = jiffies +
-//ust// 			ltt_buf->switch_timer_interval;
-//ust// 		ltt_buf->switch_timer.data = (unsigned long)ltt_buf;
-//ust// 		add_timer_on(&ltt_buf->switch_timer, cpu);
-//ust// 	}
-//ust// }
-//ust//
-//ust// /*
-//ust//  * Cannot use del_timer_sync with add_timer_on, so use an IPI to locally
-//ust//  * delete the timer.
-//ust//  */
-//ust// static void stop_switch_timer_ipi(void *info)
-//ust// {
-//ust// 	struct ltt_channel_buf_struct *ltt_buf =
-//ust// 		(struct ltt_channel_buf_struct *)info;
-//ust//
-//ust// 	del_timer(&ltt_buf->switch_timer);
-//ust// }
-//ust//
-//ust// static void stop_switch_timer(struct ltt_channel_struct *ltt_channel)
-//ust// {
-//ust// 	struct rchan *rchan = ltt_channel->trans_channel_data;
-//ust// 	int cpu;
-//ust//
-//ust// 	if (!ltt_channel->switch_timer_interval)
-//ust// 		return;
-//ust//
-//ust// 	// TODO : hotplug
-//ust// 	for_each_online_cpu(cpu) {
-//ust// 		struct ltt_channel_buf_struct *ltt_buf;
-//ust// 		struct rchan_buf *buf;
-//ust//
-//ust// 		buf = rchan->buf[cpu];
-//ust// 		ltt_buf = buf->chan_private;
-//ust// 		smp_call_function(stop_switch_timer_ipi, ltt_buf, 1);
-//ust// 		buf->random_access = 0;
-//ust// 	}
-//ust// }
-
-//ust// static void ust_buffers_print_written(struct ust_channel *chan,
-//ust// 		long cons_off, unsigned int cpu)
-//ust// {
-//ust// 	struct ust_buffer *buf = chan->buf[cpu];
-//ust// 	long cons_idx, events_count;
-//ust//
-//ust// 	cons_idx = SUBBUF_INDEX(cons_off, chan);
-//ust// 	events_count = uatomic_read(&buf->commit_count[cons_idx].events);
-//ust//
-//ust// 	if (events_count)
-//ust// 		printk(KERN_INFO
-//ust// 			"channel %s: %lu events written (cpu %u, index %lu)\n",
-//ust// 			chan->channel_name, events_count, cpu, cons_idx);
-//ust// }
-
 static void ltt_relay_print_subbuffer_errors(
 		struct ust_channel *channel,
 		long cons_off, int cpu)
@@ -934,386 +847,6 @@ static void ltt_relay_remove_channel(struct ust_channel *channel)
 	kref_put(&channel->kref, ltt_relay_release_channel);
 }
 
-//ust// /*
-//ust//  * Returns :
-//ust//  * 0 if ok
-//ust//  * !0 if execution must be aborted.
-//ust//  */
-//ust// static inline int ltt_relay_try_reserve(
-//ust// 		struct ust_channel *channel, struct ust_buffer *buf,
-//ust// 		struct ltt_reserve_switch_offsets *offsets, size_t data_size,
-//ust// 		u64 *tsc, unsigned int *rflags, int largest_align)
-//ust// {
-//ust// 	offsets->begin = uatomic_read(&buf->offset);
-//ust// 	offsets->old = offsets->begin;
-//ust// 	offsets->begin_switch = 0;
-//ust// 	offsets->end_switch_current = 0;
-//ust// 	offsets->end_switch_old = 0;
-//ust//
-//ust// 	*tsc = trace_clock_read64();
-//ust// 	if (last_tsc_overflow(buf, *tsc))
-//ust// 		*rflags = LTT_RFLAG_ID_SIZE_TSC;
-//ust//
-//ust// 	if (SUBBUF_OFFSET(offsets->begin, buf->chan) == 0) {
-//ust// 		offsets->begin_switch = 1; /* For offsets->begin */
-//ust// 	} else {
-//ust// 		offsets->size = ust_get_header_size(channel,
-//ust// 				offsets->begin, data_size,
-//ust// 				&offsets->before_hdr_pad, *rflags);
-//ust// 		offsets->size += ltt_align(offsets->begin + offsets->size,
-//ust// 				largest_align)
-//ust// 				+ data_size;
-//ust// 		if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
-//ust// 				> buf->chan->subbuf_size) {
-//ust// 			offsets->end_switch_old = 1; /* For offsets->old */
-//ust// 			offsets->begin_switch = 1; /* For offsets->begin */
-//ust// 		}
-//ust// 	}
-//ust// 	if (offsets->begin_switch) {
-//ust// 		long subbuf_index;
-//ust//
-//ust// 		if (offsets->end_switch_old)
-//ust// 			offsets->begin = SUBBUF_ALIGN(offsets->begin,
-//ust// 					buf->chan);
-//ust// 		offsets->begin = offsets->begin + ltt_subbuffer_header_size();
-//ust// 		/* Test new buffer integrity */
-//ust// 		subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
-//ust// 		offsets->reserve_commit_diff =
-//ust// 			(BUFFER_TRUNC(offsets->begin, buf->chan)
-//ust// 			>> channel->n_subbufs_order)
-//ust// 			- (uatomic_read(&buf->commit_count[subbuf_index])
-//ust// 			& channel->commit_count_mask);
-//ust// 		if (offsets->reserve_commit_diff == 0) {
-//ust// 			long consumed;
-//ust//
-//ust// 			consumed = uatomic_read(&buf->consumed);
-//ust//
-//ust// 			/* Next buffer not corrupted. */
-//ust// 			if (!channel->overwrite &&
-//ust// 				(SUBBUF_TRUNC(offsets->begin, buf->chan)
-//ust// 				- SUBBUF_TRUNC(consumed, buf->chan))
-//ust// 				>= channel->alloc_size) {
-//ust//
-//ust// 				long consumed_idx = SUBBUF_INDEX(consumed, buf->chan);
-//ust// 				long commit_count = uatomic_read(&buf->commit_count[consumed_idx]);
-//ust// 				if(((commit_count - buf->chan->subbuf_size) & channel->commit_count_mask) - (BUFFER_TRUNC(consumed, buf->chan) >> channel->n_subbufs_order) != 0) {
-//ust// 					WARN("Event dropped. Caused by non-committed event.");
-//ust// 				}
-//ust// 				else {
-//ust// 					WARN("Event dropped. Caused by non-consumed buffer.");
-//ust// 				}
-//ust// 				/*
-//ust// 				 * We do not overwrite non consumed buffers
-//ust// 				 * and we are full : event is lost.
-//ust// 				 */
-//ust// 				uatomic_inc(&buf->events_lost);
-//ust// 				return -1;
-//ust// 			} else {
-//ust// 				/*
-//ust// 				 * next buffer not corrupted, we are either in
-//ust// 				 * overwrite mode or the buffer is not full.
-//ust// 				 * It's safe to write in this new subbuffer.
-//ust// 				 */
-//ust// 			}
-//ust// 		} else {
-//ust// 			/*
-//ust// 			 * Next subbuffer corrupted. Force pushing reader even
-//ust// 			 * in normal mode. It's safe to write in this new
-//ust// 			 * subbuffer.
-//ust// 			 */
-//ust// 		}
-//ust// 		offsets->size = ust_get_header_size(channel,
-//ust// 				offsets->begin, data_size,
-//ust// 				&offsets->before_hdr_pad, *rflags);
-//ust// 		offsets->size += ltt_align(offsets->begin + offsets->size,
-//ust// 				largest_align)
-//ust// 				+ data_size;
-//ust// 		if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
-//ust// 				> buf->chan->subbuf_size) {
-//ust// 			/*
-//ust// 			 * Event too big for subbuffers, report error, don't
-//ust// 			 * complete the sub-buffer switch.
-//ust// 			 */
-//ust// 			uatomic_inc(&buf->events_lost);
-//ust// 			return -1;
-//ust// 		} else {
-//ust// 			/*
-//ust// 			 * We just made a successful buffer switch and the event
-//ust// 			 * fits in the new subbuffer. Let's write.
-//ust// 			 */
-//ust// 		}
-//ust// 	} else {
-//ust// 		/*
-//ust// 		 * Event fits in the current buffer and we are not on a switch
-//ust// 		 * boundary. It's safe to write.
-//ust// 		 */
-//ust// 	}
-//ust// 	offsets->end = offsets->begin + offsets->size;
-//ust//
-//ust// 	if ((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0) {
-//ust// 		/*
-//ust// 		 * The offset_end will fall at the very beginning of the next
-//ust// 		 * subbuffer.
-//ust// 		 */
-//ust// 		offsets->end_switch_current = 1; /* For offsets->begin */
-//ust// 	}
-//ust// 	return 0;
-//ust// }
-//ust//
-//ust// /*
-//ust//  * Returns :
-//ust//  * 0 if ok
-//ust//  * !0 if execution must be aborted.
-//ust//  */
-//ust// static inline int ltt_relay_try_switch(
-//ust// 		enum force_switch_mode mode,
-//ust// 		struct ust_channel *channel,
-//ust// 		struct ust_buffer *buf,
-//ust// 		struct ltt_reserve_switch_offsets *offsets,
-//ust// 		u64 *tsc)
-//ust// {
-//ust// 	long subbuf_index;
-//ust//
-//ust// 	offsets->begin = uatomic_read(&buf->offset);
-//ust// 	offsets->old = offsets->begin;
-//ust// 	offsets->begin_switch = 0;
-//ust// 	offsets->end_switch_old = 0;
-//ust//
-//ust// 	*tsc = trace_clock_read64();
-//ust//
-//ust// 	if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
-//ust// 		offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
-//ust// 		offsets->end_switch_old = 1;
-//ust// 	} else {
-//ust// 		/* we do not have to switch : buffer is empty */
-//ust// 		return -1;
-//ust// 	}
-//ust// 	if (mode == FORCE_ACTIVE)
-//ust// 		offsets->begin += ltt_subbuffer_header_size();
-//ust// 	/*
-//ust// 	 * Always begin_switch in FORCE_ACTIVE mode.
-//ust// 	 * Test new buffer integrity
-//ust// 	 */
-//ust// 	subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
-//ust// 	offsets->reserve_commit_diff =
-//ust// 		(BUFFER_TRUNC(offsets->begin, buf->chan)
-//ust// 		>> channel->n_subbufs_order)
-//ust// 		- (uatomic_read(&buf->commit_count[subbuf_index])
-//ust// 		& channel->commit_count_mask);
-//ust// 	if (offsets->reserve_commit_diff == 0) {
-//ust// 		/* Next buffer not corrupted. */
-//ust// 		if (mode == FORCE_ACTIVE
-//ust// 			&& !channel->overwrite
-//ust// 			&& offsets->begin - uatomic_read(&buf->consumed)
-//ust// 			>= channel->alloc_size) {
-//ust// 			/*
-//ust// 			 * We do not overwrite non consumed buffers and we are
-//ust// 			 * full : ignore switch while tracing is active.
-//ust// 			 */
-//ust// 			return -1;
-//ust// 		}
-//ust// 	} else {
-//ust// 		/*
-//ust// 		 * Next subbuffer corrupted. Force pushing reader even in normal
-//ust// 		 * mode
-//ust// 		 */
-//ust// 	}
-//ust// 	offsets->end = offsets->begin;
-//ust// 	return 0;
-//ust// }
-//ust//
-//ust// static inline void ltt_reserve_push_reader(
-//ust// 		struct ust_channel *channel,
-//ust// 		struct ust_buffer *buf,
-//ust// 		struct ltt_reserve_switch_offsets *offsets)
-//ust// {
-//ust// 	long consumed_old, consumed_new;
-//ust//
-//ust// 	do {
-//ust// 		consumed_old = uatomic_read(&buf->consumed);
-//ust// 		/*
-//ust// 		 * If buffer is in overwrite mode, push the reader consumed
-//ust// 		 * count if the write position has reached it and we are not
-//ust// 		 * at the first iteration (don't push the reader farther than
-//ust// 		 * the writer). This operation can be done concurrently by many
-//ust// 		 * writers in the same buffer, the writer being at the farthest
-//ust// 		 * write position sub-buffer index in the buffer being the one
-//ust// 		 * which will win this loop.
-//ust// 		 * If the buffer is not in overwrite mode, pushing the reader
-//ust// 		 * only happens if a sub-buffer is corrupted.
-//ust// 		 */
-//ust// 		if ((SUBBUF_TRUNC(offsets->end-1, buf->chan)
-//ust// 			- SUBBUF_TRUNC(consumed_old, buf->chan))
-//ust// 			>= channel->alloc_size)
-//ust// 			consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
-//ust// 		else {
-//ust// 			consumed_new = consumed_old;
-//ust// 			break;
-//ust// 		}
-//ust// 	} while (uatomic_cmpxchg(&buf->consumed, consumed_old,
-//ust// 			consumed_new) != consumed_old);
-//ust//
-//ust// 	if (consumed_old != consumed_new) {
-//ust// 		/*
-//ust// 		 * Reader pushed : we are the winner of the push, we can
-//ust// 		 * therefore reequilibrate reserve and commit. Atomic increment
-//ust// 		 * of the commit count permits other writers to play around
-//ust// 		 * with this variable before us. We keep track of
-//ust// 		 * corrupted_subbuffers even in overwrite mode :
-//ust// 		 * we never want to write over a non completely committed
-//ust// 		 * sub-buffer : possible causes : the buffer size is too low
-//ust// 		 * compared to the unordered data input, or there is a writer
-//ust// 		 * that died between the reserve and the commit.
-//ust// 		 */
-//ust// 		if (offsets->reserve_commit_diff) {
-//ust// 			/*
-//ust// 			 * We have to alter the sub-buffer commit count.
-//ust// 			 * We do not deliver the previous subbuffer, given it
-//ust// 			 * was either corrupted or not consumed (overwrite
-//ust// 			 * mode).
-//ust// 			 */
-//ust// 			uatomic_add(&buf->commit_count[SUBBUF_INDEX(offsets->begin, buf->chan)],
-//ust// 				offsets->reserve_commit_diff);
-//ust// 			if (!channel->overwrite
-//ust// 				|| offsets->reserve_commit_diff
-//ust// 				!= channel->subbuf_size) {
-//ust// 				/*
-//ust// 				 * The reserve commit diff was not subbuf_size :
-//ust// 				 * it means the subbuffer was partly written to
-//ust// 				 * and is therefore corrupted. If it is multiple
-//ust// 				 * of subbuffer size and we are in flight
-//ust// 				 * recorder mode, we are skipping over a whole
-//ust// 				 * subbuffer.
-//ust// 				 */
-//ust// 				uatomic_inc(&buf->corrupted_subbuffers);
-//ust// 			}
-//ust// 		}
-//ust// 	}
-//ust// }
-//ust//
-//ust// /**
-//ust//  * ltt_relay_reserve_slot - Atomic slot reservation in a LTTng buffer.
-//ust//  * @trace: the trace structure to log to.
-//ust//  * @ltt_channel: channel structure
-//ust//  * @transport_data: data structure specific to ltt relay
-//ust//  * @data_size: size of the variable length data to log.
-//ust//  * @slot_size: pointer to total size of the slot (out)
-//ust//  * @buf_offset : pointer to reserved buffer offset (out)
-//ust//  * @tsc: pointer to the tsc at the slot reservation (out)
-//ust//  * @cpu: cpuid
-//ust//  *
-//ust//  * Return : -ENOSPC if not enough space, else returns 0.
-//ust//  * It will take care of sub-buffer switching.
-//ust//  */
-//ust// static notrace int ltt_relay_reserve_slot(struct ust_trace *trace,
-//ust// 		struct ust_channel *channel, void **transport_data,
-//ust// 		size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
-//ust// 		unsigned int *rflags, int largest_align, int cpu)
-//ust// {
-//ust// 	struct ust_buffer *buf = *transport_data = channel->buf[cpu];
-//ust// 	struct ltt_reserve_switch_offsets offsets;
-//ust//
-//ust// 	offsets.reserve_commit_diff = 0;
-//ust// 	offsets.size = 0;
-//ust//
-//ust// 	/*
-//ust// 	 * Perform retryable operations.
-//ust// 	 */
-//ust// 	if (ltt_nesting > 4) {
-//ust// 		uatomic_inc(&buf->events_lost);
-//ust// 		return -EPERM;
-//ust// 	}
-//ust// 	do {
-//ust// 		if (ltt_relay_try_reserve(channel, buf, &offsets, data_size, tsc, rflags,
-//ust// 				largest_align))
-//ust// 			return -ENOSPC;
-//ust// 	} while (uatomic_cmpxchg(&buf->offset, offsets.old,
-//ust// 			offsets.end) != offsets.old);
-//ust//
-//ust// 	/*
-//ust// 	 * Atomically update last_tsc. This update races against concurrent
-//ust// 	 * atomic updates, but the race will always cause supplementary full TSC
-//ust// 	 * events, never the opposite (missing a full TSC event when it would be
-//ust// 	 * needed).
-//ust// 	 */
-//ust// 	save_last_tsc(buf, *tsc);
-//ust//
-//ust// 	/*
-//ust// 	 * Push the reader if necessary
-//ust// 	 */
-//ust// 	ltt_reserve_push_reader(channel, buf, &offsets);
-//ust//
-//ust// 	/*
-//ust// 	 * Switch old subbuffer if needed.
-//ust// 	 */
-//ust// 	if (offsets.end_switch_old)
-//ust// 		ltt_reserve_switch_old_subbuf(channel, buf, &offsets, tsc);
-//ust//
-//ust// 	/*
-//ust// 	 * Populate new subbuffer.
-//ust// 	 */
-//ust// 	if (offsets.begin_switch)
-//ust// 		ltt_reserve_switch_new_subbuf(channel, buf, &offsets, tsc);
-//ust//
-//ust// 	if (offsets.end_switch_current)
-//ust// 		ltt_reserve_end_switch_current(channel, buf, &offsets, tsc);
-//ust//
-//ust// 	*slot_size = offsets.size;
-//ust// 	*buf_offset = offsets.begin + offsets.before_hdr_pad;
-//ust// 	return 0;
-//ust// }
-//ust//
-//ust// /*
-//ust//  * Force a sub-buffer switch for a per-cpu buffer. This operation is
-//ust//  * completely reentrant : can be called while tracing is active with
-//ust//  * absolutely no lock held.
-//ust//  */
-//ust// static notrace void ltt_force_switch(struct ust_buffer *buf,
-//ust// 		enum force_switch_mode mode)
-//ust// {
-//ust// 	struct ust_channel *channel = buf->chan;
-//ust// 	struct ltt_reserve_switch_offsets offsets;
-//ust// 	u64 tsc;
-//ust//
-//ust// 	offsets.reserve_commit_diff = 0;
-//ust// 	offsets.size = 0;
-//ust//
-//ust// 	/*
-//ust// 	 * Perform retryable operations.
-//ust// 	 */
-//ust// 	do {
-//ust// 		if (ltt_relay_try_switch(mode, channel, buf, &offsets, &tsc))
-//ust// 			return;
-//ust// 	} while (uatomic_cmpxchg(&buf->offset, offsets.old,
-//ust// 			offsets.end) != offsets.old);
-//ust//
-//ust// 	/*
-//ust// 	 * Atomically update last_tsc. This update races against concurrent
-//ust// 	 * atomic updates, but the race will always cause supplementary full TSC
-//ust// 	 * events, never the opposite (missing a full TSC event when it would be
-//ust// 	 * needed).
-//ust// 	 */
-//ust// 	save_last_tsc(buf, tsc);
-//ust//
-//ust// 	/*
-//ust// 	 * Push the reader if necessary
-//ust// 	 */
-//ust// 	if (mode == FORCE_ACTIVE)
-//ust// 		ltt_reserve_push_reader(channel, buf, &offsets);
-//ust//
-//ust// 	/*
-//ust// 	 * Switch old subbuffer if needed.
-//ust// 	 */
-//ust// 	if (offsets.end_switch_old)
-//ust// 		ltt_reserve_switch_old_subbuf(channel, buf, &offsets, &tsc);
-//ust//
-//ust// 	/*
-//ust// 	 * Populate new subbuffer.
-//ust// 	 */
-//ust// 	if (mode == FORCE_ACTIVE)
-//ust// 		ltt_reserve_switch_new_subbuf(channel, buf, &offsets, &tsc);
-//ust// }
-
 /*
  * ltt_reserve_switch_old_subbuf: switch old subbuffer
  *
diff --git a/libust/tracectl.c b/libust/tracectl.c
index 2fccb1c..882f81a 100644
--- a/libust/tracectl.c
+++ b/libust/tracectl.c
@@ -1007,55 +1007,6 @@ int process_client_cmd(char *recvbuf, struct ustcomm_source *src)
 		free(reply);
 	}
 
-//	else if(nth_token_is(recvbuf, "get_notifications", 0) == 1) {
-//		struct ust_trace *trace;
-//		char trace_name[] = "auto";
-//		int i;
-//		char *channel_name;
-//
-//		DBG("get_notifications");
-//
-//		channel_name = strdup_malloc(nth_token(recvbuf, 1));
-//		if(channel_name == NULL) {
-//			ERR("put_subbuf_size: cannot parse channel");
-//			goto next_cmd;
-//		}
-//
-//		ltt_lock_traces();
-//		trace = _ltt_trace_find(trace_name);
-//		ltt_unlock_traces();
-//
-//		if(trace == NULL) {
-//			ERR("cannot find trace!");
-//			return (void *)1;
-//		}
-//
-//		for(i=0; i<trace->nr_channels; i++) {
-//			struct rchan *rchan = trace->channels[i].trans_channel_data;
-//			int fd;
-//
-//			if(!strcmp(trace->channels[i].channel_name, channel_name)) {
-//				struct rchan_buf *rbuf = rchan->buf;
-//				struct ltt_channel_buf_struct *lttbuf = trace->channels[i].buf;
-//
-//				result = fd = ustcomm_app_detach_client(&ustcomm_app, src);
-//				if(result == -1) {
-//					ERR("ustcomm_app_detach_client failed");
-//					goto next_cmd;
-//				}
-//
-//				lttbuf->wake_consumer_arg = (void *) fd;
-//
-//				smp_wmb();
-//
-//				lttbuf->call_wake_consumer = 1;
-//
-//				break;
-//			}
-//		}
-//
-//		free(channel_name);
-//	}
 	else {
 		ERR("unable to parse message: %s", recvbuf);
 	}
diff --git a/libustcomm/ustcomm.c b/libustcomm/ustcomm.c
index c764138..5dfd2a8 100644
--- a/libustcomm/ustcomm.c
+++ b/libustcomm/ustcomm.c
@@ -37,22 +37,6 @@
 
 #define UNIX_PATH_MAX 108
 
-#define MSG_MAX 10000
-
-/* FIXME: ustcomm blocks on message sending, which might be problematic in
- * some cases. Fix the poll() usage so sends are buffered until they don't
- * block.
- */
-
-//static void bt(void)
-//{
-//	void *buffer[100];
-//	int result;
-//
-//	result = backtrace(&buffer, 100);
-//	backtrace_symbols_fd(buffer, result, STDERR_FILENO);
-//}
-
 static int mkdir_p(const char *path, mode_t mode)
 {
 	const char *path_p;
-- 
2.34.1