static int relay_alloc_buf(struct rchan_buf *buf, size_t *size)
{
- unsigned int n_pages;
- struct buf_page *buf_page, *n;
+//ust// unsigned int n_pages;
+//ust// struct buf_page *buf_page, *n;
void *ptr;
int result;
struct ltt_channel_struct *channel =
(struct ltt_channel_struct *)buf->chan->private_data;
struct ltt_channel_buf_struct *ltt_buf = channel->buf;
+ int result;
- if(ltt_buf->call_wake_consumer)
- relay_wake_consumer(ACCESS_ONCE(ltt_buf->wake_consumer_arg), 0);
+ result = write(ltt_buf->data_ready_fd_write, "1", 1);
+ if(result == -1) {
+ PERROR("write (in ltt_relay_buffer_flush)");
+ ERR("this should never happen!");
+ }
//ust// atomic_set(<t_buf->wakeup_readers, 1);
}
{
struct ltt_channel_buf_struct *ltt_buf = ltt_chan->buf;
unsigned int j;
+ int fds[2];
+ int result;
- ltt_buf->commit_count =
- zmalloc(sizeof(ltt_buf->commit_count) * n_subbufs);
- if (!ltt_buf->commit_count)
- return -ENOMEM;
+//ust// ltt_buf->commit_count =
+//ust// zmalloc(sizeof(ltt_buf->commit_count) * n_subbufs);
+//ust// if (!ltt_buf->commit_count)
+//ust// return -ENOMEM;
kref_get(&trace->kref);
kref_get(&trace->ltt_transport_kref);
kref_get(<t_chan->kref);
local_set(<t_buf->events_lost, 0);
local_set(<t_buf->corrupted_subbuffers, 0);
- ltt_buf->call_wake_consumer = 0;
- ltt_buf->wake_consumer_arg = NULL;
+ result = pipe(fds);
+ if(result == -1) {
+ PERROR("pipe");
+ return -1;
+ }
+ ltt_buf->data_ready_fd_read = fds[0];
+ ltt_buf->data_ready_fd_write = fds[1];
return 0;
}
kref_put(<t_chan->trace->ltt_transport_kref,
ltt_release_transport);
ltt_relay_print_buffer_errors(ltt_chan);
- kfree(ltt_buf->commit_count);
- ltt_buf->commit_count = NULL;
+//ust// kfree(ltt_buf->commit_count);
+//ust// ltt_buf->commit_count = NULL;
kref_put(<t_chan->kref, ltt_relay_release_channel);
kref_put(&trace->kref, ltt_release_trace);
//ust// wake_up_interruptible(&trace->kref_wq);
}
+/* Allocate the channel's ltt_channel_buf_struct in a SysV shared memory
+ * segment so the consumer process can map it too.
+ *
+ * On success, ltt_chan->buf_shmid and ltt_chan->buf are set and 0 is
+ * returned. On failure, -1 is returned and the shm segment (if created)
+ * is marked for destruction.
+ */
+static int ltt_chan_alloc_ltt_buf(struct ltt_channel_struct *ltt_chan)
+{
+	void *ptr;
+	int result;
+
+	/* Get one page */
+	/* FIXME: increase size if we have a commit_count array that overflows the page */
+	size_t size = PAGE_ALIGN(1);
+
+	ltt_chan->buf_shmid = shmget(getpid(), size, IPC_CREAT | IPC_EXCL | 0700);
+	if(ltt_chan->buf_shmid == -1) {
+		PERROR("shmget");
+		return -1;
+	}
+
+	ptr = shmat(ltt_chan->buf_shmid, NULL, 0);
+	if(ptr == (void *) -1) {
+		PERROR("shmat");
+		goto destroy_shmem;
+	}
+
+	/* Already mark the shared memory for destruction. This will occur only
+	 * when all users have detached.
+	 */
+	result = shmctl(ltt_chan->buf_shmid, IPC_RMID, NULL);
+	if(result == -1) {
+		PERROR("shmctl");
+		/* Detach before bailing out so we do not leak the mapping. */
+		shmdt(ptr);
+		return -1;
+	}
+
+	ltt_chan->buf = ptr;
+
+	return 0;
+
+	destroy_shmem:
+	result = shmctl(ltt_chan->buf_shmid, IPC_RMID, NULL);
+	if(result == -1) {
+		PERROR("shmctl");
+	}
+
+	return -1;
+}
+
/*
* Create channel.
*/
ltt_chan->n_subbufs_order = get_count_order(n_subbufs);
ltt_chan->commit_count_mask = (~0UL >> ltt_chan->n_subbufs_order);
//ust// ltt_chan->buf = percpu_alloc_mask(sizeof(struct ltt_channel_buf_struct), GFP_KERNEL, cpu_possible_map);
- ltt_chan->buf = malloc(sizeof(struct ltt_channel_buf_struct));
+
+ ltt_chan_alloc_ltt_buf(ltt_chan);
+
+//ust// ltt_chan->buf = malloc(sizeof(struct ltt_channel_buf_struct));
if (!ltt_chan->buf)
goto alloc_error;
ltt_chan->trans_channel_data = ltt_relay_open(tmpname,
struct ltt_channel_struct *channel =
(struct ltt_channel_struct *)buf->chan->private_data;
struct ltt_channel_buf_struct *ltt_buf = channel->buf;
+ int result;
buf->finalized = 1;
ltt_force_switch(buf, FORCE_FLUSH);
- relay_wake_consumer(ltt_buf, 1);
+ result = write(ltt_buf->data_ready_fd_write, "1", 1);
+ if(result == -1) {
+ PERROR("write (in ltt_relay_buffer_flush)");
+ ERR("this should never happen!");
+ }
}
static void ltt_relay_async_wakeup_chan(struct ltt_channel_struct *ltt_channel)
static void ltt_relay_finish_buffer(struct ltt_channel_struct *ltt_channel)
{
struct rchan *rchan = ltt_channel->trans_channel_data;
+ int result;
if (rchan->buf) {
struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
ltt_relay_buffer_flush(rchan->buf);
//ust// ltt_relay_wake_writers(ltt_buf);
+ /* closing the pipe tells the consumer the buffer is finished */
+
+ //result = write(ltt_buf->data_ready_fd_write, "D", 1);
+ //if(result == -1) {
+ // PERROR("write (in ltt_relay_finish_buffer)");
+ // ERR("this should never happen!");
+ //}
+ close(ltt_buf->data_ready_fd_write);
}
}
* sub-buffer before this code gets executed, caution. The commit makes sure
* that this code is executed before the deliver of this sub-buffer.
*/
-static inline void ltt_reserve_switch_new_subbuf(
+static /*inline*/ void ltt_reserve_switch_new_subbuf(
struct ltt_channel_struct *ltt_channel,
struct ltt_channel_buf_struct *ltt_buf, struct rchan *rchan,
struct rchan_buf *buf,
* fill the subbuffer completely (so the subbuf index stays in the previous
* subbuffer).
*/
-#ifdef CONFIG_LTT_VMCORE
-static inline void ltt_write_commit_counter(struct rchan_buf *buf,
+//ust// #ifdef CONFIG_LTT_VMCORE
+static /*inline*/ void ltt_write_commit_counter(struct rchan_buf *buf,
long buf_offset, size_t slot_size)
{
struct ltt_channel_struct *ltt_channel =
(struct ltt_channel_struct *)buf->chan->private_data;
- struct ltt_channel_buf_struct *ltt_buf =
- percpu_ptr(ltt_channel->buf, buf->cpu);
+ struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
struct ltt_subbuffer_header *header;
long offset, subbuf_idx, commit_count;
uint32_t lost_old, lost_new;
}
}
}
-#else
-static inline void ltt_write_commit_counter(struct rchan_buf *buf,
- long buf_offset, size_t slot_size)
-{
-}
-#endif
+//ust// #else
+//ust// static inline void ltt_write_commit_counter(struct rchan_buf *buf,
+//ust// long buf_offset, size_t slot_size)
+//ust// {
+//ust// }
+//ust// #endif
/*
* Atomic unordered slot commit. Increments the commit count in the
* ltt buffers from vmcore, after crash.
*/
ltt_write_commit_counter(buf, buf_offset, slot_size);
+
+ DBG("commited slot. now commit count is %ld", commit_count);
}
/*