static int relay_alloc_buf(struct rchan_buf *buf, size_t *size)
{
- unsigned int n_pages;
- struct buf_page *buf_page, *n;
+//ust// unsigned int n_pages;
+//ust// struct buf_page *buf_page, *n;
void *ptr;
int result;
int fds[2];
- ltt_buf->commit_count =
- zmalloc(sizeof(ltt_buf->commit_count) * n_subbufs);
- if (!ltt_buf->commit_count)
- return -ENOMEM;
+//ust// ltt_buf->commit_count =
+//ust// zmalloc(sizeof(ltt_buf->commit_count) * n_subbufs);
+//ust// if (!ltt_buf->commit_count)
+//ust// return -ENOMEM;
kref_get(&trace->kref);
kref_get(&trace->ltt_transport_kref);
kref_get(&ltt_chan->kref);
kref_put(&ltt_chan->trace->ltt_transport_kref,
ltt_release_transport);
ltt_relay_print_buffer_errors(ltt_chan);
- kfree(ltt_buf->commit_count);
- ltt_buf->commit_count = NULL;
+//ust// kfree(ltt_buf->commit_count);
+//ust// ltt_buf->commit_count = NULL;
kref_put(&ltt_chan->kref, ltt_relay_release_channel);
kref_put(&trace->kref, ltt_release_trace);
//ust// wake_up_interruptible(&trace->kref_wq);
}
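
For reference, the kref_get()/kref_put() pairs above follow the kernel's usual reference-counting pattern: an atomic count, incremented per holder, with a release callback invoked when the last reference is dropped. A minimal userspace sketch of the idea (illustrative names, not the ust port's actual API):

	#include <stdatomic.h>

	struct kref {
		atomic_int refcount;
	};

	static void kref_init_sketch(struct kref *ref)
	{
		atomic_init(&ref->refcount, 1); /* creator holds the first reference */
	}

	static void kref_get_sketch(struct kref *ref)
	{
		atomic_fetch_add(&ref->refcount, 1);
	}

	static void kref_put_sketch(struct kref *ref, void (*release)(struct kref *))
	{
		/* fetch_sub returns the old value: 1 means we dropped the last ref */
		if (atomic_fetch_sub(&ref->refcount, 1) == 1)
			release(ref);
	}
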
+static int ltt_chan_alloc_ltt_buf(struct ltt_channel_struct *ltt_chan)
+{
+ void *ptr;
+ int result;
+
+ /* Get one page */
+ /* FIXME: increase size if we have a commit_count array that overflows the page */
+ size_t size = PAGE_ALIGN(1);
+
+ /* Make sure the caller's !ltt_chan->buf check catches failures */
+ ltt_chan->buf = NULL;
+
+ ltt_chan->buf_shmid = shmget(getpid(), size, IPC_CREAT | IPC_EXCL | 0700);
+ if(ltt_chan->buf_shmid == -1) {
+ PERROR("shmget");
+ return -1;
+ }
+
+ ptr = shmat(ltt_chan->buf_shmid, NULL, 0);
+ if(ptr == (void *) -1) {
+ perror("shmat");
+ goto destroy_shmem;
+ }
+
+ /* Already mark the shared memory for destruction. This will occur only
+ * when all users have detached.
+ */
+ result = shmctl(ltt_chan->buf_shmid, IPC_RMID, NULL);
+ if(result == -1) {
+ PERROR("shmctl");
+ shmdt(ptr);
+ return -1;
+ }
+
+ ltt_chan->buf = ptr;
+
+ return 0;
+
+ destroy_shmem:
+ result = shmctl(ltt_chan->buf_shmid, IPC_RMID, NULL);
+ if(result == -1) {
+ perror("shmctl");
+ }
+
+ return -1;
+}
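
The IPC_RMID-right-after-attach trick used above deserves a note: marking a System V segment for destruction does not free it immediately; the kernel only reclaims it once every process has detached, so the mapping stays usable while the id disappears from the namespace. A standalone illustration of the same lifecycle (hypothetical example, not part of the patch):

	#include <stdio.h>
	#include <sys/ipc.h>
	#include <sys/shm.h>

	int main(void)
	{
		void *ptr;
		int shmid = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0700);
		if (shmid == -1) {
			perror("shmget");
			return 1;
		}

		ptr = shmat(shmid, NULL, 0);
		if (ptr == (void *) -1) {
			perror("shmat");
			shmctl(shmid, IPC_RMID, NULL);
			return 1;
		}

		/* Mark for destruction now; the segment survives until the
		 * last detach, but no other process can look it up anymore. */
		if (shmctl(shmid, IPC_RMID, NULL) == -1)
			perror("shmctl");

		/* ... use ptr ... */

		shmdt(ptr); /* last detach actually frees the segment */
		return 0;
	}
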
+
/*
* Create channel.
*/
ltt_chan->n_subbufs_order = get_count_order(n_subbufs);
ltt_chan->commit_count_mask = (~0UL >> ltt_chan->n_subbufs_order);
//ust// ltt_chan->buf = percpu_alloc_mask(sizeof(struct ltt_channel_buf_struct), GFP_KERNEL, cpu_possible_map);
- ltt_chan->buf = malloc(sizeof(struct ltt_channel_buf_struct));
+
+ ltt_chan_alloc_ltt_buf(ltt_chan);
+
+//ust// ltt_chan->buf = malloc(sizeof(struct ltt_channel_buf_struct));
if (!ltt_chan->buf)
goto alloc_error;
ltt_chan->trans_channel_data = ltt_relay_open(tmpname,
* sub-buffer before this code gets executed, caution. The commit makes sure
* that this code is executed before the deliver of this sub-buffer.
*/
-static inline void ltt_reserve_switch_new_subbuf(
+static /*inline*/ void ltt_reserve_switch_new_subbuf(
struct ltt_channel_struct *ltt_channel,
struct ltt_channel_buf_struct *ltt_buf, struct rchan *rchan,
struct rchan_buf *buf,
* fill the subbuffer completely (so the subbuf index stays in the previous
* subbuffer).
*/
-#ifdef CONFIG_LTT_VMCORE
-static inline void ltt_write_commit_counter(struct rchan_buf *buf,
+//ust// #ifdef CONFIG_LTT_VMCORE
+static /*inline*/ void ltt_write_commit_counter(struct rchan_buf *buf,
long buf_offset, size_t slot_size)
{
struct ltt_channel_struct *ltt_channel =
(struct ltt_channel_struct *)buf->chan->private_data;
- struct ltt_channel_buf_struct *ltt_buf =
- percpu_ptr(ltt_channel->buf, buf->cpu);
+ struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
struct ltt_subbuffer_header *header;
long offset, subbuf_idx, commit_count;
uint32_t lost_old, lost_new;
}
}
}
-#else
-static inline void ltt_write_commit_counter(struct rchan_buf *buf,
- long buf_offset, size_t slot_size)
-{
-}
-#endif
+//ust// #else
+//ust// static inline void ltt_write_commit_counter(struct rchan_buf *buf,
+//ust// long buf_offset, size_t slot_size)
+//ust// {
+//ust// }
+//ust// #endif
/*
* Atomic unordered slot commit. Increments the commit count in the
* ltt buffers from vmcore, after crash.
*/
ltt_write_commit_counter(buf, buf_offset, slot_size);
+
+ DBG("commited slot. now commit count is %ld", commit_count);
}
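
For intuition, the commit count that the DBG line reports can be modeled as a per-sub-buffer byte counter: every committed slot adds its size, and a sub-buffer becomes deliverable once all bytes reserved in it have been committed. A deliberately simplified sketch of that bookkeeping (hypothetical names and sizes, assuming slots never span sub-buffer boundaries):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stddef.h>

	#define N_SUBBUFS 4
	#define SUBBUF_SIZE 4096

	static atomic_long commit_count[N_SUBBUFS];

	/* Commit a slot of slot_size bytes written at buf_offset; returns
	 * true when this commit fills its sub-buffer. */
	static bool commit_slot(long buf_offset, size_t slot_size)
	{
		long subbuf_idx = (buf_offset / SUBBUF_SIZE) % N_SUBBUFS;
		long count = atomic_fetch_add(&commit_count[subbuf_idx],
			(long)slot_size) + (long)slot_size;
		/* Counts accumulate across buffer laps, so a freshly filled
		 * sub-buffer shows up as a multiple of SUBBUF_SIZE. */
		return (count % SUBBUF_SIZE) == 0;
	}
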
/*