projects
/
lttng-ust.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Ring buffer: use shmp (shared-memory pointers) for per-channel shm structures
[lttng-ust.git]
/
libringbuffer
/
frontend_internal.h
diff --git
a/libringbuffer/frontend_internal.h
b/libringbuffer/frontend_internal.h
index f758a6842257ece1dfaea0bb7a7e93e5d802b7ad..6a1d3a6cb523fd36cf9e69c7d28fb1c4852872e9 100644
(file)
--- a/
libringbuffer/frontend_internal.h
+++ b/
libringbuffer/frontend_internal.h
@@
-21,6
+21,7
@@
#include "config.h"
#include "backend_types.h"
#include "frontend_types.h"
#include "config.h"
#include "backend_types.h"
#include "frontend_types.h"
+#include "shm.h"
/* Buffer offset macros */
/* Buffer offset macros */
@@
-182,7
+183,7
@@
void lib_ring_buffer_vmcore_check_deliver(const struct lib_ring_buffer_config *config,
unsigned long idx)
{
if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
unsigned long idx)
{
if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
- v_set(config, &
buf->commit_hot
[idx].seq, commit_count);
+ v_set(config, &
shmp(buf->commit_hot)
[idx].seq, commit_count);
}
static inline
}
static inline
@@
-194,7
+195,7
@@
int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
consumed_old = uatomic_read(&buf->consumed);
consumed_idx = subbuf_index(consumed_old, chan);
consumed_old = uatomic_read(&buf->consumed);
consumed_idx = subbuf_index(consumed_old, chan);
- commit_count = v_read(config, &
buf->commit_cold
[consumed_idx].cc_sb);
+ commit_count = v_read(config, &
shmp(buf->commit_cold)
[consumed_idx].cc_sb);
/*
* No memory barrier here, since we are only interested
* in a statistically correct polling result. The next poll will
/*
* No memory barrier here, since we are only interested
* in a statistically correct polling result. The next poll will
@@
-269,7
+270,7
@@
int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *config,
do {
offset = v_read(config, &buf->offset);
idx = subbuf_index(offset, chan);
do {
offset = v_read(config, &buf->offset);
idx = subbuf_index(offset, chan);
- commit_count = v_read(config, &
buf->commit_hot
[idx].cc);
+ commit_count = v_read(config, &
shmp(buf->commit_hot)
[idx].cc);
} while (offset != v_read(config, &buf->offset));
return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
} while (offset != v_read(config, &buf->offset));
return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
@@
-317,7
+318,7
@@
void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
* The subbuffer size is at least 2 bytes (minimum size: 1 page).
* This guarantees that old_commit_count + 1 != commit_count.
*/
* The subbuffer size is at least 2 bytes (minimum size: 1 page).
* This guarantees that old_commit_count + 1 != commit_count.
*/
- if (likely(v_cmpxchg(config, &
buf->commit_cold
[idx].cc_sb,
+ if (likely(v_cmpxchg(config, &
shmp(buf->commit_cold)
[idx].cc_sb,
old_commit_count, old_commit_count + 1)
== old_commit_count)) {
/*
old_commit_count, old_commit_count + 1)
== old_commit_count)) {
/*
@@
-357,7
+358,7
@@
void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
*/
cmm_smp_mb();
/* End of exclusive subbuffer access */
*/
cmm_smp_mb();
/* End of exclusive subbuffer access */
- v_set(config, &
buf->commit_cold
[idx].cc_sb,
+ v_set(config, &
shmp(buf->commit_cold)
[idx].cc_sb,
commit_count);
lib_ring_buffer_vmcore_check_deliver(config, buf,
commit_count, idx);
commit_count);
lib_ring_buffer_vmcore_check_deliver(config, buf,
commit_count, idx);
@@
-409,14
+410,15
@@
void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *config,
if (unlikely(subbuf_offset(offset - commit_count, chan)))
return;
if (unlikely(subbuf_offset(offset - commit_count, chan)))
return;
- commit_seq_old = v_read(config, &
buf->commit_hot
[idx].seq);
+ commit_seq_old = v_read(config, &
shmp(buf->commit_hot)
[idx].seq);
while ((long) (commit_seq_old - commit_count) < 0)
while ((long) (commit_seq_old - commit_count) < 0)
- commit_seq_old = v_cmpxchg(config, &
buf->commit_hot
[idx].seq,
+ commit_seq_old = v_cmpxchg(config, &
shmp(buf->commit_hot)
[idx].seq,
commit_seq_old, commit_count);
}
extern int lib_ring_buffer_create(struct lib_ring_buffer *buf,
commit_seq_old, commit_count);
}
extern int lib_ring_buffer_create(struct lib_ring_buffer *buf,
- struct channel_backend *chanb, int cpu);
+ struct channel_backend *chanb, int cpu,
+ struct shm_header *shm_header);
extern void lib_ring_buffer_free(struct lib_ring_buffer *buf);
/* Keep track of trap nesting inside ring buffer code */
extern void lib_ring_buffer_free(struct lib_ring_buffer *buf);
/* Keep track of trap nesting inside ring buffer code */
This page took
0.02481 seconds
and
4
git commands to generate.