/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring buffer backend (internal helpers).
 */

#ifndef _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H

#include <errno.h>	/* EPERM, EAGAIN */
#include <stddef.h>
#include <stdint.h>
#include <string.h>	/* memcpy */
#include <unistd.h>
#include <urcu/compiler.h>

#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"

/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */

int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
			struct channel_backend *chan, int cpu,
			struct lttng_ust_shm_handle *handle,
			struct shm_object *shmobj);
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb);
int channel_backend_init(struct channel_backend *chanb,
			const char *name,
			const struct lttng_ust_lib_ring_buffer_config *config,
			size_t subbuf_size,
			size_t num_subbuf, struct lttng_ust_shm_handle *handle,
			const int *stream_fds);
void channel_backend_free(struct channel_backend *chanb,
			struct lttng_ust_shm_handle *handle);

void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
			struct lttng_ust_shm_handle *handle);
void channel_backend_reset(struct channel_backend *chanb);

int lib_ring_buffer_backend_init(void);
void lib_ring_buffer_backend_exit(void);

extern void _lib_ring_buffer_write(struct lttng_ust_lib_ring_buffer_backend *bufb,
			size_t offset, const void *src, size_t len,
			ssize_t pagecpy);

/*
 * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
 * exchanged atomically.
 *
 * Top half word, except lowest bit, belongs to "offset", which is used to keep
 * count of the produced buffers. For overwrite mode, this provides the
 * consumer with the capacity to read subbuffers in order, handling the
 * situation where producers would write up to 2^15 buffers (or 2^31 for 64-bit
 * systems) concurrently with a single execution of get_subbuf (between offset
 * sampling and subbuffer ID exchange).
 */

#define HALF_ULONG_BITS		(CAA_BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT	(HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT	(1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK	(~(SB_ID_OFFSET_COUNT - 1))
/*
 * Lowest bit of top word half belongs to noref. Used only for overwrite mode.
 */
#define SB_ID_NOREF_SHIFT	(SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK	SB_ID_NOREF_COUNT
/*
 * In overwrite mode: lowest half of word is used for index.
 * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode: whole word used for index.
 */
#define SB_ID_INDEX_SHIFT	0
#define SB_ID_INDEX_COUNT	(1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK	(SB_ID_NOREF_COUNT - 1)
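
/*
 * Illustration (informational, derived from the definitions above): on a
 * 32-bit system, HALF_ULONG_BITS = 16, SB_ID_OFFSET_SHIFT = 17 and
 * SB_ID_NOREF_SHIFT = 16, so an overwrite-mode subbuffer ID is laid out as:
 *
 *   bits 31..17	offset (15 bits, counts produced buffers)
 *   bit  16		noref flag
 *   bits 15..0		index (up to 2^16 subbuffers)
 */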

/*
 * Construct the subbuffer id from offset, index and noref. Use only the index
 * for producer-consumer mode (offset and noref are only used in overwrite
 * mode).
 */
static inline
unsigned long subbuffer_id(const struct lttng_ust_lib_ring_buffer_config *config,
			unsigned long offset, unsigned long noref,
			unsigned long index)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (offset << SB_ID_OFFSET_SHIFT)
		       | (noref << SB_ID_NOREF_SHIFT)
		       | index;
	else
		return index;
}
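
/*
 * Worked example (illustrative, 32-bit overwrite mode):
 *
 *   subbuffer_id(config, 5, 1, 3)
 *     == (5 << 17) | (1 << 16) | 3 == 0xb0003
 *
 * whereas in producer-consumer mode the same call simply returns 3.
 */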

/*
 * Compare offset with the offset contained within id. Return 1 if the offset
 * bits are identical, else 0.
 */
static inline
int subbuffer_id_compare_offset(const struct lttng_ust_lib_ring_buffer_config *config,
			unsigned long id, unsigned long offset)
{
	return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}

static inline
unsigned long subbuffer_id_get_index(const struct lttng_ust_lib_ring_buffer_config *config,
			unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return id & SB_ID_INDEX_MASK;
	else
		return id;
}

static inline
unsigned long subbuffer_id_is_noref(const struct lttng_ust_lib_ring_buffer_config *config,
			unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return !!(id & SB_ID_NOREF_MASK);
	else
		return 1;
}

/*
 * Only used by reader on a subbuffer ID it has exclusive access to. No
 * volatile access needed.
 */
static inline
void subbuffer_id_set_noref(const struct lttng_ust_lib_ring_buffer_config *config,
			unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id |= SB_ID_NOREF_MASK;
}

static inline
void subbuffer_id_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
			unsigned long *id, unsigned long offset)
{
	unsigned long tmp;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		tmp = *id;
		tmp &= ~SB_ID_OFFSET_MASK;
		tmp |= offset << SB_ID_OFFSET_SHIFT;
		tmp |= SB_ID_NOREF_MASK;
		/* Volatile store, read concurrently by readers. */
		CMM_ACCESS_ONCE(*id) = tmp;
	}
}

/* No volatile access, since already used locally */
static inline
void subbuffer_id_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
			unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id &= ~SB_ID_NOREF_MASK;
}

/*
 * For overwrite mode, cap the number of subbuffers per buffer to:
 *   2^16 on 32-bit architectures
 *   2^32 on 64-bit architectures
 * This is required to fit in the index part of the ID. Return 0 on success,
 * -EPERM on failure.
 */
static inline
int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *config,
			unsigned long num_subbuf)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
	else
		return 0;
}
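
/*
 * Example (illustrative): on a 32-bit system in overwrite mode,
 * subbuffer_id_check_index() accepts num_subbuf = 65536 (2^16) but
 * rejects 65537 with -EPERM, since a larger index could no longer be
 * encoded in the low half word of the subbuffer ID.
 */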

static inline
int lib_ring_buffer_backend_get_pages(const struct lttng_ust_lib_ring_buffer_config *config,
			struct lttng_ust_lib_ring_buffer_ctx *ctx,
			struct lttng_ust_lib_ring_buffer_backend_pages **backend_pages)
{
	struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	size_t sbidx;
	size_t offset = ctx->buf_offset;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	unsigned long sb_bindex, id;
	struct lttng_ust_lib_ring_buffer_backend_pages *_backend_pages;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	wsb = shmp_index(handle, bufb->buf_wsb, sbidx);
	if (caa_unlikely(!wsb))
		return -1;
	id = wsb->id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (caa_unlikely(!rpages))
		return -1;
	CHAN_WARN_ON(ctx->chan,
		     config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	_backend_pages = shmp(handle, rpages->shmp);
	if (caa_unlikely(!_backend_pages))
		return -1;
	*backend_pages = _backend_pages;
	return 0;
}

/* Get backend pages from cache. */
static inline
struct lttng_ust_lib_ring_buffer_backend_pages *
lib_ring_buffer_get_backend_pages_from_ctx(const struct lttng_ust_lib_ring_buffer_config *config,
			struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	if (caa_unlikely(ctx->ctx_len
			< sizeof(struct lttng_ust_lib_ring_buffer_ctx)))
		return NULL;
	return ctx->backend_pages;
}
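
/*
 * Illustrative summary (not additional API) of the indirection chain
 * resolved by lib_ring_buffer_backend_get_pages() above:
 *
 *   bufb->buf_wsb[sbidx].id                 writer-side subbuffer ID
 *     -> subbuffer_id_get_index(config, id) strip offset/noref bits
 *     -> bufb->array[sb_bindex].shmp        backend pages descriptor
 *
 * Each shmp()/shmp_index() translation of a shared-memory reference is
 * checked for NULL, so a corrupted shared-memory handle fails the lookup
 * instead of crashing the traced application.
 */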
/*
 * The ring buffer can count events recorded and overwritten per buffer,
 * but it is disabled by default due to its performance overhead.
 */
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
			const struct lttng_ust_lib_ring_buffer_ctx *ctx,
			struct lttng_ust_lib_ring_buffer_backend *bufb,
			unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	if (caa_unlikely(!backend_pages)) {
		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
			return;
	}
	v_inc(config, &backend_pages->records_commit);
}
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
			const struct lttng_ust_lib_ring_buffer_ctx *ctx,
			struct lttng_ust_lib_ring_buffer_backend *bufb,
			unsigned long idx, struct lttng_ust_shm_handle *handle)
{
}
#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
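
/*
 * Note (informational): the counting variant above is compiled in only
 * when LTTNG_RING_BUFFER_COUNT_EVENTS is defined at build time, e.g. by
 * adding -DLTTNG_RING_BUFFER_COUNT_EVENTS to CFLAGS; otherwise the empty
 * stub is used and no records_commit accounting is performed here.
 */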

/*
 * Reader has exclusive subbuffer access for record consumption. No need to
 * perform the decrement atomically.
 */
static inline
void subbuffer_consume_record(const struct lttng_ust_lib_ring_buffer_config *config,
			struct lttng_ust_lib_ring_buffer_backend *bufb,
			struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct channel *chan;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
	if (!pages_shmp)
		return;
	backend_pages = shmp(handle, pages_shmp->shmp);
	if (!backend_pages)
		return;
	CHAN_WARN_ON(chan, !v_read(config, &backend_pages->records_unread));
	/* Non-atomic decrement protected by exclusive subbuffer access */
	_v_dec(config, &backend_pages->records_unread);
	v_inc(config, &bufb->records_read);
}

static inline
unsigned long subbuffer_get_records_count(
			const struct lttng_ust_lib_ring_buffer_config *config,
			struct lttng_ust_lib_ring_buffer_backend *bufb,
			unsigned long idx,
			struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	return v_read(config, &backend_pages->records_commit);
}

/*
 * Must be executed at subbuffer delivery when the writer has _exclusive_
 * subbuffer access. See lib_ring_buffer_check_deliver() for details.
 * subbuffer_get_records_count() must be called to get the records count
 * before this function, because this function resets the records_commit
 * count.
 */
static inline
unsigned long subbuffer_count_records_overrun(
			const struct lttng_ust_lib_ring_buffer_config *config,
			struct lttng_ust_lib_ring_buffer_backend *bufb,
			unsigned long idx,
			struct lttng_ust_shm_handle *handle)
{
	unsigned long overruns, sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	overruns = v_read(config, &backend_pages->records_unread);
	v_set(config, &backend_pages->records_unread,
	      v_read(config, &backend_pages->records_commit));
	v_set(config, &backend_pages->records_commit, 0);

	return overruns;
}
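
/*
 * Hypothetical delivery-time sketch (caller code, not part of this
 * header), showing the required ordering: read the count first, since
 * subbuffer_count_records_overrun() resets records_commit.
 *
 *   records = subbuffer_get_records_count(config, bufb, idx, handle);
 *   overruns = subbuffer_count_records_overrun(config, bufb, idx, handle);
 */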

static inline
void subbuffer_set_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
			struct lttng_ust_lib_ring_buffer_backend *bufb,
			unsigned long idx,
			unsigned long data_size,
			struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return;
	backend_pages->data_size = data_size;
}

static inline
unsigned long subbuffer_get_read_data_size(
			const struct lttng_ust_lib_ring_buffer_config *config,
			struct lttng_ust_lib_ring_buffer_backend *bufb,
			struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
	if (!pages_shmp)
		return 0;
	backend_pages = shmp(handle, pages_shmp->shmp);
	if (!backend_pages)
		return 0;
	return backend_pages->data_size;
}

static inline
unsigned long subbuffer_get_data_size(
			const struct lttng_ust_lib_ring_buffer_config *config,
			struct lttng_ust_lib_ring_buffer_backend *bufb,
			unsigned long idx,
			struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	return backend_pages->data_size;
}

static inline
void subbuffer_inc_packet_count(const struct lttng_ust_lib_ring_buffer_config *config,
			struct lttng_ust_lib_ring_buffer_backend *bufb,
			unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_counts *counts;

	counts = shmp_index(handle, bufb->buf_cnt, idx);
	if (!counts)
		return;
	counts->seq_cnt++;
}

/**
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 *                               writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
			struct lttng_ust_lib_ring_buffer_backend *bufb,
			unsigned long idx,
			struct lttng_ust_shm_handle *handle)
{
	unsigned long id, new_id;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Perform a volatile access to read the subbuffer id, because we want
	 * to read a coherent version of the id and the associated noref flag.
	 */
	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	id = CMM_ACCESS_ONCE(wsb->id);
	for (;;) {
		/* This check is performed on the fast path for each record. */
		if (caa_likely(!subbuffer_id_is_noref(config, id))) {
			/*
			 * The store-after-load ordering of the writes to the
			 * subbuffer after the load and test of the noref flag
			 * matches the memory barrier implied by the cmpxchg()
			 * in update_read_sb_index().
			 */
			return;	/* Already writing to this buffer */
		}
		new_id = id;
		subbuffer_id_clear_noref(config, &new_id);
		new_id = uatomic_cmpxchg(&wsb->id, id, new_id);
		if (caa_likely(new_id == id))
			break;
		id = new_id;
	}
}

/**
 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
 *                                    called by writer.
 */
static inline
void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
			struct lttng_ust_lib_ring_buffer_backend *bufb,
			unsigned long idx, unsigned long offset,
			struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct channel *chan;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	/*
	 * Because ring_buffer_set_noref() is only called by a single thread
	 * (the one which updated the cc_sb value), there are no concurrent
	 * updates to take care of: other writers have not updated cc_sb, so
	 * they cannot set the noref flag, and concurrent readers cannot modify
	 * the pointer because the noref flag is not set yet.
	 * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
	 * to the subbuffer before this set noref operation.
	 * subbuffer_set_noref() uses a volatile store to deal with concurrent
	 * readers of the noref flag.
	 */
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	CHAN_WARN_ON(chan, subbuffer_id_is_noref(config, wsb->id));
	/*
	 * Memory barrier that ensures counter stores are ordered before set
	 * noref and offset.
	 */
	cmm_smp_mb();
	subbuffer_id_set_noref_offset(config, &wsb->id, offset);
}

/**
 * update_read_sb_index - Read-side subbuffer index update.
 */
static inline
int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
			struct lttng_ust_lib_ring_buffer_backend *bufb,
			struct channel_backend *chanb,
			unsigned long consumed_idx,
			unsigned long consumed_count,
			struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	unsigned long old_id, new_id;

	wsb = shmp_index(handle, bufb->buf_wsb, consumed_idx);
	if (caa_unlikely(!wsb))
		return -EPERM;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		struct channel *chan;

		/*
		 * Exchange the target writer subbuffer with our own unused
		 * subbuffer. No need to use CMM_ACCESS_ONCE() here to read the
		 * old_id, because the value read will be confirmed by the
		 * following cmpxchg().
		 */
		old_id = wsb->id;
		if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
			return -EAGAIN;
		/*
		 * Make sure the offset count we are expecting matches the one
		 * indicated by the writer.
		 */
		if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
							      consumed_count)))
			return -EAGAIN;
		chan = shmp(handle, bufb->chan);
		if (caa_unlikely(!chan))
			return -EPERM;
		CHAN_WARN_ON(chan, !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
		subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
					      consumed_count);
		new_id = uatomic_cmpxchg(&wsb->id, old_id, bufb->buf_rsb.id);
		if (caa_unlikely(old_id != new_id))
			return -EAGAIN;
		bufb->buf_rsb.id = new_id;
	} else {
		/* No page exchange, use the writer page directly */
		bufb->buf_rsb.id = wsb->id;
	}
	return 0;
}
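
/*
 * Illustrative consumer-side sketch (hypothetical caller, not part of
 * this header): -EAGAIN means the writer still references the target
 * subbuffer or has moved on to another offset, so callers typically
 * propagate it and retry with a re-sampled consumed count:
 *
 *   ret = update_read_sb_index(config, bufb, chanb, consumed_idx,
 *                              consumed_count, handle);
 *   if (ret == -EAGAIN)
 *           return ret;    (re-sample the consumed count, then retry)
 */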

#ifndef inline_memcpy
#define inline_memcpy(dest, src, n)	memcpy(dest, src, n)
#endif

static inline __attribute__((always_inline))
void lttng_inline_memcpy(void *dest, const void *src,
		unsigned long len)
{
	switch (len) {
	case 1:
		*(uint8_t *) dest = *(const uint8_t *) src;
		break;
	case 2:
		*(uint16_t *) dest = *(const uint16_t *) src;
		break;
	case 4:
		*(uint32_t *) dest = *(const uint32_t *) src;
		break;
	case 8:
		*(uint64_t *) dest = *(const uint64_t *) src;
		break;
	default:
		inline_memcpy(dest, src, len);
	}
}

/*
 * Use the architecture-specific memcpy implementation for constant-sized
 * inputs, but rely on an inline memcpy when the length is not statically
 * known. The function call to memcpy is just way too expensive for a fast
 * path.
 */
#define lib_ring_buffer_do_copy(config, dest, src, len)		\
do {								\
	size_t __len = (len);					\
	if (__builtin_constant_p(len))				\
		memcpy(dest, src, __len);			\
	else							\
		lttng_inline_memcpy(dest, src, __len);		\
} while (0)
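
/*
 * Usage sketch (illustrative; 'reclen' is a hypothetical runtime length):
 *
 *   lib_ring_buffer_do_copy(config, dest, src, sizeof(uint32_t));
 *           constant length: compiles down to the architecture memcpy
 *   lib_ring_buffer_do_copy(config, dest, src, reclen);
 *           runtime length: dispatched through lttng_inline_memcpy()
 */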

/*
 * Write 'len' bytes of value 'c' to 'dest'.
 */
static inline
void lib_ring_buffer_do_memset(char *dest, int c, unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len; i++)
		dest[i] = c;
}

/* Arch-agnostic implementation of fls (find last set bit). */

static inline int lttng_ust_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		/* No need to bit shift on last operation */
		r -= 1;
	}
	return r;
}

static inline int get_count_order(unsigned int count)
{
	int order;

	order = lttng_ust_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
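
/*
 * Worked examples (illustrative): lttng_ust_fls() returns the 1-based
 * position of the most significant set bit (0 for an input of 0), and
 * get_count_order() the ceiling of log2:
 *
 *   lttng_ust_fls(1) == 1, lttng_ust_fls(0x10) == 5
 *   get_count_order(8) == 3	(8 is already a power of two)
 *   get_count_order(5) == 3	(rounded up to 2^3 = 8)
 */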

#endif /* _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H */