Cleanup: apply `include-what-you-use` guideline for `size_t`
lttng-ust.git / libringbuffer/backend_internal.h

#ifndef _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H

/*
 * libringbuffer/backend_internal.h
 *
 * Ring buffer backend (internal helpers).
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stddef.h>
#include <unistd.h>
#include <urcu/compiler.h>

#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"

/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */

int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct channel_backend *chan, int cpu,
				   struct lttng_ust_shm_handle *handle,
				   struct shm_object *shmobj);
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb);
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lttng_ust_lib_ring_buffer_config *config,
			 size_t subbuf_size,
			 size_t num_subbuf, struct lttng_ust_shm_handle *handle,
			 const int *stream_fds);
void channel_backend_free(struct channel_backend *chanb,
			  struct lttng_ust_shm_handle *handle);

void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct lttng_ust_shm_handle *handle);
void channel_backend_reset(struct channel_backend *chanb);

int lib_ring_buffer_backend_init(void);
void lib_ring_buffer_backend_exit(void);

extern void _lib_ring_buffer_write(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   size_t offset, const void *src, size_t len,
				   ssize_t pagecpy);

/*
 * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
 * exchanged atomically.
 *
 * Top half word, except lowest bit, belongs to "offset", which is used to
 * count the produced buffers. For overwrite mode, this provides the consumer
 * with the capacity to read subbuffers in order, handling the situation where
 * producers would write up to 2^15 buffers (or 2^31 for 64-bit systems)
 * concurrently with a single execution of get_subbuf (between offset sampling
 * and subbuffer ID exchange).
 */

#define HALF_ULONG_BITS		(CAA_BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT	(HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT	(1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK	(~(SB_ID_OFFSET_COUNT - 1))
/*
 * Lowest bit of top word half belongs to noref. Used only for overwrite mode.
 */
#define SB_ID_NOREF_SHIFT	(SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK	SB_ID_NOREF_COUNT
/*
 * In overwrite mode: lowest half of word is used for index.
 * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode: whole word used for index.
 */
#define SB_ID_INDEX_SHIFT	0
#define SB_ID_INDEX_COUNT	(1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK	(SB_ID_NOREF_COUNT - 1)
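
/*
 * Worked example (illustrative, not part of the upstream header): assuming
 * CAA_BITS_PER_LONG == 64, the macros above expand to:
 *	HALF_ULONG_BITS    == 32
 *	SB_ID_OFFSET_SHIFT == 33	(offset occupies bits 33..63, 31 bits)
 *	SB_ID_NOREF_SHIFT  == 32	(noref flag is bit 32)
 *	SB_ID_INDEX_MASK   == 0xffffffff (index occupies bits 0..31)
 * On 32-bit, offset occupies bits 17..31, noref is bit 16, index bits 0..15.
 */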

/*
 * Construct the subbuffer id from offset, index and noref. Use only the index
 * for producer-consumer mode (offset and noref are only used in overwrite
 * mode).
 */
static inline
unsigned long subbuffer_id(const struct lttng_ust_lib_ring_buffer_config *config,
			   unsigned long offset, unsigned long noref,
			   unsigned long index)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (offset << SB_ID_OFFSET_SHIFT)
		       | (noref << SB_ID_NOREF_SHIFT)
		       | index;
	else
		return index;
}
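
/*
 * Usage sketch with hypothetical values (overwrite mode): an id built with
 * subbuffer_id(config, 2, 1, 5) carries offset count 2, the noref flag and
 * index 5, so the accessors below yield:
 *
 *	subbuffer_id_get_index(config, id)		== 5
 *	subbuffer_id_is_noref(config, id)		!= 0
 *	subbuffer_id_compare_offset(config, id, 2)	== 1
 */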

/*
 * Compare offset with the offset contained within id. Return 1 if the offset
 * bits are identical, else 0.
 */
static inline
int subbuffer_id_compare_offset(const struct lttng_ust_lib_ring_buffer_config *config,
				unsigned long id, unsigned long offset)
{
	return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}

static inline
unsigned long subbuffer_id_get_index(const struct lttng_ust_lib_ring_buffer_config *config,
				     unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return id & SB_ID_INDEX_MASK;
	else
		return id;
}

static inline
unsigned long subbuffer_id_is_noref(const struct lttng_ust_lib_ring_buffer_config *config,
				    unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return !!(id & SB_ID_NOREF_MASK);
	else
		return 1;
}

/*
 * Only used by the reader on a subbuffer ID it has exclusive access to.
 * No volatile access needed.
 */
static inline
void subbuffer_id_set_noref(const struct lttng_ust_lib_ring_buffer_config *config,
			    unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id |= SB_ID_NOREF_MASK;
}

static inline
void subbuffer_id_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
				   unsigned long *id, unsigned long offset)
{
	unsigned long tmp;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		tmp = *id;
		tmp &= ~SB_ID_OFFSET_MASK;
		tmp |= offset << SB_ID_OFFSET_SHIFT;
		tmp |= SB_ID_NOREF_MASK;
		/* Volatile store, read concurrently by readers. */
		CMM_ACCESS_ONCE(*id) = tmp;
	}
}

/* No volatile access needed, since the id is already used locally. */
static inline
void subbuffer_id_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
			      unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id &= ~SB_ID_NOREF_MASK;
}

/*
 * For overwrite mode, cap the number of subbuffers per buffer to:
 *   2^16 on 32-bit architectures
 *   2^32 on 64-bit architectures
 * This is required to fit in the index part of the ID. Return 0 on success,
 * -EPERM on failure.
 */
static inline
int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *config,
			     unsigned long num_subbuf)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
	else
		return 0;
}
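
/*
 * For instance (illustrative numbers): on a 64-bit system, an overwrite-mode
 * channel asking for num_subbuf == (1UL << 33) exceeds the 2^32 index
 * capacity and gets -EPERM, whereas num_subbuf == 512 passes and returns 0.
 */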

static inline
int lib_ring_buffer_backend_get_pages(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_lib_ring_buffer_backend_pages **backend_pages)
{
	struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	size_t sbidx;
	size_t offset = ctx->buf_offset;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	unsigned long sb_bindex, id;
	struct lttng_ust_lib_ring_buffer_backend_pages *_backend_pages;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	wsb = shmp_index(handle, bufb->buf_wsb, sbidx);
	if (caa_unlikely(!wsb))
		return -1;
	id = wsb->id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (caa_unlikely(!rpages))
		return -1;
	CHAN_WARN_ON(ctx->chan,
		     config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	_backend_pages = shmp(handle, rpages->shmp);
	if (caa_unlikely(!_backend_pages))
		return -1;
	*backend_pages = _backend_pages;
	return 0;
}

/* Get backend pages from cache. */
static inline
struct lttng_ust_lib_ring_buffer_backend_pages *
lib_ring_buffer_get_backend_pages_from_ctx(const struct lttng_ust_lib_ring_buffer_config *config,
					   struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	if (caa_unlikely(ctx->ctx_len
			 < sizeof(struct lttng_ust_lib_ring_buffer_ctx)))
		return NULL;
	return ctx->backend_pages;
}
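
/*
 * Note (an interpretation, not upstream documentation): the ctx_len check
 * above is an ABI-compatibility guard. A ctx filled in by a probe provider
 * built against an older, smaller struct lttng_ust_lib_ring_buffer_ctx may
 * lack the backend_pages cache field, in which case callers such as
 * subbuffer_count_record() fall back to lib_ring_buffer_backend_get_pages().
 */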

/*
 * The ring buffer can count events recorded and overwritten per buffer,
 * but it is disabled by default due to its performance overhead.
 */
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
			    const struct lttng_ust_lib_ring_buffer_ctx *ctx,
			    struct lttng_ust_lib_ring_buffer_backend *bufb,
			    unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	if (caa_unlikely(!backend_pages)) {
		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
			return;
	}
	v_inc(config, &backend_pages->records_commit);
}
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
			    const struct lttng_ust_lib_ring_buffer_ctx *ctx,
			    struct lttng_ust_lib_ring_buffer_backend *bufb,
			    unsigned long idx, struct lttng_ust_shm_handle *handle)
{
}
#endif /* LTTNG_RING_BUFFER_COUNT_EVENTS */
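
/*
 * Sketch of how the counters get enabled (assuming the usual autotools
 * build): define the macro at configure time, e.g.
 *
 *	./configure CPPFLAGS="-DLTTNG_RING_BUFFER_COUNT_EVENTS"
 *
 * otherwise the empty stub above lets the compiler elide the accounting.
 */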

/*
 * Reader has exclusive subbuffer access for record consumption. No need to
 * perform the decrement atomically.
 */
static inline
void subbuffer_consume_record(const struct lttng_ust_lib_ring_buffer_config *config,
			      struct lttng_ust_lib_ring_buffer_backend *bufb,
			      struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct channel *chan;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
	if (!pages_shmp)
		return;
	backend_pages = shmp(handle, pages_shmp->shmp);
	if (!backend_pages)
		return;
	CHAN_WARN_ON(chan, !v_read(config, &backend_pages->records_unread));
	/* Non-atomic decrement protected by exclusive subbuffer access */
	_v_dec(config, &backend_pages->records_unread);
	v_inc(config, &bufb->records_read);
}

static inline
unsigned long subbuffer_get_records_count(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	return v_read(config, &backend_pages->records_commit);
}

/*
 * Must be executed at subbuffer delivery when the writer has _exclusive_
 * subbuffer access. See lib_ring_buffer_check_deliver() for details.
 * lib_ring_buffer_get_records_count() must be called to get the records
 * count before this function, because it resets the records_commit
 * count.
 */
static inline
unsigned long subbuffer_count_records_overrun(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long overruns, sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	overruns = v_read(config, &backend_pages->records_unread);
	v_set(config, &backend_pages->records_unread,
	      v_read(config, &backend_pages->records_commit));
	v_set(config, &backend_pages->records_commit, 0);

	return overruns;
}

static inline
void subbuffer_set_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
			     struct lttng_ust_lib_ring_buffer_backend *bufb,
			     unsigned long idx,
			     unsigned long data_size,
			     struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return;
	backend_pages->data_size = data_size;
}

static inline
unsigned long subbuffer_get_read_data_size(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
	if (!pages_shmp)
		return 0;
	backend_pages = shmp(handle, pages_shmp->shmp);
	if (!backend_pages)
		return 0;
	return backend_pages->data_size;
}

static inline
unsigned long subbuffer_get_data_size(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	return backend_pages->data_size;
}

static inline
void subbuffer_inc_packet_count(const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_backend *bufb,
				unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_counts *counts;

	counts = shmp_index(handle, bufb->buf_cnt, idx);
	if (!counts)
		return;
	counts->seq_cnt++;
}

/**
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 *                               writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer_backend *bufb,
				 unsigned long idx,
				 struct lttng_ust_shm_handle *handle)
{
	unsigned long id, new_id;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Performing a volatile access to read the sb_pages, because we want to
	 * read a coherent version of the pointer and the associated noref flag.
	 */
	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	id = CMM_ACCESS_ONCE(wsb->id);
	for (;;) {
		/* This check is called on the fast path for each record. */
		if (caa_likely(!subbuffer_id_is_noref(config, id))) {
			/*
			 * Store after load dependency ordering the writes to
			 * the subbuffer after load and test of the noref flag
			 * matches the memory barrier implied by the cmpxchg()
			 * in update_read_sb_index().
			 */
			return;	/* Already writing to this buffer */
		}
		new_id = id;
		subbuffer_id_clear_noref(config, &new_id);
		new_id = uatomic_cmpxchg(&wsb->id, id, new_id);
		if (caa_likely(new_id == id))
			break;
		id = new_id;
	}
}

/**
 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
 *                                    called by writer.
 */
static inline
void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
				      struct lttng_ust_lib_ring_buffer_backend *bufb,
				      unsigned long idx, unsigned long offset,
				      struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct channel *chan;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	/*
	 * Because ring_buffer_set_noref() is only called by a single thread
	 * (the one which updated the cc_sb value), there are no concurrent
	 * updates to take care of: other writers have not updated cc_sb, so
	 * they cannot set the noref flag, and concurrent readers cannot modify
	 * the pointer because the noref flag is not set yet.
	 * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
	 * to the subbuffer before this set noref operation.
	 * subbuffer_set_noref() uses a volatile store to deal with concurrent
	 * readers of the noref flag.
	 */
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	CHAN_WARN_ON(chan, subbuffer_id_is_noref(config, wsb->id));
	/*
	 * Memory barrier that ensures counter stores are ordered before set
	 * noref and offset.
	 */
	cmm_smp_mb();
	subbuffer_id_set_noref_offset(config, &wsb->id, offset);
}

/**
 * update_read_sb_index - Read-side subbuffer index update.
 */
static inline
int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
			 struct lttng_ust_lib_ring_buffer_backend *bufb,
			 struct channel_backend *chanb,
			 unsigned long consumed_idx,
			 unsigned long consumed_count,
			 struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	unsigned long old_id, new_id;

	wsb = shmp_index(handle, bufb->buf_wsb, consumed_idx);
	if (caa_unlikely(!wsb))
		return -EPERM;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		struct channel *chan;

		/*
		 * Exchange the target writer subbuffer with our own unused
		 * subbuffer. No need to use CMM_ACCESS_ONCE() here to read the
		 * old_wpage, because the value read will be confirmed by the
		 * following cmpxchg().
		 */
		old_id = wsb->id;
		if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
			return -EAGAIN;
		/*
		 * Make sure the offset count we are expecting matches the one
		 * indicated by the writer.
		 */
		if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
							      consumed_count)))
			return -EAGAIN;
		chan = shmp(handle, bufb->chan);
		if (caa_unlikely(!chan))
			return -EPERM;
		CHAN_WARN_ON(chan, !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
		subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
					      consumed_count);
		new_id = uatomic_cmpxchg(&wsb->id, old_id, bufb->buf_rsb.id);
		if (caa_unlikely(old_id != new_id))
			return -EAGAIN;
		bufb->buf_rsb.id = new_id;
	} else {
		/* No page exchange, use the writer page directly */
		bufb->buf_rsb.id = wsb->id;
	}
	return 0;
}
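
/*
 * Consumer-side usage sketch (an assumed flow; the frontend's get_subbuf path
 * is the authoritative caller): sample the consumed count, then attempt the
 * exchange and propagate failure so the caller can retry:
 *
 *	int err;
 *
 *	err = update_read_sb_index(config, &buf->backend, &chan->backend,
 *				   consumed_idx, consumed_count, handle);
 *	if (err)
 *		return err;	/- -EAGAIN: lost race with writer -/
 *
 * On success, bufb->buf_rsb.id designates the subbuffer now owned exclusively
 * by the reader.
 */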

#ifndef inline_memcpy
#define inline_memcpy(dest, src, n)	memcpy(dest, src, n)
#endif

static inline __attribute__((always_inline))
void lttng_inline_memcpy(void *dest, const void *src,
			 unsigned long len)
{
	switch (len) {
	case 1:
		*(uint8_t *) dest = *(const uint8_t *) src;
		break;
	case 2:
		*(uint16_t *) dest = *(const uint16_t *) src;
		break;
	case 4:
		*(uint32_t *) dest = *(const uint32_t *) src;
		break;
	case 8:
		*(uint64_t *) dest = *(const uint64_t *) src;
		break;
	default:
		inline_memcpy(dest, src, len);
	}
}

/*
 * Use the architecture-specific memcpy implementation for constant-sized
 * inputs, but rely on an inline memcpy for length statically unknown.
 * The function call to memcpy is just way too expensive for a fast path.
 */
#define lib_ring_buffer_do_copy(config, dest, src, len)		\
do {								\
	size_t __len = (len);					\
	if (__builtin_constant_p(len))				\
		memcpy(dest, src, __len);			\
	else							\
		lttng_inline_memcpy(dest, src, __len);		\
} while (0)
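
/*
 * Usage sketch (hypothetical dest/src pointers): a compile-time constant
 * length lets the compiler expand memcpy() inline, while a runtime length
 * goes through lttng_inline_memcpy():
 *
 *	lib_ring_buffer_do_copy(config, dest, src, sizeof(uint32_t));
 *	lib_ring_buffer_do_copy(config, dest, src, len);
 */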

/*
 * Write len bytes of value c to dest.
 */
static inline
void lib_ring_buffer_do_memset(char *dest, int c, unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len; i++)
		dest[i] = c;
}

/* Architecture-agnostic fls (find last set bit) implementation. */

static inline int lttng_ust_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		/* No need to bit shift on last operation */
		r -= 1;
	}
	return r;
}

static inline int get_count_order(unsigned int count)
{
	int order;

	order = lttng_ust_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
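
/*
 * Worked examples: lttng_ust_fls(24) == 5, since the highest set bit of
 * 0b11000 is bit 5. Hence get_count_order(24) == 5 (24 is not a power of
 * two, so the order is rounded up: 1U << 5 == 32 >= 24), while
 * get_count_order(32) == 5 exactly.
 */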

#endif /* _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H */