Hide libringbuffer private symbols
[lttng-ust.git] / libringbuffer / backend_internal.h
/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring buffer backend (internal helpers).
 */

#ifndef _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H

#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <urcu/compiler.h>

#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"
#include "ust-helper.h"

/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */

LTTNG_HIDDEN
int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct channel_backend *chan, int cpu,
		struct lttng_ust_shm_handle *handle,
		struct shm_object *shmobj);
LTTNG_HIDDEN
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
LTTNG_HIDDEN
void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb);
LTTNG_HIDDEN
int channel_backend_init(struct channel_backend *chanb,
		const char *name,
		const struct lttng_ust_lib_ring_buffer_config *config,
		size_t subbuf_size,
		size_t num_subbuf, struct lttng_ust_shm_handle *handle,
		const int *stream_fds);
LTTNG_HIDDEN
void channel_backend_free(struct channel_backend *chanb,
		struct lttng_ust_shm_handle *handle);

LTTNG_HIDDEN
void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle);
LTTNG_HIDDEN
void channel_backend_reset(struct channel_backend *chanb);

LTTNG_HIDDEN
int lib_ring_buffer_backend_init(void);
LTTNG_HIDDEN
void lib_ring_buffer_backend_exit(void);

LTTNG_HIDDEN
extern void _lib_ring_buffer_write(struct lttng_ust_lib_ring_buffer_backend *bufb,
		size_t offset, const void *src, size_t len,
		ssize_t pagecpy);

/*
 * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
 * exchanged atomically.
 *
 * Top half word, except lowest bit, belongs to "offset", which is used to
 * count the produced buffers. For overwrite mode, this provides the consumer
 * with the capacity to read subbuffers in order, handling the situation where
 * producers would write up to 2^15 buffers (or 2^31 for 64-bit systems)
 * concurrently with a single execution of get_subbuf (between offset sampling
 * and subbuffer ID exchange).
 */

#define HALF_ULONG_BITS		(CAA_BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT	(HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT	(1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK	(~(SB_ID_OFFSET_COUNT - 1))
/*
 * Lowest bit of the top half of the word belongs to noref. Used only for
 * overwrite mode.
 */
#define SB_ID_NOREF_SHIFT	(SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK	SB_ID_NOREF_COUNT
/*
 * In overwrite mode: lowest half of word is used for index.
 * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode: whole word used for index.
 */
#define SB_ID_INDEX_SHIFT	0
#define SB_ID_INDEX_COUNT	(1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK	(SB_ID_NOREF_COUNT - 1)

/*
 * Construct the subbuffer id from offset, index and noref. Use only the index
 * for producer-consumer mode (offset and noref are only used in overwrite
 * mode).
 */
static inline
unsigned long subbuffer_id(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long offset, unsigned long noref,
		unsigned long index)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (offset << SB_ID_OFFSET_SHIFT)
		       | (noref << SB_ID_NOREF_SHIFT)
		       | index;
	else
		return index;
}
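
/*
 * Worked example (editor's illustration, not part of the original header):
 * on a 64-bit system, CAA_BITS_PER_LONG == 64, so HALF_ULONG_BITS == 32,
 * SB_ID_OFFSET_SHIFT == 33 and SB_ID_NOREF_SHIFT == 32. In overwrite mode,
 * subbuffer_id(config, 5, 1, 3) therefore packs the three fields as
 * (5UL << 33) | (1UL << 32) | 3: the offset count in the top 31 bits, the
 * noref flag in bit 32, and the subbuffer index in the low 32 bits covered
 * by SB_ID_INDEX_MASK.
 */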

/*
 * Compare offset with the offset contained within id. Return 1 if the offset
 * bits are identical, else 0.
 */
static inline
int subbuffer_id_compare_offset(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long id, unsigned long offset)
{
	return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}

static inline
unsigned long subbuffer_id_get_index(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return id & SB_ID_INDEX_MASK;
	else
		return id;
}

static inline
unsigned long subbuffer_id_is_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return !!(id & SB_ID_NOREF_MASK);
	else
		return 1;
}

/*
 * Only used by reader on subbuffer ID it has exclusive access to. No volatile
 * access needed.
 */
static inline
void subbuffer_id_set_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id |= SB_ID_NOREF_MASK;
}

static inline
void subbuffer_id_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long *id, unsigned long offset)
{
	unsigned long tmp;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		tmp = *id;
		tmp &= ~SB_ID_OFFSET_MASK;
		tmp |= offset << SB_ID_OFFSET_SHIFT;
		tmp |= SB_ID_NOREF_MASK;
		/* Volatile store, read concurrently by readers. */
		CMM_ACCESS_ONCE(*id) = tmp;
	}
}

/* No volatile access, since already used locally */
static inline
void subbuffer_id_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id &= ~SB_ID_NOREF_MASK;
}

/*
 * For overwrite mode, cap the number of subbuffers per buffer to:
 *   2^16 on 32-bit architectures
 *   2^32 on 64-bit architectures
 * This is required to fit in the index part of the ID. Return 0 on success,
 * -EPERM on failure.
 */
static inline
int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long num_subbuf)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
	else
		return 0;
}
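
/*
 * Worked example (editor's illustration, not part of the original header):
 * on a 32-bit system, HALF_ULONG_BITS == 16, so in overwrite mode
 * subbuffer_id_check_index(config, 1UL << 16) returns 0 (exactly 2^16
 * subbuffers fit in the index field), while
 * subbuffer_id_check_index(config, (1UL << 16) + 1) returns -EPERM.
 */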

static inline
int lib_ring_buffer_backend_get_pages(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_lib_ring_buffer_backend_pages **backend_pages)
{
	struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	size_t sbidx;
	size_t offset = ctx->buf_offset;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	unsigned long sb_bindex, id;
	struct lttng_ust_lib_ring_buffer_backend_pages *_backend_pages;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	wsb = shmp_index(handle, bufb->buf_wsb, sbidx);
	if (caa_unlikely(!wsb))
		return -1;
	id = wsb->id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (caa_unlikely(!rpages))
		return -1;
	CHAN_WARN_ON(ctx->chan,
		     config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	_backend_pages = shmp(handle, rpages->shmp);
	if (caa_unlikely(!_backend_pages))
		return -1;
	*backend_pages = _backend_pages;
	return 0;
}

/* Get backend pages from cache. */
static inline
struct lttng_ust_lib_ring_buffer_backend_pages *
	lib_ring_buffer_get_backend_pages_from_ctx(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	if (caa_unlikely(ctx->ctx_len
			< sizeof(struct lttng_ust_lib_ring_buffer_ctx)))
		return NULL;
	return ctx->backend_pages;
}

/*
 * The ring buffer can count events recorded and overwritten per buffer,
 * but it is disabled by default due to its performance overhead.
 */
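/*
 * Editor's note (assumption, not verified against the build system): the
 * counters are compiled in by defining LTTNG_RING_BUFFER_COUNT_EVENTS at
 * build time, e.g. CPPFLAGS="-DLTTNG_RING_BUFFER_COUNT_EVENTS"; when it is
 * left undefined, the empty stub below compiles to nothing.
 */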
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
		const struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	if (caa_unlikely(!backend_pages)) {
		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
			return;
	}
	v_inc(config, &backend_pages->records_commit);
}
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
		const struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, struct lttng_ust_shm_handle *handle)
{
}
#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */

/*
 * Reader has exclusive subbuffer access for record consumption. No need to
 * perform the decrement atomically.
 */
static inline
void subbuffer_consume_record(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct channel *chan;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
	if (!pages_shmp)
		return;
	backend_pages = shmp(handle, pages_shmp->shmp);
	if (!backend_pages)
		return;
	CHAN_WARN_ON(chan, !v_read(config, &backend_pages->records_unread));
	/* Non-atomic decrement protected by exclusive subbuffer access */
	_v_dec(config, &backend_pages->records_unread);
	v_inc(config, &bufb->records_read);
}

static inline
unsigned long subbuffer_get_records_count(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	return v_read(config, &backend_pages->records_commit);
}

/*
 * Must be executed at subbuffer delivery when the writer has _exclusive_
 * subbuffer access. See lib_ring_buffer_check_deliver() for details.
 * subbuffer_get_records_count() must be called to get the records count
 * before this function, because it resets the records_commit count.
 */
static inline
unsigned long subbuffer_count_records_overrun(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long overruns, sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	overruns = v_read(config, &backend_pages->records_unread);
	v_set(config, &backend_pages->records_unread,
	      v_read(config, &backend_pages->records_commit));
	v_set(config, &backend_pages->records_commit, 0);

	return overruns;
}

static inline
void subbuffer_set_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		unsigned long data_size,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return;
	backend_pages->data_size = data_size;
}

static inline
unsigned long subbuffer_get_read_data_size(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
	if (!pages_shmp)
		return 0;
	backend_pages = shmp(handle, pages_shmp->shmp);
	if (!backend_pages)
		return 0;
	return backend_pages->data_size;
}

static inline
unsigned long subbuffer_get_data_size(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	return backend_pages->data_size;
}

static inline
void subbuffer_inc_packet_count(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_counts *counts;

	counts = shmp_index(handle, bufb->buf_cnt, idx);
	if (!counts)
		return;
	counts->seq_cnt++;
}

/**
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 *                               writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long id, new_id;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Perform a volatile access to read the subbuffer id, because we want
	 * a coherent view of the id and the associated noref flag.
	 */
	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	id = CMM_ACCESS_ONCE(wsb->id);
	for (;;) {
		/* This check is called on the fast path for each record. */
		if (caa_likely(!subbuffer_id_is_noref(config, id))) {
			/*
			 * The store-after-load dependency (writes to the
			 * subbuffer are ordered after the load and test of
			 * the noref flag) matches the memory barrier implied
			 * by the cmpxchg() in update_read_sb_index().
			 */
			return;	/* Already writing to this buffer */
		}
		new_id = id;
		subbuffer_id_clear_noref(config, &new_id);
		new_id = uatomic_cmpxchg(&wsb->id, id, new_id);
		if (caa_likely(new_id == id))
			break;
		id = new_id;
	}
}

/**
 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
 *                                    called by writer.
 */
static inline
void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, unsigned long offset,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct channel *chan;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	/*
	 * Because lib_ring_buffer_set_noref_offset() is only called by a
	 * single thread (the one which updated the cc_sb value), there are no
	 * concurrent updates to take care of: other writers have not updated
	 * cc_sb, so they cannot set the noref flag, and concurrent readers
	 * cannot modify the pointer because the noref flag is not set yet.
	 * The write memory barrier in the commit path orders writes to the
	 * subbuffer before this set noref operation.
	 * subbuffer_id_set_noref_offset() uses a volatile store to deal with
	 * concurrent readers of the noref flag.
	 */
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	CHAN_WARN_ON(chan, subbuffer_id_is_noref(config, wsb->id));
	/*
	 * Memory barrier that ensures counter stores are ordered before set
	 * noref and offset.
	 */
	cmm_smp_mb();
	subbuffer_id_set_noref_offset(config, &wsb->id, offset);
}

/**
 * update_read_sb_index - Read-side subbuffer index update.
 */
static inline
int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct channel_backend *chanb,
		unsigned long consumed_idx,
		unsigned long consumed_count,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	unsigned long old_id, new_id;

	wsb = shmp_index(handle, bufb->buf_wsb, consumed_idx);
	if (caa_unlikely(!wsb))
		return -EPERM;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		struct channel *chan;

		/*
		 * Exchange the target writer subbuffer with our own unused
		 * subbuffer. No need to use CMM_ACCESS_ONCE() here to read
		 * the old id, because the value read will be confirmed by the
		 * following cmpxchg().
		 */
		old_id = wsb->id;
		if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
			return -EAGAIN;
		/*
		 * Make sure the offset count we are expecting matches the one
		 * indicated by the writer.
		 */
		if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
							      consumed_count)))
			return -EAGAIN;
		chan = shmp(handle, bufb->chan);
		if (caa_unlikely(!chan))
			return -EPERM;
		CHAN_WARN_ON(chan, !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
		subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
					      consumed_count);
		new_id = uatomic_cmpxchg(&wsb->id, old_id, bufb->buf_rsb.id);
		if (caa_unlikely(old_id != new_id))
			return -EAGAIN;
		bufb->buf_rsb.id = new_id;
	} else {
		/* No page exchange, use the writer page directly */
		bufb->buf_rsb.id = wsb->id;
	}
	return 0;
}
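
/*
 * Read-side usage sketch (editor's illustration, not part of the original
 * header; error handling simplified). A consumer typically samples the
 * consumed position from the frontend, then retries the exchange on
 * -EAGAIN:
 *
 *	for (;;) {
 *		(sample consumed_idx and consumed_count from the frontend)
 *		if (!update_read_sb_index(config, bufb, chanb, consumed_idx,
 *				consumed_count, handle))
 *			break;	(success: buf_rsb.id now owns the subbuffer)
 *		(-EAGAIN: the writer moved on, resample and retry;
 *		 -EPERM: shared memory layout error, abort)
 *	}
 */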

#ifndef inline_memcpy
#define inline_memcpy(dest, src, n)	memcpy(dest, src, n)
#endif

static inline __attribute__((always_inline))
void lttng_inline_memcpy(void *dest, const void *src,
		unsigned long len)
{
	switch (len) {
	case 1:
		*(uint8_t *) dest = *(const uint8_t *) src;
		break;
	case 2:
		*(uint16_t *) dest = *(const uint16_t *) src;
		break;
	case 4:
		*(uint32_t *) dest = *(const uint32_t *) src;
		break;
	case 8:
		*(uint64_t *) dest = *(const uint64_t *) src;
		break;
	default:
		inline_memcpy(dest, src, len);
	}
}

/*
 * Use the architecture-specific memcpy implementation for constant-sized
 * inputs, but rely on an inline memcpy for lengths that are not statically
 * known: the function call to memcpy is just way too expensive for a fast
 * path.
 */
#define lib_ring_buffer_do_copy(config, dest, src, len)	\
do {								\
	size_t __len = (len);					\
	if (__builtin_constant_p(len))				\
		memcpy(dest, src, __len);			\
	else							\
		lttng_inline_memcpy(dest, src, __len);		\
} while (0)
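
/*
 * Editor's illustration (hypothetical helper, not part of the original
 * header): a compile-time-constant length takes the plain memcpy branch,
 * which the compiler can expand inline, while a run-time length goes
 * through lttng_inline_memcpy(). The config argument is unused by the
 * macro, so NULL is passed here.
 */
static inline
void lib_ring_buffer_do_copy_example(char *dest, const char *src,
		size_t runtime_len)
{
	/* __builtin_constant_p(8) is true: memcpy branch. */
	lib_ring_buffer_do_copy(NULL, dest, src, 8);
	/* Length known only at run time: lttng_inline_memcpy branch. */
	lib_ring_buffer_do_copy(NULL, dest, src, runtime_len);
}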

/*
 * Write len bytes of value c to dest.
 */
static inline
void lib_ring_buffer_do_memset(char *dest, int c, unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len; i++)
		dest[i] = c;
}

/* arch-agnostic implementation */

static inline int lttng_ust_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		/* No need to bit shift on last operation */
		r -= 1;
	}
	return r;
}
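
/*
 * Worked example (editor's illustration, not part of the original header):
 * lttng_ust_fls() returns the 1-based position of the most significant set
 * bit, so lttng_ust_fls(0) == 0, lttng_ust_fls(1) == 1,
 * lttng_ust_fls(0x10) == 5 and lttng_ust_fls(0x80000000) == 32.
 */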

static inline int get_count_order(unsigned int count)
{
	int order;

	order = lttng_ust_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
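
/*
 * Worked example (editor's illustration, not part of the original header):
 * get_count_order() returns ceil(log2(count)) for count >= 1, e.g.
 * get_count_order(1) == 0, get_count_order(8) == 3 and
 * get_count_order(9) == 4. Such orders enable the shift-based index
 * arithmetic used above (see subbuf_size_order in
 * lib_ring_buffer_backend_get_pages()).
 */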

#endif /* _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H */