/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring buffer backend (internal helpers).
 */

#ifndef _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <urcu/compiler.h>

#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"

/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */

int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct channel_backend *chan, int cpu,
		struct lttng_ust_shm_handle *handle,
		struct shm_object *shmobj);
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb);
int channel_backend_init(struct channel_backend *chanb,
		const char *name,
		const struct lttng_ust_lib_ring_buffer_config *config,
		size_t subbuf_size,
		size_t num_subbuf, struct lttng_ust_shm_handle *handle,
		const int *stream_fds);
void channel_backend_free(struct channel_backend *chanb,
		struct lttng_ust_shm_handle *handle);

void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle);
void channel_backend_reset(struct channel_backend *chanb);

int lib_ring_buffer_backend_init(void);
void lib_ring_buffer_backend_exit(void);

extern void _lib_ring_buffer_write(struct lttng_ust_lib_ring_buffer_backend *bufb,
		size_t offset, const void *src, size_t len,
		ssize_t pagecpy);

/*
 * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
 * exchanged atomically.
 *
 * Top half word, except lowest bit, belongs to "offset", which is used to
 * keep count of the produced buffers. For overwrite mode, this provides the
 * consumer with the capacity to read subbuffers in order, handling the
 * situation where producers would write up to 2^15 buffers (or 2^31 for 64-bit
 * systems) concurrently with a single execution of get_subbuf (between offset
 * sampling and subbuffer ID exchange).
 */

#define HALF_ULONG_BITS		(CAA_BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT	(HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT	(1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK	(~(SB_ID_OFFSET_COUNT - 1))
/*
 * Lowest bit of top word half belongs to noref. Used only for overwrite mode.
 */
#define SB_ID_NOREF_SHIFT	(SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK	SB_ID_NOREF_COUNT
/*
 * In overwrite mode: lowest half of word is used for index.
 * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode: whole word used for index.
 */
#define SB_ID_INDEX_SHIFT	0
#define SB_ID_INDEX_COUNT	(1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK	(SB_ID_NOREF_COUNT - 1)
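
/*
 * Illustration (informative only): on a 64-bit system, CAA_BITS_PER_LONG
 * is 64, so HALF_ULONG_BITS is 32 and the id word decomposes as:
 *
 *	bits 33..63	offset	(SB_ID_OFFSET_SHIFT == 33)
 *	bit  32		noref	(SB_ID_NOREF_SHIFT == 32)
 *	bits 0..31	index	(SB_ID_INDEX_MASK == 0xffffffff)
 *
 * On 32-bit, the same layout holds with offset in bits 17..31, noref in
 * bit 16 and index in bits 0..15.
 */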

/*
 * Construct the subbuffer id from offset, index and noref. Use only the index
 * for producer-consumer mode (offset and noref are only used in overwrite
 * mode).
 */
static inline
unsigned long subbuffer_id(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long offset, unsigned long noref,
		unsigned long index)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (offset << SB_ID_OFFSET_SHIFT)
		       | (noref << SB_ID_NOREF_SHIFT)
		       | index;
	else
		return index;
}
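
/*
 * Example (informative, assuming an overwrite-mode config): an id built
 * with subbuffer_id() round-trips through the accessors below:
 *
 *	unsigned long id = subbuffer_id(config, 5, 1, 2);
 *
 *	subbuffer_id_get_index(config, id);		returns 2
 *	subbuffer_id_is_noref(config, id);		returns 1
 *	subbuffer_id_compare_offset(config, id, 5);	returns 1
 */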

/*
 * Compare offset with the offset contained within id. Return 1 if the offset
 * bits are identical, else 0.
 */
static inline
int subbuffer_id_compare_offset(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long id, unsigned long offset)
{
	return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}

static inline
unsigned long subbuffer_id_get_index(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return id & SB_ID_INDEX_MASK;
	else
		return id;
}

static inline
unsigned long subbuffer_id_is_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return !!(id & SB_ID_NOREF_MASK);
	else
		return 1;
}

/*
 * Only used by reader on subbuffer ID it has exclusive access to. No volatile
 * needed.
 */
static inline
void subbuffer_id_set_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id |= SB_ID_NOREF_MASK;
}

static inline
void subbuffer_id_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long *id, unsigned long offset)
{
	unsigned long tmp;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		tmp = *id;
		tmp &= ~SB_ID_OFFSET_MASK;
		tmp |= offset << SB_ID_OFFSET_SHIFT;
		tmp |= SB_ID_NOREF_MASK;
		/* Volatile store, read concurrently by readers. */
		CMM_ACCESS_ONCE(*id) = tmp;
	}
}

/* No volatile access, since already used locally */
static inline
void subbuffer_id_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id &= ~SB_ID_NOREF_MASK;
}

/*
 * For overwrite mode, cap the number of subbuffers per buffer to:
 *	2^16 on 32-bit architectures
 *	2^32 on 64-bit architectures
 * This is required to fit in the index part of the ID. Return 0 on success,
 * -EPERM on failure.
 */
static inline
int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long num_subbuf)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
	else
		return 0;
}
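
/*
 * For instance (informative), on a 32-bit system HALF_ULONG_BITS is 16,
 * so an overwrite-mode channel may use up to 65536 (2^16) subbuffers;
 * any larger num_subbuf is rejected with -EPERM because its indexes
 * could not be encoded in the low half of the id word.
 */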

static inline
int lib_ring_buffer_backend_get_pages(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_lib_ring_buffer_backend_pages **backend_pages)
{
	struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	size_t sbidx;
	size_t offset = ctx->buf_offset;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	unsigned long sb_bindex, id;
	struct lttng_ust_lib_ring_buffer_backend_pages *_backend_pages;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	wsb = shmp_index(handle, bufb->buf_wsb, sbidx);
	if (caa_unlikely(!wsb))
		return -1;
	id = wsb->id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (caa_unlikely(!rpages))
		return -1;
	CHAN_WARN_ON(ctx->chan,
		     config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	_backend_pages = shmp(handle, rpages->shmp);
	if (caa_unlikely(!_backend_pages))
		return -1;
	*backend_pages = _backend_pages;
	return 0;
}

/* Get backend pages from cache. */
static inline
struct lttng_ust_lib_ring_buffer_backend_pages *
lib_ring_buffer_get_backend_pages_from_ctx(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	if (caa_unlikely(ctx->ctx_len
			< sizeof(struct lttng_ust_lib_ring_buffer_ctx)))
		return NULL;
	return ctx->backend_pages;
}

/*
 * The ring buffer can count events recorded and overwritten per buffer,
 * but it is disabled by default due to its performance overhead.
 */
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
		const struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	if (caa_unlikely(!backend_pages)) {
		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
			return;
	}
	v_inc(config, &backend_pages->records_commit);
}
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
		const struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, struct lttng_ust_shm_handle *handle)
{
}
#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
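
/*
 * LTTNG_RING_BUFFER_COUNT_EVENTS is a compile-time switch: building with
 * e.g. CPPFLAGS="-DLTTNG_RING_BUFFER_COUNT_EVENTS" (example invocation)
 * selects the counting implementation above, at the cost of one counter
 * increment per recorded event.
 */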

/*
 * Reader has exclusive subbuffer access for record consumption. No need to
 * perform the decrement atomically.
 */
static inline
void subbuffer_consume_record(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct channel *chan;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
	if (!pages_shmp)
		return;
	backend_pages = shmp(handle, pages_shmp->shmp);
	if (!backend_pages)
		return;
	CHAN_WARN_ON(chan, !v_read(config, &backend_pages->records_unread));
	/* Non-atomic decrement protected by exclusive subbuffer access */
	_v_dec(config, &backend_pages->records_unread);
	v_inc(config, &bufb->records_read);
}

static inline
unsigned long subbuffer_get_records_count(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	return v_read(config, &backend_pages->records_commit);
}

/*
 * Must be executed at subbuffer delivery when the writer has _exclusive_
 * subbuffer access. See lib_ring_buffer_check_deliver() for details.
 * subbuffer_get_records_count() must be called to get the records count
 * before this function, because this function resets the records_commit
 * count.
 */
static inline
unsigned long subbuffer_count_records_overrun(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long overruns, sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	overruns = v_read(config, &backend_pages->records_unread);
	v_set(config, &backend_pages->records_unread,
	      v_read(config, &backend_pages->records_commit));
	v_set(config, &backend_pages->records_commit, 0);

	return overruns;
}

static inline
void subbuffer_set_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		unsigned long data_size,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return;
	backend_pages->data_size = data_size;
}

static inline
unsigned long subbuffer_get_read_data_size(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
	if (!pages_shmp)
		return 0;
	backend_pages = shmp(handle, pages_shmp->shmp);
	if (!backend_pages)
		return 0;
	return backend_pages->data_size;
}

static inline
unsigned long subbuffer_get_data_size(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	return backend_pages->data_size;
}

static inline
void subbuffer_inc_packet_count(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_counts *counts;

	counts = shmp_index(handle, bufb->buf_cnt, idx);
	if (!counts)
		return;
	counts->seq_cnt++;
}

/**
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 *                               writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long id, new_id;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Perform a volatile access to read the sb_pages, because we want to
	 * read a coherent version of the pointer and the associated noref
	 * flag.
	 */
	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	id = CMM_ACCESS_ONCE(wsb->id);
	for (;;) {
		/* This check is called on the fast path for each record. */
		if (caa_likely(!subbuffer_id_is_noref(config, id))) {
			/*
			 * The store-after-load ordering of the writes to the
			 * subbuffer (performed after the load and test of the
			 * noref flag) is matched by the memory barrier implied
			 * by the cmpxchg() in update_read_sb_index().
			 */
			return;	/* Already writing to this buffer */
		}
		new_id = id;
		subbuffer_id_clear_noref(config, &new_id);
		new_id = uatomic_cmpxchg(&wsb->id, id, new_id);
		if (caa_likely(new_id == id))
			break;
		id = new_id;
	}
}

/**
 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
 *                                    called by writer.
 */
static inline
void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, unsigned long offset,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct channel *chan;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	/*
	 * Because ring_buffer_set_noref() is only called by a single thread
	 * (the one which updated the cc_sb value), there are no concurrent
	 * updates to take care of: other writers have not updated cc_sb, so
	 * they cannot set the noref flag, and concurrent readers cannot modify
	 * the pointer because the noref flag is not set yet.
	 * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
	 * to the subbuffer before this set noref operation.
	 * subbuffer_id_set_noref_offset() uses a volatile store to deal with
	 * concurrent readers of the noref flag.
	 */
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	CHAN_WARN_ON(chan, subbuffer_id_is_noref(config, wsb->id));
	/*
	 * Memory barrier that ensures counter stores are ordered before set
	 * noref and offset.
	 */
	cmm_smp_mb();
	subbuffer_id_set_noref_offset(config, &wsb->id, offset);
}

/**
 * update_read_sb_index - Read-side subbuffer index update.
 */
static inline
int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct channel_backend *chanb,
		unsigned long consumed_idx,
		unsigned long consumed_count,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	unsigned long old_id, new_id;

	wsb = shmp_index(handle, bufb->buf_wsb, consumed_idx);
	if (caa_unlikely(!wsb))
		return -EPERM;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		struct channel *chan;

		/*
		 * Exchange the target writer subbuffer with our own unused
		 * subbuffer. No need to use CMM_ACCESS_ONCE() here to read the
		 * old id, because the value read will be confirmed by the
		 * following cmpxchg().
		 */
		old_id = wsb->id;
		if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
			return -EAGAIN;
		/*
		 * Make sure the offset count we are expecting matches the one
		 * indicated by the writer.
		 */
		if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
							      consumed_count)))
			return -EAGAIN;
		chan = shmp(handle, bufb->chan);
		if (caa_unlikely(!chan))
			return -EPERM;
		CHAN_WARN_ON(chan, !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
		subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
					      consumed_count);
		new_id = uatomic_cmpxchg(&wsb->id, old_id, bufb->buf_rsb.id);
		if (caa_unlikely(old_id != new_id))
			return -EAGAIN;
		bufb->buf_rsb.id = new_id;
	} else {
		/* No page exchange, use the writer page directly */
		bufb->buf_rsb.id = wsb->id;
	}
	return 0;
}
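
/*
 * Worked example of the overwrite-mode exchange above (informative):
 * suppose the reader-owned spare subbuffer (buf_rsb) references pages R,
 * and the writer table entry at consumed_idx references pages W with the
 * noref flag set and an offset matching consumed_count. After the
 * successful cmpxchg(), the writer table entry references R (noref set,
 * offset updated to consumed_count) and buf_rsb references W, which the
 * reader can now consume without the writer reusing it.
 */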

#ifndef inline_memcpy
#define inline_memcpy(dest, src, n)	memcpy(dest, src, n)
#endif

static inline __attribute__((always_inline))
void lttng_inline_memcpy(void *dest, const void *src,
		unsigned long len)
{
	switch (len) {
	case 1:
		*(uint8_t *) dest = *(const uint8_t *) src;
		break;
	case 2:
		*(uint16_t *) dest = *(const uint16_t *) src;
		break;
	case 4:
		*(uint32_t *) dest = *(const uint32_t *) src;
		break;
	case 8:
		*(uint64_t *) dest = *(const uint64_t *) src;
		break;
	default:
		inline_memcpy(dest, src, len);
	}
}

/*
 * Use the architecture-specific memcpy implementation for constant-sized
 * inputs, but rely on an inline memcpy for lengths that are not statically
 * known: the function call to memcpy() is just way too expensive for a
 * fast path.
 */
#define lib_ring_buffer_do_copy(config, dest, src, len)		\
do {								\
	size_t __len = (len);					\
	if (__builtin_constant_p(len))				\
		memcpy(dest, src, __len);			\
	else							\
		lttng_inline_memcpy(dest, src, __len);		\
} while (0)
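
/*
 * For example (informative), a copy with a compile-time constant length:
 *
 *	lib_ring_buffer_do_copy(config, dest, src, sizeof(uint32_t));
 *
 * expands to a plain memcpy() that the compiler can lower to a single
 * 4-byte store, whereas a runtime-sized copy goes through
 * lttng_inline_memcpy() and its small-size switch.
 */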

/*
 * Write 'len' bytes of value 'c' to 'dest'.
 */
static inline
void lib_ring_buffer_do_memset(char *dest, int c, unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len; i++)
		dest[i] = c;
}

/* arch-agnostic implementation */

static inline int lttng_ust_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		/* No need to bit shift on last operation */
		r -= 1;
	}
	return r;
}
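
/*
 * Example values (informative): lttng_ust_fls(0) == 0,
 * lttng_ust_fls(1) == 1, lttng_ust_fls(0x10) == 5 and
 * lttng_ust_fls(0x80000000) == 32, i.e. the 1-based position of the most
 * significant set bit.
 */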

static inline int get_count_order(unsigned int count)
{
	int order;

	order = lttng_ust_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
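
/*
 * Example values (informative): get_count_order(1) == 0,
 * get_count_order(4) == 2 and get_count_order(5) == 3, i.e. the ceiling
 * of log2(count), as used to round buffer element counts up to a power
 * of two.
 */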

#endif /* _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H */