#ifndef _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H

/*
 * libringbuffer/backend_internal.h
 *
 * Ring buffer backend (internal helpers).
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <errno.h>	/* EPERM, EAGAIN error codes used below */
#include <urcu/compiler.h>

#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"

/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */

int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct channel_backend *chan, int cpu,
		struct lttng_ust_shm_handle *handle,
		struct shm_object *shmobj);
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb);
int channel_backend_init(struct channel_backend *chanb,
		const char *name,
		const struct lttng_ust_lib_ring_buffer_config *config,
		size_t subbuf_size,
		size_t num_subbuf, struct lttng_ust_shm_handle *handle,
		const int *stream_fds);
void channel_backend_free(struct channel_backend *chanb,
		struct lttng_ust_shm_handle *handle);

void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle);
void channel_backend_reset(struct channel_backend *chanb);

int lib_ring_buffer_backend_init(void);
void lib_ring_buffer_backend_exit(void);

extern void _lib_ring_buffer_write(struct lttng_ust_lib_ring_buffer_backend *bufb,
		size_t offset, const void *src, size_t len,
		ssize_t pagecpy);

/*
 * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
 * exchanged atomically.
 *
 * Top half word, except lowest bit, belongs to "offset", which is used to keep
 * count of the produced buffers. For overwrite mode, this provides the
 * consumer with the capacity to read subbuffers in order, handling the
 * situation where producers would write up to 2^15 buffers (or 2^31 for 64-bit
 * systems) concurrently with a single execution of get_subbuf (between offset
 * sampling and subbuffer ID exchange).
 */

#define HALF_ULONG_BITS		(CAA_BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT	(HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT	(1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK	(~(SB_ID_OFFSET_COUNT - 1))
/*
 * Lowest bit of top word half belongs to noref. Used only for overwrite mode.
 */
#define SB_ID_NOREF_SHIFT	(SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK	SB_ID_NOREF_COUNT
/*
 * In overwrite mode: lowest half of word is used for index.
 * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode: whole word used for index.
 */
#define SB_ID_INDEX_SHIFT	0
#define SB_ID_INDEX_COUNT	(1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK	(SB_ID_NOREF_COUNT - 1)

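/*
 * Illustrative layout example (not part of the API), assuming a 64-bit
 * architecture (CAA_BITS_PER_LONG == 64):
 *   HALF_ULONG_BITS    = 32
 *   SB_ID_OFFSET_SHIFT = 33  -> offset occupies bits 33..63 (31 bits)
 *   SB_ID_NOREF_SHIFT  = 32  -> noref flag is bit 32
 *   SB_ID_INDEX_MASK   = 0xffffffff -> index occupies bits 0..31
 * On a 32-bit architecture, the offset spans bits 17..31, noref is bit 16,
 * and the index occupies bits 0..15.
 */
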
/*
 * Construct the subbuffer id from offset, index and noref. Use only the index
 * for producer-consumer mode (offset and noref are only used in overwrite
 * mode).
 */
static inline
unsigned long subbuffer_id(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long offset, unsigned long noref,
		unsigned long index)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (offset << SB_ID_OFFSET_SHIFT)
			| (noref << SB_ID_NOREF_SHIFT)
			| index;
	else
		return index;
}

/*
 * Compare offset with the offset contained within id. Return 1 if the offset
 * bits are identical, else 0.
 */
static inline
int subbuffer_id_compare_offset(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long id, unsigned long offset)
{
	return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}

static inline
unsigned long subbuffer_id_get_index(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return id & SB_ID_INDEX_MASK;
	else
		return id;
}

static inline
unsigned long subbuffer_id_is_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return !!(id & SB_ID_NOREF_MASK);
	else
		return 1;
}

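/*
 * Round-trip example (illustrative values, 64-bit, overwrite mode): an id
 * built with subbuffer_id(config, 2, 1, 5) yields
 * (2UL << 33) | (1UL << 32) | 5. subbuffer_id_get_index() then recovers 5,
 * subbuffer_id_is_noref() returns 1, and subbuffer_id_compare_offset()
 * matches offset 2.
 */
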
/*
 * Only used by the reader on a subbuffer ID it has exclusive access to;
 * no volatile access needed.
 */
static inline
void subbuffer_id_set_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id |= SB_ID_NOREF_MASK;
}

static inline
void subbuffer_id_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long *id, unsigned long offset)
{
	unsigned long tmp;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		tmp = *id;
		tmp &= ~SB_ID_OFFSET_MASK;
		tmp |= offset << SB_ID_OFFSET_SHIFT;
		tmp |= SB_ID_NOREF_MASK;
		/* Volatile store, read concurrently by readers. */
		CMM_ACCESS_ONCE(*id) = tmp;
	}
}

/* No volatile access needed, since the ID is already used locally. */
static inline
void subbuffer_id_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id &= ~SB_ID_NOREF_MASK;
}

/*
 * For overwrite mode, cap the number of subbuffers per buffer to:
 *   2^16 on 32-bit architectures
 *   2^32 on 64-bit architectures
 * This is required to fit in the index part of the ID. Return 0 on success,
 * -EPERM on failure.
 */
static inline
int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long num_subbuf)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
	else
		return 0;
}

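/*
 * Example (illustrative, overwrite mode on a 32-bit architecture):
 * subbuffer_id_check_index() accepts num_subbuf == 65536 (indices 0..65535
 * still fit in the 16-bit index field) and rejects 65537 with -EPERM.
 */
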
/*
 * Look up the backend pages backing the subbuffer that contains the write
 * position held in ctx->buf_offset. Returns 0 on success, -1 on error.
 */
static inline
int lib_ring_buffer_backend_get_pages(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_lib_ring_buffer_backend_pages **backend_pages)
{
	struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	size_t sbidx;
	size_t offset = ctx->buf_offset;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	unsigned long sb_bindex, id;
	struct lttng_ust_lib_ring_buffer_backend_pages *_backend_pages;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	wsb = shmp_index(handle, bufb->buf_wsb, sbidx);
	if (caa_unlikely(!wsb))
		return -1;
	id = wsb->id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (caa_unlikely(!rpages))
		return -1;
	CHAN_WARN_ON(ctx->chan,
		config->mode == RING_BUFFER_OVERWRITE
		&& subbuffer_id_is_noref(config, id));
	_backend_pages = shmp(handle, rpages->shmp);
	if (caa_unlikely(!_backend_pages))
		return -1;
	*backend_pages = _backend_pages;
	return 0;
}

/* Get backend pages from cache. */
static inline
struct lttng_ust_lib_ring_buffer_backend_pages *
lib_ring_buffer_get_backend_pages_from_ctx(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	if (caa_unlikely(ctx->ctx_len
			< sizeof(struct lttng_ust_lib_ring_buffer_ctx)))
		return NULL;
	return ctx->backend_pages;
}

/*
 * The ring buffer can count events recorded and overwritten per buffer,
 * but this counting is disabled by default because of its performance
 * overhead. Define LTTNG_RING_BUFFER_COUNT_EVENTS at build time to enable it.
 */
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
		const struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	if (caa_unlikely(!backend_pages)) {
		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
			return;
	}
	v_inc(config, &backend_pages->records_commit);
}
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
		const struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, struct lttng_ust_shm_handle *handle)
{
}
#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */

/*
 * Reader has exclusive subbuffer access for record consumption. No need to
 * perform the decrement atomically.
 */
static inline
void subbuffer_consume_record(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct channel *chan;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
	if (!pages_shmp)
		return;
	backend_pages = shmp(handle, pages_shmp->shmp);
	if (!backend_pages)
		return;
	CHAN_WARN_ON(chan, !v_read(config, &backend_pages->records_unread));
	/* Non-atomic decrement protected by exclusive subbuffer access */
	_v_dec(config, &backend_pages->records_unread);
	v_inc(config, &bufb->records_read);
}

static inline
unsigned long subbuffer_get_records_count(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	return v_read(config, &backend_pages->records_commit);
}

/*
 * Must be executed at subbuffer delivery when the writer has _exclusive_
 * subbuffer access. See lib_ring_buffer_check_deliver() for details.
 * subbuffer_get_records_count() must be called to get the records
 * count before this function, because it resets the records_commit
 * count.
 */
static inline
unsigned long subbuffer_count_records_overrun(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long overruns, sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	overruns = v_read(config, &backend_pages->records_unread);
	v_set(config, &backend_pages->records_unread,
	      v_read(config, &backend_pages->records_commit));
	v_set(config, &backend_pages->records_commit, 0);

	return overruns;
}

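/*
 * Illustrative bookkeeping example (hypothetical values): if a subbuffer is
 * delivered with records_unread == 3 (records never consumed from its
 * previous incarnation) and records_commit == 10, the function above returns
 * 3 as the overrun count, moves the 10 committed records into records_unread
 * for the consumer, and resets records_commit to 0 for the next fill.
 */
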
static inline
void subbuffer_set_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		unsigned long data_size,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return;
	backend_pages->data_size = data_size;
}

static inline
unsigned long subbuffer_get_read_data_size(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
	if (!pages_shmp)
		return 0;
	backend_pages = shmp(handle, pages_shmp->shmp);
	if (!backend_pages)
		return 0;
	return backend_pages->data_size;
}

static inline
unsigned long subbuffer_get_data_size(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	return backend_pages->data_size;
}

static inline
void subbuffer_inc_packet_count(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_counts *counts;

	counts = shmp_index(handle, bufb->buf_cnt, idx);
	if (!counts)
		return;
	counts->seq_cnt++;
}

/**
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 *                               writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long id, new_id;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Perform a volatile read of the subbuffer ID: we want a coherent
	 * view of the subbuffer index and its associated noref flag.
	 */
	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	id = CMM_ACCESS_ONCE(wsb->id);
	for (;;) {
		/* This check is called on the fast path for each record. */
		if (caa_likely(!subbuffer_id_is_noref(config, id))) {
			/*
			 * The store-after-load dependency, which orders the
			 * writes to the subbuffer after the load and test of
			 * the noref flag, matches the memory barrier implied
			 * by the cmpxchg() in update_read_sb_index().
			 */
			return;	/* Already writing to this buffer */
		}
		new_id = id;
		subbuffer_id_clear_noref(config, &new_id);
		new_id = uatomic_cmpxchg(&wsb->id, id, new_id);
		if (caa_likely(new_id == id))
			break;
		id = new_id;
	}
}

/**
 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
 *                                    called by writer.
 */
static inline
void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, unsigned long offset,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct channel *chan;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	/*
	 * Because lib_ring_buffer_set_noref_offset() is only called by a
	 * single thread (the one which updated the cc_sb value), there are
	 * no concurrent updates to take care of: other writers have not
	 * updated cc_sb, so they cannot set the noref flag, and concurrent
	 * readers cannot modify the pointer because the noref flag is not
	 * set yet.
	 * The write memory barrier in lib_ring_buffer_commit() takes care of
	 * ordering writes to the subbuffer before this set-noref operation.
	 * subbuffer_id_set_noref_offset() uses a volatile store to deal with
	 * concurrent readers of the noref flag.
	 */
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	CHAN_WARN_ON(chan, subbuffer_id_is_noref(config, wsb->id));
	/*
	 * Memory barrier that ensures counter stores are ordered before set
	 * noref and offset.
	 */
	cmm_smp_mb();
	subbuffer_id_set_noref_offset(config, &wsb->id, offset);
}

/**
 * update_read_sb_index - Read-side subbuffer index update.
 */
static inline
int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct channel_backend *chanb,
		unsigned long consumed_idx,
		unsigned long consumed_count,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	unsigned long old_id, new_id;

	wsb = shmp_index(handle, bufb->buf_wsb, consumed_idx);
	if (caa_unlikely(!wsb))
		return -EPERM;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		struct channel *chan;

		/*
		 * Exchange the target writer subbuffer with our own unused
		 * subbuffer. No need to use CMM_ACCESS_ONCE() here to read the
		 * old id, because the value read will be confirmed by the
		 * following cmpxchg().
		 */
		old_id = wsb->id;
		if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
			return -EAGAIN;
		/*
		 * Make sure the offset count we are expecting matches the one
		 * indicated by the writer.
		 */
		if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
				consumed_count)))
			return -EAGAIN;
		chan = shmp(handle, bufb->chan);
		if (caa_unlikely(!chan))
			return -EPERM;
		CHAN_WARN_ON(chan, !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
		subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
				consumed_count);
		new_id = uatomic_cmpxchg(&wsb->id, old_id, bufb->buf_rsb.id);
		if (caa_unlikely(old_id != new_id))
			return -EAGAIN;
		bufb->buf_rsb.id = new_id;
	} else {
		/* No page exchange, use the writer page directly */
		bufb->buf_rsb.id = wsb->id;
	}
	return 0;
}

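/*
 * Illustration of the overwrite-mode exchange above (hypothetical values):
 * the reader owns a spare subbuffer whose id carries index R and the noref
 * flag; the writer-side table entry for consumed_idx carries index W, the
 * noref flag and an offset equal to consumed_count. The cmpxchg() swaps the
 * two: the reader walks away with index W (the data to read), while the
 * writer-side slot now points to index R, ready to be overwritten.
 */
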
#ifndef inline_memcpy
#define inline_memcpy(dest, src, n)	memcpy(dest, src, n)
#endif

static inline __attribute__((always_inline))
void lttng_inline_memcpy(void *dest, const void *src,
		unsigned long len)
{
	switch (len) {
	case 1:
		*(uint8_t *) dest = *(const uint8_t *) src;
		break;
	case 2:
		*(uint16_t *) dest = *(const uint16_t *) src;
		break;
	case 4:
		*(uint32_t *) dest = *(const uint32_t *) src;
		break;
	case 8:
		*(uint64_t *) dest = *(const uint64_t *) src;
		break;
	default:
		inline_memcpy(dest, src, len);
	}
}

/*
 * Use the architecture-specific memcpy implementation for constant-sized
 * inputs, but rely on an inline memcpy when the length is not known
 * statically. A function call to memcpy is just too expensive for the
 * fast path.
 */
#define lib_ring_buffer_do_copy(config, dest, src, len)		\
do {								\
	size_t __len = (len);					\
	if (__builtin_constant_p(len))				\
		memcpy(dest, src, __len);			\
	else							\
		lttng_inline_memcpy(dest, src, __len);		\
} while (0)

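/*
 * Usage sketch (illustrative, not part of the API): a fixed-size event
 * header copied with lib_ring_buffer_do_copy(config, dest, &header,
 * sizeof(header)) takes the constant-size branch, which the compiler can
 * turn into an inlined memcpy, while a variable-length payload copied with
 * lib_ring_buffer_do_copy(config, dest, payload, payload_len) falls back to
 * lttng_inline_memcpy().
 */
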
/*
 * Write len bytes of value c to dest.
 */
static inline
void lib_ring_buffer_do_memset(char *dest, int c, unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len; i++)
		dest[i] = c;
}

/* Architecture-agnostic fls (find last set bit) and count order helpers. */

static inline int lttng_ust_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		/* No need to bit shift on last operation */
		r -= 1;
	}
	return r;
}

static inline int get_count_order(unsigned int count)
{
	int order;

	order = lttng_ust_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

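/*
 * Worked example (illustrative): lttng_ust_fls(1) == 1 and
 * lttng_ust_fls(5) == 3, so get_count_order(1) == 0 (2^0 = 1 is already a
 * power of two) and get_count_order(5) == 3 (the next power of two, 2^3 = 8).
 */
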
#endif /* _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H */