X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=lib%2Fringbuffer%2Fbackend_internal.h;h=3a2ce26f145669b913f6a0cde6e5e1c2ee818c59;hb=369708f464bedc0682151df9308cebfa14dbdb2b;hp=3e262c2e27f65ca7b8b3ea5ca50fdbb8a7b92824;hpb=886d51a3d7ed5fa6b41d7f19b3e14ae6c535a44c;p=lttng-modules.git

diff --git a/lib/ringbuffer/backend_internal.h b/lib/ringbuffer/backend_internal.h
index 3e262c2e..3a2ce26f 100644
--- a/lib/ringbuffer/backend_internal.h
+++ b/lib/ringbuffer/backend_internal.h
@@ -1,31 +1,18 @@
-#ifndef _LIB_RING_BUFFER_BACKEND_INTERNAL_H
-#define _LIB_RING_BUFFER_BACKEND_INTERNAL_H
-
-/*
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
  * lib/ringbuffer/backend_internal.h
  *
  * Ring buffer backend (internal helpers).
  *
  * Copyright (C) 2008-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; only
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-#include "../../wrapper/ringbuffer/config.h"
-#include "../../wrapper/ringbuffer/backend_types.h"
-#include "../../wrapper/ringbuffer/frontend_types.h"
+#ifndef _LIB_RING_BUFFER_BACKEND_INTERNAL_H
+#define _LIB_RING_BUFFER_BACKEND_INTERNAL_H
+
+#include <wrapper/ringbuffer/config.h>
+#include <wrapper/ringbuffer/backend_types.h>
+#include <wrapper/ringbuffer/frontend_types.h>
 #include <linux/string.h>
 #include <linux/uaccess.h>
 
@@ -52,13 +39,19 @@ void lib_ring_buffer_backend_exit(void);
 
 extern void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb,
 				   size_t offset, const void *src, size_t len,
-				   ssize_t pagecpy);
+				   size_t pagecpy);
 extern void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
 				    size_t offset, int c, size_t len,
-				    ssize_t pagecpy);
-extern void _lib_ring_buffer_copy_from_user(struct lib_ring_buffer_backend *bufb,
+				    size_t pagecpy);
+extern void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
+				    size_t offset, const char *src, size_t len,
+				    size_t pagecpy, int pad);
+extern void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
 					    size_t offset, const void *src,
-					    size_t len, ssize_t pagecpy);
+					    size_t len, size_t pagecpy);
+extern void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
+		size_t offset, const char __user *src, size_t len,
+		size_t pagecpy, int pad);
 
 /*
  * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
@@ -165,7 +158,7 @@ void subbuffer_id_set_noref_offset(const struct lib_ring_buffer_config *config,
 		tmp |= offset << SB_ID_OFFSET_SHIFT;
 		tmp |= SB_ID_NOREF_MASK;
 		/* Volatile store, read concurrently by readers. */
-		ACCESS_ONCE(*id) = tmp;
+		WRITE_ONCE(*id, tmp);
 	}
 }
 
@@ -195,6 +188,42 @@ int subbuffer_id_check_index(const struct lib_ring_buffer_config *config,
 	return 0;
 }
 
+static inline
+void lib_ring_buffer_backend_get_pages(const struct lib_ring_buffer_config *config,
+		struct lib_ring_buffer_ctx *ctx,
+		struct lib_ring_buffer_backend_pages **backend_pages)
+{
+	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
+	struct channel_backend *chanb = &ctx->chan->backend;
+	size_t sbidx, offset = ctx->buf_offset;
+	unsigned long sb_bindex, id;
+	struct lib_ring_buffer_backend_pages *rpages;
+
+	offset &= chanb->buf_size - 1;
+	sbidx = offset >> chanb->subbuf_size_order;
+	id = bufb->buf_wsb[sbidx].id;
+	sb_bindex = subbuffer_id_get_index(config, id);
+	rpages = bufb->array[sb_bindex];
+	CHAN_WARN_ON(ctx->chan,
+		     config->mode == RING_BUFFER_OVERWRITE
+		     && subbuffer_id_is_noref(config, id));
+	*backend_pages = rpages;
+}
+
+/* Get backend pages from cache. */
+static inline
+struct lib_ring_buffer_backend_pages *
+	lib_ring_buffer_get_backend_pages_from_ctx(const struct lib_ring_buffer_config *config,
+		struct lib_ring_buffer_ctx *ctx)
+{
+	return ctx->backend_pages;
+}
+
+/*
+ * The ring buffer can count events recorded and overwritten per buffer,
+ * but it is disabled by default due to its performance overhead.
+ */
+#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
 static inline
 void subbuffer_count_record(const struct lib_ring_buffer_config *config,
 			    struct lib_ring_buffer_backend *bufb,
@@ -205,6 +234,14 @@ void subbuffer_count_record(const struct lib_ring_buffer_config *config,
 	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
 	v_inc(config, &bufb->array[sb_bindex]->records_commit);
 }
+#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
+static inline
+void subbuffer_count_record(const struct lib_ring_buffer_config *config,
+			    struct lib_ring_buffer_backend *bufb,
+			    unsigned long idx)
+{
+}
+#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
 
 /*
  * Reader has exclusive subbuffer access for record consumption. No need to
@@ -238,9 +275,10 @@ unsigned long subbuffer_get_records_count(
 
 /*
  * Must be executed at subbuffer delivery when the writer has _exclusive_
- * subbuffer access. See ring_buffer_check_deliver() for details.
- * ring_buffer_get_records_count() must be called to get the records count
- * before this function, because it resets the records_commit count.
+ * subbuffer access. See lib_ring_buffer_check_deliver() for details.
+ * lib_ring_buffer_get_records_count() must be called to get the records
+ * count before this function, because it resets the records_commit
+ * count.
  */
 static inline
 unsigned long subbuffer_count_records_overrun(
@@ -302,6 +340,14 @@ unsigned long subbuffer_get_data_size(
 	return pages->data_size;
 }
 
+static inline
+void subbuffer_inc_packet_count(const struct lib_ring_buffer_config *config,
+		struct lib_ring_buffer_backend *bufb,
+		unsigned long idx)
+{
+	bufb->buf_cnt[idx].seq_cnt++;
+}
+
 /**
  * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
  *                               writer.
@@ -320,7 +366,7 @@ void lib_ring_buffer_clear_noref(const struct lib_ring_buffer_config *config,
 	 * Performing a volatile access to read the sb_pages, because we want to
 	 * read a coherent version of the pointer and the associated noref flag.
 	 */
-	id = ACCESS_ONCE(bufb->buf_wsb[idx].id);
+	id = READ_ONCE(bufb->buf_wsb[idx].id);
 	for (;;) {
 		/* This check is called on the fast path for each record. */
 		if (likely(!subbuffer_id_is_noref(config, id))) {
@@ -389,7 +435,7 @@ int update_read_sb_index(const struct lib_ring_buffer_config *config,
 	if (config->mode == RING_BUFFER_OVERWRITE) {
 		/*
 		 * Exchange the target writer subbuffer with our own unused
-		 * subbuffer. No need to use ACCESS_ONCE() here to read the
+		 * subbuffer. No need to use READ_ONCE() here to read the
 		 * old_wpage, because the value read will be confirmed by the
 		 * following cmpxchg().
 		 */
@@ -419,6 +465,28 @@ int update_read_sb_index(const struct lib_ring_buffer_config *config,
 	return 0;
 }
 
+static inline __attribute__((always_inline))
+void lttng_inline_memcpy(void *dest, const void *src,
+		unsigned long len)
+{
+	switch (len) {
+	case 1:
+		*(uint8_t *) dest = *(const uint8_t *) src;
+		break;
+	case 2:
+		*(uint16_t *) dest = *(const uint16_t *) src;
+		break;
+	case 4:
+		*(uint32_t *) dest = *(const uint32_t *) src;
+		break;
+	case 8:
+		*(uint64_t *) dest = *(const uint64_t *) src;
+		break;
+	default:
+		memcpy(dest, src, len);
+	}
+}
+
 /*
  * Use the architecture-specific memcpy implementation for constant-sized
  * inputs, but rely on an inline memcpy for length statically unknown.
@@ -430,19 +498,21 @@ do {								\
 	if (__builtin_constant_p(len))				\
 		memcpy(dest, src, __len);			\
 	else							\
-		inline_memcpy(dest, src, __len);		\
+		lttng_inline_memcpy(dest, src, __len);		\
 } while (0)
 
 /*
- * We use __copy_from_user to copy userspace data since we already
+ * We use __copy_from_user_inatomic to copy userspace data since we already
  * did the access_ok for the whole range.
+ *
+ * Return 0 if OK, nonzero on error.
  */
 static inline
-unsigned long lib_ring_buffer_do_copy_from_user(void *dest,
+unsigned long lib_ring_buffer_do_copy_from_user_inatomic(void *dest,
 						const void __user *src,
 						unsigned long len)
 {
-	return __copy_from_user(dest, src, len);
+	return __copy_from_user_inatomic(dest, src, len);
 }
 
 /*