/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring buffer backend (API).
 *
 * Credits to Steven Rostedt for proposing to use an extra-subbuffer owned by
 * the reader in flight recorder mode.
 */

#ifndef _LTTNG_RING_BUFFER_BACKEND_H
#define _LTTNG_RING_BUFFER_BACKEND_H

#include <stddef.h>
#include <unistd.h>

/* Internal helpers */
#include "backend_internal.h"
#include "frontend_internal.h"

/* Ring buffer backend API */

/* Ring buffer backend access (read/write) */

extern size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   size_t offset, void *dest, size_t len,
				   struct lttng_ust_shm_handle *handle);

extern int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb,
				     size_t offset, void *dest, size_t len,
				     struct lttng_ust_shm_handle *handle);

/*
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to write directly to this address,
 * as long as the write is never bigger than a page size.
 */
extern void *
lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
			       size_t offset,
			       struct lttng_ust_shm_handle *handle);
extern void *
lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
				    size_t offset,
				    struct lttng_ust_shm_handle *handle);
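
/*
 * Illustrative sketch (not part of this header): patching a subbuffer
 * header in place through lib_ring_buffer_offset_address(). The header
 * type "struct my_subbuf_header", its field, and the sb_offset value
 * are hypothetical; as documented above, the write must stay smaller
 * than a page size.
 *
 *	struct my_subbuf_header *header;
 *
 *	header = lib_ring_buffer_offset_address(&buf->backend, sb_offset,
 *						handle);
 *	if (header)
 *		header->content_size = len;	(single sub-page-size write)
 */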

/**
 * lib_ring_buffer_write - write data to a buffer backend
 * @config: ring buffer instance configuration
 * @ctx: ring buffer context (input arguments only)
 * @src: source pointer to copy from
 * @len: length of data to copy
 *
 * This function copies @len bytes of data from a source pointer to a buffer
 * backend, at the current context offset. This is more or less a buffer
 * backend-specific memcpy() operation. Calls the slow path
 * (_ring_buffer_write) if the copy crosses a page boundary.
 */
static inline __attribute__((always_inline))
void lib_ring_buffer_write(const struct lttng_ust_lib_ring_buffer_config *config,
			   struct lttng_ust_lib_ring_buffer_ctx *ctx,
			   const void *src, size_t len)
{
	struct channel_backend *chanb = &ctx->chan->backend;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	size_t offset = ctx->buf_offset;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
	void *p;

	if (caa_unlikely(!len))
		return;
	/*
	 * Underlying layer should never ask for writes across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, (offset & (chanb->buf_size - 1)) + len > chanb->buf_size);
	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	if (caa_unlikely(!backend_pages)) {
		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
			return;
	}
	p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
	if (caa_unlikely(!p))
		return;
	lib_ring_buffer_do_copy(config, p, src, len);
	ctx->buf_offset += len;
}
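
/*
 * Illustrative usage sketch: a probe typically reserves space through
 * the frontend API, writes its payload, then commits. The event type
 * "struct my_event" and the surrounding boilerplate are hypothetical
 * (reserve/commit/ctx_init live in frontend_api.h and
 * ringbuffer-config.h); only lib_ring_buffer_write() is taken from
 * this header.
 *
 *	struct lttng_ust_lib_ring_buffer_ctx ctx;
 *	struct my_event ev = { 0 };
 *
 *	lib_ring_buffer_ctx_init(&ctx, chan, NULL, sizeof(ev),
 *				 lttng_alignof(ev), -1, handle);
 *	if (lib_ring_buffer_reserve(config, &ctx))
 *		return;		(buffer full, event discarded)
 *	lib_ring_buffer_write(config, &ctx, &ev, sizeof(ev));
 *	lib_ring_buffer_commit(config, &ctx);
 */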

/*
 * Copy up to @len string bytes from @src to @dest. Stop whenever a
 * terminating '\0' character is found in @src. Returns the number of
 * bytes copied. Does *not* terminate @dest with a '\0' character.
 */
static inline __attribute__((always_inline))
size_t lib_ring_buffer_do_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
		char *dest, const char *src, size_t len)
{
	size_t count;

	for (count = 0; count < len; count++) {
		char c;

		/*
		 * Only read source character once, in case it is
		 * modified concurrently.
		 */
		c = CMM_LOAD_SHARED(src[count]);
		if (!c)
			break;
		lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
	}
	return count;
}
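
/*
 * For instance, with src = "hi" and len = 8, the loop above copies the
 * two bytes 'h' and 'i', stops at the '\0', and returns 2. @dest is
 * left unterminated, which is why callers such as
 * lib_ring_buffer_strcpy() below handle padding and termination
 * themselves.
 */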

/**
 * lib_ring_buffer_strcpy - write string data to a buffer backend
 * @config: ring buffer instance configuration
 * @ctx: ring buffer context (input arguments only)
 * @src: source pointer to copy from
 * @len: length of data to copy
 * @pad: character to use for padding
 *
 * This function copies @len - 1 bytes of string data from a source
 * pointer to a buffer backend, followed by a terminating '\0'
 * character, at the current context offset. This is more or less a
 * buffer backend-specific strncpy() operation. If a terminating '\0'
 * character is found in @src before @len - 1 characters are copied, pad
 * the buffer with @pad characters (e.g. '#').
 */
static inline __attribute__((always_inline))
void lib_ring_buffer_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		const char *src, size_t len, int pad)
{
	struct channel_backend *chanb = &ctx->chan->backend;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	size_t count;
	size_t offset = ctx->buf_offset;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
	void *p;

	if (caa_unlikely(!len))
		return;
	/*
	 * Underlying layer should never ask for writes across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, (offset & (chanb->buf_size - 1)) + len > chanb->buf_size);
	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	if (caa_unlikely(!backend_pages)) {
		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
			return;
	}
	p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
	if (caa_unlikely(!p))
		return;

	count = lib_ring_buffer_do_strcpy(config, p, src, len - 1);
	offset += count;
	/* Padding */
	if (caa_unlikely(count < len - 1)) {
		size_t pad_len = len - 1 - count;

		p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
		if (caa_unlikely(!p))
			return;
		lib_ring_buffer_do_memset(p, pad, pad_len);
		offset += pad_len;
	}
	/* Final '\0' */
	p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
	if (caa_unlikely(!p))
		return;
	lib_ring_buffer_do_memset(p, '\0', 1);
	ctx->buf_offset += len;
}
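
/*
 * Illustrative sketch: recording a fixed-size string field. The field
 * width (16) and the "comm" source buffer are hypothetical. With
 * @len = 16, at most 15 bytes of @src are copied, '#' padding fills
 * any remainder, and the 16th byte is always the terminating '\0'.
 *
 *	lib_ring_buffer_strcpy(config, &ctx, comm, 16, '#');
 */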

/*
 * This accessor counts the number of unread records in a buffer.
 * It only provides a consistent value if neither reads nor writes are
 * performed concurrently.
 */
static inline
unsigned long lib_ring_buffer_get_records_unread(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
	unsigned long records_unread = 0, sb_bindex;
	unsigned int i;
	struct channel *chan;

	chan = shmp(handle, bufb->chan);
	if (!chan)
		return 0;
	for (i = 0; i < chan->backend.num_subbuf; i++) {
		struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
		struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

		wsb = shmp_index(handle, bufb->buf_wsb, i);
		if (!wsb)
			return 0;
		sb_bindex = subbuffer_id_get_index(config, wsb->id);
		rpages = shmp_index(handle, bufb->array, sb_bindex);
		if (!rpages)
			return 0;
		backend_pages = shmp(handle, rpages->shmp);
		if (!backend_pages)
			return 0;
		records_unread += v_read(config, &backend_pages->records_unread);
	}
	if (config->mode == RING_BUFFER_OVERWRITE) {
		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
		struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

		sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
		rpages = shmp_index(handle, bufb->array, sb_bindex);
		if (!rpages)
			return 0;
		backend_pages = shmp(handle, rpages->shmp);
		if (!backend_pages)
			return 0;
		records_unread += v_read(config, &backend_pages->records_unread);
	}
	return records_unread;
}
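
/*
 * Note: in overwrite mode the extra reader-owned subbuffer (buf_rsb)
 * is counted on top of the num_subbuf writer subbuffers. A hypothetical
 * consumer-side check, valid only while no reads or writes are in
 * flight, could look like:
 *
 *	if (lib_ring_buffer_get_records_unread(config, buf, handle) > 0)
 *		(consume the pending records)
 */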

#endif /* _LTTNG_RING_BUFFER_BACKEND_H */