Fix: pad strings that are modified concurrently with tracing
[lttng-ust.git] / libringbuffer / backend.h
1 #ifndef _LTTNG_RING_BUFFER_BACKEND_H
2 #define _LTTNG_RING_BUFFER_BACKEND_H
3
4 /*
5 * libringbuffer/backend.h
6 *
7 * Ring buffer backend (API).
8 *
9 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
10 *
11 * This library is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; only
14 * version 2.1 of the License.
15 *
16 * This library is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
20 *
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with this library; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 *
25 * Credits to Steven Rostedt for proposing to use an extra-subbuffer owned by
26 * the reader in flight recorder mode.
27 */
28
29 #include <unistd.h>
30
31 /* Internal helpers */
32 #include "backend_internal.h"
33 #include "frontend_internal.h"
34
35 /* Ring buffer backend API */
36
37 /* Ring buffer backend access (read/write) */
38
39 extern size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb,
40 size_t offset, void *dest, size_t len,
41 struct lttng_ust_shm_handle *handle);
42
43 extern int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb,
44 size_t offset, void *dest, size_t len,
45 struct lttng_ust_shm_handle *handle);
46
47 /*
48 * Return the address where a given offset is located.
49 * Should be used to get the current subbuffer header pointer. Given we know
50 * it's never on a page boundary, it's safe to write directly to this address,
51 * as long as the write is never bigger than a page size.
52 */
53 extern void *
54 lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
55 size_t offset,
56 struct lttng_ust_shm_handle *handle);
57 extern void *
58 lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
59 size_t offset,
60 struct lttng_ust_shm_handle *handle);
61
/**
 * lib_ring_buffer_write - write data to a buffer backend
 * @config : ring buffer instance configuration
 * @ctx: ring buffer context. (input arguments only)
 * @src : source pointer to copy from
 * @len : length of data to copy
 *
 * This function copies "len" bytes of data from a source pointer to a buffer
 * backend, at the current context offset. This is more or less a buffer
 * backend-specific memcpy() operation. Calls the slow path (_ring_buffer_write)
 * if copy is crossing a page boundary.
 */
static inline
void lib_ring_buffer_write(const struct lttng_ust_lib_ring_buffer_config *config,
			   struct lttng_ust_lib_ring_buffer_ctx *ctx,
			   const void *src, size_t len)
{
	struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	size_t sbidx;
	size_t offset = ctx->buf_offset;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	unsigned long sb_bindex, id;

	/* Zero-length writes are a no-op. */
	if (caa_unlikely(!len))
		return;
	/* Wrap the write offset within the buffer (buf_size is a power of 2). */
	offset &= chanb->buf_size - 1;
	/* Index of the sub-buffer this write lands in. */
	sbidx = offset >> chanb->subbuf_size_order;
	/*
	 * Translate the writer sub-buffer id into an index into the
	 * backend pages array shared through the shm handle.
	 */
	id = shmp_index(handle, bufb->buf_wsb, sbidx)->id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	/*
	 * In overwrite mode, writing into a sub-buffer flagged "noref"
	 * (owned by the reader) would be a bug.
	 */
	CHAN_WARN_ON(ctx->chan,
		     config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	/*
	 * Underlying layer should never ask for writes across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	lib_ring_buffer_do_copy(config,
				shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1)),
				src, len);
	/* Advance the context write offset past the copied payload. */
	ctx->buf_offset += len;
}
107
/*
 * Copy at most @len bytes of string data from @src into @dest, stopping
 * as soon as a NUL terminator is found in @src. Returns the number of
 * bytes actually copied. @dest is *not* NUL-terminated by this helper.
 */
static inline
size_t lib_ring_buffer_do_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
		char *dest, const char *src, size_t len)
{
	size_t copied = 0;

	while (copied < len) {
		char ch;

		/*
		 * Read each source byte exactly once: the source string
		 * may be modified concurrently with tracing.
		 */
		ch = CMM_LOAD_SHARED(src[copied]);
		if (ch == '\0')
			break;
		lib_ring_buffer_do_copy(config, &dest[copied], &ch, 1);
		copied++;
	}
	return copied;
}
133
/**
 * lib_ring_buffer_strcpy - write string data to a buffer backend
 * @config : ring buffer instance configuration
 * @ctx: ring buffer context. (input arguments only)
 * @src : source pointer to copy from
 * @len : length of data to copy
 * @pad : character to use for padding
 *
 * This function copies @len - 1 bytes of string data from a source
 * pointer to a buffer backend, followed by a terminating '\0'
 * character, at the current context offset. This is more or less a
 * buffer backend-specific strncpy() operation. If a terminating '\0'
 * character is found in @src before @len - 1 characters are copied, pad
 * the buffer with @pad characters (e.g. '#'). The event record always
 * occupies exactly @len bytes, even if @src was shortened concurrently.
 */
static inline
void lib_ring_buffer_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct lttng_ust_lib_ring_buffer_ctx *ctx,
			    const char *src, size_t len, int pad)
{
	struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	size_t sbidx, count;
	size_t offset = ctx->buf_offset;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	unsigned long sb_bindex, id;

	/* Zero-length writes are a no-op. */
	if (caa_unlikely(!len))
		return;
	/* Wrap the write offset within the buffer (buf_size is a power of 2). */
	offset &= chanb->buf_size - 1;
	/* Index of the sub-buffer this write lands in. */
	sbidx = offset >> chanb->subbuf_size_order;
	/*
	 * Translate the writer sub-buffer id into an index into the
	 * backend pages array shared through the shm handle.
	 */
	id = shmp_index(handle, bufb->buf_wsb, sbidx)->id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	/*
	 * In overwrite mode, writing into a sub-buffer flagged "noref"
	 * (owned by the reader) would be a bug.
	 */
	CHAN_WARN_ON(ctx->chan,
		     config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	/*
	 * Underlying layer should never ask for writes across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	/* Copy up to len - 1 bytes, stopping at any NUL found in src. */
	count = lib_ring_buffer_do_strcpy(config,
					  shmp_index(handle, shmp(handle, rpages->shmp)->p,
						     offset & (chanb->subbuf_size - 1)),
					  src, len - 1);
	offset += count;
	/* Padding */
	if (caa_unlikely(count < len - 1)) {
		/* src ended early: fill the remainder with @pad characters. */
		size_t pad_len = len - 1 - count;

		lib_ring_buffer_do_memset(shmp_index(handle, shmp(handle, rpages->shmp)->p,
						     offset & (chanb->subbuf_size - 1)),
					  pad, pad_len);
		offset += pad_len;
	}
	/* Final '\0' */
	lib_ring_buffer_do_memset(shmp_index(handle, shmp(handle, rpages->shmp)->p,
					     offset & (chanb->subbuf_size - 1)),
				  '\0', 1);
	/* Advance past the full reserved length, padding included. */
	ctx->buf_offset += len;
}
197
198 /*
199 * This accessor counts the number of unread records in a buffer.
200 * It only provides a consistent value if no reads not writes are performed
201 * concurrently.
202 */
203 static inline
204 unsigned long lib_ring_buffer_get_records_unread(
205 const struct lttng_ust_lib_ring_buffer_config *config,
206 struct lttng_ust_lib_ring_buffer *buf,
207 struct lttng_ust_shm_handle *handle)
208 {
209 struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
210 struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
211 unsigned long records_unread = 0, sb_bindex, id;
212 unsigned int i;
213
214 for (i = 0; i < shmp(handle, bufb->chan)->backend.num_subbuf; i++) {
215 id = shmp_index(handle, bufb->buf_wsb, i)->id;
216 sb_bindex = subbuffer_id_get_index(config, id);
217 pages = shmp_index(handle, bufb->array, sb_bindex);
218 records_unread += v_read(config, &shmp(handle, pages->shmp)->records_unread);
219 }
220 if (config->mode == RING_BUFFER_OVERWRITE) {
221 id = bufb->buf_rsb.id;
222 sb_bindex = subbuffer_id_get_index(config, id);
223 pages = shmp_index(handle, bufb->array, sb_bindex);
224 records_unread += v_read(config, &shmp(handle, pages->shmp)->records_unread);
225 }
226 return records_unread;
227 }
228
229 #endif /* _LTTNG_RING_BUFFER_BACKEND_H */
This page took 0.03494 seconds and 5 git commands to generate.