1 #ifndef _LIB_RING_BUFFER_BACKEND_H
2 #define _LIB_RING_BUFFER_BACKEND_H
/*
 * lib/ringbuffer/backend.h
 *
 * Ring buffer backend (API).
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Credits to Steven Rostedt for proposing to use an extra-subbuffer owned by
 * the reader in flight recorder mode.
 */
29 #include <linux/types.h>
30 #include <linux/sched.h>
31 #include <linux/timer.h>
32 #include <linux/wait.h>
33 #include <linux/poll.h>
34 #include <linux/list.h>
37 #include <linux/uaccess.h>
39 /* Internal helpers */
40 #include "../../wrapper/ringbuffer/backend_internal.h"
41 #include "../../wrapper/ringbuffer/frontend_internal.h"
43 /* Ring buffer backend API */
45 /* Ring buffer backend access (read/write) */
47 extern size_t lib_ring_buffer_read(struct lib_ring_buffer_backend
*bufb
,
48 size_t offset
, void *dest
, size_t len
);
50 extern int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend
*bufb
,
51 size_t offset
, void __user
*dest
,
54 extern int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend
*bufb
,
55 size_t offset
, void *dest
, size_t len
);
58 lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend
*bufb
, size_t offset
,
62 * Return the address where a given offset is located.
63 * Should be used to get the current subbuffer header pointer. Given we know
64 * it's never on a page boundary, it's safe to write directly to this address,
65 * as long as the write is never bigger than a page size.
68 lib_ring_buffer_offset_address(struct lib_ring_buffer_backend
*bufb
,
71 lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend
*bufb
,
75 * lib_ring_buffer_write - write data to a buffer backend
76 * @config : ring buffer instance configuration
77 * @ctx: ring buffer context. (input arguments only)
78 * @src : source pointer to copy from
79 * @len : length of data to copy
81 * This function copies "len" bytes of data from a source pointer to a buffer
82 * backend, at the current context offset. This is more or less a buffer
83 * backend-specific memcpy() operation. Calls the slow path (_ring_buffer_write)
84 * if copy is crossing a page boundary.
87 void lib_ring_buffer_write(const struct lib_ring_buffer_config
*config
,
88 struct lib_ring_buffer_ctx
*ctx
,
89 const void *src
, size_t len
)
91 struct lib_ring_buffer_backend
*bufb
= &ctx
->buf
->backend
;
92 struct channel_backend
*chanb
= &ctx
->chan
->backend
;
94 size_t offset
= ctx
->buf_offset
;
96 struct lib_ring_buffer_backend_pages
*rpages
;
97 unsigned long sb_bindex
, id
;
101 offset
&= chanb
->buf_size
- 1;
102 sbidx
= offset
>> chanb
->subbuf_size_order
;
103 index
= (offset
& (chanb
->subbuf_size
- 1)) >> PAGE_SHIFT
;
104 pagecpy
= min_t(size_t, len
, (-offset
) & ~PAGE_MASK
);
105 id
= bufb
->buf_wsb
[sbidx
].id
;
106 sb_bindex
= subbuffer_id_get_index(config
, id
);
107 rpages
= bufb
->array
[sb_bindex
];
108 CHAN_WARN_ON(ctx
->chan
,
109 config
->mode
== RING_BUFFER_OVERWRITE
110 && subbuffer_id_is_noref(config
, id
));
111 if (likely(pagecpy
== len
))
112 lib_ring_buffer_do_copy(config
,
113 rpages
->p
[index
].virt
114 + (offset
& ~PAGE_MASK
),
117 _lib_ring_buffer_write(bufb
, offset
, src
, len
, 0);
118 ctx
->buf_offset
+= len
;
122 * lib_ring_buffer_memset - write len bytes of c to a buffer backend
123 * @config : ring buffer instance configuration
124 * @bufb : ring buffer backend
125 * @offset : offset within the buffer
126 * @c : the byte to copy
127 * @len : number of bytes to copy
129 * This function writes "len" bytes of "c" to a buffer backend, at a specific
130 * offset. This is more or less a buffer backend-specific memset() operation.
131 * Calls the slow path (_ring_buffer_memset) if write is crossing a page
135 void lib_ring_buffer_memset(const struct lib_ring_buffer_config
*config
,
136 struct lib_ring_buffer_ctx
*ctx
, int c
, size_t len
)
139 struct lib_ring_buffer_backend
*bufb
= &ctx
->buf
->backend
;
140 struct channel_backend
*chanb
= &ctx
->chan
->backend
;
142 size_t offset
= ctx
->buf_offset
;
144 struct lib_ring_buffer_backend_pages
*rpages
;
145 unsigned long sb_bindex
, id
;
149 offset
&= chanb
->buf_size
- 1;
150 sbidx
= offset
>> chanb
->subbuf_size_order
;
151 index
= (offset
& (chanb
->subbuf_size
- 1)) >> PAGE_SHIFT
;
152 pagecpy
= min_t(size_t, len
, (-offset
) & ~PAGE_MASK
);
153 id
= bufb
->buf_wsb
[sbidx
].id
;
154 sb_bindex
= subbuffer_id_get_index(config
, id
);
155 rpages
= bufb
->array
[sb_bindex
];
156 CHAN_WARN_ON(ctx
->chan
,
157 config
->mode
== RING_BUFFER_OVERWRITE
158 && subbuffer_id_is_noref(config
, id
));
159 if (likely(pagecpy
== len
))
160 lib_ring_buffer_do_memset(rpages
->p
[index
].virt
161 + (offset
& ~PAGE_MASK
),
164 _lib_ring_buffer_memset(bufb
, offset
, c
, len
, 0);
165 ctx
->buf_offset
+= len
;
169 * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
170 * terminating character is found in @src. Returns the number of bytes
171 * copied. Does *not* terminate @dest with NULL terminating character.
174 size_t lib_ring_buffer_do_strcpy(const struct lib_ring_buffer_config
*config
,
175 char *dest
, const char *src
, size_t len
)
179 for (count
= 0; count
< len
; count
++) {
183 * Only read source character once, in case it is
184 * modified concurrently.
186 c
= ACCESS_ONCE(src
[count
]);
189 lib_ring_buffer_do_copy(config
, &dest
[count
], &c
, 1);
195 * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
196 * terminating character is found in @src, or when a fault occurs.
197 * Returns the number of bytes copied. Does *not* terminate @dest with
198 * NULL terminating character.
200 * This function deals with userspace pointers, it should never be called
201 * directly without having the src pointer checked with access_ok()
205 size_t lib_ring_buffer_do_strcpy_from_user_inatomic(const struct lib_ring_buffer_config
*config
,
206 char *dest
, const char __user
*src
, size_t len
)
210 for (count
= 0; count
< len
; count
++) {
214 ret
= __get_user(c
, &src
[count
]);
217 lib_ring_buffer_do_copy(config
, &dest
[count
], &c
, 1);
223 * lib_ring_buffer_strcpy - write string data to a buffer backend
224 * @config : ring buffer instance configuration
225 * @ctx: ring buffer context. (input arguments only)
226 * @src : source pointer to copy from
227 * @len : length of data to copy
228 * @pad : character to use for padding
230 * This function copies @len - 1 bytes of string data from a source
231 * pointer to a buffer backend, followed by a terminating '\0'
232 * character, at the current context offset. This is more or less a
233 * buffer backend-specific strncpy() operation. If a terminating '\0'
234 * character is found in @src before @len - 1 characters are copied, pad
235 * the buffer with @pad characters (e.g. '#'). Calls the slow path
236 * (_ring_buffer_strcpy) if copy is crossing a page boundary.
239 void lib_ring_buffer_strcpy(const struct lib_ring_buffer_config
*config
,
240 struct lib_ring_buffer_ctx
*ctx
,
241 const char *src
, size_t len
, int pad
)
243 struct lib_ring_buffer_backend
*bufb
= &ctx
->buf
->backend
;
244 struct channel_backend
*chanb
= &ctx
->chan
->backend
;
245 size_t sbidx
, index
, pagecpy
;
246 size_t offset
= ctx
->buf_offset
;
247 struct lib_ring_buffer_backend_pages
*rpages
;
248 unsigned long sb_bindex
, id
;
252 offset
&= chanb
->buf_size
- 1;
253 sbidx
= offset
>> chanb
->subbuf_size_order
;
254 index
= (offset
& (chanb
->subbuf_size
- 1)) >> PAGE_SHIFT
;
255 pagecpy
= min_t(size_t, len
, (-offset
) & ~PAGE_MASK
);
256 id
= bufb
->buf_wsb
[sbidx
].id
;
257 sb_bindex
= subbuffer_id_get_index(config
, id
);
258 rpages
= bufb
->array
[sb_bindex
];
259 CHAN_WARN_ON(ctx
->chan
,
260 config
->mode
== RING_BUFFER_OVERWRITE
261 && subbuffer_id_is_noref(config
, id
));
262 if (likely(pagecpy
== len
)) {
265 count
= lib_ring_buffer_do_strcpy(config
,
266 rpages
->p
[index
].virt
267 + (offset
& ~PAGE_MASK
),
271 if (unlikely(count
< len
- 1)) {
272 size_t pad_len
= len
- 1 - count
;
274 lib_ring_buffer_do_memset(rpages
->p
[index
].virt
275 + (offset
& ~PAGE_MASK
),
280 lib_ring_buffer_do_memset(rpages
->p
[index
].virt
281 + (offset
& ~PAGE_MASK
),
284 _lib_ring_buffer_strcpy(bufb
, offset
, src
, len
, 0, pad
);
286 ctx
->buf_offset
+= len
;
290 * lib_ring_buffer_copy_from_user_inatomic - write userspace data to a buffer backend
291 * @config : ring buffer instance configuration
292 * @ctx: ring buffer context. (input arguments only)
293 * @src : userspace source pointer to copy from
294 * @len : length of data to copy
296 * This function copies "len" bytes of data from a userspace pointer to a
297 * buffer backend, at the current context offset. This is more or less a buffer
298 * backend-specific memcpy() operation. Calls the slow path
299 * (_ring_buffer_write_from_user_inatomic) if copy is crossing a page boundary.
300 * Disable the page fault handler to ensure we never try to take the mmap_sem.
303 void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config
*config
,
304 struct lib_ring_buffer_ctx
*ctx
,
305 const void __user
*src
, size_t len
)
307 struct lib_ring_buffer_backend
*bufb
= &ctx
->buf
->backend
;
308 struct channel_backend
*chanb
= &ctx
->chan
->backend
;
310 size_t offset
= ctx
->buf_offset
;
312 struct lib_ring_buffer_backend_pages
*rpages
;
313 unsigned long sb_bindex
, id
;
315 mm_segment_t old_fs
= get_fs();
319 offset
&= chanb
->buf_size
- 1;
320 sbidx
= offset
>> chanb
->subbuf_size_order
;
321 index
= (offset
& (chanb
->subbuf_size
- 1)) >> PAGE_SHIFT
;
322 pagecpy
= min_t(size_t, len
, (-offset
) & ~PAGE_MASK
);
323 id
= bufb
->buf_wsb
[sbidx
].id
;
324 sb_bindex
= subbuffer_id_get_index(config
, id
);
325 rpages
= bufb
->array
[sb_bindex
];
326 CHAN_WARN_ON(ctx
->chan
,
327 config
->mode
== RING_BUFFER_OVERWRITE
328 && subbuffer_id_is_noref(config
, id
));
332 if (unlikely(!access_ok(VERIFY_READ
, src
, len
)))
335 if (likely(pagecpy
== len
)) {
336 ret
= lib_ring_buffer_do_copy_from_user_inatomic(
337 rpages
->p
[index
].virt
+ (offset
& ~PAGE_MASK
),
339 if (unlikely(ret
> 0)) {
340 len
-= (pagecpy
- ret
);
341 offset
+= (pagecpy
- ret
);
345 _lib_ring_buffer_copy_from_user_inatomic(bufb
, offset
, src
, len
, 0);
349 ctx
->buf_offset
+= len
;
357 * In the error path we call the slow path version to avoid
358 * the pollution of static inline code.
360 _lib_ring_buffer_memset(bufb
, offset
, 0, len
, 0);
364 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a buffer backend
365 * @config : ring buffer instance configuration
366 * @ctx: ring buffer context (input arguments only)
367 * @src : userspace source pointer to copy from
368 * @len : length of data to copy
369 * @pad : character to use for padding
371 * This function copies @len - 1 bytes of string data from a userspace
372 * source pointer to a buffer backend, followed by a terminating '\0'
373 * character, at the current context offset. This is more or less a
374 * buffer backend-specific strncpy() operation. If a terminating '\0'
375 * character is found in @src before @len - 1 characters are copied, pad
376 * the buffer with @pad characters (e.g. '#'). Calls the slow path
377 * (_ring_buffer_strcpy_from_user_inatomic) if copy is crossing a page
378 * boundary. Disable the page fault handler to ensure we never try to
382 void lib_ring_buffer_strcpy_from_user_inatomic(const struct lib_ring_buffer_config
*config
,
383 struct lib_ring_buffer_ctx
*ctx
,
384 const void __user
*src
, size_t len
, int pad
)
386 struct lib_ring_buffer_backend
*bufb
= &ctx
->buf
->backend
;
387 struct channel_backend
*chanb
= &ctx
->chan
->backend
;
388 size_t sbidx
, index
, pagecpy
;
389 size_t offset
= ctx
->buf_offset
;
390 struct lib_ring_buffer_backend_pages
*rpages
;
391 unsigned long sb_bindex
, id
;
392 mm_segment_t old_fs
= get_fs();
396 offset
&= chanb
->buf_size
- 1;
397 sbidx
= offset
>> chanb
->subbuf_size_order
;
398 index
= (offset
& (chanb
->subbuf_size
- 1)) >> PAGE_SHIFT
;
399 pagecpy
= min_t(size_t, len
, (-offset
) & ~PAGE_MASK
);
400 id
= bufb
->buf_wsb
[sbidx
].id
;
401 sb_bindex
= subbuffer_id_get_index(config
, id
);
402 rpages
= bufb
->array
[sb_bindex
];
403 CHAN_WARN_ON(ctx
->chan
,
404 config
->mode
== RING_BUFFER_OVERWRITE
405 && subbuffer_id_is_noref(config
, id
));
409 if (unlikely(!access_ok(VERIFY_READ
, src
, len
)))
412 if (likely(pagecpy
== len
)) {
415 count
= lib_ring_buffer_do_strcpy_from_user_inatomic(config
,
416 rpages
->p
[index
].virt
417 + (offset
& ~PAGE_MASK
),
421 if (unlikely(count
< len
- 1)) {
422 size_t pad_len
= len
- 1 - count
;
424 lib_ring_buffer_do_memset(rpages
->p
[index
].virt
425 + (offset
& ~PAGE_MASK
),
430 lib_ring_buffer_do_memset(rpages
->p
[index
].virt
431 + (offset
& ~PAGE_MASK
),
434 _lib_ring_buffer_strcpy_from_user_inatomic(bufb
, offset
, src
,
439 ctx
->buf_offset
+= len
;
447 * In the error path we call the slow path version to avoid
448 * the pollution of static inline code.
450 _lib_ring_buffer_memset(bufb
, offset
, pad
, len
- 1, 0);
452 _lib_ring_buffer_memset(bufb
, offset
, '\0', 1, 0);
456 * This accessor counts the number of unread records in a buffer.
457 * It only provides a consistent value if no reads not writes are performed
461 unsigned long lib_ring_buffer_get_records_unread(
462 const struct lib_ring_buffer_config
*config
,
463 struct lib_ring_buffer
*buf
)
465 struct lib_ring_buffer_backend
*bufb
= &buf
->backend
;
466 struct lib_ring_buffer_backend_pages
*pages
;
467 unsigned long records_unread
= 0, sb_bindex
, id
;
470 for (i
= 0; i
< bufb
->chan
->backend
.num_subbuf
; i
++) {
471 id
= bufb
->buf_wsb
[i
].id
;
472 sb_bindex
= subbuffer_id_get_index(config
, id
);
473 pages
= bufb
->array
[sb_bindex
];
474 records_unread
+= v_read(config
, &pages
->records_unread
);
476 if (config
->mode
== RING_BUFFER_OVERWRITE
) {
477 id
= bufb
->buf_rsb
.id
;
478 sb_bindex
= subbuffer_id_get_index(config
, id
);
479 pages
= bufb
->array
[sb_bindex
];
480 records_unread
+= v_read(config
, &pages
->records_unread
);
482 return records_unread
;
485 #endif /* _LIB_RING_BUFFER_BACKEND_H */