/*
 * buffers.h
 * LTTng userspace tracer buffering system
 *
 * Copyright (C) 2009 - Pierre-Marc Fournier (pierre-marc dot fournier at polymtl dot ca)
 * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _UST_BUFFERS_H
#define _UST_BUFFERS_H

#include <assert.h>

#include <ust/core.h>
#include <ust/clock.h>

#include "usterr_signal_safe.h"
#include "channels.h"
#include "tracerconst.h"
#include "tracercore.h"

/***** FIXME: SHOULD BE REMOVED ***** */

/*
 * BUFFER_TRUNC zeroes the subbuffer offset and the subbuffer number parts of
 * the offset, which leaves only the buffer number.
 */
#define BUFFER_TRUNC(offset, chan) \
        ((offset) & (~((chan)->alloc_size-1)))
#define BUFFER_OFFSET(offset, chan) ((offset) & ((chan)->alloc_size - 1))
#define SUBBUF_OFFSET(offset, chan) ((offset) & ((chan)->subbuf_size - 1))
#define SUBBUF_ALIGN(offset, chan) \
        (((offset) + (chan)->subbuf_size) & (~((chan)->subbuf_size - 1)))
#define SUBBUF_TRUNC(offset, chan) \
        ((offset) & (~((chan)->subbuf_size - 1)))
#define SUBBUF_INDEX(offset, chan) \
        (BUFFER_OFFSET((offset), chan) >> (chan)->subbuf_size_order)
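
/*
 * Worked example (illustrative only, with assumed channel geometry): for a
 * channel with subbuf_size = 4096 (subbuf_size_order = 12) and 4 sub-buffers,
 * so alloc_size = 16384, an offset of 9300 gives:
 *   BUFFER_TRUNC(9300, chan)  = 9300 & ~16383        = 0
 *   BUFFER_OFFSET(9300, chan) = 9300 & 16383         = 9300
 *   SUBBUF_OFFSET(9300, chan) = 9300 & 4095          = 1108
 *   SUBBUF_TRUNC(9300, chan)  = 9300 & ~4095         = 8192
 *   SUBBUF_ALIGN(9300, chan)  = (9300 + 4096) & ~4095 = 12288 (start of next sub-buffer)
 *   SUBBUF_INDEX(9300, chan)  = 9300 >> 12           = 2
 */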

/*
 * Tracks changes to rchan/rchan_buf structs
 */
#define UST_CHANNEL_VERSION 8

/**************************************/

struct commit_counters {
        long cc;                /* ATOMIC */
        long cc_sb;             /* ATOMIC - Incremented _once_ at sb switch */
};

struct ust_buffer {
        /* First 32 bytes cache-hot cacheline */
        long offset;                            /* Current offset in the buffer *atomic* */
        struct commit_counters *commit_count;   /* Commit count per sub-buffer */
        long consumed;                          /* Current offset in the buffer *atomic* access (shared) */
        unsigned long last_tsc;                 /*
                                                 * Last timestamp written in the buffer.
                                                 */
        /* End of first 32 bytes cacheline */
        long active_readers;    /* ATOMIC - Active readers count standard atomic access (shared) */
        long events_lost;       /* ATOMIC */
        long corrupted_subbuffers;      /* ATOMIC */
        /*
         * One byte is written to this pipe when data is available, in order
         * to wake the consumer.
         * Portability note: single-byte writes must be as quick as possible.
         * The kernel-side buffer must be large enough so the writer does not
         * block. From the pipe(7) man page: since Linux 2.6.11, the pipe
         * capacity is 65536 bytes.
         */
        int data_ready_fd_write;
        /* the reading end of the pipe */
        int data_ready_fd_read;
        /*
         * List of buffers with an open pipe, used for fork and forced subbuffer
         * switch.
         */
        struct cds_list_head open_buffers_list;

        unsigned int finalized;
//ust// struct timer_list switch_timer; /* timer for periodical switch */
        unsigned long switch_timer_interval;    /* 0 = unset */

        struct ust_channel *chan;

        struct urcu_ref urcu_ref;
        void *buf_data;
        size_t buf_size;
        int shmid;
        unsigned int cpu;

        /* commit count per subbuffer; must be at end of struct */
        long commit_seq[0];     /* ATOMIC */
} ____cacheline_aligned;
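
/*
 * Note (added for clarity): commit_seq is a flexible array member, so the
 * code that allocates struct ust_buffer is expected to reserve one extra
 * long per sub-buffer past the end of the struct (see the buffer allocation
 * in buffers.c).
 */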

/*
 * A sub-buffer switch is performed either while tracing is active
 * (FORCE_ACTIVE) or as a final flush once tracing is done (FORCE_FLUSH,
 * which does not write into the new sub-buffer).
 */
enum force_switch_mode { FORCE_ACTIVE, FORCE_FLUSH };

extern int ltt_reserve_slot_lockless_slow(struct ust_channel *chan,
                struct ust_trace *trace, size_t data_size,
                int largest_align, int cpu,
                struct ust_buffer **ret_buf,
                size_t *slot_size, long *buf_offset,
                u64 *tsc, unsigned int *rflags);

extern void ltt_force_switch_lockless_slow(struct ust_buffer *buf,
                enum force_switch_mode mode);

#ifndef HAVE_EFFICIENT_UNALIGNED_ACCESS

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 */
static inline unsigned int ltt_align(size_t align_drift, size_t size_of_type)
{
        size_t alignment = min(sizeof(void *), size_of_type);
        return (alignment - align_drift) & (alignment - 1);
}
/* Default arch alignment */
#define LTT_ALIGN

static inline int ltt_get_alignment(void)
{
        return sizeof(void *);
}

#else /* HAVE_EFFICIENT_UNALIGNED_ACCESS */

static inline unsigned int ltt_align(size_t align_drift,
                size_t size_of_type)
{
        return 0;
}

#define LTT_ALIGN __attribute__((packed))

static inline int ltt_get_alignment(void)
{
        return 0;
}
#endif /* HAVE_EFFICIENT_UNALIGNED_ACCESS */
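
/*
 * Worked example of ltt_align() (illustrative only): on an LP64 build without
 * HAVE_EFFICIENT_UNALIGNED_ACCESS, ltt_align(5, sizeof(u32)) computes
 * alignment = min(8, 4) = 4 and returns (4 - 5) & 3 = 3, i.e. three bytes of
 * padding to bring offset 5 up to 8, the next multiple of 4. With
 * HAVE_EFFICIENT_UNALIGNED_ACCESS defined, no padding is ever inserted.
 */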

static __inline__ void ust_buffers_do_copy(void *dest, const void *src, size_t len)
{
        union {
                const void *src;
                const u8 *src8;
                const u16 *src16;
                const u32 *src32;
                const u64 *src64;
        } u = { .src = src };

        switch (len) {
        case 0: break;
        case 1: *(u8 *)dest = *u.src8;
                break;
        case 2: *(u16 *)dest = *u.src16;
                break;
        case 4: *(u32 *)dest = *u.src32;
                break;
        case 8: *(u64 *)dest = *u.src64;
                break;
        default:
                memcpy(dest, src, len);
        }
}

static __inline__ void *ust_buffers_offset_address(struct ust_buffer *buf, size_t offset)
{
        return ((char *)buf->buf_data) + offset;
}

/*
 * Last TSC comparison functions. Check if the current TSC overflows
 * LTT_TSC_BITS bits from the last TSC read. Reads and writes last_tsc
 * atomically.
 */

/* FIXME: does this test work properly? */
#if (BITS_PER_LONG == 32)
static __inline__ void save_last_tsc(struct ust_buffer *ltt_buf,
                u64 tsc)
{
        ltt_buf->last_tsc = (unsigned long)(tsc >> LTT_TSC_BITS);
}

static __inline__ int last_tsc_overflow(struct ust_buffer *ltt_buf,
                u64 tsc)
{
        unsigned long tsc_shifted = (unsigned long)(tsc >> LTT_TSC_BITS);

        if (unlikely(tsc_shifted - ltt_buf->last_tsc))
                return 1;
        else
                return 0;
}
#else
static __inline__ void save_last_tsc(struct ust_buffer *ltt_buf,
                u64 tsc)
{
        ltt_buf->last_tsc = (unsigned long)tsc;
}

static __inline__ int last_tsc_overflow(struct ust_buffer *ltt_buf,
                u64 tsc)
{
        if (unlikely((tsc - ltt_buf->last_tsc) >> LTT_TSC_BITS))
                return 1;
        else
                return 0;
}
#endif
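
/*
 * Note (added for clarity): last_tsc_overflow() is what decides whether an
 * event record needs a full 64-bit timestamp. When the delta since the
 * previously recorded TSC no longer fits in LTT_TSC_BITS bits, the reserve
 * path sets LTT_RFLAG_ID_SIZE_TSC so that the extended event header carrying
 * the full TSC is emitted (see ltt_relay_try_reserve() below).
 */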

/*
 * ust_get_header_size
 *
 * Calculate alignment offset to 32-bits. This is the alignment offset of the
 * event header.
 *
 * Important note:
 * The event header must be 32-bits. The total offset calculated here:
 *
 *   Alignment of header struct on 32 bits (min arch size, header size)
 * + sizeof(header struct)  (32 bits)
 * + (opt) u16 (ext. event id)
 * + (opt) u16 (event_size) (if event_size == 0xFFFFUL, has ext. event size)
 * + (opt) u32 (ext. event size)
 * + (opt) u64 full TSC (aligned on min(64-bits, arch size))
 *
 * The payload must itself determine its own alignment from the biggest type it
 * contains.
 */
static __inline__ unsigned char ust_get_header_size(
                struct ust_channel *channel,
                size_t offset,
                size_t data_size,
                size_t *before_hdr_pad,
                unsigned int rflags)
{
        size_t orig_offset = offset;
        size_t padding;

        padding = ltt_align(offset, sizeof(struct ltt_event_header));
        offset += padding;
        offset += sizeof(struct ltt_event_header);

        if (unlikely(rflags)) {
                switch (rflags) {
                case LTT_RFLAG_ID_SIZE_TSC:
                        offset += sizeof(u16) + sizeof(u16);
                        if (data_size >= 0xFFFFU)
                                offset += sizeof(u32);
                        offset += ltt_align(offset, sizeof(u64));
                        offset += sizeof(u64);
                        break;
                case LTT_RFLAG_ID_SIZE:
                        offset += sizeof(u16) + sizeof(u16);
                        if (data_size >= 0xFFFFU)
                                offset += sizeof(u32);
                        break;
                case LTT_RFLAG_ID:
                        offset += sizeof(u16);
                        break;
                }
        }

        *before_hdr_pad = padding;
        return offset - orig_offset;
}
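
/*
 * Worked example (illustrative only, assuming a 4-byte struct ltt_event_header
 * as described above and data_size < 0xFFFF): starting from an offset that is
 * a multiple of 8, with rflags == LTT_RFLAG_ID_SIZE_TSC, the header occupies
 *   0 (padding) + 4 (header) + 2 (ext. event id) + 2 (event_size)
 *   + 0 (TSC alignment, already on a suitable boundary) + 8 (full TSC)
 * = 16 bytes, and *before_hdr_pad is set to 0.
 */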

static __inline__ void ltt_reserve_push_reader(
                struct ust_channel *rchan,
                struct ust_buffer *buf,
                long offset)
{
        long consumed_old, consumed_new;

        do {
                consumed_old = uatomic_read(&buf->consumed);
                /*
                 * If buffer is in overwrite mode, push the reader consumed
                 * count if the write position has reached it and we are not
                 * at the first iteration (don't push the reader farther than
                 * the writer). This operation can be done concurrently by many
                 * writers in the same buffer, the writer being at the farthest
                 * write position sub-buffer index in the buffer being the one
                 * which will win this loop.
                 * If the buffer is not in overwrite mode, pushing the reader
                 * only happens if a sub-buffer is corrupted.
                 */
                if (unlikely((SUBBUF_TRUNC(offset, buf->chan)
                                - SUBBUF_TRUNC(consumed_old, buf->chan))
                                >= rchan->alloc_size))
                        consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
                else
                        return;
        } while (unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
                        consumed_new) != consumed_old));
}

static __inline__ void ltt_vmcore_check_deliver(
                struct ust_buffer *buf,
                long commit_count, long idx)
{
        uatomic_set(&buf->commit_seq[idx], commit_count);
}

static __inline__ void ltt_check_deliver(struct ust_channel *chan,
                struct ust_buffer *buf,
                long offset, long commit_count, long idx)
{
        long old_commit_count = commit_count - chan->subbuf_size;

        /* Check if all commits have been done */
        if (unlikely((BUFFER_TRUNC(offset, chan)
                        >> chan->n_subbufs_order)
                        - (old_commit_count
                           & chan->commit_count_mask) == 0)) {
                /*
                 * If we succeeded in updating the cc_sb, we are delivering
                 * the subbuffer. Deals with concurrent updates of the "cc"
                 * value without adding an add_return atomic operation to the
                 * fast path.
                 */
                if (likely(uatomic_cmpxchg(&buf->commit_count[idx].cc_sb,
                                old_commit_count, commit_count)
                                == old_commit_count)) {
                        int result;

                        /*
                         * Set noref flag for this subbuffer.
                         */
//ust//                 ltt_set_noref_flag(rchan, buf, idx);
                        ltt_vmcore_check_deliver(buf, commit_count, idx);

                        /* wakeup consumer */
                        result = write(buf->data_ready_fd_write, "1", 1);
                        if (result == -1) {
                                PERROR("write (in ltt_check_deliver)");
                                ERR("this should never happen!");
                        }
                }
        }
}

static __inline__ int ltt_poll_deliver(struct ust_channel *chan, struct ust_buffer *buf)
{
        long consumed_old, consumed_idx, commit_count, write_offset;

        consumed_old = uatomic_read(&buf->consumed);
        consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
        commit_count = uatomic_read(&buf->commit_count[consumed_idx].cc_sb);
        /*
         * No memory barrier here, since we are only interested
         * in a statistically correct polling result. The next poll will
         * get the data if we are racing. The mb() that ensures correct
         * memory order is in get_subbuf.
         */
        write_offset = uatomic_read(&buf->offset);

        /*
         * Check that the subbuffer we are trying to consume has been
         * already fully committed.
         */

        if (((commit_count - chan->subbuf_size)
                        & chan->commit_count_mask)
                        - (BUFFER_TRUNC(consumed_old, buf->chan)
                           >> chan->n_subbufs_order)
                        != 0)
                return 0;

        /*
         * Check that we are not about to read the same subbuffer in
         * which the writer head is.
         */
        if ((SUBBUF_TRUNC(write_offset, buf->chan)
                        - SUBBUF_TRUNC(consumed_old, buf->chan))
                        == 0)
                return 0;

        return 1;
}

/*
 * Returns 0 if reserve ok, or 1 if the slow path must be taken.
 */
static __inline__ int ltt_relay_try_reserve(
                struct ust_channel *chan,
                struct ust_buffer *buf,
                size_t data_size,
                u64 *tsc, unsigned int *rflags, int largest_align,
                long *o_begin, long *o_end, long *o_old,
                size_t *before_hdr_pad, size_t *size)
{
        *o_begin = uatomic_read(&buf->offset);
        *o_old = *o_begin;

        *tsc = trace_clock_read64();

//ust// #ifdef CONFIG_LTT_VMCORE
//ust//         prefetch(&buf->commit_count[SUBBUF_INDEX(*o_begin, rchan)]);
//ust//         prefetch(&buf->commit_seq[SUBBUF_INDEX(*o_begin, rchan)]);
//ust// #else
//ust//         prefetchw(&buf->commit_count[SUBBUF_INDEX(*o_begin, rchan)]);
//ust// #endif
        if (last_tsc_overflow(buf, *tsc))
                *rflags = LTT_RFLAG_ID_SIZE_TSC;

        if (unlikely(SUBBUF_OFFSET(*o_begin, buf->chan) == 0))
                return 1;

        *size = ust_get_header_size(chan,
                        *o_begin, data_size,
                        before_hdr_pad, *rflags);
        *size += ltt_align(*o_begin + *size, largest_align) + data_size;
        if (unlikely((SUBBUF_OFFSET(*o_begin, buf->chan) + *size)
                        > buf->chan->subbuf_size))
                return 1;

        /*
         * Event fits in the current buffer and we are not on a switch
         * boundary. It's safe to write.
         */
        *o_end = *o_begin + *size;

        if (unlikely((SUBBUF_OFFSET(*o_end, buf->chan)) == 0))
                /*
                 * The offset_end will fall at the very beginning of the next
                 * subbuffer.
                 */
                return 1;

        return 0;
}
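
/*
 * Note (added for clarity): the fast path above bails out to the lockless
 * slow path in three cases: the reserve starts exactly at a sub-buffer
 * boundary (SUBBUF_OFFSET(*o_begin) == 0), the record does not fit in the
 * current sub-buffer, or the record would end exactly at the start of the
 * next sub-buffer. All of these need a sub-buffer switch, which is handled
 * by ltt_reserve_slot_lockless_slow().
 */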

static __inline__ int ltt_reserve_slot(struct ust_channel *chan,
                struct ust_trace *trace, size_t data_size,
                int largest_align, int cpu,
                struct ust_buffer **ret_buf,
                size_t *slot_size, long *buf_offset, u64 *tsc,
                unsigned int *rflags)
{
        struct ust_buffer *buf = *ret_buf = chan->buf[cpu];
        long o_begin, o_end, o_old;
        size_t before_hdr_pad;

        /*
         * Perform retryable operations.
         */
        /* FIXME: make this really per cpu? */
        if (unlikely(CMM_LOAD_SHARED(ltt_nesting) > 4)) {
                DBG("Dropping event because nesting is too deep.");
                uatomic_inc(&buf->events_lost);
                return -EPERM;
        }

        if (unlikely(ltt_relay_try_reserve(chan, buf,
                        data_size, tsc, rflags,
                        largest_align, &o_begin, &o_end, &o_old,
                        &before_hdr_pad, slot_size)))
                goto slow_path;

        if (unlikely(uatomic_cmpxchg(&buf->offset, o_old, o_end) != o_old))
                goto slow_path;

        /*
         * Atomically update last_tsc. This update races against concurrent
         * atomic updates, but the race will always cause supplementary full TSC
         * events, never the opposite (missing a full TSC event when it would be
         * needed).
         */
        save_last_tsc(buf, *tsc);

        /*
         * Push the reader if necessary
         */
        ltt_reserve_push_reader(chan, buf, o_end - 1);

        /*
         * Clear noref flag for this subbuffer.
         */
//ust// ltt_clear_noref_flag(chan, buf, SUBBUF_INDEX(o_end - 1, chan));

        *buf_offset = o_begin + before_hdr_pad;
        return 0;
slow_path:
        return ltt_reserve_slot_lockless_slow(chan, trace, data_size,
                        largest_align, cpu, ret_buf,
                        slot_size, buf_offset, tsc,
                        rflags);
}

/*
 * Force a sub-buffer switch for a per-cpu buffer. This operation is
 * completely reentrant: it can be called while tracing is active with
 * absolutely no lock held.
 */
static __inline__ void ltt_force_switch(struct ust_buffer *buf,
                enum force_switch_mode mode)
{
        return ltt_force_switch_lockless_slow(buf, mode);
}

/*
 * For flight recording. Must be called after relay_commit.
 * This function increments the sub-buffer's commit_seq counter each time the
 * commit count reaches back the reserve offset (modulo subbuffer size). It is
 * useful for crash dumps.
 */
//ust// #ifdef CONFIG_LTT_VMCORE
static __inline__ void ltt_write_commit_counter(struct ust_channel *chan,
                struct ust_buffer *buf, long idx, long buf_offset,
                long commit_count, size_t data_size)
{
        long offset;
        long commit_seq_old;

        offset = buf_offset + data_size;

        /*
         * SUBBUF_OFFSET includes commit_count_mask. We can simply
         * compare the offsets within the subbuffer without caring about
         * buffer full/empty mismatch because offset is never zero here
         * (subbuffer header and event headers have non-zero length).
         */
        if (unlikely(SUBBUF_OFFSET(offset - commit_count, buf->chan)))
                return;

        commit_seq_old = uatomic_read(&buf->commit_seq[idx]);
        while (commit_seq_old < commit_count)
                commit_seq_old = uatomic_cmpxchg(&buf->commit_seq[idx],
                                commit_seq_old, commit_count);

        DBG("commit_seq for channel %s_%d, subbuf %ld is now %ld",
            buf->chan->channel_name, buf->cpu, idx, commit_count);
}
//ust// #else
//ust// static __inline__ void ltt_write_commit_counter(struct ust_buffer *buf,
//ust//                 long idx, long buf_offset, long commit_count, size_t data_size)
//ust// {
//ust// }
//ust// #endif

/*
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 *
 * Parameters:
 *
 * @chan : channel structure
 * @buf : buffer to commit into
 * @buf_offset : offset following the event header.
 * @data_size : size of the event data.
 * @slot_size : size of the reserved slot.
 */
static __inline__ void ltt_commit_slot(
                struct ust_channel *chan,
                struct ust_buffer *buf, long buf_offset,
                size_t data_size, size_t slot_size)
{
        long offset_end = buf_offset;
        long endidx = SUBBUF_INDEX(offset_end - 1, chan);
        long commit_count;

        cmm_smp_wmb();

        uatomic_add(&buf->commit_count[endidx].cc, slot_size);
        /*
         * The commit count read can race with concurrent OOO commit count
         * updates. This is only needed for ltt_check_deliver (for non-polling
         * delivery only) and for ltt_write_commit_counter. The race can only
         * cause the counter to be read with the same value more than once,
         * which could cause:
         * - Multiple delivery for the same sub-buffer (which is handled
         *   gracefully by the reader code) if the value is for a full
         *   sub-buffer. It's important that we can never miss a sub-buffer
         *   delivery. Re-reading the value after the uatomic_add ensures this.
         * - Reading a commit_count with a higher value than what was actually
         *   added to it for the ltt_write_commit_counter call (again caused by
         *   a concurrent committer). It does not matter, because this function
         *   is interested in the fact that the commit count reaches back the
         *   reserve offset for a specific sub-buffer, which is completely
         *   independent of the order.
         */
        commit_count = uatomic_read(&buf->commit_count[endidx].cc);

        ltt_check_deliver(chan, buf, offset_end - 1, commit_count, endidx);
        /*
         * Update data_size for each commit. It's needed only for extracting
         * ltt buffers from vmcore, after crash.
         */
        ltt_write_commit_counter(chan, buf, endidx, buf_offset, commit_count, data_size);
}
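
/*
 * Illustrative call sequence (a sketch only; the real probe code lives in the
 * tracer's serialization path and also writes the event header). A writer
 * typically does something like:
 *
 *     struct ust_buffer *buf;
 *     size_t slot_size;
 *     long buf_offset;
 *     u64 tsc;
 *     unsigned int rflags = 0;
 *
 *     if (ltt_reserve_slot(chan, trace, data_size, largest_align, cpu,
 *                          &buf, &slot_size, &buf_offset, &tsc, &rflags))
 *             return;  // event not reserved (e.g. nesting too deep)
 *     // write the event header at buf_offset, advancing buf_offset past it,
 *     // then copy the payload with ust_buffers_write(buf, buf_offset, ...)
 *     ltt_commit_slot(chan, buf, buf_offset, data_size, slot_size);
 *
 * On success, ltt_reserve_slot() points buf_offset at the start of the
 * reserved record (after any pre-header padding); ltt_commit_slot() expects
 * the offset following the event header, as documented above.
 */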

void _ust_buffers_strncpy_fixup(struct ust_buffer *buf, size_t offset,
                size_t len, size_t copied, int terminated);

static __inline__ int ust_buffers_write(struct ust_buffer *buf, size_t offset,
                const void *src, size_t len)
{
        size_t buf_offset = BUFFER_OFFSET(offset, buf->chan);

        assert(buf_offset < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
        assert(buf_offset + len
               <= buf->chan->subbuf_size*buf->chan->subbuf_cnt);

        ust_buffers_do_copy(buf->buf_data + buf_offset, src, len);

        return len;
}

/*
 * ust_buffers_do_memset - write character into dest.
 * @dest: destination
 * @src: source character
 * @len: length to write
 */
static __inline__
void ust_buffers_do_memset(void *dest, char src, size_t len)
{
        /*
         * What we really want here is an __inline__ memset, but we
         * don't have constants, so gcc generally uses a function call.
         */
        for (; len > 0; len--)
                *(u8 *)dest++ = src;
}

/*
 * ust_buffers_do_strncpy - copy a string up to a certain number of bytes
 * @dest: destination
 * @src: source
 * @len: max. length to copy
 * @terminated: output string ends with \0 (output)
 *
 * Returns the number of bytes copied. Does not finalize with \0 if len is
 * reached.
 */
static __inline__
size_t ust_buffers_do_strncpy(void *dest, const void *src, size_t len,
                int *terminated)
{
        size_t orig_len = len;

        *terminated = 0;
        /*
         * What we really want here is an __inline__ strncpy, but we
         * don't have constants, so gcc generally uses a function call.
         */
        for (; len > 0; len--) {
                *(u8 *)dest = CMM_LOAD_SHARED(*(const u8 *)src);
                /* Check with dest, because src may be modified concurrently */
                if (*(const u8 *)dest == '\0') {
                        len--;
                        *terminated = 1;
                        break;
                }
                dest++;
                src++;
        }
        return orig_len - len;
}
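
/*
 * Worked example (illustrative only): ust_buffers_do_strncpy(dest, "abc", 8,
 * &terminated) copies 'a', 'b', 'c' and the trailing '\0', then stops; it
 * returns 4 (bytes copied, terminator included) and sets terminated = 1.
 * If the source, which may be modified concurrently, yields no '\0' within
 * len bytes, it returns len with terminated = 0. ust_buffers_strncpy() below
 * hands any short or unterminated copy (copied < len || !terminated) to
 * _ust_buffers_strncpy_fixup().
 */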

static __inline__
int ust_buffers_strncpy(struct ust_buffer *buf, size_t offset, const void *src,
                size_t len)
{
        size_t buf_offset = BUFFER_OFFSET(offset, buf->chan);
        ssize_t copied;
        int terminated;

        assert(buf_offset < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
        assert(buf_offset + len
               <= buf->chan->subbuf_size*buf->chan->subbuf_cnt);

        copied = ust_buffers_do_strncpy(buf->buf_data + buf_offset,
                        src, len, &terminated);
        if (unlikely(copied < len || !terminated))
                _ust_buffers_strncpy_fixup(buf, offset, len, copied,
                                terminated);
        return len;
}

extern int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed);
extern int ust_buffers_put_subbuf(struct ust_buffer *buf, unsigned long uconsumed_old);

extern void init_ustrelay_transport(void);

#endif /* _UST_BUFFERS_H */