Fix: consumer should wait for initial streams
[lttng-tools.git] / src / common / ust-consumer / ust-consumer.c
1 /*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _GNU_SOURCE
20 #include <assert.h>
21 #include <lttng/ust-ctl.h>
22 #include <poll.h>
23 #include <pthread.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <sys/mman.h>
27 #include <sys/socket.h>
28 #include <sys/stat.h>
29 #include <sys/types.h>
30 #include <inttypes.h>
31 #include <unistd.h>
32
33 #include <common/common.h>
34 #include <common/sessiond-comm/sessiond-comm.h>
35 #include <common/relayd/relayd.h>
36 #include <common/compat/fcntl.h>
37
38 #include "ust-consumer.h"
39
40 extern struct lttng_consumer_global_data consumer_data;
41 extern int consumer_poll_timeout;
42 extern volatile int consumer_quit;
43
/*
 * Wrapper over the mmap() read offset from ust-ctl library. Since this can be
 * compiled out, we isolate it in this library.
 *
 * Returns the ustctl_get_mmap_read_offset() result: 0 on success, storing the
 * offset in *off, or a negative value on error.
 */
int lttng_ustctl_get_mmap_read_offset(struct lttng_ust_shm_handle *handle,
		struct lttng_ust_lib_ring_buffer *buf, unsigned long *off)
{
	return ustctl_get_mmap_read_offset(handle, buf, off);
}
53
54 /*
55 * Take a snapshot for a specific fd
56 *
57 * Returns 0 on success, < 0 on error
58 */
59 int lttng_ustconsumer_take_snapshot(struct lttng_consumer_local_data *ctx,
60 struct lttng_consumer_stream *stream)
61 {
62 int ret = 0;
63
64 ret = ustctl_snapshot(stream->chan->handle, stream->buf);
65 if (ret != 0) {
66 errno = -ret;
67 PERROR("Getting sub-buffer snapshot.");
68 }
69
70 return ret;
71 }
72
73 /*
74 * Get the produced position
75 *
76 * Returns 0 on success, < 0 on error
77 */
78 int lttng_ustconsumer_get_produced_snapshot(
79 struct lttng_consumer_local_data *ctx,
80 struct lttng_consumer_stream *stream,
81 unsigned long *pos)
82 {
83 int ret;
84
85 ret = ustctl_snapshot_get_produced(stream->chan->handle,
86 stream->buf, pos);
87 if (ret != 0) {
88 errno = -ret;
89 PERROR("kernctl_snapshot_get_produced");
90 }
91
92 return ret;
93 }
94
/*
 * Receive command from session daemon and process it.
 *
 * Return 1 on success else a negative value or 0.
 *
 * The command is a struct lttcomm_consumer_msg read from the sessiond unix
 * socket; some commands are followed by file descriptors passed over the
 * same socket (channel shm fd, stream shm/wait fds).
 */
int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	ssize_t ret;
	struct lttcomm_consumer_msg msg;

	ret = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
	if (ret != sizeof(msg)) {
		/* Short read or error: signal the sessiond and bail out. */
		DBG("Consumer received unexpected message size %zd (expects %zu)",
			ret, sizeof(msg));
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
		return ret;
	}
	if (msg.cmd_type == LTTNG_CONSUMER_STOP) {
		/* -ENOENT tells the caller thread to stop processing commands. */
		return -ENOENT;
	}

	/* relayd needs RCU read-side lock */
	rcu_read_lock();

	switch (msg.cmd_type) {
	case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
	{
		/* Register a relayd control/data socket for network streaming. */
		ret = consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
				msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll,
				&msg.u.relayd_sock.sock);
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_ADD_CHANNEL:
	{
		struct lttng_consumer_channel *new_channel;
		int fds[1];		/* One fd expected: the channel shm fd. */
		size_t nb_fd = 1;

		DBG("UST Consumer adding channel");

		/* block */
		if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
			rcu_read_unlock();
			return -EINTR;
		}
		/*
		 * NOTE(review): the success check compares against sizeof(fds)
		 * (a byte count) — confirm lttcomm_recv_fds_unix_sock returns
		 * the number of bytes received, not the number of fds.
		 */
		ret = lttcomm_recv_fds_unix_sock(sock, fds, nb_fd);
		if (ret != sizeof(fds)) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
			rcu_read_unlock();
			return ret;
		}

		DBG("consumer_add_channel %d", msg.u.channel.channel_key);

		/* wait_fd is -1: channels have no wait fd, only streams do. */
		new_channel = consumer_allocate_channel(msg.u.channel.channel_key,
				fds[0], -1,
				msg.u.channel.mmap_len,
				msg.u.channel.max_sb_size,
				msg.u.channel.nb_init_streams);
		if (new_channel == NULL) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			goto end_nosignal;
		}
		/* Give the caller a veto via its on_recv_channel hook, if any. */
		if (ctx->on_recv_channel != NULL) {
			ret = ctx->on_recv_channel(new_channel);
			if (ret == 0) {
				consumer_add_channel(new_channel);
			} else if (ret < 0) {
				goto end_nosignal;
			}
		} else {
			consumer_add_channel(new_channel);
		}
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_ADD_STREAM:
	{
		struct lttng_consumer_stream *new_stream;
		int fds[2];		/* Two fds expected: shm fd and wait fd. */
		size_t nb_fd = 2;
		struct consumer_relayd_sock_pair *relayd = NULL;

		DBG("UST Consumer adding stream");

		/* block */
		if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
			rcu_read_unlock();
			return -EINTR;
		}
		ret = lttcomm_recv_fds_unix_sock(sock, fds, nb_fd);
		if (ret != sizeof(fds)) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
			rcu_read_unlock();
			return ret;
		}

		DBG("consumer_add_stream chan %d stream %d",
				msg.u.stream.channel_key,
				msg.u.stream.stream_key);

		/* The UST consumer only supports mmap output. */
		assert(msg.u.stream.output == LTTNG_EVENT_MMAP);
		new_stream = consumer_allocate_stream(msg.u.stream.channel_key,
				msg.u.stream.stream_key,
				fds[0], fds[1],
				msg.u.stream.state,
				msg.u.stream.mmap_len,
				msg.u.stream.output,
				msg.u.stream.path_name,
				msg.u.stream.uid,
				msg.u.stream.gid,
				msg.u.stream.net_index,
				msg.u.stream.metadata_flag);
		if (new_stream == NULL) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			goto end_nosignal;
		}

		/* The stream is not metadata. Get relayd reference if exists. */
		relayd = consumer_find_relayd(msg.u.stream.net_index);
		if (relayd != NULL) {
			/* The control socket is serialized by its own mutex. */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			/* Add stream on the relayd */
			ret = relayd_add_stream(&relayd->control_sock,
					msg.u.stream.name, msg.u.stream.path_name,
					&new_stream->relayd_stream_id);
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
			if (ret < 0) {
				/* NOTE(review): new_stream is leaked on this path — confirm. */
				goto end_nosignal;
			}
		} else if (msg.u.stream.net_index != -1) {
			/* A net index was given but no matching relayd is registered. */
			ERR("Network sequence index %d unknown. Not adding stream.",
					msg.u.stream.net_index);
			free(new_stream);
			goto end_nosignal;
		}

		/* Send stream to the metadata thread */
		if (new_stream->metadata_flag) {
			if (ctx->on_recv_stream) {
				ret = ctx->on_recv_stream(new_stream);
				if (ret < 0) {
					/* NOTE(review): new_stream is leaked on this path — confirm. */
					goto end_nosignal;
				}
			}

			/*
			 * Hand the whole stream struct by value through the
			 * metadata pipe; retry writes interrupted by a signal.
			 */
			do {
				ret = write(ctx->consumer_metadata_pipe[1], new_stream,
						sizeof(struct lttng_consumer_stream));
			} while (ret < 0 && errno == EINTR);
			if (ret < 0) {
				PERROR("write metadata pipe");
			}
		} else {
			if (ctx->on_recv_stream) {
				ret = ctx->on_recv_stream(new_stream);
				if (ret < 0) {
					/* NOTE(review): new_stream is leaked on this path — confirm. */
					goto end_nosignal;
				}
			}
			/* Data stream: add it to the consumer's stream hash table. */
			consumer_add_stream(new_stream);
		}

		DBG("UST consumer_add_stream %s (%d,%d) with relayd id %" PRIu64,
				msg.u.stream.path_name, fds[0], fds[1],
				new_stream->relayd_stream_id);
		break;
	}
	case LTTNG_CONSUMER_DESTROY_RELAYD:
	{
		uint64_t index = msg.u.destroy_relayd.net_seq_idx;
		struct consumer_relayd_sock_pair *relayd;

		DBG("UST consumer destroying relayd %" PRIu64, index);

		/* Get relayd reference if exists. */
		relayd = consumer_find_relayd(index);
		if (relayd == NULL) {
			ERR("Unable to find relayd %" PRIu64, index);
			goto end_nosignal;
		}

		/*
		 * Each relayd socket pair has a refcount of stream attached to it
		 * which tells if the relayd is still active or not depending on the
		 * refcount value.
		 *
		 * This will set the destroy flag of the relayd object and destroy it
		 * if the refcount reaches zero when called.
		 *
		 * The destroy can happen either here or when a stream fd hangs up.
		 */
		consumer_flag_relayd_for_destroy(relayd);

		goto end_nosignal;
	}
	case LTTNG_CONSUMER_UPDATE_STREAM:
	{
		/* Not implemented for the UST consumer; disabled code kept below. */
		rcu_read_unlock();
		return -ENOSYS;
#if 0
		if (ctx->on_update_stream != NULL) {
			ret = ctx->on_update_stream(msg.u.stream.stream_key, msg.u.stream.state);
			if (ret == 0) {
				consumer_change_stream_state(msg.u.stream.stream_key, msg.u.stream.state);
			} else if (ret < 0) {
				goto end;
			}
		} else {
			consumer_change_stream_state(msg.u.stream.stream_key,
				msg.u.stream.state);
		}
		break;
#endif
	}
	default:
		break;
	}

	/*
	 * Wake-up the other end by writing a null byte in the pipe (non-blocking).
	 * Important note: Because writing into the pipe is non-blocking (and
	 * therefore we allow dropping wakeup data, as long as there is wakeup data
	 * present in the pipe buffer to wake up the other end), the other end
	 * should perform the following sequence for waiting:
	 *
	 * 1) empty the pipe (reads).
	 * 2) perform update operation.
	 * 3) wait on the pipe (poll).
	 */
	do {
		ret = write(ctx->consumer_poll_pipe[1], "", 1);
	} while (ret < 0 && errno == EINTR);
end_nosignal:
	rcu_read_unlock();

	/*
	 * Return 1 to indicate success since the 0 value can be a socket
	 * shutdown during the recv() or send() call.
	 */
	return 1;
}
337
338 int lttng_ustconsumer_allocate_channel(struct lttng_consumer_channel *chan)
339 {
340 struct lttng_ust_object_data obj;
341
342 obj.handle = -1;
343 obj.shm_fd = chan->shm_fd;
344 obj.wait_fd = chan->wait_fd;
345 obj.memory_map_size = chan->mmap_len;
346 chan->handle = ustctl_map_channel(&obj);
347 if (!chan->handle) {
348 return -ENOMEM;
349 }
350 chan->wait_fd_is_copy = 1;
351 chan->shm_fd = -1;
352
353 return 0;
354 }
355
/*
 * Handle a hang-up on the stream's wait_fd: flush whatever is still buffered
 * in the ring buffer, then record that the flush happened so the read path
 * (lttng_ustconsumer_read_subbuffer) stops draining the now-closed wait_fd.
 */
void lttng_ustconsumer_on_stream_hangup(struct lttng_consumer_stream *stream)
{
	ustctl_flush_buffer(stream->chan->handle, stream->buf, 0);
	stream->hangup_flush_done = 1;
}
361
/*
 * Release the ust-ctl mapping of the channel's shared memory. Counterpart of
 * lttng_ustconsumer_allocate_channel().
 */
void lttng_ustconsumer_del_channel(struct lttng_consumer_channel *chan)
{
	ustctl_unmap_channel(chan->handle);
}
366
367 int lttng_ustconsumer_allocate_stream(struct lttng_consumer_stream *stream)
368 {
369 struct lttng_ust_object_data obj;
370 int ret;
371
372 obj.handle = -1;
373 obj.shm_fd = stream->shm_fd;
374 obj.wait_fd = stream->wait_fd;
375 obj.memory_map_size = stream->mmap_len;
376 ret = ustctl_add_stream(stream->chan->handle, &obj);
377 if (ret)
378 return ret;
379 stream->buf = ustctl_open_stream_read(stream->chan->handle, stream->cpu);
380 if (!stream->buf)
381 return -EBUSY;
382 /* ustctl_open_stream_read has closed the shm fd. */
383 stream->wait_fd_is_copy = 1;
384 stream->shm_fd = -1;
385
386 stream->mmap_base = ustctl_get_mmap_base(stream->chan->handle, stream->buf);
387 if (!stream->mmap_base) {
388 return -EINVAL;
389 }
390
391 return 0;
392 }
393
/*
 * Close the read side of the stream's ring buffer. Counterpart of the
 * ustctl_open_stream_read() call in lttng_ustconsumer_allocate_stream().
 */
void lttng_ustconsumer_del_stream(struct lttng_consumer_stream *stream)
{
	ustctl_close_stream_read(stream->chan->handle, stream->buf);
}
398
399
400 int lttng_ustconsumer_read_subbuffer(struct lttng_consumer_stream *stream,
401 struct lttng_consumer_local_data *ctx)
402 {
403 unsigned long len, subbuf_size, padding;
404 int err;
405 long ret = 0;
406 struct lttng_ust_shm_handle *handle;
407 struct lttng_ust_lib_ring_buffer *buf;
408 char dummy;
409 ssize_t readlen;
410
411 DBG("In read_subbuffer (wait_fd: %d, stream key: %d)",
412 stream->wait_fd, stream->key);
413
414 /* We can consume the 1 byte written into the wait_fd by UST */
415 if (!stream->hangup_flush_done) {
416 do {
417 readlen = read(stream->wait_fd, &dummy, 1);
418 } while (readlen == -1 && errno == EINTR);
419 if (readlen == -1) {
420 ret = readlen;
421 goto end;
422 }
423 }
424
425 buf = stream->buf;
426 handle = stream->chan->handle;
427 /* Get the next subbuffer */
428 err = ustctl_get_next_subbuf(handle, buf);
429 if (err != 0) {
430 ret = err; /* ustctl_get_next_subbuf returns negative, caller expect positive. */
431 /*
432 * This is a debug message even for single-threaded consumer,
433 * because poll() have more relaxed criterions than get subbuf,
434 * so get_subbuf may fail for short race windows where poll()
435 * would issue wakeups.
436 */
437 DBG("Reserving sub buffer failed (everything is normal, "
438 "it is due to concurrency)");
439 goto end;
440 }
441 assert(stream->output == LTTNG_EVENT_MMAP);
442 /* Get the full padded subbuffer size */
443 err = ustctl_get_padded_subbuf_size(handle, buf, &len);
444 assert(err == 0);
445
446 /* Get subbuffer data size (without padding) */
447 err = ustctl_get_subbuf_size(handle, buf, &subbuf_size);
448 assert(err == 0);
449
450 /* Make sure we don't get a subbuffer size bigger than the padded */
451 assert(len >= subbuf_size);
452
453 padding = len - subbuf_size;
454 /* write the subbuffer to the tracefile */
455 ret = lttng_consumer_on_read_subbuffer_mmap(ctx, stream, subbuf_size, padding);
456 /*
457 * The mmap operation should write subbuf_size amount of data when network
458 * streaming or the full padding (len) size when we are _not_ streaming.
459 */
460 if ((ret != subbuf_size && stream->net_seq_idx != -1) ||
461 (ret != len && stream->net_seq_idx == -1)) {
462 /*
463 * Display the error but continue processing to try to release the
464 * subbuffer
465 */
466 ERR("Error writing to tracefile "
467 "(ret: %zd != len: %lu != subbuf_size: %lu)",
468 ret, len, subbuf_size);
469
470 }
471 err = ustctl_put_next_subbuf(handle, buf);
472 assert(err == 0);
473 end:
474 return ret;
475 }
476
477 int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
478 {
479 int ret;
480
481 /* Opening the tracefile in write mode */
482 if (stream->path_name != NULL && stream->net_seq_idx == -1) {
483 ret = run_as_open(stream->path_name,
484 O_WRONLY|O_CREAT|O_TRUNC,
485 S_IRWXU|S_IRWXG|S_IRWXO,
486 stream->uid, stream->gid);
487 if (ret < 0) {
488 ERR("Opening %s", stream->path_name);
489 PERROR("open");
490 goto error;
491 }
492 stream->out_fd = ret;
493 }
494
495 /* we return 0 to let the library handle the FD internally */
496 return 0;
497
498 error:
499 return ret;
500 }
This page took 0.061947 seconds and 4 git commands to generate.