Fix trace UST destroy channel
[lttng-tools.git] / liblttng-consumer / lttng-consumer.c
/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; only version 2
 * of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define _GNU_SOURCE
#include <assert.h>
#include <fcntl.h>
#include <poll.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

#include <lttng-kernel-ctl.h>
#include <lttng-sessiond-comm.h>
#include <lttng/lttng-consumer.h>
#include <lttng/lttng-kconsumer.h>
#include <lttng/lttng-ustconsumer.h>
#include <lttngerr.h>
struct lttng_consumer_global_data consumer_data = {
	.stream_count = 0,
	.need_update = 1,
	.type = LTTNG_CONSUMER_UNKNOWN,
};

/* timeout parameter, to control the polling thread grace period. */
int consumer_poll_timeout = -1;

/*
 * Flag to inform the polling thread to quit when all fds have hung up.
 * Updated by consumer_thread_receive_fds when it notices that all fds have
 * hung up. Also updated by the signal handler (consumer_should_exit()). Read
 * by the polling threads.
 */
volatile int consumer_quit = 0;

/*
 * Find a stream. The consumer_data.lock must be locked during this
 * call.
 */
static struct lttng_consumer_stream *consumer_find_stream(int key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_ulong *node;
	struct lttng_consumer_stream *stream = NULL;

	/* Negative keys are lookup failures */
	if (key < 0)
		return NULL;

	rcu_read_lock();

	lttng_ht_lookup(consumer_data.stream_ht, (void *)((unsigned long) key),
			&iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node != NULL) {
		stream = caa_container_of(node, struct lttng_consumer_stream, node);
	}

	rcu_read_unlock();

	return stream;
}

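/*
 * Invalidate the key of the stream matching this key (set it to -1) so a
 * newly added stream can take over the identifier. Used for UST when the
 * session daemon resends a stream with an existing key. Called with
 * consumer_data.lock held.
 */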
static void consumer_steal_stream_key(int key)
{
	struct lttng_consumer_stream *stream;

	stream = consumer_find_stream(key);
	if (stream)
		stream->key = -1;
}

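/*
 * Find a channel by key in the channel hash table. Returns NULL if the key is
 * negative or if no channel matches.
 */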
static struct lttng_consumer_channel *consumer_find_channel(int key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_ulong *node;
	struct lttng_consumer_channel *channel = NULL;

	/* Negative keys are lookup failures */
	if (key < 0)
		return NULL;

	rcu_read_lock();

	lttng_ht_lookup(consumer_data.channel_ht, (void *)((unsigned long) key),
			&iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node != NULL) {
		channel = caa_container_of(node, struct lttng_consumer_channel, node);
	}

	rcu_read_unlock();

	return channel;
}

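/*
 * Invalidate the key of the channel matching this key (set it to -1) so a
 * newly added channel can take over the identifier. Used for UST. Called
 * with consumer_data.lock held.
 */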
static void consumer_steal_channel_key(int key)
{
	struct lttng_consumer_channel *channel;

	channel = consumer_find_channel(key);
	if (channel)
		channel->key = -1;
}

/*
 * Remove a stream from the global list protected by a mutex. This
 * function is also responsible for freeing its data structures.
 */
void consumer_del_stream(struct lttng_consumer_stream *stream)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *free_chan = NULL;

	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		if (stream->mmap_base != NULL) {
			ret = munmap(stream->mmap_base, stream->mmap_len);
			if (ret != 0) {
				perror("munmap");
			}
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_stream(stream);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	rcu_read_lock();

	/* Get stream node from hash table */
	lttng_ht_lookup(consumer_data.stream_ht,
			(void *)((unsigned long) stream->key), &iter);
	/* Remove stream node from hash table */
	ret = lttng_ht_del(consumer_data.stream_ht, &iter);
	assert(!ret);

	rcu_read_unlock();

	if (consumer_data.stream_count <= 0) {
		goto end;
	}
	consumer_data.stream_count--;
	if (!stream) {
		goto end;
	}
	if (stream->out_fd >= 0) {
		close(stream->out_fd);
	}
	if (stream->wait_fd >= 0 && !stream->wait_fd_is_copy) {
		close(stream->wait_fd);
	}
	if (stream->shm_fd >= 0 && stream->wait_fd != stream->shm_fd
			&& !stream->shm_fd_is_copy) {
		close(stream->shm_fd);
	}
	if (!--stream->chan->refcount)
		free_chan = stream->chan;
	free(stream);
end:
	consumer_data.need_update = 1;
	pthread_mutex_unlock(&consumer_data.lock);

	if (free_chan)
		consumer_del_channel(free_chan);
}

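/*
 * Resolve the stream embedding the given hash table node and delete it.
 * Invoked as an RCU callback (call_rcu) or directly from the polling thread
 * when a stream hangs up or errors out.
 */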
static void consumer_del_stream_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_ulong *node =
		caa_container_of(head, struct lttng_ht_node_ulong, head);
	struct lttng_consumer_stream *stream =
		caa_container_of(node, struct lttng_consumer_stream, node);

	consumer_del_stream(stream);
}

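/*
 * Allocate a consumer stream object, look up its channel by channel_key and
 * initialize the stream attributes. Returns the new stream, or NULL if the
 * allocation or the UST stream creation fails.
 */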
struct lttng_consumer_stream *consumer_allocate_stream(
		int channel_key, int stream_key,
		int shm_fd, int wait_fd,
		enum lttng_consumer_stream_state state,
		uint64_t mmap_len,
		enum lttng_event_output output,
		const char *path_name,
		uid_t uid,
		gid_t gid)
{
	struct lttng_consumer_stream *stream;
	int ret;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		perror("malloc struct lttng_consumer_stream");
		goto end;
	}
	stream->chan = consumer_find_channel(channel_key);
	if (!stream->chan) {
		perror("Unable to find channel key");
		goto end;
	}
	stream->chan->refcount++;
	stream->key = stream_key;
	stream->shm_fd = shm_fd;
	stream->wait_fd = wait_fd;
	stream->out_fd = -1;
	stream->out_fd_offset = 0;
	stream->state = state;
	stream->mmap_len = mmap_len;
	stream->mmap_base = NULL;
	stream->output = output;
	stream->uid = uid;
	stream->gid = gid;
	strncpy(stream->path_name, path_name, PATH_MAX - 1);
	stream->path_name[PATH_MAX - 1] = '\0';
	lttng_ht_node_init_ulong(&stream->node, stream->key);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		stream->cpu = stream->chan->cpucount++;
		ret = lttng_ustconsumer_allocate_stream(stream);
		if (ret) {
			free(stream);
			return NULL;
		}
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}
	DBG("Allocated stream %s (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, out_fd %d)",
			stream->path_name, stream->key,
			stream->shm_fd,
			stream->wait_fd,
			(unsigned long long) stream->mmap_len,
			stream->out_fd);
end:
	return stream;
}

/*
 * Add a stream to the global list protected by a mutex.
 */
int consumer_add_stream(struct lttng_consumer_stream *stream)
{
	int ret = 0;

	pthread_mutex_lock(&consumer_data.lock);
	/* Steal stream identifier, for UST */
	consumer_steal_stream_key(stream->key);
	rcu_read_lock();
	lttng_ht_add_unique_ulong(consumer_data.stream_ht, &stream->node);
	rcu_read_unlock();
	consumer_data.stream_count++;
	consumer_data.need_update = 1;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/* Streams are in CPU number order (we rely on this) */
		stream->cpu = stream->chan->nr_streams++;
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

end:
	pthread_mutex_unlock(&consumer_data.lock);
	return ret;
}

/*
 * Update a stream according to what we just received.
 */
void consumer_change_stream_state(int stream_key,
		enum lttng_consumer_stream_state state)
{
	struct lttng_consumer_stream *stream;

	pthread_mutex_lock(&consumer_data.lock);
	stream = consumer_find_stream(stream_key);
	if (stream) {
		stream->state = state;
	}
	consumer_data.need_update = 1;
	pthread_mutex_unlock(&consumer_data.lock);
}

/*
 * Remove a channel from the global list protected by a mutex. This
 * function is also responsible for freeing its data structures.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	int ret;
	struct lttng_ht_iter iter;

	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	rcu_read_lock();

	lttng_ht_lookup(consumer_data.channel_ht,
			(void *)((unsigned long) channel->key), &iter);
	ret = lttng_ht_del(consumer_data.channel_ht, &iter);
	assert(!ret);

	rcu_read_unlock();

	if (channel->mmap_base != NULL) {
		ret = munmap(channel->mmap_base, channel->mmap_len);
		if (ret != 0) {
			perror("munmap");
		}
	}
	if (channel->wait_fd >= 0 && !channel->wait_fd_is_copy) {
		close(channel->wait_fd);
	}
	if (channel->shm_fd >= 0 && channel->wait_fd != channel->shm_fd
			&& !channel->shm_fd_is_copy) {
		close(channel->shm_fd);
	}
	free(channel);
end:
	pthread_mutex_unlock(&consumer_data.lock);
}

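/*
 * Resolve the channel embedding the given hash table node and delete it.
 * Used as an RCU callback from lttng_consumer_cleanup().
 */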
static void consumer_del_channel_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_ulong *node =
		caa_container_of(head, struct lttng_ht_node_ulong, head);
	struct lttng_consumer_channel *channel =
		caa_container_of(node, struct lttng_consumer_channel, node);

	consumer_del_channel(channel);
}

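/*
 * Allocate and initialize a consumer channel object. Returns the new channel,
 * or NULL if the allocation or the UST channel creation fails.
 */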
struct lttng_consumer_channel *consumer_allocate_channel(
		int channel_key,
		int shm_fd, int wait_fd,
		uint64_t mmap_len,
		uint64_t max_sb_size)
{
	struct lttng_consumer_channel *channel;
	int ret;

	channel = zmalloc(sizeof(*channel));
	if (channel == NULL) {
		perror("malloc struct lttng_consumer_channel");
		goto end;
	}
	channel->key = channel_key;
	channel->shm_fd = shm_fd;
	channel->wait_fd = wait_fd;
	channel->mmap_len = mmap_len;
	channel->max_sb_size = max_sb_size;
	channel->refcount = 0;
	channel->nr_streams = 0;
	lttng_ht_node_init_ulong(&channel->node, channel->key);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		channel->mmap_base = NULL;
		channel->mmap_len = 0;
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		ret = lttng_ustconsumer_allocate_channel(channel);
		if (ret) {
			free(channel);
			return NULL;
		}
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}
	DBG("Allocated channel (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, max_sb_size %llu)",
			channel->key,
			channel->shm_fd,
			channel->wait_fd,
			(unsigned long long) channel->mmap_len,
			(unsigned long long) channel->max_sb_size);
end:
	return channel;
}

/*
 * Add a channel to the global list protected by a mutex.
 */
int consumer_add_channel(struct lttng_consumer_channel *channel)
{
	pthread_mutex_lock(&consumer_data.lock);
	/* Steal channel identifier, for UST */
	consumer_steal_channel_key(channel->key);
	rcu_read_lock();
	lttng_ht_add_unique_ulong(consumer_data.channel_ht, &channel->node);
	rcu_read_unlock();
	pthread_mutex_unlock(&consumer_data.lock);
	return 0;
}

/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the stream hash table and to avoid concurrency issues
 * when writing is needed. Called with consumer_data.lock held.
 *
 * Returns the number of fds in the structures.
 */
int consumer_update_poll_array(
		struct lttng_consumer_local_data *ctx, struct pollfd **pollfd,
		struct lttng_consumer_stream **local_stream)
{
	int i = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Updating poll fd array");
	cds_lfht_for_each_entry(consumer_data.stream_ht->ht, &iter.iter, stream,
			node.node) {
		if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM) {
			continue;
		}
		DBG("Active FD %d", stream->wait_fd);
		(*pollfd)[i].fd = stream->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		local_stream[i] = stream;
		i++;
	}

	/*
	 * Insert the consumer_poll_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FDs.
	 */
	(*pollfd)[i].fd = ctx->consumer_poll_pipe[0];
	(*pollfd)[i].events = POLLIN;
	return i;
}

/*
 * Poll on the should_quit pipe and the command socket. Returns -1 on error or
 * if the thread should exit, 0 if data is available on the command socket.
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		perror("Poll error");
		goto exit;
	}
	if (consumer_sockpoll[0].revents == POLLIN) {
		DBG("consumer_should_quit wake up");
		goto exit;
	}
	return 0;

exit:
	return -1;
}

/*
 * Set the error socket.
 */
void lttng_consumer_set_error_sock(
		struct lttng_consumer_local_data *ctx, int sock)
{
	ctx->consumer_error_socket = sock;
}

/*
 * Set the command socket path.
 */
void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}

/*
 * Send return code to the session daemon.
 * If the socket is not defined, we return 0; it is not a fatal error.
 */
int lttng_consumer_send_error(
		struct lttng_consumer_local_data *ctx, int cmd)
{
	if (ctx->consumer_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}

/*
 * Close all the tracefiles and stream fds. Should be called when all
 * instances are destroyed.
 */
void lttng_consumer_cleanup(void)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_ulong *node;

	rcu_read_lock();

	/*
	 * Close all outfd. Called when there are no more threads running (after
	 * joining on the threads), so there is no need to protect the list
	 * iteration with a mutex.
	 */
	cds_lfht_for_each_entry(consumer_data.stream_ht->ht, &iter.iter, node,
			node) {
		ret = lttng_ht_del(consumer_data.stream_ht, &iter);
		assert(!ret);
		call_rcu(&node->head, consumer_del_stream_rcu);
	}

	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, node,
			node) {
		ret = lttng_ht_del(consumer_data.channel_ht, &iter);
		assert(!ret);
		call_rcu(&node->head, consumer_del_channel_rcu);
	}

	rcu_read_unlock();
}

/*
 * Called from signal handler.
 */
void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
{
	int ret;

	consumer_quit = 1;
	ret = write(ctx->consumer_should_quit[1], "4", 1);
	if (ret < 0) {
		perror("write consumer quit");
	}
}

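/*
 * Write back and then drop from the page cache the pages belonging to the
 * sub-buffer that precedes the given offset in the stream's output file, so
 * trace data does not accumulate in the page cache.
 */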
void lttng_consumer_sync_trace_file(
		struct lttng_consumer_stream *stream, off_t orig_offset)
{
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->chan->max_sb_size) {
		return;
	}
	sync_file_range(outfd, orig_offset - stream->chan->max_sb_size,
			stream->chan->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED: we won't re-access the data in the near future
	 * after we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantics are not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	posix_fadvise(outfd, orig_offset - stream->chan->max_sb_size,
			stream->chan->max_sb_size, POSIX_FADV_DONTNEED);
}

/*
 * Initialise the necessary environment:
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument; this function is called when data is
 * available on a buffer. It is responsible for doing the
 * kernctl_get_next_subbuf, reading the data with mmap or splice depending on
 * the buffer configuration, and then calling kernctl_put_next_subbuf at the
 * end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		int (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(int stream_key, uint32_t state))
{
	int ret, i;
	struct lttng_consumer_local_data *ctx;

	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		perror("allocating context");
		goto error;
	}

	ctx->consumer_error_socket = -1;
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ret = pipe(ctx->consumer_poll_pipe);
	if (ret < 0) {
		perror("Error creating poll pipe");
		goto error_poll_pipe;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		perror("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_thread_pipe);
	if (ret < 0) {
		perror("Error creating thread pipe");
		goto error_thread_pipe;
	}

	return ctx;

error_thread_pipe:
	for (i = 0; i < 2; i++) {
		int err;

		err = close(ctx->consumer_should_quit[i]);
		assert(!err);
	}
error_quit_pipe:
	for (i = 0; i < 2; i++) {
		int err;

		err = close(ctx->consumer_poll_pipe[i]);
		assert(!err);
	}
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}

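/*
 * Illustrative sketch (not part of the original file): roughly how a consumer
 * daemon is expected to wire up this API, based only on the functions defined
 * in this file. The helper names, the NULL callbacks, the socket path and the
 * pthread plumbing below are assumptions for illustration; error handling is
 * omitted.
 *
 *	static int read_subbuffer(struct lttng_consumer_stream *stream,
 *			struct lttng_consumer_local_data *ctx)
 *	{
 *		return lttng_consumer_read_subbuffer(stream, ctx);
 *	}
 *
 *	static void run_consumer(void)
 *	{
 *		pthread_t poll_thread, sessiond_thread;
 *		struct lttng_consumer_local_data *ctx;
 *
 *		lttng_consumer_init();
 *		ctx = lttng_consumer_create(LTTNG_CONSUMER_KERNEL,
 *				read_subbuffer, NULL, NULL, NULL);
 *		lttng_consumer_set_command_sock_path(ctx, "/path/to/command.sock");
 *
 *		pthread_create(&sessiond_thread, NULL,
 *				lttng_consumer_thread_receive_fds, ctx);
 *		pthread_create(&poll_thread, NULL,
 *				lttng_consumer_thread_poll_fds, ctx);
 *
 *		pthread_join(sessiond_thread, NULL);
 *		pthread_join(poll_thread, NULL);
 *
 *		lttng_consumer_destroy(ctx);
 *		lttng_consumer_cleanup();
 *	}
 */
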
/*
 * Close all fds associated with the instance and free the context.
 */
void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
{
	close(ctx->consumer_error_socket);
	close(ctx->consumer_thread_pipe[0]);
	close(ctx->consumer_thread_pipe[1]);
	close(ctx->consumer_poll_pipe[0]);
	close(ctx->consumer_poll_pipe[1]);
	close(ctx->consumer_should_quit[0]);
	close(ctx->consumer_should_quit[1]);
	unlink(ctx->consumer_command_sock_path);
	free(ctx);
}

/*
 * Mmap the ring buffer, read it and write the data to the tracefile.
 *
 * Returns the number of bytes written.
 */
int lttng_consumer_on_read_subbuffer_mmap(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_on_read_subbuffer_mmap(ctx, stream, len);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_on_read_subbuffer_mmap(ctx, stream, len);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

/*
 * Splice the data from the ring buffer to the tracefile.
 *
 * Returns the number of bytes spliced.
 */
int lttng_consumer_on_read_subbuffer_splice(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_on_read_subbuffer_splice(ctx, stream, len);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return -ENOSYS;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

772 * Take a snapshot for a specific fd
773 *
774 * Returns 0 on success, < 0 on error
775 */
776int lttng_consumer_take_snapshot(struct lttng_consumer_local_data *ctx,
777 struct lttng_consumer_stream *stream)
778{
779 switch (consumer_data.type) {
780 case LTTNG_CONSUMER_KERNEL:
781 return lttng_kconsumer_take_snapshot(ctx, stream);
7753dea8
MD
782 case LTTNG_CONSUMER32_UST:
783 case LTTNG_CONSUMER64_UST:
3bd1e081
MD
784 return lttng_ustconsumer_take_snapshot(ctx, stream);
785 default:
786 ERR("Unknown consumer_data type");
787 assert(0);
788 return -ENOSYS;
789 }
790
791}
792
/*
 * Get the produced position.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_consumer_get_produced_snapshot(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_produced_snapshot(ctx, stream, pos);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_get_produced_snapshot(ctx, stream, pos);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

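/*
 * Receive and process a command from the session daemon on the given socket,
 * dispatching to the kernel or UST implementation.
 */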
int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

/*
 * This thread polls the fds in the set to consume the data and write
 * it to the tracefile if necessary.
 */
void *lttng_consumer_thread_poll_fds(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i;
	struct pollfd *pollfd = NULL;
	/* local view of the streams */
	struct lttng_consumer_stream **local_stream = NULL;
	/* local view of consumer_data.fds_count */
	int nb_fd = 0;
	char tmp;
	int tmp2;
	struct lttng_consumer_local_data *ctx = data;

	local_stream = zmalloc(sizeof(struct lttng_consumer_stream));

	while (1) {
		high_prio = 0;
		num_hup = 0;

		/*
		 * The fds set has been updated; we need to update our
		 * local array as well.
		 */
		pthread_mutex_lock(&consumer_data.lock);
		if (consumer_data.need_update) {
			if (pollfd != NULL) {
				free(pollfd);
				pollfd = NULL;
			}
			if (local_stream != NULL) {
				free(local_stream);
				local_stream = NULL;
			}

			/* allocate for all fds + 1 for the consumer_poll_pipe */
			pollfd = zmalloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				perror("pollfd malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}

			/* allocate for all fds + 1 for the consumer_poll_pipe */
			local_stream = zmalloc((consumer_data.stream_count + 1) *
					sizeof(struct lttng_consumer_stream));
			if (local_stream == NULL) {
				perror("local_stream malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			ret = consumer_update_poll_array(ctx, &pollfd, local_stream);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				lttng_consumer_send_error(ctx, CONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			nb_fd = ret;
			consumer_data.need_update = 0;
		}
		pthread_mutex_unlock(&consumer_data.lock);

		/* poll on the array of fds */
		DBG("polling on %d fd", nb_fd + 1);
		num_rdy = poll(pollfd, nb_fd + 1, consumer_poll_timeout);
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			perror("Poll error");
			lttng_consumer_send_error(ctx, CONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		/* No FDs left and consumer_quit is set: end the thread. */
		if (nb_fd == 0 && consumer_quit == 1) {
			goto end;
		}

		/*
		 * If the consumer_poll_pipe triggered the poll, go
		 * directly to the beginning of the loop to update the
		 * array. We want to prioritize the array update over
		 * low-priority reads.
		 */
		if (pollfd[nb_fd].revents & POLLIN) {
			DBG("consumer_poll_pipe wake up");
			tmp2 = read(ctx->consumer_poll_pipe[0], &tmp, 1);
			if (tmp2 < 0) {
				perror("read consumer poll");
			}
			continue;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			if (pollfd[i].revents & POLLPRI) {
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				ret = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (ret == EAGAIN) {
					ret = 0;
				}
			} else if (pollfd[i].revents & POLLERR) {
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				rcu_read_lock();
				consumer_del_stream_rcu(&local_stream[i]->node.head);
				rcu_read_unlock();
				num_hup++;
			} else if (pollfd[i].revents & POLLNVAL) {
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				rcu_read_lock();
				consumer_del_stream_rcu(&local_stream[i]->node.head);
				rcu_read_unlock();
				num_hup++;
			} else if ((pollfd[i].revents & POLLHUP) &&
					!(pollfd[i].revents & POLLIN)) {
				if (consumer_data.type == LTTNG_CONSUMER32_UST
						|| consumer_data.type == LTTNG_CONSUMER64_UST) {
					DBG("Polling fd %d tells it has hung up. Attempting flush and read.",
							pollfd[i].fd);
					if (!local_stream[i]->hangup_flush_done) {
						lttng_ustconsumer_on_stream_hangup(local_stream[i]);
						/* read after flush */
						do {
							ret = ctx->on_buffer_ready(local_stream[i], ctx);
						} while (ret == EAGAIN);
					}
				} else {
					DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				}
				rcu_read_lock();
				consumer_del_stream_rcu(&local_stream[i]->node.head);
				rcu_read_unlock();
				num_hup++;
			}
		}

		/* If every buffer FD has hung up, we end the read loop here */
		if (nb_fd > 0 && num_hup == nb_fd) {
			DBG("every buffer FD has hung up\n");
			if (consumer_quit == 1) {
				goto end;
			}
			continue;
		}

		/* Take care of low priority channels. */
		if (high_prio == 0) {
			for (i = 0; i < nb_fd; i++) {
				if (pollfd[i].revents & POLLIN) {
					DBG("Normal read on fd %d", pollfd[i].fd);
					ret = ctx->on_buffer_ready(local_stream[i], ctx);
					/* it's ok to have an unavailable subbuffer */
					if (ret == EAGAIN) {
						ret = 0;
					}
				}
			}
		}
	}
end:
	DBG("polling thread exiting");
	if (pollfd != NULL) {
		free(pollfd);
		pollfd = NULL;
	}
	if (local_stream != NULL) {
		free(local_stream);
		local_stream = NULL;
	}
	return NULL;
}

/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 */
void *lttng_consumer_thread_receive_fds(void *data)
{
	int sock, client_socket, ret;
	/*
	 * Structure used to poll for incoming data on the communication
	 * socket; this avoids having to use blocking sockets.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;

	DBG("Creating command socket %s", ctx->consumer_command_sock_path);
	unlink(ctx->consumer_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to lttng-sessiond");
	ret = lttng_consumer_send_error(ctx, CONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to lttng-sessiond");
		goto end;
	}

	ret = fcntl(client_socket, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto end;
	}

	/* Prepare the FDs to poll: the client socket and the should_quit pipe. */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock <= 0) {
		WARN("On accept");
		goto end;
	}
	ret = fcntl(sock, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto end;
	}

	/* update the polling structure to poll on the established socket */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	while (1) {
		if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret == -ENOENT) {
			DBG("Received STOP command");
			goto end;
		}
		if (ret < 0) {
			ERR("Communication interrupted on command socket");
			goto end;
		}
		if (consumer_quit) {
			DBG("consumer_thread_receive_fds received quit from signal");
			goto end;
		}
		DBG("received fds on sock");
	}
end:
	DBG("consumer_thread_receive_fds exiting");

	/*
	 * When all fds have hung up, the polling thread
	 * can exit cleanly.
	 */
	consumer_quit = 1;

	/*
	 * 2s grace period: if no polling events occur during
	 * this period, the polling thread will exit even if there
	 * are still open FDs (should not happen, but it is a safety mechanism).
	 */
	consumer_poll_timeout = LTTNG_CONSUMER_POLL_TIMEOUT;

	/* wake up the polling thread */
	ret = write(ctx->consumer_poll_pipe[1], "4", 1);
	if (ret < 0) {
		perror("poll pipe write");
	}
	return NULL;
}

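/*
 * Read a ready sub-buffer from the stream, dispatching to the kernel or UST
 * implementation.
 */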
int lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_read_subbuffer(stream, ctx);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_read_subbuffer(stream, ctx);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

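/*
 * Per-tracer hook called when a new stream has been received from the session
 * daemon, dispatching to the kernel or UST implementation.
 */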
int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_on_recv_stream(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_on_recv_stream(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

/*
 * Allocate and set consumer data hash tables.
 */
void lttng_consumer_init(void)
{
	consumer_data.stream_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
}