/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2013 - David Goulet <dgoulet@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License, version 2 only, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _GNU_SOURCE
#include <assert.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <unistd.h>

#include <common/common.h>
#include <common/index/index.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>

#include "consumer-stream.h"

/*
 * RCU call to free stream. MUST only be used with call_rcu().
 */
static void free_stream_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_stream *stream =
		caa_container_of(node, struct lttng_consumer_stream, node);

	pthread_mutex_destroy(&stream->lock);
	free(stream);
}

/*
 * Close stream on the relayd side. This call can destroy a relayd if the
 * conditions are met.
 *
 * An RCU read side lock MUST be acquired if the relayd object was looked up
 * in a hash table before calling this.
 */
void consumer_stream_relayd_close(struct lttng_consumer_stream *stream,
		struct consumer_relayd_sock_pair *relayd)
{
	int ret;

	assert(stream);
	assert(relayd);

	if (stream->sent_to_relayd) {
		uatomic_dec(&relayd->refcount);
		assert(uatomic_read(&relayd->refcount) >= 0);
	}

	/* Closing a stream requires locking the control socket. */
	pthread_mutex_lock(&relayd->ctrl_sock_mutex);
	ret = relayd_send_close_stream(&relayd->control_sock,
			stream->relayd_stream_id,
			stream->next_net_seq_num - 1);
	pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	if (ret < 0) {
		DBG("Unable to close stream on the relayd. Continuing");
		/*
		 * Continue here. There is nothing we can do for the relayd.
		 * Chances are that the relayd has closed the socket so we just
		 * continue cleaning up.
		 */
	}

	/* If both conditions are met, destroy the relayd. */
	if (uatomic_read(&relayd->refcount) == 0 &&
			uatomic_read(&relayd->destroy_flag)) {
		consumer_destroy_relayd(relayd);
	}
	stream->net_seq_idx = (uint64_t) -1ULL;
	stream->sent_to_relayd = 0;
}

/*
 * Close the stream's file descriptors and, if needed, close the stream on
 * the relayd side as well.
 *
 * The consumer data lock MUST be acquired.
 * The stream lock MUST be acquired.
 */
void consumer_stream_close(struct lttng_consumer_stream *stream)
{
	int ret;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		if (stream->mmap_base != NULL) {
			ret = munmap(stream->mmap_base, stream->mmap_len);
			if (ret != 0) {
				PERROR("munmap");
			}
		}

		if (stream->wait_fd >= 0) {
			ret = close(stream->wait_fd);
			if (ret) {
				PERROR("close");
			}
			stream->wait_fd = -1;
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* Close the output fd. It could be a socket or a local file at this point. */
	if (stream->out_fd >= 0) {
		ret = close(stream->out_fd);
		if (ret) {
			PERROR("close");
		}
		stream->out_fd = -1;
	}

	if (stream->index_fd >= 0) {
		ret = close(stream->index_fd);
		if (ret) {
			PERROR("close stream index_fd");
		}
		stream->index_fd = -1;
	}

	/* Check for and clean up the relayd if needed. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		consumer_stream_relayd_close(stream, relayd);
	}
	rcu_read_unlock();
}

/*
 * Delete the stream from all possible hash tables.
 *
 * The consumer data lock MUST be acquired.
 * The stream lock MUST be acquired.
 */
void consumer_stream_delete(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;

	assert(stream);
	/* This should NEVER be called on a stream that is not in monitor mode. */
	assert(stream->chan->monitor);

	rcu_read_lock();

	if (ht) {
		iter.iter.node = &stream->node.node;
		ret = lttng_ht_del(ht, &iter);
		assert(!ret);
	}

	/* Delete from the stream-per-channel-ID hash table. */
	iter.iter.node = &stream->node_channel_id.node;
	/*
	 * The returned value is of no importance. Even if the node is NOT in the
	 * hash table, we continue since we may have been called by a code path
	 * that did not add the stream to all (or any) of the hash tables. The
	 * same goes for the next ht del call.
	 */
	(void) lttng_ht_del(consumer_data.stream_per_chan_id_ht, &iter);

	/* Delete from the global stream list. */
	iter.iter.node = &stream->node_session_id.node;
	/* See the previous ht del on why we ignore the returned value. */
	(void) lttng_ht_del(consumer_data.stream_list_ht, &iter);

	rcu_read_unlock();

	/* Decrement the stream count of the global consumer data. */
	assert(consumer_data.stream_count > 0);
	consumer_data.stream_count--;
}

/*
 * Free the given stream within an RCU call.
 */
void consumer_stream_free(struct lttng_consumer_stream *stream)
{
	assert(stream);

	call_rcu(&stream->node.head, free_stream_rcu);
}

/*
 * Destroy the stream's buffers on the tracer side.
 */
void consumer_stream_destroy_buffers(struct lttng_consumer_stream *stream)
{
	assert(stream);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_stream(stream);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}
}

/*
 * Destroy and close an already created stream.
 */
static void destroy_close_stream(struct lttng_consumer_stream *stream)
{
	assert(stream);

	DBG("Consumer stream destroy monitored key: %" PRIu64, stream->key);

	/* Destroy the tracer buffers of the stream. */
	consumer_stream_destroy_buffers(stream);
	/* Close down everything, including the relayd if there is one. */
	consumer_stream_close(stream);
}

/*
 * Decrement the stream's channel refcount. If it reaches 0, return the
 * channel pointer so the caller can destroy it, otherwise return NULL.
 */
static struct lttng_consumer_channel *unref_channel(
		struct lttng_consumer_stream *stream)
{
	struct lttng_consumer_channel *free_chan = NULL;

	assert(stream);
	assert(stream->chan);

	/* Update the refcount of the channel and see if we need to destroy it. */
	if (!uatomic_sub_return(&stream->chan->refcount, 1)
			&& !uatomic_read(&stream->chan->nb_init_stream_left)) {
		free_chan = stream->chan;
	}

	return free_chan;
}

/*
 * Destroy a stream completely. This will delete, close and free the stream.
 * Once this returns, the stream is NO longer usable. Its channel may get
 * destroyed if the conditions are met for a monitored stream.
 *
 * This MUST be called WITHOUT the consumer data and stream locks acquired if
 * the stream is in _monitor_ mode; otherwise it does not matter.
 */
void consumer_stream_destroy(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	assert(stream);

	/* Stream is in monitor mode. */
	if (stream->monitor) {
		struct lttng_consumer_channel *free_chan = NULL;

		/*
		 * This means that the stream was successfully removed from the streams
		 * list of the channel and sent to the right thread managing this
		 * stream, thus being globally visible.
		 */
		if (stream->globally_visible) {
			pthread_mutex_lock(&consumer_data.lock);
			pthread_mutex_lock(&stream->chan->lock);
			pthread_mutex_lock(&stream->lock);
			/* Remove every reference to the stream in the consumer. */
			consumer_stream_delete(stream, ht);

			destroy_close_stream(stream);

			/* Update the channel's refcount of the stream. */
			free_chan = unref_channel(stream);

			/* Indicates that the consumer data state MUST be updated after this. */
			consumer_data.need_update = 1;

			pthread_mutex_unlock(&stream->lock);
			pthread_mutex_unlock(&stream->chan->lock);
			pthread_mutex_unlock(&consumer_data.lock);
		} else {
			/*
			 * If the stream is not visible globally, this needs to be done
			 * outside of the consumer data lock section.
			 */
			free_chan = unref_channel(stream);
		}

		if (free_chan) {
			consumer_del_channel(free_chan);
		}
	} else {
		destroy_close_stream(stream);
	}

	/* Free the stream within an RCU call. */
	consumer_stream_free(stream);
}
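
/*
 * Editorial usage sketch, not part of the original code path: how a thread
 * owning a monitored stream might tear it down on error. The helper name
 * data_thread_drop_stream() and the "ht" argument passed by the caller are
 * assumptions made for illustration only; the rule that no consumer data,
 * channel or stream lock may be held comes from the comment above.
 *
 *	static void data_thread_drop_stream(struct lttng_consumer_stream *stream,
 *			struct lttng_ht *ht)
 *	{
 *		assert(stream);
 *
 *		// Called without any lock held: for a monitored, globally
 *		// visible stream, consumer_stream_destroy() takes the consumer
 *		// data, channel and stream locks itself.
 *		consumer_stream_destroy(stream, ht);
 *
 *		// The stream pointer must not be used past this point; the
 *		// memory is reclaimed through call_rcu().
 *	}
 */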

/*
 * Write the index of a specific stream either to the relayd or to the local
 * disk.
 *
 * Return 0 on success or else a negative value.
 */
int consumer_stream_write_index(struct lttng_consumer_stream *stream,
		struct lttng_packet_index *index)
{
	int ret;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);
	assert(index);

	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd) {
		ret = relayd_send_index(&relayd->control_sock, index,
				stream->relayd_stream_id, stream->next_net_seq_num - 1);
	} else {
		ret = index_write(stream->index_fd, index,
				sizeof(struct lttng_packet_index));
	}
	if (ret < 0) {
		goto error;
	}

error:
	rcu_read_unlock();
	return ret;
}
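
/*
 * Editorial usage sketch, not part of the original code path: the expected
 * call pattern once a packet has been consumed. How the index fields are
 * filled is intentionally left abstract; the only facts taken from this file
 * are that the function itself selects relayd versus local disk based on the
 * stream and that a negative return value means the index write failed.
 *
 *	struct lttng_packet_index index;
 *	int ret;
 *
 *	memset(&index, 0, sizeof(index));
 *	// ... fill the index fields from the packet just consumed ...
 *
 *	ret = consumer_stream_write_index(stream, &index);
 *	if (ret < 0) {
 *		// The index was neither sent to the relayd nor written to the
 *		// local index file; the caller decides how to handle this.
 *	}
 */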

/*
 * Synchronize the metadata using a given session ID. A successful acquisition
 * of a metadata stream will trigger a request to the session daemon and a
 * snapshot so the metadata thread can consume it.
 *
 * This function call is a rendez-vous point between the metadata thread and
 * the data thread.
 *
 * Return 0 on success or else a negative value.
 */
int consumer_stream_sync_metadata(struct lttng_consumer_local_data *ctx,
		uint64_t session_id)
{
	int ret;
	struct lttng_consumer_stream *metadata = NULL, *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht;

	assert(ctx);

	/* Ease our life a bit. */
	ht = consumer_data.stream_list_ht;

	rcu_read_lock();

	/* Search for the metadata stream associated with the given session id. */
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&session_id, lttng_ht_seed), ht->match_fct,
			&session_id, &iter.iter, stream, node_session_id.node) {
		if (stream->metadata_flag) {
			metadata = stream;
			break;
		}
	}
	if (!metadata) {
		ret = 0;
		goto end_unlock_rcu;
	}

	/*
	 * In UST, since we have to write the metadata from the cache packet
	 * by packet, we might need to start this procedure multiple times
	 * until all the metadata from the cache has been extracted.
	 */
	do {
		/*
		 * Steps:
		 * - Lock the metadata stream.
		 * - Check if the metadata stream node was deleted before locking.
		 *   - If yes, release and return success.
		 * - Check if new metadata is ready (flush + snapshot pos).
		 *   - If not, release and return.
		 * - Lock the metadata_rdv_lock.
		 * - Unlock the metadata stream.
		 * - cond_wait on metadata_rdv to wait for the wakeup from the
		 *   metadata thread.
		 * - Unlock the metadata_rdv_lock.
		 */
		pthread_mutex_lock(&metadata->lock);

		/*
		 * There is a possibility that we were able to acquire a reference on the
		 * stream from the RCU hash table but between then and now, the node might
		 * have been deleted just before the lock was acquired. Thus, after locking,
		 * we make sure the metadata node has not been deleted, which would mean
		 * that the buffers are closed.
		 *
		 * In that case, there is no need to sync the metadata, hence returning a
		 * success return code.
		 */
		ret = cds_lfht_is_node_deleted(&metadata->node.node);
		if (ret) {
			ret = 0;
			goto end_unlock_mutex;
		}

		switch (ctx->type) {
		case LTTNG_CONSUMER_KERNEL:
			/*
			 * Empty the metadata cache and flush the current stream.
			 */
			ret = lttng_kconsumer_sync_metadata(metadata);
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			/*
			 * Ask the sessiond if we have new metadata waiting and update the
			 * consumer metadata cache.
			 */
			ret = lttng_ustconsumer_sync_metadata(ctx, metadata);
			break;
		default:
			assert(0);
			ret = -1;
			break;
		}
		/*
		 * On error or when there is no new metadata, we exit here.
		 */
		if (ret <= 0 || ret == ENODATA) {
			goto end_unlock_mutex;
		}

		/*
		 * At this point, new metadata has been flushed, so we wait on the
		 * rendez-vous point for the metadata thread to wake us up when it
		 * finishes consuming the metadata, and then we continue execution.
		 */

		pthread_mutex_lock(&metadata->metadata_rdv_lock);

		/*
		 * Release the metadata stream lock so the metadata thread can process it.
		 */
		pthread_mutex_unlock(&metadata->lock);

		/*
		 * Wait on the rendez-vous point. Once woken up, it means the metadata was
		 * consumed and thus synchronization is achieved.
		 */
		pthread_cond_wait(&metadata->metadata_rdv, &metadata->metadata_rdv_lock);
		pthread_mutex_unlock(&metadata->metadata_rdv_lock);
	} while (ret == EAGAIN);

	ret = 0;
	goto end_unlock_rcu;

end_unlock_mutex:
	pthread_mutex_unlock(&metadata->lock);
end_unlock_rcu:
	rcu_read_unlock();
	return ret;
}
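
/*
 * Editorial usage sketch, not part of the original code path: a data thread
 * making sure the metadata is up to date before consuming a data packet, so
 * that a viewer never sees events for which no metadata has been written.
 * The "error" label and the surrounding variables are assumptions; the
 * contract used here is only that 0 means synchronized (or nothing to do)
 * and a negative value is an error.
 *
 *	ret = consumer_stream_sync_metadata(ctx, stream->session_id);
 *	if (ret < 0) {
 *		ERR("Failed to synchronize metadata for session %" PRIu64,
 *				stream->session_id);
 *		goto error;
 *	}
 *	// Safe to consume and ship the data packet now.
 */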