Fix: read subbuffer mmap/splice signedness issue
[lttng-tools.git] / src / common / consumer-stream.c
/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2013 - David Goulet <dgoulet@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License, version 2 only, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _GNU_SOURCE
#include <assert.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <unistd.h>

#include <common/common.h>
#include <common/index/index.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>

#include "consumer-stream.h"

/*
 * RCU call to free stream. MUST only be used with call_rcu().
 */
static void free_stream_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_stream *stream =
		caa_container_of(node, struct lttng_consumer_stream, node);

	pthread_mutex_destroy(&stream->lock);
	free(stream);
}

/*
 * Close stream on the relayd side. This call can destroy a relayd if the
 * conditions are met.
 *
 * An RCU read side lock MUST be acquired if the relayd object was looked up in
 * a hash table before calling this.
 */
void consumer_stream_relayd_close(struct lttng_consumer_stream *stream,
		struct consumer_relayd_sock_pair *relayd)
{
	int ret;

	assert(stream);
	assert(relayd);

	if (stream->sent_to_relayd) {
		uatomic_dec(&relayd->refcount);
		assert(uatomic_read(&relayd->refcount) >= 0);
	}

	/* Closing streams requires locking the control socket. */
	pthread_mutex_lock(&relayd->ctrl_sock_mutex);
	ret = relayd_send_close_stream(&relayd->control_sock,
			stream->relayd_stream_id,
			stream->next_net_seq_num - 1);
	pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	if (ret < 0) {
		DBG("Unable to close stream on the relayd. Continuing");
		/*
		 * Continue here. There is nothing we can do for the relayd.
		 * Chances are that the relayd has closed the socket so we just
		 * continue cleaning up.
		 */
	}

	/* If both conditions are met, we destroy the relayd. */
	if (uatomic_read(&relayd->refcount) == 0 &&
			uatomic_read(&relayd->destroy_flag)) {
		consumer_destroy_relayd(relayd);
	}
	stream->net_seq_idx = (uint64_t) -1ULL;
	stream->sent_to_relayd = 0;
}
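
/*
 * Usage sketch (illustrative). Since the relayd object is normally obtained
 * from an RCU hash table lookup, the RCU read side lock must be held across
 * the call, exactly as done in consumer_stream_close() below:
 *
 *	rcu_read_lock();
 *	relayd = consumer_find_relayd(stream->net_seq_idx);
 *	if (relayd != NULL) {
 *		consumer_stream_relayd_close(stream, relayd);
 *	}
 *	rcu_read_unlock();
 */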

/*
 * Close the stream's file descriptors and, if needed, close the stream on the
 * relayd side as well.
 *
 * The consumer data lock MUST be acquired.
 * The stream lock MUST be acquired.
 */
void consumer_stream_close(struct lttng_consumer_stream *stream)
{
	int ret;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		if (stream->mmap_base != NULL) {
			ret = munmap(stream->mmap_base, stream->mmap_len);
			if (ret != 0) {
				PERROR("munmap");
			}
		}

		if (stream->wait_fd >= 0) {
			ret = close(stream->wait_fd);
			if (ret) {
				PERROR("close");
			}
			stream->wait_fd = -1;
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* Close output fd. Could be a socket or local file at this point. */
	if (stream->out_fd >= 0) {
		ret = close(stream->out_fd);
		if (ret) {
			PERROR("close");
		}
		stream->out_fd = -1;
	}

	if (stream->index_fd >= 0) {
		ret = close(stream->index_fd);
		if (ret) {
			PERROR("close stream index_fd");
		}
		stream->index_fd = -1;
	}

	/* Check and cleanup relayd if needed. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		consumer_stream_relayd_close(stream, relayd);
	}
	rcu_read_unlock();
}

/*
 * Delete the stream from all possible hash tables.
 *
 * The consumer data lock MUST be acquired.
 * The stream lock MUST be acquired.
 */
void consumer_stream_delete(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;

	assert(stream);
	/* Should NEVER be called when not in monitor mode. */
	assert(stream->chan->monitor);

	rcu_read_lock();

	if (ht) {
		iter.iter.node = &stream->node.node;
		ret = lttng_ht_del(ht, &iter);
		assert(!ret);
	}

	/* Delete from stream per channel ID hash table. */
	iter.iter.node = &stream->node_channel_id.node;
	/*
	 * The returned value is of no importance. Even if the node is NOT in the
	 * hash table, we continue since we may have been called by a code path
	 * that did not add the stream to every (or any) hash table. The same goes
	 * for the next ht del call.
	 */
	(void) lttng_ht_del(consumer_data.stream_per_chan_id_ht, &iter);

	/* Delete from the global stream list. */
	iter.iter.node = &stream->node_session_id.node;
	/* See the previous ht del on why we ignore the returned value. */
	(void) lttng_ht_del(consumer_data.stream_list_ht, &iter);

	rcu_read_unlock();

	/* Decrement the stream count of the global consumer data. */
	assert(consumer_data.stream_count > 0);
	consumer_data.stream_count--;
}

/*
 * Free the given stream within an RCU call.
 */
void consumer_stream_free(struct lttng_consumer_stream *stream)
{
	assert(stream);

	call_rcu(&stream->node.head, free_stream_rcu);
}

/*
 * Destroy the stream's tracer buffers.
 */
void consumer_stream_destroy_buffers(struct lttng_consumer_stream *stream)
{
	assert(stream);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_stream(stream);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}
}

/*
 * Destroy and close an already created stream.
 */
static void destroy_close_stream(struct lttng_consumer_stream *stream)
{
	assert(stream);

	DBG("Consumer stream destroy monitored key: %" PRIu64, stream->key);

	/* Destroy the stream's tracer buffers. */
	consumer_stream_destroy_buffers(stream);
	/* Close down everything, including the relayd connection if there is one. */
	consumer_stream_close(stream);
}

/*
 * Decrement the stream's channel refcount and, if it reaches 0, return the
 * channel pointer so the caller can destroy it; otherwise return NULL.
 */
static struct lttng_consumer_channel *unref_channel(
		struct lttng_consumer_stream *stream)
{
	struct lttng_consumer_channel *free_chan = NULL;

	assert(stream);
	assert(stream->chan);

	/* Update refcount of channel and see if we need to destroy it. */
	if (!uatomic_sub_return(&stream->chan->refcount, 1)
			&& !uatomic_read(&stream->chan->nb_init_stream_left)) {
		free_chan = stream->chan;
	}

	return free_chan;
}

/*
 * Destroy a stream completely. This will delete, close and free the stream.
 * Once this returns, the stream is no longer usable. Its channel may get
 * destroyed if the conditions are met for a monitored stream.
 *
 * This MUST be called WITHOUT the consumer data and stream locks acquired if
 * the stream is in _monitor_ mode; otherwise it does not matter.
 */
void consumer_stream_destroy(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	assert(stream);

	/* Stream is in monitor mode. */
	if (stream->monitor) {
		struct lttng_consumer_channel *free_chan = NULL;

		/*
		 * This means that the stream was successfully removed from the streams
		 * list of the channel and sent to the right thread managing this
		 * stream, thus making it globally visible.
		 */
		if (stream->globally_visible) {
			pthread_mutex_lock(&consumer_data.lock);
			pthread_mutex_lock(&stream->chan->lock);
			pthread_mutex_lock(&stream->lock);
			/* Remove every reference of the stream in the consumer. */
			consumer_stream_delete(stream, ht);

			destroy_close_stream(stream);

			/* Update channel's refcount of the stream. */
			free_chan = unref_channel(stream);

			/* Indicates that the consumer data state MUST be updated after this. */
			consumer_data.need_update = 1;

			pthread_mutex_unlock(&stream->lock);
			pthread_mutex_unlock(&stream->chan->lock);
			pthread_mutex_unlock(&consumer_data.lock);
		} else {
			/*
			 * If the stream is not visible globally, this needs to be done
			 * outside of the consumer data lock section.
			 */
			free_chan = unref_channel(stream);
		}

		if (free_chan) {
			consumer_del_channel(free_chan);
		}
	} else {
		destroy_close_stream(stream);
	}

	/* Free stream within an RCU call. */
	consumer_stream_free(stream);
}
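
/*
 * Usage sketch (illustrative; "data_ht" stands for whatever hash table the
 * stream was registered in, or NULL if it never was). For a stream in monitor
 * mode the caller must not hold the consumer data lock nor the stream lock,
 * since consumer_stream_destroy() acquires them itself:
 *
 *	consumer_stream_destroy(stream, data_ht);
 *	stream = NULL;	... the stream is freed later through call_rcu(),
 *			    so the pointer must not be dereferenced again.
 */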

/*
 * Write the index of a specific stream either to the relayd or to the local
 * disk.
 *
 * Return 0 on success or else a negative value.
 */
int consumer_stream_write_index(struct lttng_consumer_stream *stream,
		struct ctf_packet_index *index)
{
	int ret;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);
	assert(index);

	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd) {
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_send_index(&relayd->control_sock, index,
				stream->relayd_stream_id, stream->next_net_seq_num - 1);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	} else {
		ssize_t size_ret;

		size_ret = index_write(stream->index_fd, index,
				sizeof(struct ctf_packet_index));
		/*
		 * Cast so a negative (error) return value is not promoted to
		 * unsigned and silently treated as a successful write.
		 */
		if (size_ret < (ssize_t) sizeof(struct ctf_packet_index)) {
			ret = -1;
		} else {
			ret = 0;
		}
	}
	if (ret < 0) {
		goto error;
	}

error:
	rcu_read_unlock();
	return ret;
}
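
/*
 * Usage sketch (illustrative; the struct ctf_packet_index field names and the
 * use of htobe64() below are assumptions about the CTF index format, and
 * packet_size/content_size are hypothetical caller-side variables):
 *
 *	struct ctf_packet_index index;
 *
 *	memset(&index, 0, sizeof(index));
 *	index.packet_size = htobe64(packet_size);
 *	index.content_size = htobe64(content_size);
 *	ret = consumer_stream_write_index(stream, &index);
 *	if (ret < 0) {
 *		goto error;
 *	}
 */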

/*
 * Synchronize the metadata using a given session ID. A successful acquisition
 * of a metadata stream will trigger a request to the session daemon and a
 * snapshot so the metadata thread can consume it.
 *
 * This function call is a rendez-vous point between the metadata thread and
 * the data thread.
 *
 * Return 0 on success or else a negative value.
 */
int consumer_stream_sync_metadata(struct lttng_consumer_local_data *ctx,
		uint64_t session_id)
{
	int ret;
	struct lttng_consumer_stream *metadata = NULL, *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht;

	assert(ctx);

	/* Ease our life a bit. */
	ht = consumer_data.stream_list_ht;

	rcu_read_lock();

	/* Search for the metadata stream associated with the given session id. */
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&session_id, lttng_ht_seed), ht->match_fct,
			&session_id, &iter.iter, stream, node_session_id.node) {
		if (stream->metadata_flag) {
			metadata = stream;
			break;
		}
	}
	if (!metadata) {
		ret = 0;
		goto end_unlock_rcu;
	}

	/*
	 * In UST, since we have to write the metadata from the cache packet
	 * by packet, we might need to start this procedure multiple times
	 * until all the metadata from the cache has been extracted.
	 */
	do {
		/*
		 * Steps:
		 * - Lock the metadata stream
		 * - Check if the metadata stream node was deleted before locking.
		 *   - If yes, release and return success
		 * - Check if new metadata is ready (flush + snapshot pos)
		 *   - If nothing: release and return.
		 * - Lock the metadata_rdv_lock
		 * - Unlock the metadata stream
		 * - cond_wait on metadata_rdv to wait for the wakeup from the
		 *   metadata thread
		 * - Unlock the metadata_rdv_lock
		 */
		pthread_mutex_lock(&metadata->lock);

		/*
		 * There is a possibility that we were able to acquire a reference on
		 * the stream from the RCU hash table but, between then and now, the
		 * node might have been deleted just before the lock was acquired.
		 * Thus, after locking, we make sure the metadata node has not been
		 * deleted, which would mean that the buffers are closed.
		 *
		 * In that case, there is no need to sync the metadata, hence we
		 * return a success code.
		 */
		ret = cds_lfht_is_node_deleted(&metadata->node.node);
		if (ret) {
			ret = 0;
			goto end_unlock_mutex;
		}

		switch (ctx->type) {
		case LTTNG_CONSUMER_KERNEL:
			/*
			 * Empty the metadata cache and flush the current stream.
			 */
			ret = lttng_kconsumer_sync_metadata(metadata);
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			/*
			 * Ask the sessiond if we have new metadata waiting and update the
			 * consumer metadata cache.
			 */
			ret = lttng_ustconsumer_sync_metadata(ctx, metadata);
			break;
		default:
			assert(0);
			ret = -1;
			break;
		}
		/*
		 * Error or no new metadata, we exit here.
		 */
		if (ret <= 0 || ret == ENODATA) {
			goto end_unlock_mutex;
		}

		/*
		 * At this point, new metadata has been flushed, so we wait on the
		 * rendez-vous point for the metadata thread to wake us up when it
		 * finishes consuming the metadata and we continue execution.
		 */

		pthread_mutex_lock(&metadata->metadata_rdv_lock);

		/*
		 * Release the metadata stream lock so the metadata thread can process
		 * it.
		 */
		pthread_mutex_unlock(&metadata->lock);

		/*
		 * Wait on the rendez-vous point. Once woken up, it means the metadata
		 * was consumed and thus synchronization is achieved.
		 */
		pthread_cond_wait(&metadata->metadata_rdv, &metadata->metadata_rdv_lock);
		pthread_mutex_unlock(&metadata->metadata_rdv_lock);
	} while (ret == EAGAIN);

	ret = 0;
	goto end_unlock_rcu;

end_unlock_mutex:
	pthread_mutex_unlock(&metadata->lock);
end_unlock_rcu:
	rcu_read_unlock();
	return ret;
}
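
/*
 * Usage sketch (illustrative; surrounding error handling trimmed). A data
 * consuming thread can use this rendez-vous point to make sure the metadata
 * of its session is pushed before it consumes a data sub-buffer:
 *
 *	ret = consumer_stream_sync_metadata(ctx, stream->session_id);
 *	if (ret < 0) {
 *		goto error;
 *	}
 *	... consume the data sub-buffer ...
 */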