/*
 * Copyright (C) 2013 - Julien Desfossez <jdesfossez@efficios.com>
 *                      David Goulet <dgoulet@efficios.com>
 *               2015 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License, version 2 only, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/stat.h>

#include <common/common.h>
#include <common/utils.h>
#include <common/defaults.h>
#include <urcu/rculist.h>

#include "lttng-relayd.h"
/* Declarations for the relay index and stream APIs used below. */
#include "index.h"
#include "stream.h"
#include "viewer-stream.h"
/* Should be called with RCU read-side lock held. */
bool stream_get(struct relay_stream *stream)
{
	bool has_ref = false;

	pthread_mutex_lock(&stream->reflock);
	if (stream->ref.refcount != 0) {
		has_ref = true;
		urcu_ref_get(&stream->ref);
	}
	pthread_mutex_unlock(&stream->reflock);

	return has_ref;
}
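/*
 * Note (added for clarity): stream_get() deliberately refuses to revive an
 * object whose refcount has already dropped to zero; holding the reflock
 * makes the refcount test and urcu_ref_get() atomic with respect to the
 * final stream_put(), so a looked-up stream cannot be resurrected while it
 * is being released.
 */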
/*
 * Get stream from stream id from the streams hash table. Return stream
 * if found else NULL. A stream reference is taken when a stream is
 * returned. stream_put() must be called on that stream.
 */
struct relay_stream *stream_get_by_id(uint64_t stream_id)
{
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;
	struct relay_stream *stream = NULL;

	rcu_read_lock();
	lttng_ht_lookup(relay_streams_ht, &stream_id, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (!node) {
		DBG("Relay stream %" PRIu64 " not found", stream_id);
		goto end;
	}
	stream = caa_container_of(node, struct relay_stream, node);
	if (!stream_get(stream)) {
		stream = NULL;
	}
end:
	rcu_read_unlock();
	return stream;
}
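/*
 * Illustrative caller pattern (not part of the original code): every
 * successful lookup must be balanced by a stream_put() once the caller is
 * done with the reference, e.g.:
 *
 *	struct relay_stream *stream;
 *
 *	stream = stream_get_by_id(stream_handle);
 *	if (stream) {
 *		... use the stream, taking stream->lock as needed ...
 *		stream_put(stream);
 *	}
 */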
/*
 * We keep ownership of path_name and channel_name.
 */
struct relay_stream *stream_create(struct ctf_trace *trace,
		uint64_t stream_handle, char *path_name,
		char *channel_name, uint64_t tracefile_size,
		uint64_t tracefile_count)
{
	int ret;
	struct relay_stream *stream = NULL;
	struct relay_session *session = trace->session;

	stream = zmalloc(sizeof(struct relay_stream));
	if (stream == NULL) {
		PERROR("relay stream zmalloc");
		ret = -1;
		goto error_no_alloc;
	}

	stream->stream_handle = stream_handle;
	stream->prev_seq = -1ULL;
	stream->last_net_seq_num = -1ULL;
	stream->ctf_stream_id = -1ULL;
	stream->tracefile_size = tracefile_size;
	stream->tracefile_count = tracefile_count;
	stream->path_name = path_name;
	stream->channel_name = channel_name;
	lttng_ht_node_init_u64(&stream->node, stream->stream_handle);
	pthread_mutex_init(&stream->lock, NULL);
	pthread_mutex_init(&stream->reflock, NULL);
	urcu_ref_init(&stream->ref);
	ctf_trace_get(trace);
	stream->trace = trace;

	stream->indexes_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!stream->indexes_ht) {
		ERR("Cannot create indexes_ht");
		ret = -1;
		goto error;
	}

	/* The -1/-1 uid/gid arguments are assumed; see the run_as note below. */
	ret = utils_mkdir_recursive(stream->path_name, S_IRWXU | S_IRWXG,
			-1, -1);
	if (ret < 0) {
		ERR("relay creating output directory");
		goto error;
	}

	/*
	 * No need to use run_as API here because whatever we receive,
	 * the relayd uses its own credentials for the stream files.
	 */
	ret = utils_create_stream_file(stream->path_name, stream->channel_name,
			stream->tracefile_size, 0, -1, -1, NULL);
	if (ret < 0) {
		ERR("Create output file");
		goto error;
	}
	stream->stream_fd = stream_fd_create(ret);
	if (!stream->stream_fd) {
		if (close(ret)) {
			PERROR("Error closing file %d", ret);
		}
		ret = -1;
		goto error;
	}
	stream->tfa = tracefile_array_create(stream->tracefile_count);
	if (!stream->tfa) {
		ret = -1;
		goto error;
	}
	if (stream->tracefile_size) {
		DBG("Tracefile %s/%s_0 created", stream->path_name, stream->channel_name);
	} else {
		DBG("Tracefile %s/%s created", stream->path_name, stream->channel_name);
	}

	if (!strncmp(stream->channel_name, DEFAULT_METADATA_NAME, LTTNG_NAME_MAX)) {
		stream->is_metadata = 1;
	}

	stream->in_recv_list = true;

	/*
	 * Add the stream in the recv list of the session. Once the end stream
	 * message is received, all session streams are published.
	 */
	pthread_mutex_lock(&session->recv_list_lock);
	cds_list_add_rcu(&stream->recv_node, &session->recv_list);
	session->stream_count++;
	pthread_mutex_unlock(&session->recv_list_lock);

	/*
	 * Both in the ctf_trace object and the global stream ht since the data
	 * side of the relayd does not have the concept of session.
	 */
	lttng_ht_add_unique_u64(relay_streams_ht, &stream->node);
	stream->in_stream_ht = true;

	DBG("Relay new stream added %s with ID %" PRIu64, stream->channel_name,
			stream->stream_handle);
	ret = 0;

error:
	if (ret) {
		if (stream->stream_fd) {
			stream_fd_put(stream->stream_fd);
			stream->stream_fd = NULL;
		}
		stream_put(stream);
		stream = NULL;
	}
	return stream;

error_no_alloc:
	/*
	 * path_name and channel_name need to be freed explicitly here
	 * because we cannot rely on stream_put().
	 */
	free(path_name);
	free(channel_name);
	return NULL;
}
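/*
 * Note (added for clarity): on success, ownership of the heap-allocated
 * path_name and channel_name strings is transferred to the stream and they
 * are released by stream_destroy(); a hypothetical caller would therefore
 * pass strings it does not free itself, e.g.:
 *
 *	stream = stream_create(trace, handle, strdup(path), strdup(channel),
 *			tracefile_size, tracefile_count);
 */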
/*
 * Called with the session lock held.
 */
void stream_publish(struct relay_stream *stream)
{
	struct relay_session *session;

	pthread_mutex_lock(&stream->lock);
	if (stream->published) {
		goto unlock;
	}

	session = stream->trace->session;

	pthread_mutex_lock(&session->recv_list_lock);
	if (stream->in_recv_list) {
		cds_list_del_rcu(&stream->recv_node);
		stream->in_recv_list = false;
	}
	pthread_mutex_unlock(&session->recv_list_lock);

	pthread_mutex_lock(&stream->trace->stream_list_lock);
	cds_list_add_rcu(&stream->stream_node, &stream->trace->stream_list);
	pthread_mutex_unlock(&stream->trace->stream_list_lock);

	stream->published = true;
unlock:
	pthread_mutex_unlock(&stream->lock);
}
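/*
 * Note (added for clarity): publication moves the stream from the session's
 * recv_list (streams received but not yet announced) to its trace's
 * stream_list; stream_unpublish() below performs the reverse removal from
 * the global hash table and the per-trace list on close or teardown.
 */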
/*
 * Stream must be protected by holding the stream lock or by virtue of being
 * called from stream_destroy, in which case it is guaranteed to be accessed
 * from a single thread by the reflock.
 */
static void stream_unpublish(struct relay_stream *stream)
{
	if (stream->in_stream_ht) {
		struct lttng_ht_iter iter;
		int ret;

		iter.iter.node = &stream->node.node;
		ret = lttng_ht_del(relay_streams_ht, &iter);
		assert(!ret);
		stream->in_stream_ht = false;
	}
	if (stream->published) {
		pthread_mutex_lock(&stream->trace->stream_list_lock);
		cds_list_del_rcu(&stream->stream_node);
		pthread_mutex_unlock(&stream->trace->stream_list_lock);
		stream->published = false;
	}
}
static void stream_destroy(struct relay_stream *stream)
{
	if (stream->indexes_ht) {
		/*
		 * Calling lttng_ht_destroy in call_rcu worker thread so
		 * we don't hold the RCU read-side lock while calling
		 * it.
		 */
		lttng_ht_destroy(stream->indexes_ht);
	}
	if (stream->tfa) {
		tracefile_array_destroy(stream->tfa);
	}
	free(stream->path_name);
	free(stream->channel_name);
	free(stream);
}
static void stream_destroy_rcu(struct rcu_head *rcu_head)
{
	struct relay_stream *stream =
		caa_container_of(rcu_head, struct relay_stream, rcu_node);

	stream_destroy(stream);
}
/*
 * No need to take stream->lock since this is only called on the final
 * stream_put which ensures that a single thread may act on the stream.
 *
 * At that point, the object is also protected by the reflock which
 * guarantees that no other thread may share ownership of this stream.
 */
static void stream_release(struct urcu_ref *ref)
{
	struct relay_stream *stream =
		caa_container_of(ref, struct relay_stream, ref);
	struct relay_session *session;

	session = stream->trace->session;

	DBG("Releasing stream id %" PRIu64, stream->stream_handle);

	pthread_mutex_lock(&session->recv_list_lock);
	session->stream_count--;
	if (stream->in_recv_list) {
		cds_list_del_rcu(&stream->recv_node);
		stream->in_recv_list = false;
	}
	pthread_mutex_unlock(&session->recv_list_lock);

	stream_unpublish(stream);

	if (stream->stream_fd) {
		stream_fd_put(stream->stream_fd);
		stream->stream_fd = NULL;
	}
	if (stream->index_fd) {
		stream_fd_put(stream->index_fd);
		stream->index_fd = NULL;
	}
	ctf_trace_put(stream->trace);
	stream->trace = NULL;

	call_rcu(&stream->rcu_node, stream_destroy_rcu);
}
void stream_put(struct relay_stream *stream)
{
	DBG("stream put for stream id %" PRIu64, stream->stream_handle);
	/*
	 * Ensure existence of stream->reflock for stream unlock.
	 */
	rcu_read_lock();
	/*
	 * Stream reflock ensures that concurrent test and update of
	 * stream ref is atomic.
	 */
	pthread_mutex_lock(&stream->reflock);
	assert(stream->ref.refcount != 0);
	/*
	 * Wait until we have processed all the stream packets before
	 * actually putting our last stream reference.
	 */
	DBG("stream put stream id %" PRIu64 " refcount %d",
			stream->stream_handle,
			(int) stream->ref.refcount);
	urcu_ref_put(&stream->ref, stream_release);
	pthread_mutex_unlock(&stream->reflock);
	rcu_read_unlock();
}
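/*
 * Note (added for clarity): the final stream_put() drives the teardown chain
 * stream_release() -> call_rcu(stream_destroy_rcu) -> stream_destroy(), so
 * list and hash table removals happen immediately while the memory itself is
 * only reclaimed after an RCU grace period.
 */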
void try_stream_close(struct relay_stream *stream)
{
	DBG("Trying to close stream %" PRIu64, stream->stream_handle);
	pthread_mutex_lock(&stream->lock);
	/*
	 * Can be called concurrently by connection close and reception of the
	 * last pending data.
	 */
	if (stream->closed) {
		pthread_mutex_unlock(&stream->lock);
		DBG("closing stream %" PRIu64 " aborted since it is already marked as closed", stream->stream_handle);
		return;
	}

	stream->close_requested = true;

	if (stream->last_net_seq_num == -1ULL) {
		/*
		 * Handle connection close without explicit stream close
		 * command.
		 *
		 * We can be clever about indexes partially received in
		 * cases where we received the data socket part, but not
		 * the control socket part: since we're currently closing
		 * the stream on behalf of the control socket, we *know*
		 * there won't be any more control information for this
		 * socket. Therefore, we can destroy all indexes for
		 * which we have received only the file descriptor (from
		 * the data socket). This takes care of consumerd crashes
		 * between sending the data and control information for
		 * a packet. Since those are sent in that order, we take
		 * care of consumerd crashes.
		 */
		relay_index_close_partial_fd(stream);
		/*
		 * Use the highest net_seq_num we currently have pending
		 * as the end of stream indicator. Leave last_net_seq_num
		 * at -1ULL if we cannot find any index.
		 */
		stream->last_net_seq_num = relay_index_find_last(stream);
		/* Fall-through into the next check. */
	}

	if (stream->last_net_seq_num != -1ULL &&
			((int64_t) (stream->prev_seq - stream->last_net_seq_num)) < 0) {
		/*
		 * Don't close since we still have data pending. This
		 * handles cases where an explicit close command has
		 * been received for this stream, and cases where the
		 * connection has been closed, and we are awaiting
		 * index information from the data socket. It is
		 * therefore expected that all the index fd information
		 * we need has already been received on the control
		 * socket. Matching index information from the data
		 * socket should be Expected Soon(TM).
		 *
		 * TODO: We should implement a timer to garbage collect
		 * streams after a timeout to be resilient against a
		 * consumerd implementation that would not match this
		 * expectation.
		 */
		pthread_mutex_unlock(&stream->lock);
		DBG("closing stream %" PRIu64 " aborted since it still has data pending", stream->stream_handle);
		return;
	}
	/*
	 * We received all the indexes we can expect.
	 */
	stream_unpublish(stream);
	stream->closed = true;
	/* Relay indexes are only used by the "consumer/sessiond" end. */
	relay_index_close_all(stream);
	pthread_mutex_unlock(&stream->lock);
	DBG("Succeeded in closing stream %" PRIu64, stream->stream_handle);
}
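/*
 * Note (added for clarity): the signed difference used above,
 * (int64_t) (prev_seq - last_net_seq_num) < 0, is a wraparound-safe way of
 * checking that prev_seq is still "before" the announced last sequence
 * number. For example, with 8-bit arithmetic, prev_seq = 0xfe and
 * last_net_seq_num = 0x02 give (int8_t) (0xfe - 0x02) = -4 < 0, so the
 * stream is correctly kept open across the counter wrap.
 */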
static void print_stream_indexes(struct relay_stream *stream)
{
	struct lttng_ht_iter iter;
	struct relay_index *index;

	rcu_read_lock();
	/* The "index_n" node/key member names below are assumed. */
	cds_lfht_for_each_entry(stream->indexes_ht->ht, &iter.iter, index,
			index_n.node) {
		DBG("index %p net_seq_num %" PRIu64 " refcount %ld"
				" stream %" PRIu64 " trace %" PRIu64
				" session %" PRIu64,
				index,
				index->index_n.key,
				stream->ref.refcount,
				index->stream->stream_handle,
				index->stream->trace->id,
				index->stream->trace->session->id);
	}
	rcu_read_unlock();
}
void print_relay_streams(void)
{
	struct lttng_ht_iter iter;
	struct relay_stream *stream;

	if (!relay_streams_ht) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
			node.node) {
		if (!stream_get(stream)) {
			continue;
		}
		DBG("stream %p refcount %ld stream %" PRIu64 " trace %" PRIu64
				" session %" PRIu64,
				stream,
				stream->ref.refcount,
				stream->stream_handle,
				stream->trace->id,
				stream->trace->session->id);
		print_stream_indexes(stream);
		stream_put(stream);
	}
	rcu_read_unlock();
}