src/bin/lttng-relayd/stream.c
/*
 * Copyright (C) 2013 - Julien Desfossez <jdesfossez@efficios.com>
 *                      David Goulet <dgoulet@efficios.com>
 *               2015 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License, version 2 only, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _LGPL_SOURCE
#include <common/common.h>
#include <common/utils.h>
#include <common/defaults.h>
#include <urcu/rculist.h>
#include <sys/stat.h>

#include "lttng-relayd.h"
#include "index.h"
#include "stream.h"
#include "viewer-stream.h"

/* Should be called with RCU read-side lock held. */
bool stream_get(struct relay_stream *stream)
{
	bool has_ref = false;

	pthread_mutex_lock(&stream->reflock);
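	/*
	 * Only take a new reference when the refcount is still non-zero:
	 * a zero refcount means a concurrent final stream_put() has
	 * already started releasing the stream, which must not be
	 * revived.
	 */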
	if (stream->ref.refcount != 0) {
		has_ref = true;
		urcu_ref_get(&stream->ref);
	}
	pthread_mutex_unlock(&stream->reflock);

	return has_ref;
}
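
/*
 * Illustrative usage sketch (comment only, not upstream code): callers
 * pair stream_get() with stream_put() under the RCU read-side lock,
 * e.g.:
 *
 *	rcu_read_lock();
 *	if (stream_get(stream)) {
 *		... use stream ...
 *		stream_put(stream);
 *	}
 *	rcu_read_unlock();
 */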

/*
 * Get a stream from its stream id by looking it up in the streams hash
 * table. Return the stream if found, else NULL. A stream reference is
 * taken when a stream is returned; stream_put() must be called on that
 * stream.
 */
struct relay_stream *stream_get_by_id(uint64_t stream_id)
{
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;
	struct relay_stream *stream = NULL;

	rcu_read_lock();
	lttng_ht_lookup(relay_streams_ht, &stream_id, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (!node) {
		DBG("Relay stream %" PRIu64 " not found", stream_id);
		goto end;
	}
	stream = caa_container_of(node, struct relay_stream, node);
	if (!stream_get(stream)) {
		stream = NULL;
	}
end:
	rcu_read_unlock();
	return stream;
}
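
/*
 * Descriptive note: the reference taken in stream_get_by_id() is what
 * allows the returned stream to be used after rcu_read_unlock(); RCU
 * only protects the hash table lookup itself, while object lifetime is
 * governed by the urcu_ref count.
 */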

/*
 * We keep ownership of path_name and channel_name.
 */
struct relay_stream *stream_create(struct ctf_trace *trace,
		uint64_t stream_handle, char *path_name,
		char *channel_name, uint64_t tracefile_size,
		uint64_t tracefile_count)
{
	int ret;
	struct relay_stream *stream = NULL;
	struct relay_session *session = trace->session;

	stream = zmalloc(sizeof(struct relay_stream));
	if (stream == NULL) {
		PERROR("relay stream zmalloc");
		ret = -1;
		goto error_no_alloc;
	}

	stream->stream_handle = stream_handle;
	stream->prev_seq = -1ULL;
	stream->last_net_seq_num = -1ULL;
	stream->ctf_stream_id = -1ULL;
	stream->tracefile_size = tracefile_size;
	stream->tracefile_count = tracefile_count;
	stream->path_name = path_name;
	stream->channel_name = channel_name;
	lttng_ht_node_init_u64(&stream->node, stream->stream_handle);
	pthread_mutex_init(&stream->lock, NULL);
	pthread_mutex_init(&stream->reflock, NULL);
	urcu_ref_init(&stream->ref);
	ctf_trace_get(trace);
	stream->trace = trace;

	stream->indexes_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!stream->indexes_ht) {
		ERR("Cannot create indexes_ht");
		ret = -1;
		goto end;
	}

	ret = utils_mkdir_recursive(stream->path_name, S_IRWXU | S_IRWXG,
			-1, -1);
	if (ret < 0) {
		ERR("Failed to create stream output directory");
		goto end;
	}

	/*
	 * No need to use run_as API here because whatever we receive,
	 * the relayd uses its own credentials for the stream files.
	 */
	ret = utils_create_stream_file(stream->path_name, stream->channel_name,
			stream->tracefile_size, 0, -1, -1, NULL);
	if (ret < 0) {
		ERR("Failed to create stream output file");
		goto end;
	}
	stream->stream_fd = stream_fd_create(ret);
	if (!stream->stream_fd) {
		if (close(ret)) {
			PERROR("Error closing file %d", ret);
		}
		ret = -1;
		goto end;
	}
	stream->tfa = tracefile_array_create(stream->tracefile_count);
	if (!stream->tfa) {
		ret = -1;
		goto end;
	}
	if (stream->tracefile_size) {
		DBG("Tracefile %s/%s_0 created", stream->path_name, stream->channel_name);
	} else {
		DBG("Tracefile %s/%s created", stream->path_name, stream->channel_name);
	}
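
	/*
	 * Note (inferred from the debug messages above): with a non-zero
	 * tracefile_size, the stream output is rotated across
	 * tracefile_count files suffixed with an index, "_0" being the
	 * first; otherwise a single unsuffixed file holds the whole
	 * stream.
	 */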

	if (!strncmp(stream->channel_name, DEFAULT_METADATA_NAME, LTTNG_NAME_MAX)) {
		stream->is_metadata = 1;
	}

	stream->in_recv_list = true;

	/*
	 * Add the stream to the recv list of the session. Once the end stream
	 * message is received, all session streams are published.
	 */
	pthread_mutex_lock(&session->recv_list_lock);
	cds_list_add_rcu(&stream->recv_node, &session->recv_list);
	session->stream_count++;
	pthread_mutex_unlock(&session->recv_list_lock);

	/*
	 * Both in the ctf_trace object and the global stream ht since the data
	 * side of the relayd does not have the concept of session.
	 */
	lttng_ht_add_unique_u64(relay_streams_ht, &stream->node);
	stream->in_stream_ht = true;

	DBG("Relay new stream added %s with ID %" PRIu64, stream->channel_name,
			stream->stream_handle);
	ret = 0;

end:
	if (ret) {
		if (stream->stream_fd) {
			stream_fd_put(stream->stream_fd);
			stream->stream_fd = NULL;
		}
		stream_put(stream);
		stream = NULL;
	}
	return stream;

error_no_alloc:
	/*
	 * path_name and channel_name need to be freed explicitly here
	 * because we cannot rely on stream_put().
	 */
	free(path_name);
	free(channel_name);
	return NULL;
}
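
/*
 * Lifecycle sketch (illustrative comment, not upstream documentation),
 * using only functions from this file:
 *
 *	stream = stream_create(trace, handle, path, chan, size, count);
 *	...	data packets are received and written	...
 *	stream_publish(stream);
 *	...
 *	try_stream_close(stream);
 *
 * with stream_get()/stream_put() bracketing any access from other
 * threads.
 */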

/*
 * Called with the session lock held.
 */
void stream_publish(struct relay_stream *stream)
{
	struct relay_session *session;

	pthread_mutex_lock(&stream->lock);
	if (stream->published) {
		goto unlock;
	}

	session = stream->trace->session;

	pthread_mutex_lock(&session->recv_list_lock);
	if (stream->in_recv_list) {
		cds_list_del_rcu(&stream->recv_node);
		stream->in_recv_list = false;
	}
	pthread_mutex_unlock(&session->recv_list_lock);

	pthread_mutex_lock(&stream->trace->stream_list_lock);
	cds_list_add_rcu(&stream->stream_node, &stream->trace->stream_list);
	pthread_mutex_unlock(&stream->trace->stream_list_lock);

	stream->published = true;
unlock:
	pthread_mutex_unlock(&stream->lock);
}
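
/*
 * Descriptive note: publication moves a stream from the session's
 * private recv_list to the trace's stream_list. Only published streams
 * are visible to the rest of the relayd (notably the live viewer end);
 * unpublished streams remain private to the receiving connection.
 */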

/*
 * Stream must be protected by holding the stream lock or by virtue of being
 * called from stream_destroy, in which case it is guaranteed to be accessed
 * from a single thread by the reflock.
 */
static void stream_unpublish(struct relay_stream *stream)
{
	if (stream->in_stream_ht) {
		struct lttng_ht_iter iter;
		int ret;

		iter.iter.node = &stream->node.node;
		ret = lttng_ht_del(relay_streams_ht, &iter);
		assert(!ret);
		stream->in_stream_ht = false;
	}
	if (stream->published) {
		pthread_mutex_lock(&stream->trace->stream_list_lock);
		cds_list_del_rcu(&stream->stream_node);
		pthread_mutex_unlock(&stream->trace->stream_list_lock);
		stream->published = false;
	}
}
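
/*
 * Descriptive note: once unpublished, the stream can no longer be found
 * by new lookups through relay_streams_ht or the trace stream list, but
 * threads that already hold a reference keep a valid object until their
 * final stream_put().
 */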

static void stream_destroy(struct relay_stream *stream)
{
	if (stream->indexes_ht) {
		/*
		 * Calling lttng_ht_destroy in call_rcu worker thread so
		 * we don't hold the RCU read-side lock while calling
		 * it.
		 */
		lttng_ht_destroy(stream->indexes_ht);
	}
	if (stream->tfa) {
		tracefile_array_destroy(stream->tfa);
	}
	free(stream->path_name);
	free(stream->channel_name);
	free(stream);
}

static void stream_destroy_rcu(struct rcu_head *rcu_head)
{
	struct relay_stream *stream =
		caa_container_of(rcu_head, struct relay_stream, rcu_node);

	stream_destroy(stream);
}

/*
 * No need to take stream->lock since this is only called on the final
 * stream_put which ensures that a single thread may act on the stream.
 *
 * At that point, the object is also protected by the reflock which
 * guarantees that no other thread may share ownership of this stream.
 */
static void stream_release(struct urcu_ref *ref)
{
	struct relay_stream *stream =
		caa_container_of(ref, struct relay_stream, ref);
	struct relay_session *session;

	session = stream->trace->session;

	DBG("Releasing stream id %" PRIu64, stream->stream_handle);

	pthread_mutex_lock(&session->recv_list_lock);
	session->stream_count--;
	if (stream->in_recv_list) {
		cds_list_del_rcu(&stream->recv_node);
		stream->in_recv_list = false;
	}
	pthread_mutex_unlock(&session->recv_list_lock);

	stream_unpublish(stream);

	if (stream->stream_fd) {
		stream_fd_put(stream->stream_fd);
		stream->stream_fd = NULL;
	}
	if (stream->index_fd) {
		stream_fd_put(stream->index_fd);
		stream->index_fd = NULL;
	}
	if (stream->trace) {
		ctf_trace_put(stream->trace);
		stream->trace = NULL;
	}

	call_rcu(&stream->rcu_node, stream_destroy_rcu);
}
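
/*
 * Descriptive note: destruction is deferred through call_rcu() so that
 * readers still traversing relay_streams_ht under the RCU read-side
 * lock can never dereference freed memory; the actual free only happens
 * in stream_destroy_rcu() after a grace period.
 */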

void stream_put(struct relay_stream *stream)
{
	DBG("stream put for stream id %" PRIu64, stream->stream_handle);
	/*
	 * Ensure existence of stream->reflock for stream unlock: the
	 * RCU read-side lock keeps the stream memory (and thus the
	 * reflock) valid until after the unlock, even when this put
	 * drops the final reference.
	 */
	rcu_read_lock();
	/*
	 * Stream reflock ensures that concurrent test and update of
	 * stream ref is atomic.
	 */
	pthread_mutex_lock(&stream->reflock);
	assert(stream->ref.refcount != 0);
	/*
	 * Wait until we have processed all the stream packets before
	 * actually putting our last stream reference.
	 */
	DBG("stream put stream id %" PRIu64 " refcount %d",
			stream->stream_handle,
			(int) stream->ref.refcount);
	urcu_ref_put(&stream->ref, stream_release);
	pthread_mutex_unlock(&stream->reflock);
	rcu_read_unlock();
}

void try_stream_close(struct relay_stream *stream)
{
	DBG("Trying to close stream %" PRIu64, stream->stream_handle);
	pthread_mutex_lock(&stream->lock);
	/*
	 * Can be called concurrently by connection close and reception of last
	 * pending data.
	 */
	if (stream->closed) {
		pthread_mutex_unlock(&stream->lock);
		DBG("closing stream %" PRIu64 " aborted since it is already marked as closed", stream->stream_handle);
		return;
	}

	stream->close_requested = true;

	if (stream->last_net_seq_num == -1ULL) {
		/*
		 * Handle connection close without explicit stream close
		 * command.
		 *
		 * We can be clever about indexes partially received in
		 * cases where we received the data socket part, but not
		 * the control socket part: since we're currently closing
		 * the stream on behalf of the control socket, we *know*
		 * there won't be any more control information for this
		 * socket. Therefore, we can destroy all indexes for
		 * which we have received only the file descriptor (from
		 * the data socket). This takes care of consumerd crashes
		 * between sending the data and control information for
		 * a packet. Since those are sent in that order, we take
		 * care of consumerd crashes.
		 */
		relay_index_close_partial_fd(stream);
		/*
		 * Use the highest net_seq_num we currently have pending
		 * as the end-of-stream indicator. Leave last_net_seq_num
		 * at -1ULL if we cannot find any index.
		 */
		stream->last_net_seq_num = relay_index_find_last(stream);
		/* Fall-through into the next check. */
	}

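	/*
	 * Descriptive note: the int64_t cast below implements a
	 * wrap-around-safe "prev_seq < last_net_seq_num" comparison.
	 * For example, with prev_seq == 5 and last_net_seq_num == 8,
	 * (int64_t) (5 - 8) == -3 < 0, so data is still pending; once
	 * prev_seq catches up to 8, the difference becomes >= 0 and the
	 * close can proceed.
	 */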
	if (stream->last_net_seq_num != -1ULL &&
			((int64_t) (stream->prev_seq - stream->last_net_seq_num)) < 0) {
		/*
		 * Don't close since we still have data pending. This
		 * handles cases where an explicit close command has
		 * been received for this stream, and cases where the
		 * connection has been closed, and we are awaiting
		 * index information from the data socket. It is
		 * therefore expected that all the index fd information
		 * we need has already been received on the control
		 * socket. Matching index information from the data
		 * socket should be Expected Soon(TM).
		 *
		 * TODO: We should implement a timer to garbage collect
		 * streams after a timeout to be resilient against a
		 * consumerd implementation that would not match this
		 * expected behavior.
		 */
		pthread_mutex_unlock(&stream->lock);
		DBG("closing stream %" PRIu64 " aborted since it still has data pending", stream->stream_handle);
		return;
	}
	/*
	 * We received all the indexes we can expect.
	 */
	stream_unpublish(stream);
	stream->closed = true;
	/* Relay indexes are only used by the "consumer/sessiond" end. */
	relay_index_close_all(stream);
	pthread_mutex_unlock(&stream->lock);
	DBG("Succeeded in closing stream %" PRIu64, stream->stream_handle);
	stream_put(stream);
}

static void print_stream_indexes(struct relay_stream *stream)
{
	struct lttng_ht_iter iter;
	struct relay_index *index;

	rcu_read_lock();
	cds_lfht_for_each_entry(stream->indexes_ht->ht, &iter.iter, index,
			index_n.node) {
		DBG("index %p net_seq_num %" PRIu64 " refcount %ld"
				" stream %" PRIu64 " trace %" PRIu64
				" session %" PRIu64,
				index,
				index->index_n.key,
				stream->ref.refcount,
				index->stream->stream_handle,
				index->stream->trace->id,
				index->stream->trace->session->id);
	}
	rcu_read_unlock();
}

void print_relay_streams(void)
{
	struct lttng_ht_iter iter;
	struct relay_stream *stream;

	if (!relay_streams_ht) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
			node.node) {
		if (!stream_get(stream)) {
			continue;
		}
		DBG("stream %p refcount %ld stream %" PRIu64 " trace %" PRIu64
				" session %" PRIu64,
				stream,
				stream->ref.refcount,
				stream->stream_handle,
				stream->trace->id,
				stream->trace->session->id);
		print_stream_indexes(stream);
		stream_put(stream);
	}
	rcu_read_unlock();
}