Fix relayd: stream index file created in the wrong directory
[lttng-tools.git] / src / bin / lttng-relayd / stream.c
/*
 * Copyright (C) 2013 - Julien Desfossez <jdesfossez@efficios.com>
 *                      David Goulet <dgoulet@efficios.com>
 *               2015 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License, version 2 only, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _LGPL_SOURCE
#include <common/common.h>
#include <common/utils.h>
#include <common/defaults.h>
#include <urcu/rculist.h>
#include <sys/stat.h>

#include "lttng-relayd.h"
#include "index.h"
#include "stream.h"
#include "viewer-stream.h"

/* Should be called with RCU read-side lock held. */
bool stream_get(struct relay_stream *stream)
{
	return urcu_ref_get_unless_zero(&stream->ref);
}

/*
 * Get a stream from the streams hash table by stream id. Return the
 * stream if found, else NULL. A reference is taken on the returned
 * stream; stream_put() must be called on it.
 */
struct relay_stream *stream_get_by_id(uint64_t stream_id)
{
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;
	struct relay_stream *stream = NULL;

	rcu_read_lock();
	lttng_ht_lookup(relay_streams_ht, &stream_id, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (!node) {
		DBG("Relay stream %" PRIu64 " not found", stream_id);
		goto end;
	}
	stream = caa_container_of(node, struct relay_stream, node);
	if (!stream_get(stream)) {
		stream = NULL;
	}
end:
	rcu_read_unlock();
	return stream;
}

/*
 * We keep ownership of path_name and channel_name.
 */
struct relay_stream *stream_create(struct ctf_trace *trace,
		uint64_t stream_handle, char *path_name,
		char *channel_name, uint64_t tracefile_size,
		uint64_t tracefile_count,
		const struct relay_stream_chunk_id *chunk_id)
{
	int ret;
	struct relay_stream *stream = NULL;
	struct relay_session *session = trace->session;

	stream = zmalloc(sizeof(struct relay_stream));
	if (stream == NULL) {
		PERROR("relay stream zmalloc");
		goto error_no_alloc;
	}

	stream->stream_handle = stream_handle;
	stream->prev_data_seq = -1ULL;
	stream->prev_index_seq = -1ULL;
	stream->last_net_seq_num = -1ULL;
	stream->ctf_stream_id = -1ULL;
	stream->tracefile_size = tracefile_size;
	stream->tracefile_count = tracefile_count;
	stream->path_name = path_name;
	stream->prev_path_name = NULL;
	stream->channel_name = channel_name;
	stream->rotate_at_seq_num = -1ULL;
	lttng_ht_node_init_u64(&stream->node, stream->stream_handle);
	pthread_mutex_init(&stream->lock, NULL);
	urcu_ref_init(&stream->ref);
	ctf_trace_get(trace);
	stream->trace = trace;
	stream->current_chunk_id = *chunk_id;

	stream->indexes_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!stream->indexes_ht) {
		ERR("Cannot create indexes_ht");
		ret = -1;
		goto end;
	}

	ret = utils_mkdir_recursive(stream->path_name, S_IRWXU | S_IRWXG,
			-1, -1);
	if (ret < 0) {
		ERR("relay creating output directory");
		goto end;
	}

	/*
	 * No need to use run_as API here because whatever we receive,
	 * the relayd uses its own credentials for the stream files.
	 */
	ret = utils_create_stream_file(stream->path_name, stream->channel_name,
			stream->tracefile_size, 0, -1, -1, NULL);
	if (ret < 0) {
		ERR("Create output file");
		goto end;
	}
	stream->stream_fd = stream_fd_create(ret);
	if (!stream->stream_fd) {
		if (close(ret)) {
			PERROR("Error closing file %d", ret);
		}
		ret = -1;
		goto end;
	}
	stream->tfa = tracefile_array_create(stream->tracefile_count);
	if (!stream->tfa) {
		ret = -1;
		goto end;
	}
	if (stream->tracefile_size) {
		DBG("Tracefile %s/%s_0 created", stream->path_name, stream->channel_name);
	} else {
		DBG("Tracefile %s/%s created", stream->path_name, stream->channel_name);
	}

	if (!strncmp(stream->channel_name, DEFAULT_METADATA_NAME, LTTNG_NAME_MAX)) {
		stream->is_metadata = 1;
	}

	stream->in_recv_list = true;

	/*
	 * Add the stream in the recv list of the session. Once the end stream
	 * message is received, all session streams are published.
	 */
	pthread_mutex_lock(&session->recv_list_lock);
	cds_list_add_rcu(&stream->recv_node, &session->recv_list);
	session->stream_count++;
	pthread_mutex_unlock(&session->recv_list_lock);

	/*
	 * Both in the ctf_trace object and the global stream ht since the data
	 * side of the relayd does not have the concept of session.
	 */
	lttng_ht_add_unique_u64(relay_streams_ht, &stream->node);
	stream->in_stream_ht = true;

	DBG("Relay new stream added %s with ID %" PRIu64, stream->channel_name,
			stream->stream_handle);
	ret = 0;

end:
	if (ret) {
		if (stream->stream_fd) {
			stream_fd_put(stream->stream_fd);
			stream->stream_fd = NULL;
		}
		stream_put(stream);
		stream = NULL;
	}
	return stream;

error_no_alloc:
	/*
	 * path_name and channel_name need to be freed explicitly here
	 * because we cannot rely on stream_put().
	 */
	free(path_name);
	free(channel_name);
	return NULL;
}

/*
 * Called with the session lock held.
 */
void stream_publish(struct relay_stream *stream)
{
	struct relay_session *session;

	pthread_mutex_lock(&stream->lock);
	if (stream->published) {
		goto unlock;
	}

	session = stream->trace->session;

	pthread_mutex_lock(&session->recv_list_lock);
	if (stream->in_recv_list) {
		cds_list_del_rcu(&stream->recv_node);
		stream->in_recv_list = false;
	}
	pthread_mutex_unlock(&session->recv_list_lock);

	pthread_mutex_lock(&stream->trace->stream_list_lock);
	cds_list_add_rcu(&stream->stream_node, &stream->trace->stream_list);
	pthread_mutex_unlock(&stream->trace->stream_list_lock);

	stream->published = true;
unlock:
	pthread_mutex_unlock(&stream->lock);
}

/*
 * Stream must be protected by holding the stream lock or by virtue of being
 * called from stream_destroy.
 */
static void stream_unpublish(struct relay_stream *stream)
{
	if (stream->in_stream_ht) {
		struct lttng_ht_iter iter;
		int ret;

		iter.iter.node = &stream->node.node;
		ret = lttng_ht_del(relay_streams_ht, &iter);
		assert(!ret);
		stream->in_stream_ht = false;
	}
	if (stream->published) {
		pthread_mutex_lock(&stream->trace->stream_list_lock);
		cds_list_del_rcu(&stream->stream_node);
		pthread_mutex_unlock(&stream->trace->stream_list_lock);
		stream->published = false;
	}
}

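/*
 * Free all resources owned by the stream. Only called from the
 * stream_destroy_rcu() callback, once the last reference has been put
 * and an RCU grace period has elapsed.
 */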
static void stream_destroy(struct relay_stream *stream)
{
	if (stream->indexes_ht) {
		/*
		 * Calling lttng_ht_destroy in call_rcu worker thread so
		 * we don't hold the RCU read-side lock while calling
		 * it.
		 */
		lttng_ht_destroy(stream->indexes_ht);
	}
	if (stream->tfa) {
		tracefile_array_destroy(stream->tfa);
	}
	free(stream->path_name);
	free(stream->prev_path_name);
	free(stream->channel_name);
	free(stream);
}

static void stream_destroy_rcu(struct rcu_head *rcu_head)
{
	struct relay_stream *stream =
		caa_container_of(rcu_head, struct relay_stream, rcu_node);

	stream_destroy(stream);
}

/*
 * No need to take stream->lock since this is only called on the final
 * stream_put which ensures that a single thread may act on the stream.
 */
static void stream_release(struct urcu_ref *ref)
{
	struct relay_stream *stream =
		caa_container_of(ref, struct relay_stream, ref);
	struct relay_session *session;

	session = stream->trace->session;

	DBG("Releasing stream id %" PRIu64, stream->stream_handle);

	pthread_mutex_lock(&session->recv_list_lock);
	session->stream_count--;
	if (stream->in_recv_list) {
		cds_list_del_rcu(&stream->recv_node);
		stream->in_recv_list = false;
	}
	pthread_mutex_unlock(&session->recv_list_lock);

	stream_unpublish(stream);

	if (stream->stream_fd) {
		stream_fd_put(stream->stream_fd);
		stream->stream_fd = NULL;
	}
	if (stream->index_file) {
		lttng_index_file_put(stream->index_file);
		stream->index_file = NULL;
	}
	if (stream->trace) {
		ctf_trace_put(stream->trace);
		stream->trace = NULL;
	}

	call_rcu(&stream->rcu_node, stream_destroy_rcu);
}

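/*
 * Drop a reference on the stream. The final put triggers stream_release(),
 * which unpublishes the stream and frees it after an RCU grace period.
 */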
void stream_put(struct relay_stream *stream)
{
	DBG("stream put for stream id %" PRIu64, stream->stream_handle);
	rcu_read_lock();
	assert(stream->ref.refcount != 0);
	/*
	 * Wait until we have processed all the stream packets before
	 * actually putting our last stream reference.
	 */
	DBG("stream put stream id %" PRIu64 " refcount %d",
			stream->stream_handle,
			(int) stream->ref.refcount);
	urcu_ref_put(&stream->ref, stream_release);
	rcu_read_unlock();
}

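/*
 * Try to close the stream. If data is still expected from the data
 * socket, the close is deferred until the remaining packets and indexes
 * have been received; otherwise the stream is unpublished, marked as
 * closed, and stream_put() is called.
 */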
void try_stream_close(struct relay_stream *stream)
{
	bool session_aborted;
	struct relay_session *session = stream->trace->session;

	DBG("Trying to close stream %" PRIu64, stream->stream_handle);

	pthread_mutex_lock(&session->lock);
	session_aborted = session->aborted;
	pthread_mutex_unlock(&session->lock);

	pthread_mutex_lock(&stream->lock);
	/*
	 * Can be called concurrently by connection close and reception of the
	 * last pending data.
	 */
	if (stream->closed) {
		pthread_mutex_unlock(&stream->lock);
		DBG("closing stream %" PRIu64 " aborted since it is already marked as closed", stream->stream_handle);
		return;
	}

	stream->close_requested = true;

	if (stream->last_net_seq_num == -1ULL) {
		/*
		 * Handle connection close without explicit stream close
		 * command.
		 *
		 * We can be clever about indexes partially received in
		 * cases where we received the data socket part, but not
		 * the control socket part: since we're currently closing
		 * the stream on behalf of the control socket, we *know*
		 * there won't be any more control information for this
		 * socket. Therefore, we can destroy all indexes for
		 * which we have received only the file descriptor (from
		 * the data socket). This handles consumerd crashes that
		 * occur between sending the data and the control
		 * information for a packet, since those are sent in that
		 * order.
		 */
		DBG("relay_index_close_partial_fd");
		relay_index_close_partial_fd(stream);
		/*
		 * Use the highest net_seq_num we currently have pending
		 * as the end-of-stream indicator. Leave last_net_seq_num
		 * at -1ULL if we cannot find any index.
		 */
		stream->last_net_seq_num = relay_index_find_last(stream);
		DBG("Updating stream->last_net_seq_num to %" PRIu64, stream->last_net_seq_num);
		/* Fall-through into the next check. */
	}

	if (stream->last_net_seq_num != -1ULL &&
			((int64_t) (stream->prev_data_seq - stream->last_net_seq_num)) < 0
			&& !session_aborted) {
		/*
		 * Don't close since we still have data pending. This
		 * handles cases where an explicit close command has
		 * been received for this stream, and cases where the
		 * connection has been closed, and we are awaiting
		 * index information from the data socket. It is
		 * therefore expected that all the index fd information
		 * we need has already been received on the control
		 * socket. Matching index information from the data
		 * socket should be Expected Soon(TM).
		 *
		 * TODO: We should implement a timer to garbage collect
		 * streams after a timeout to be resilient against a
		 * consumerd implementation that would not match this
		 * expected behavior.
		 */
		pthread_mutex_unlock(&stream->lock);
		DBG("closing stream %" PRIu64 " aborted since it still has data pending", stream->stream_handle);
		return;
	}
	/*
	 * We received all the indexes we can expect.
	 */
	stream_unpublish(stream);
	stream->closed = true;
	/* Relay indexes are only used by the "consumer/sessiond" end. */
	relay_index_close_all(stream);
	pthread_mutex_unlock(&stream->lock);
	DBG("Succeeded in closing stream %" PRIu64, stream->stream_handle);
	stream_put(stream);
}

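/* Debugging helper: log every index currently held by the stream. */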
static void print_stream_indexes(struct relay_stream *stream)
{
	struct lttng_ht_iter iter;
	struct relay_index *index;

	rcu_read_lock();
	cds_lfht_for_each_entry(stream->indexes_ht->ht, &iter.iter, index,
			index_n.node) {
		DBG("index %p net_seq_num %" PRIu64 " refcount %ld"
				" stream %" PRIu64 " trace %" PRIu64
				" session %" PRIu64,
				index,
				index->index_n.key,
				stream->ref.refcount,
				index->stream->stream_handle,
				index->stream->trace->id,
				index->stream->trace->session->id);
	}
	rcu_read_unlock();
}

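/*
 * Debugging helper: log every stream registered in relay_streams_ht,
 * along with its indexes.
 */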
void print_relay_streams(void)
{
	struct lttng_ht_iter iter;
	struct relay_stream *stream;

	if (!relay_streams_ht) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
			node.node) {
		if (!stream_get(stream)) {
			continue;
		}
		DBG("stream %p refcount %ld stream %" PRIu64 " trace %" PRIu64
				" session %" PRIu64,
				stream,
				stream->ref.refcount,
				stream->stream_handle,
				stream->trace->id,
				stream->trace->session->id);
		print_stream_indexes(stream);
		stream_put(stream);
	}
	rcu_read_unlock();
}