Fix: relay_recv_metadata does not check for partial write
[lttng-tools.git] / src / bin / lttng-relayd / stream.c
1 /*
2 * Copyright (C) 2013 - Julien Desfossez <jdesfossez@efficios.com>
3 * David Goulet <dgoulet@efficios.com>
4 * 2015 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License, version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc., 51
17 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20 #define _LGPL_SOURCE
21 #include <common/common.h>
22 #include <common/utils.h>
23 #include <common/defaults.h>
24 #include <urcu/rculist.h>
25 #include <sys/stat.h>
26
27 #include "lttng-relayd.h"
28 #include "index.h"
29 #include "stream.h"
30 #include "viewer-stream.h"
31
/*
 * Take a reference on a stream, failing if its refcount has already
 * reached zero (stream being torn down).
 *
 * Should be called with RCU read-side lock held.
 *
 * Returns true if a reference was acquired; a successful get must be
 * balanced by a stream_put().
 */
bool stream_get(struct relay_stream *stream)
{
	return urcu_ref_get_unless_zero(&stream->ref);
}
37
38 /*
39 * Get stream from stream id from the streams hash table. Return stream
40 * if found else NULL. A stream reference is taken when a stream is
41 * returned. stream_put() must be called on that stream.
42 */
43 struct relay_stream *stream_get_by_id(uint64_t stream_id)
44 {
45 struct lttng_ht_node_u64 *node;
46 struct lttng_ht_iter iter;
47 struct relay_stream *stream = NULL;
48
49 rcu_read_lock();
50 lttng_ht_lookup(relay_streams_ht, &stream_id, &iter);
51 node = lttng_ht_iter_get_node_u64(&iter);
52 if (!node) {
53 DBG("Relay stream %" PRIu64 " not found", stream_id);
54 goto end;
55 }
56 stream = caa_container_of(node, struct relay_stream, node);
57 if (!stream_get(stream)) {
58 stream = NULL;
59 }
60 end:
61 rcu_read_unlock();
62 return stream;
63 }
64
65 /*
66 * We keep ownership of path_name and channel_name.
67 */
68 struct relay_stream *stream_create(struct ctf_trace *trace,
69 uint64_t stream_handle, char *path_name,
70 char *channel_name, uint64_t tracefile_size,
71 uint64_t tracefile_count)
72 {
73 int ret;
74 struct relay_stream *stream = NULL;
75 struct relay_session *session = trace->session;
76
77 stream = zmalloc(sizeof(struct relay_stream));
78 if (stream == NULL) {
79 PERROR("relay stream zmalloc");
80 goto error_no_alloc;
81 }
82
83 stream->stream_handle = stream_handle;
84 stream->prev_seq = -1ULL;
85 stream->last_net_seq_num = -1ULL;
86 stream->ctf_stream_id = -1ULL;
87 stream->tracefile_size = tracefile_size;
88 stream->tracefile_count = tracefile_count;
89 stream->path_name = path_name;
90 stream->channel_name = channel_name;
91 stream->rotate_at_seq_num = -1ULL;
92 lttng_ht_node_init_u64(&stream->node, stream->stream_handle);
93 pthread_mutex_init(&stream->lock, NULL);
94 urcu_ref_init(&stream->ref);
95 ctf_trace_get(trace);
96 stream->trace = trace;
97
98 stream->indexes_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
99 if (!stream->indexes_ht) {
100 ERR("Cannot created indexes_ht");
101 ret = -1;
102 goto end;
103 }
104
105 ret = utils_mkdir_recursive(stream->path_name, S_IRWXU | S_IRWXG,
106 -1, -1);
107 if (ret < 0) {
108 ERR("relay creating output directory");
109 goto end;
110 }
111
112 /*
113 * No need to use run_as API here because whatever we receive,
114 * the relayd uses its own credentials for the stream files.
115 */
116 ret = utils_create_stream_file(stream->path_name, stream->channel_name,
117 stream->tracefile_size, 0, -1, -1, NULL);
118 if (ret < 0) {
119 ERR("Create output file");
120 goto end;
121 }
122 stream->stream_fd = stream_fd_create(ret);
123 if (!stream->stream_fd) {
124 if (close(ret)) {
125 PERROR("Error closing file %d", ret);
126 }
127 ret = -1;
128 goto end;
129 }
130 stream->tfa = tracefile_array_create(stream->tracefile_count);
131 if (!stream->tfa) {
132 ret = -1;
133 goto end;
134 }
135 if (stream->tracefile_size) {
136 DBG("Tracefile %s/%s_0 created", stream->path_name, stream->channel_name);
137 } else {
138 DBG("Tracefile %s/%s created", stream->path_name, stream->channel_name);
139 }
140
141 if (!strncmp(stream->channel_name, DEFAULT_METADATA_NAME, LTTNG_NAME_MAX)) {
142 stream->is_metadata = 1;
143 }
144
145 stream->in_recv_list = true;
146
147 /*
148 * Add the stream in the recv list of the session. Once the end stream
149 * message is received, all session streams are published.
150 */
151 pthread_mutex_lock(&session->recv_list_lock);
152 cds_list_add_rcu(&stream->recv_node, &session->recv_list);
153 session->stream_count++;
154 pthread_mutex_unlock(&session->recv_list_lock);
155
156 /*
157 * Both in the ctf_trace object and the global stream ht since the data
158 * side of the relayd does not have the concept of session.
159 */
160 lttng_ht_add_unique_u64(relay_streams_ht, &stream->node);
161 stream->in_stream_ht = true;
162
163 DBG("Relay new stream added %s with ID %" PRIu64, stream->channel_name,
164 stream->stream_handle);
165 ret = 0;
166
167 end:
168 if (ret) {
169 if (stream->stream_fd) {
170 stream_fd_put(stream->stream_fd);
171 stream->stream_fd = NULL;
172 }
173 stream_put(stream);
174 stream = NULL;
175 }
176 return stream;
177
178 error_no_alloc:
179 /*
180 * path_name and channel_name need to be freed explicitly here
181 * because we cannot rely on stream_put().
182 */
183 free(path_name);
184 free(channel_name);
185 return NULL;
186 }
187
188 /*
189 * Called with the session lock held.
190 */
191 void stream_publish(struct relay_stream *stream)
192 {
193 struct relay_session *session;
194
195 pthread_mutex_lock(&stream->lock);
196 if (stream->published) {
197 goto unlock;
198 }
199
200 session = stream->trace->session;
201
202 pthread_mutex_lock(&session->recv_list_lock);
203 if (stream->in_recv_list) {
204 cds_list_del_rcu(&stream->recv_node);
205 stream->in_recv_list = false;
206 }
207 pthread_mutex_unlock(&session->recv_list_lock);
208
209 pthread_mutex_lock(&stream->trace->stream_list_lock);
210 cds_list_add_rcu(&stream->stream_node, &stream->trace->stream_list);
211 pthread_mutex_unlock(&stream->trace->stream_list_lock);
212
213 stream->published = true;
214 unlock:
215 pthread_mutex_unlock(&stream->lock);
216 }
217
218 /*
219 * Stream must be protected by holding the stream lock or by virtue of being
220 * called from stream_destroy.
221 */
222 static void stream_unpublish(struct relay_stream *stream)
223 {
224 if (stream->in_stream_ht) {
225 struct lttng_ht_iter iter;
226 int ret;
227
228 iter.iter.node = &stream->node.node;
229 ret = lttng_ht_del(relay_streams_ht, &iter);
230 assert(!ret);
231 stream->in_stream_ht = false;
232 }
233 if (stream->published) {
234 pthread_mutex_lock(&stream->trace->stream_list_lock);
235 cds_list_del_rcu(&stream->stream_node);
236 pthread_mutex_unlock(&stream->trace->stream_list_lock);
237 stream->published = false;
238 }
239 }
240
/*
 * Free all memory owned by the stream. Runs from the call_rcu worker
 * thread (via stream_destroy_rcu) once no RCU reader can still hold a
 * reference to the stream.
 */
static void stream_destroy(struct relay_stream *stream)
{
	if (stream->indexes_ht) {
		/*
		 * Calling lttng_ht_destroy in call_rcu worker thread so
		 * we don't hold the RCU read-side lock while calling
		 * it.
		 */
		lttng_ht_destroy(stream->indexes_ht);
	}
	if (stream->tfa) {
		tracefile_array_destroy(stream->tfa);
	}
	/* Ownership of these strings was taken by stream_create(). */
	free(stream->path_name);
	free(stream->channel_name);
	free(stream);
}
258
/*
 * RCU callback: reclaim the stream after the grace period scheduled by
 * stream_release() has elapsed.
 */
static void stream_destroy_rcu(struct rcu_head *rcu_head)
{
	struct relay_stream *stream =
		caa_container_of(rcu_head, struct relay_stream, rcu_node);

	stream_destroy(stream);
}
266
/*
 * Release callback invoked on the final stream_put(): unlink the stream
 * from every shared structure, drop the resources it holds, and defer
 * the actual free to an RCU grace period.
 *
 * No need to take stream->lock since this is only called on the final
 * stream_put which ensures that a single thread may act on the stream.
 */
static void stream_release(struct urcu_ref *ref)
{
	struct relay_stream *stream =
		caa_container_of(ref, struct relay_stream, ref);
	struct relay_session *session;

	session = stream->trace->session;

	DBG("Releasing stream id %" PRIu64, stream->stream_handle);

	/* Remove from the session's pending (recv) list, if still there. */
	pthread_mutex_lock(&session->recv_list_lock);
	session->stream_count--;
	if (stream->in_recv_list) {
		cds_list_del_rcu(&stream->recv_node);
		stream->in_recv_list = false;
	}
	pthread_mutex_unlock(&session->recv_list_lock);

	/* Remove from the global ht and the trace's stream list, if published. */
	stream_unpublish(stream);

	if (stream->stream_fd) {
		stream_fd_put(stream->stream_fd);
		stream->stream_fd = NULL;
	}
	if (stream->index_file) {
		lttng_index_file_put(stream->index_file);
		stream->index_file = NULL;
	}
	if (stream->trace) {
		/* Drop the trace reference taken in stream_create(). */
		ctf_trace_put(stream->trace);
		stream->trace = NULL;
	}

	/* Free the stream itself only after pre-existing RCU readers finish. */
	call_rcu(&stream->rcu_node, stream_destroy_rcu);
}
306
/*
 * Drop a stream reference; the last put triggers stream_release().
 * The put is done under the RCU read-side lock so that release-time
 * removal from RCU structures is safe.
 */
void stream_put(struct relay_stream *stream)
{
	DBG("stream put for stream id %" PRIu64, stream->stream_handle);
	rcu_read_lock();
	assert(stream->ref.refcount != 0);
	/*
	 * Wait until we have processed all the stream packets before
	 * actually putting our last stream reference.
	 */
	DBG("stream put stream id %" PRIu64 " refcount %d",
			stream->stream_handle,
			(int) stream->ref.refcount);
	urcu_ref_put(&stream->ref, stream_release);
	rcu_read_unlock();
}
322
/*
 * Attempt to close a stream, on explicit close command or when its
 * connection goes away.
 *
 * The stream is only actually closed once all data it is still owed
 * (up to last_net_seq_num) has been received; otherwise the close
 * request is recorded (close_requested) and retried when the pending
 * data arrives. Closing unpublishes the stream and drops the creation
 * reference via stream_put().
 */
void try_stream_close(struct relay_stream *stream)
{
	bool session_aborted;
	struct relay_session *session = stream->trace->session;

	DBG("Trying to close stream %" PRIu64, stream->stream_handle);

	/* Snapshot the aborted flag under the session lock. */
	pthread_mutex_lock(&session->lock);
	session_aborted = session->aborted;
	pthread_mutex_unlock(&session->lock);

	pthread_mutex_lock(&stream->lock);
	/*
	 * Can be called concurrently by connection close and reception of last
	 * pending data.
	 */
	if (stream->closed) {
		pthread_mutex_unlock(&stream->lock);
		DBG("closing stream %" PRIu64 " aborted since it is already marked as closed", stream->stream_handle);
		return;
	}

	stream->close_requested = true;

	if (stream->last_net_seq_num == -1ULL) {
		/*
		 * Handle connection close without explicit stream close
		 * command.
		 *
		 * We can be clever about indexes partially received in
		 * cases where we received the data socket part, but not
		 * the control socket part: since we're currently closing
		 * the stream on behalf of the control socket, we *know*
		 * there won't be any more control information for this
		 * socket. Therefore, we can destroy all indexes for
		 * which we have received only the file descriptor (from
		 * data socket). This takes care of consumerd crashes
		 * between sending the data and control information for
		 * a packet. Since those are sent in that order, we take
		 * care of consumerd crashes.
		 */
		DBG("relay_index_close_partial_fd");
		relay_index_close_partial_fd(stream);
		/*
		 * Use the highest net_seq_num we currently have pending
		 * As end of stream indicator. Leave last_net_seq_num
		 * at -1ULL if we cannot find any index.
		 */
		stream->last_net_seq_num = relay_index_find_last(stream);
		DBG("Updating stream->last_net_seq_num to %" PRIu64, stream->last_net_seq_num);
		/* Fall-through into the next check. */
	}

	/* Signed difference handles sequence numbers that may wrap. */
	if (stream->last_net_seq_num != -1ULL &&
			((int64_t) (stream->prev_seq - stream->last_net_seq_num)) < 0
			&& !session_aborted) {
		/*
		 * Don't close since we still have data pending. This
		 * handles cases where an explicit close command has
		 * been received for this stream, and cases where the
		 * connection has been closed, and we are awaiting for
		 * index information from the data socket. It is
		 * therefore expected that all the index fd information
		 * we need has already been received on the control
		 * socket. Matching index information from data socket
		 * should be Expected Soon(TM).
		 *
		 * TODO: We should implement a timer to garbage collect
		 * streams after a timeout to be resilient against a
		 * consumerd implementation that would not match this
		 * expected behavior.
		 */
		pthread_mutex_unlock(&stream->lock);
		DBG("closing stream %" PRIu64 " aborted since it still has data pending", stream->stream_handle);
		return;
	}
	/*
	 * We received all the indexes we can expect.
	 */
	stream_unpublish(stream);
	stream->closed = true;
	/* Relay indexes are only used by the "consumer/sessiond" end. */
	relay_index_close_all(stream);
	pthread_mutex_unlock(&stream->lock);
	DBG("Succeeded in closing stream %" PRIu64, stream->stream_handle);
	stream_put(stream);
}
410
411 static void print_stream_indexes(struct relay_stream *stream)
412 {
413 struct lttng_ht_iter iter;
414 struct relay_index *index;
415
416 rcu_read_lock();
417 cds_lfht_for_each_entry(stream->indexes_ht->ht, &iter.iter, index,
418 index_n.node) {
419 DBG("index %p net_seq_num %" PRIu64 " refcount %ld"
420 " stream %" PRIu64 " trace %" PRIu64
421 " session %" PRIu64,
422 index,
423 index->index_n.key,
424 stream->ref.refcount,
425 index->stream->stream_handle,
426 index->stream->trace->id,
427 index->stream->trace->session->id);
428 }
429 rcu_read_unlock();
430 }
431
432 void print_relay_streams(void)
433 {
434 struct lttng_ht_iter iter;
435 struct relay_stream *stream;
436
437 if (!relay_streams_ht) {
438 return;
439 }
440
441 rcu_read_lock();
442 cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
443 node.node) {
444 if (!stream_get(stream)) {
445 continue;
446 }
447 DBG("stream %p refcount %ld stream %" PRIu64 " trace %" PRIu64
448 " session %" PRIu64,
449 stream,
450 stream->ref.refcount,
451 stream->stream_handle,
452 stream->trace->id,
453 stream->trace->session->id);
454 print_stream_indexes(stream);
455 stream_put(stream);
456 }
457 rcu_read_unlock();
458 }
This page took 0.052443 seconds and 4 git commands to generate.