* Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-#define _GNU_SOURCE
+#define _LGPL_SOURCE
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <common/common.h>
#include <common/defaults.h>
#include <common/uri.h>
+#include <common/relayd/relayd.h>
#include "consumer.h"
-#include "health.h"
+#include "health-sessiond.h"
#include "ust-app.h"
#include "utils.h"
goto end;
}
- if (reply.ret_code == LTTNG_OK) {
+ if (reply.ret_code == LTTCOMM_CONSUMERD_SUCCESS) {
/* All good. */
ret = 0;
} else {
}
/* An error is possible so don't touch the key and stream_count. */
- if (reply.ret_code != LTTNG_OK) {
+ if (reply.ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
ret = -1;
goto end;
}
*key = reply.key;
*stream_count = reply.stream_count;
+ ret = 0;
end:
return ret;
DBG2("Sending destroy relayd command to consumer sock %d", *sock->fd_ptr);
+ memset(&msg, 0, sizeof(msg));
msg.cmd_type = LTTNG_CONSUMER_DESTROY_RELAYD;
msg.u.destroy_relayd.net_seq_idx = consumer->net_seq_index;
output->enabled = 1;
output->type = type;
output->net_seq_index = (uint64_t) -1ULL;
+ urcu_ref_init(&output->ref);
output->socks = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
*
* Should *NOT* be called with RCU read-side lock held.
*/
-void consumer_destroy_output(struct consumer_output *obj)
+static void consumer_release_output(struct urcu_ref *ref)
{
-	if (obj == NULL) {
-		return;
-	}
+	/* Recover the enclosing consumer_output from its embedded ref member. */
+	struct consumer_output *obj =
+		caa_container_of(ref, struct consumer_output, ref);
	consumer_destroy_output_sockets(obj);
	free(obj);
}
+/*
+ * Get the consumer_output object.
+ *
+ * Takes a reference that must be balanced by consumer_output_put().
+ */
+void consumer_output_get(struct consumer_output *obj)
+{
+	urcu_ref_get(&obj->ref);
+}
+
+/*
+ * Put the consumer_output object.
+ *
+ * Should *NOT* be called with RCU read-side lock held.
+ */
+void consumer_output_put(struct consumer_output *obj)
+{
+	if (!obj) {
+		return;
+	}
+	/* Last put frees the object through consumer_release_output(). */
+	urcu_ref_put(&obj->ref, consumer_release_output);
+}
+
/*
* Copy consumer output and returned the newly allocated copy.
*
struct consumer_output *consumer_copy_output(struct consumer_output *obj)
{
	int ret;
-	struct lttng_ht *tmp_ht_ptr;
	struct consumer_output *output;
	assert(obj);
	output = consumer_create_output(obj->type);
	if (output == NULL) {
-		goto error;
+		goto end;
	}
-	/* Avoid losing the HT reference after the memcpy() */
-	tmp_ht_ptr = output->socks;
-
-	memcpy(output, obj, sizeof(struct consumer_output));
-
-	/* Putting back the HT pointer and start copying socket(s). */
-	output->socks = tmp_ht_ptr;
-
+	/*
+	 * Copy members field by field rather than with a whole-struct
+	 * memcpy() so the fresh object's socks hash table and urcu_ref
+	 * are not clobbered by the source object's values.
+	 */
+	output->enabled = obj->enabled;
+	output->net_seq_index = obj->net_seq_index;
+	memcpy(output->subdir, obj->subdir, PATH_MAX);
+	output->snapshot = obj->snapshot;
+	output->relay_major_version = obj->relay_major_version;
+	output->relay_minor_version = obj->relay_minor_version;
+	memcpy(&output->dst, &obj->dst, sizeof(output->dst));
	ret = consumer_copy_sockets(output, obj);
	if (ret < 0) {
-		goto malloc_error;
+		goto error_put;
	}
-
-error:
+end:
	return output;
-malloc_error:
-	consumer_destroy_output(output);
+error_put:
+	/* Drops the reference taken at creation, freeing the copy. */
+	consumer_output_put(output);
	return NULL;
}
goto error;
}
- strncpy(obj->subdir, tmp_path, sizeof(obj->subdir));
+ if (lttng_strncpy(obj->subdir, tmp_path, sizeof(obj->subdir))) {
+ ret = -LTTNG_ERR_INVALID;
+ goto error;
+ }
DBG3("Consumer set network uri subdir path %s", tmp_path);
}
}
ret = consumer_recv_status_reply(sock);
-
error:
return ret;
}
int overwrite,
unsigned int switch_timer_interval,
unsigned int read_timer_interval,
+ unsigned int live_timer_interval,
+ unsigned int monitor_timer_interval,
int output,
int type,
uint64_t session_id,
uint64_t tracefile_count,
uint64_t session_id_per_pid,
unsigned int monitor,
- uint32_t ust_app_uid)
+ uint32_t ust_app_uid,
+ int64_t blocking_timeout,
+ const char *root_shm_path,
+ const char *shm_path)
{
assert(msg);
msg->u.ask_channel.overwrite = overwrite;
msg->u.ask_channel.switch_timer_interval = switch_timer_interval;
msg->u.ask_channel.read_timer_interval = read_timer_interval;
+ msg->u.ask_channel.live_timer_interval = live_timer_interval;
+ msg->u.ask_channel.monitor_timer_interval = monitor_timer_interval;
msg->u.ask_channel.output = output;
msg->u.ask_channel.type = type;
msg->u.ask_channel.session_id = session_id;
msg->u.ask_channel.tracefile_count = tracefile_count;
msg->u.ask_channel.monitor = monitor;
msg->u.ask_channel.ust_app_uid = ust_app_uid;
+ msg->u.ask_channel.blocking_timeout = blocking_timeout;
memcpy(msg->u.ask_channel.uuid, uuid, sizeof(msg->u.ask_channel.uuid));
strncpy(msg->u.ask_channel.name, name, sizeof(msg->u.ask_channel.name));
msg->u.ask_channel.name[sizeof(msg->u.ask_channel.name) - 1] = '\0';
+
+ if (root_shm_path) {
+ strncpy(msg->u.ask_channel.root_shm_path, root_shm_path,
+ sizeof(msg->u.ask_channel.root_shm_path));
+ msg->u.ask_channel.root_shm_path[sizeof(msg->u.ask_channel.root_shm_path) - 1] = '\0';
+ }
+ if (shm_path) {
+ strncpy(msg->u.ask_channel.shm_path, shm_path,
+ sizeof(msg->u.ask_channel.shm_path));
+ msg->u.ask_channel.shm_path[sizeof(msg->u.ask_channel.shm_path) - 1] = '\0';
+ }
}
/*
int type,
uint64_t tracefile_size,
uint64_t tracefile_count,
- unsigned int monitor)
+ unsigned int monitor,
+ unsigned int live_timer_interval,
+ unsigned int monitor_timer_interval)
{
assert(msg);
msg->u.channel.tracefile_size = tracefile_size;
msg->u.channel.tracefile_count = tracefile_count;
msg->u.channel.monitor = monitor;
+ msg->u.channel.live_timer_interval = live_timer_interval;
+ msg->u.channel.monitor_timer_interval = monitor_timer_interval;
strncpy(msg->u.channel.pathname, pathname,
sizeof(msg->u.channel.pathname));
msg->u.stream.cpu = cpu;
}
+/*
+ * Populate a "streams sent" consumer message for the given channel key
+ * and relayd network sequence index.  The message is zeroed first so no
+ * stale stack bytes are sent over the socket.
+ */
+void consumer_init_streams_sent_comm_msg(struct lttcomm_consumer_msg *msg,
+		enum lttng_consumer_command cmd,
+		uint64_t channel_key, uint64_t net_seq_idx)
+{
+	assert(msg);
+
+	memset(msg, 0, sizeof(struct lttcomm_consumer_msg));
+
+	msg->cmd_type = cmd;
+	msg->u.sent_streams.channel_key = channel_key;
+	msg->u.sent_streams.net_seq_idx = net_seq_idx;
+}
+
/*
* Send stream communication structure to the consumer.
*/
*/
int consumer_send_relayd_socket(struct consumer_socket *consumer_sock,
struct lttcomm_relayd_sock *rsock, struct consumer_output *consumer,
- enum lttng_stream_type type, uint64_t session_id)
+ enum lttng_stream_type type, uint64_t session_id,
+ char *session_name, char *hostname, int session_live_timer)
{
int ret;
struct lttcomm_consumer_msg msg;
assert(consumer);
assert(consumer_sock);
+ memset(&msg, 0, sizeof(msg));
/* Bail out if consumer is disabled */
if (!consumer->enabled) {
ret = LTTNG_OK;
goto error;
}
+ if (type == LTTNG_STREAM_CONTROL) {
+ ret = relayd_create_session(rsock,
+ &msg.u.relayd_sock.relayd_session_id,
+ session_name, hostname, session_live_timer,
+ consumer->snapshot);
+ if (ret < 0) {
+ /* Close the control socket. */
+ (void) relayd_close(rsock);
+ goto error;
+ }
+ }
+
msg.cmd_type = LTTNG_CONSUMER_ADD_RELAYD_SOCKET;
/*
* Assign network consumer output index using the temporary consumer since
return ret;
}
+/*
+ * Send the channel monitoring pipe fd to the consumer so that it can
+ * report per-channel statistics back to the session daemon.
+ *
+ * Return 0 on success else a negative value.
+ */
+int consumer_send_channel_monitor_pipe(struct consumer_socket *consumer_sock,
+		int pipe)
+{
+	int ret;
+	struct lttcomm_consumer_msg msg;
+
+	/* Code flow error. Safety net. */
+	assert(consumer_sock);
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd_type = LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE;
+
+	DBG3("Sending set_channel_monitor_pipe command to consumer");
+	ret = consumer_send_msg(consumer_sock, &msg);
+	if (ret < 0) {
+		goto error;
+	}
+
+	DBG3("Sending channel monitoring pipe %d to consumer on socket %d",
+			pipe, *consumer_sock->fd_ptr);
+	ret = consumer_send_fds(consumer_sock, &pipe, 1);
+	if (ret < 0) {
+		goto error;
+	}
+
+	DBG2("Channel monitoring pipe successfully sent");
+error:
+	return ret;
+}
+
/*
* Set consumer subdirectory using the session name and a generated datetime if
* needed. This is appended to the current subdirectory.
goto error;
}
- strncpy(consumer->subdir, tmp_path, sizeof(consumer->subdir));
+ if (lttng_strncpy(consumer->subdir, tmp_path,
+ sizeof(consumer->subdir))) {
+ ret = -EINVAL;
+ goto error;
+ }
DBG2("Consumer subdir set to %s", consumer->subdir);
error:
}
/*
- * Ask the consumer if the data is ready to read (NOT pending) for the specific
- * session id.
- *
- * This function has a different behavior with the consumer i.e. that it waits
- * for a reply from the consumer if yes or no the data is pending.
+ * Ask the consumer if the data is pending for the specific session id.
+ * Returns 1 if data is pending, 0 otherwise, or < 0 on error.
*/
int consumer_is_data_pending(uint64_t session_id,
struct consumer_output *consumer)
assert(consumer);
- msg.cmd_type = LTTNG_CONSUMER_DATA_PENDING;
+ DBG3("Consumer data pending for id %" PRIu64, session_id);
+ memset(&msg, 0, sizeof(msg));
+ msg.cmd_type = LTTNG_CONSUMER_DATA_PENDING;
msg.u.data_pending.session_id = session_id;
- DBG3("Consumer data pending for id %" PRIu64, session_id);
-
/* Send command for each consumer */
rcu_read_lock();
cds_lfht_for_each_entry(consumer->socks->ht, &iter.iter, socket,
DBG2("Consumer flush channel key %" PRIu64, key);
+ memset(&msg, 0, sizeof(msg));
msg.cmd_type = LTTNG_CONSUMER_FLUSH_CHANNEL;
msg.u.flush_channel.key = key;
}
/*
- * Send a close metdata command to consumer using the given channel key.
+ * Send a clear quiescent command to consumer using the given channel key.
+ *
+ * Return 0 on success else a negative value.
+ */
+int consumer_clear_quiescent_channel(struct consumer_socket *socket, uint64_t key)
+{
+	int ret;
+	struct lttcomm_consumer_msg msg;
+
+	assert(socket);
+
+	DBG2("Consumer clear quiescent channel key %" PRIu64, key);
+
+	/* Zero the message so no uninitialized bytes go over the socket. */
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd_type = LTTNG_CONSUMER_CLEAR_QUIESCENT_CHANNEL;
+	msg.u.clear_quiescent_channel.key = key;
+
+	/* Socket lock serializes the command/reply exchange with the consumer. */
+	pthread_mutex_lock(socket->lock);
+	health_code_update();
+
+	ret = consumer_send_msg(socket, &msg);
+	if (ret < 0) {
+		goto end;
+	}
+
+end:
+	health_code_update();
+	pthread_mutex_unlock(socket->lock);
+	return ret;
+}
+
+/*
+ * Send a close metadata command to consumer using the given channel key.
+ * Called with registry lock held.
*
* Return 0 on success else a negative value.
*/
DBG2("Consumer close metadata channel key %" PRIu64, metadata_key);
+ memset(&msg, 0, sizeof(msg));
msg.cmd_type = LTTNG_CONSUMER_CLOSE_METADATA;
msg.u.close_metadata.key = metadata_key;
DBG2("Consumer setup metadata channel key %" PRIu64, metadata_key);
+ memset(&msg, 0, sizeof(msg));
msg.cmd_type = LTTNG_CONSUMER_SETUP_METADATA;
msg.u.setup_metadata.key = metadata_key;
}
/*
- * Send metadata string to consumer. Socket lock MUST be acquired.
+ * Send metadata string to consumer.
+ * RCU read-side lock must be held to guarantee existence of socket.
*
* Return 0 on success else a negative value.
*/
int consumer_push_metadata(struct consumer_socket *socket,
uint64_t metadata_key, char *metadata_str, size_t len,
- size_t target_offset)
+ size_t target_offset, uint64_t version)
{
int ret;
struct lttcomm_consumer_msg msg;
DBG2("Consumer push metadata to consumer socket %d", *socket->fd_ptr);
+ pthread_mutex_lock(socket->lock);
+
+ memset(&msg, 0, sizeof(msg));
msg.cmd_type = LTTNG_CONSUMER_PUSH_METADATA;
msg.u.push_metadata.key = metadata_key;
msg.u.push_metadata.target_offset = target_offset;
msg.u.push_metadata.len = len;
+ msg.u.push_metadata.version = version;
health_code_update();
ret = consumer_send_msg(socket, &msg);
}
end:
+ pthread_mutex_unlock(socket->lock);
health_code_update();
return ret;
}
*/
int consumer_snapshot_channel(struct consumer_socket *socket, uint64_t key,
struct snapshot_output *output, int metadata, uid_t uid, gid_t gid,
- const char *session_path, int wait, int max_stream_size)
+ const char *session_path, int wait, uint64_t nb_packets_per_stream)
{
int ret;
struct lttcomm_consumer_msg msg;
memset(&msg, 0, sizeof(msg));
msg.cmd_type = LTTNG_CONSUMER_SNAPSHOT_CHANNEL;
msg.u.snapshot_channel.key = key;
- msg.u.snapshot_channel.max_stream_size = max_stream_size;
+ msg.u.snapshot_channel.nb_packets_per_stream = nb_packets_per_stream;
msg.u.snapshot_channel.metadata = metadata;
if (output->consumer->type == CONSUMER_DST_NET) {
ret = run_as_mkdir_recursive(msg.u.snapshot_channel.pathname,
S_IRWXU | S_IRWXG, uid, gid);
if (ret < 0) {
- if (ret != -EEXIST) {
+ if (errno != EEXIST) {
ERR("Trace directory creation error");
goto error;
}
health_code_update();
return ret;
}
+
+/*
+ * Ask the consumer the number of discarded events for a channel.
+ *
+ * Queries every consumer socket of the output and sums the per-consumer
+ * counts into *discarded.  Return 0 on success else a negative value.
+ */
+int consumer_get_discarded_events(uint64_t session_id, uint64_t channel_key,
+		struct consumer_output *consumer, uint64_t *discarded)
+{
+	int ret;
+	struct consumer_socket *socket;
+	struct lttng_ht_iter iter;
+	struct lttcomm_consumer_msg msg;
+
+	assert(consumer);
+
+	DBG3("Consumer discarded events id %" PRIu64, session_id);
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd_type = LTTNG_CONSUMER_DISCARDED_EVENTS;
+	msg.u.discarded_events.session_id = session_id;
+	msg.u.discarded_events.channel_key = channel_key;
+
+	*discarded = 0;
+
+	/* Send command for each consumer */
+	rcu_read_lock();
+	cds_lfht_for_each_entry(consumer->socks->ht, &iter.iter, socket,
+			node.node) {
+		uint64_t consumer_discarded = 0;
+		/* Serialize the request/reply exchange on this socket. */
+		pthread_mutex_lock(socket->lock);
+		ret = consumer_socket_send(socket, &msg, sizeof(msg));
+		if (ret < 0) {
+			pthread_mutex_unlock(socket->lock);
+			goto end;
+		}
+
+		/*
+		 * No need for a recv reply status because the answer to the
+		 * command is the reply status message.
+		 */
+		ret = consumer_socket_recv(socket, &consumer_discarded,
+				sizeof(consumer_discarded));
+		if (ret < 0) {
+			ERR("get discarded events");
+			pthread_mutex_unlock(socket->lock);
+			goto end;
+		}
+		pthread_mutex_unlock(socket->lock);
+		*discarded += consumer_discarded;
+	}
+	ret = 0;
+	DBG("Consumer discarded %" PRIu64 " events in session id %" PRIu64,
+			*discarded, session_id);
+
+end:
+	rcu_read_unlock();
+	return ret;
+}
+
+/*
+ * Ask the consumer the number of lost packets for a channel.
+ *
+ * Queries every consumer socket of the output and sums the per-consumer
+ * counts into *lost.  Return 0 on success else a negative value.
+ */
+int consumer_get_lost_packets(uint64_t session_id, uint64_t channel_key,
+		struct consumer_output *consumer, uint64_t *lost)
+{
+	int ret;
+	struct consumer_socket *socket;
+	struct lttng_ht_iter iter;
+	struct lttcomm_consumer_msg msg;
+
+	assert(consumer);
+
+	DBG3("Consumer lost packets id %" PRIu64, session_id);
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd_type = LTTNG_CONSUMER_LOST_PACKETS;
+	msg.u.lost_packets.session_id = session_id;
+	msg.u.lost_packets.channel_key = channel_key;
+
+	*lost = 0;
+
+	/* Send command for each consumer */
+	rcu_read_lock();
+	cds_lfht_for_each_entry(consumer->socks->ht, &iter.iter, socket,
+			node.node) {
+		uint64_t consumer_lost = 0;
+		/* Serialize the request/reply exchange on this socket. */
+		pthread_mutex_lock(socket->lock);
+		ret = consumer_socket_send(socket, &msg, sizeof(msg));
+		if (ret < 0) {
+			pthread_mutex_unlock(socket->lock);
+			goto end;
+		}
+
+		/*
+		 * No need for a recv reply status because the answer to the
+		 * command is the reply status message.
+		 */
+		ret = consumer_socket_recv(socket, &consumer_lost,
+				sizeof(consumer_lost));
+		if (ret < 0) {
+			ERR("get lost packets");
+			pthread_mutex_unlock(socket->lock);
+			goto end;
+		}
+		pthread_mutex_unlock(socket->lock);
+		*lost += consumer_lost;
+	}
+	ret = 0;
+	DBG("Consumer lost %" PRIu64 " packets in session id %" PRIu64,
+			*lost, session_id);
+
+end:
+	rcu_read_unlock();
+	return ret;
+}