* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-#define _GNU_SOURCE
+#define _LGPL_SOURCE
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include "ust-ctl.h"
#include "utils.h"
+static
+int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
+
/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
{
struct ust_app_event *event;
const struct ust_app_ht_key *key;
+ int ev_loglevel_value;
assert(node);
assert(_key);
event = caa_container_of(node, struct ust_app_event, node.node);
key = _key;
+ ev_loglevel_value = event->attr.loglevel;
/* Match the 4 elements of the key: name, filter, loglevel, exclusions */
}
/* Event loglevel. */
- if (event->attr.loglevel != key->loglevel) {
+ if (ev_loglevel_value != key->loglevel_type) {
if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
- && key->loglevel == 0 && event->attr.loglevel == -1) {
+ && key->loglevel_type == 0 &&
+ ev_loglevel_value == -1) {
/*
* Match is accepted. This is because on event creation, the
* loglevel is set to -1 if the event loglevel type is ALL so 0 and
ht = ua_chan->events;
key.name = event->attr.name;
key.filter = event->filter;
- key.loglevel = event->attr.loglevel;
+ key.loglevel_type = event->attr.loglevel;
key.exclusion = event->exclusion;
node_ptr = cds_lfht_add_unique(ht->ht,
* this function.
*/
static
-void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx)
+void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
+ struct ust_app *app)
{
int ret;
assert(ua_ctx);
if (ua_ctx->obj) {
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_release_object(sock, ua_ctx->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
sock, ua_ctx->obj->handle, ret);
* this function.
*/
static
-void delete_ust_app_event(int sock, struct ust_app_event *ua_event)
+void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
+ struct ust_app *app)
{
int ret;
if (ua_event->exclusion != NULL)
free(ua_event->exclusion);
if (ua_event->obj != NULL) {
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_release_object(sock, ua_event->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app sock %d release event obj failed with ret %d",
sock, ret);
*
* Return 0 on success or else a negative value.
*/
-static int release_ust_app_stream(int sock, struct ust_app_stream *stream)
+static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
+ struct ust_app *app)
{
int ret = 0;
assert(stream);
if (stream->obj) {
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_release_object(sock, stream->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app sock %d release stream obj failed with ret %d",
sock, ret);
* this function.
*/
static
-void delete_ust_app_stream(int sock, struct ust_app_stream *stream)
+void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
+ struct ust_app *app)
{
assert(stream);
- (void) release_ust_app_stream(sock, stream);
+ (void) release_ust_app_stream(sock, stream, app);
free(stream);
}
/* Wipe stream */
cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
cds_list_del(&stream->list);
- delete_ust_app_stream(sock, stream);
+ delete_ust_app_stream(sock, stream, app);
}
/* Wipe context */
cds_list_del(&ua_ctx->list);
ret = lttng_ht_del(ua_chan->ctx, &iter);
assert(!ret);
- delete_ust_app_ctx(sock, ua_ctx);
+ delete_ust_app_ctx(sock, ua_ctx, app);
}
/* Wipe events */
node.node) {
ret = lttng_ht_del(ua_chan->events, &iter);
assert(!ret);
- delete_ust_app_event(sock, ua_event);
+ delete_ust_app_event(sock, ua_event, app);
}
if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
iter.iter.node = &ua_chan->ust_objd_node.node;
ret = lttng_ht_del(app->ust_objd, &iter);
assert(!ret);
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_release_object(sock, ua_chan->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app sock %d release channel obj failed with ret %d",
sock, ret);
call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
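+/*
+ * Send the register done command to the given application, serializing
+ * access to its command socket with the per-app sock_lock.
+ */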
+int ust_app_register_done(struct ust_app *app)
+{
+ int ret;
+
+ pthread_mutex_lock(&app->sock_lock);
+ ret = ustctl_register_done(app->sock);
+ pthread_mutex_unlock(&app->sock_lock);
+ return ret;
+}
+
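+/*
+ * Release a UST object through the application's command socket while
+ * holding the per-app sock_lock. A NULL app releases the object locally
+ * by passing an invalid socket (-1) to ustctl_release_object().
+ */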
+int ust_app_release_object(struct ust_app *app, struct lttng_ust_object_data *data)
+{
+ int ret, sock;
+
+ if (app) {
+ pthread_mutex_lock(&app->sock_lock);
+ sock = app->sock;
+ } else {
+ sock = -1;
+ }
+ ret = ustctl_release_object(sock, data);
+ if (app) {
+ pthread_mutex_unlock(&app->sock_lock);
+ }
+ return ret;
+}
+
/*
* Push metadata to consumer socket.
*
- * The socket lock MUST be acquired.
- * The ust app session lock MUST be acquired.
+ * RCU read-side lock must be held to guarantee existence of the socket.
+ * Must be called with the ust app session lock held.
+ * Must be called with the registry lock held.
*
* On success, return the len of metadata pushed or else a negative value.
+ * A return value of -EPIPE means we could not send the metadata,
+ * but this can be caused by recoverable errors (e.g. the application
+ * has terminated concurrently).
*/
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
struct consumer_socket *socket, int send_zero_data)
{
int ret;
char *metadata_str = NULL;
- size_t len, offset;
+ size_t len, offset, new_metadata_len_sent;
ssize_t ret_val;
+ uint64_t metadata_key;
assert(registry);
assert(socket);
+ metadata_key = registry->metadata_key;
+
/*
- * On a push metadata error either the consumer is dead or the metadata
- * channel has been destroyed because its endpoint might have died (e.g:
- * relayd). If so, the metadata closed flag is set to 1 so we deny pushing
- * metadata again which is not valid anymore on the consumer side.
- *
- * The ust app session mutex locked allows us to make this check without
- * the registry lock.
+ * Means that no metadata was assigned to the session. This can
+ * happen if no start has been done previously.
+ */
+ if (!metadata_key) {
+ return 0;
+ }
+
+ /*
+ * On a push metadata error either the consumer is dead or the
+ * metadata channel has been destroyed because its endpoint
+ * might have died (e.g. relayd), or because the application has
+ * exited. If so, the metadata closed flag is set to 1 so we
+ * deny pushing metadata again which is not valid anymore on the
+ * consumer side.
*/
if (registry->metadata_closed) {
return -EPIPE;
}
- pthread_mutex_lock(&registry->lock);
-
offset = registry->metadata_len_sent;
len = registry->metadata_len - registry->metadata_len_sent;
+ new_metadata_len_sent = registry->metadata_len;
if (len == 0) {
DBG3("No metadata to push for metadata key %" PRIu64,
registry->metadata_key);
ret_val = -ENOMEM;
goto error;
}
- /* Copy what we haven't send out. */
+ /* Copy what we haven't sent out. */
memcpy(metadata_str, registry->metadata + offset, len);
- registry->metadata_len_sent += len;
push_data:
pthread_mutex_unlock(&registry->lock);
- ret = consumer_push_metadata(socket, registry->metadata_key,
+ /*
+ * We need to unlock the registry while we push metadata to
+ * break a circular dependency between the consumerd metadata
+ * lock and the sessiond registry lock. Indeed, pushing metadata
+ * to the consumerd awaits that it gets pushed all the way to
+ * relayd, but doing so requires grabbing the metadata lock. If
+ * a concurrent metadata request is being performed by
+ * consumerd, this can try to grab the registry lock on the
+ * sessiond while holding the metadata lock on the consumer
+ * daemon. Those push and pull schemes are performed on two
+ * different bidirectional communication sockets.
+ */
+ ret = consumer_push_metadata(socket, metadata_key,
metadata_str, len, offset);
+ pthread_mutex_lock(&registry->lock);
if (ret < 0) {
/*
- * There is an acceptable race here between the registry metadata key
- * assignment and the creation on the consumer. The session daemon can
- * concurrently push metadata for this registry while being created on
- * the consumer since the metadata key of the registry is assigned
- * *before* it is setup to avoid the consumer to ask for metadata that
- * could possibly be not found in the session daemon.
+ * There is an acceptable race here between the registry
+ * metadata key assignment and the creation on the
+ * consumer. The session daemon can concurrently push
+ * metadata for this registry while being created on the
+ * consumer since the metadata key of the registry is
+ * assigned *before* it is setup to avoid the consumer
+ * to ask for metadata that could possibly be not found
+ * in the session daemon.
*
- * The metadata will get pushed either by the session being stopped or
- * the consumer requesting metadata if that race is triggered.
+ * The metadata will get pushed either by the session
+ * being stopped or the consumer requesting metadata if
+ * that race is triggered.
*/
if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
ret = 0;
+ } else {
+ ERR("Error pushing metadata to consumer");
}
-
- /* Update back the actual metadata len sent since it failed here. */
- pthread_mutex_lock(&registry->lock);
- registry->metadata_len_sent -= len;
- pthread_mutex_unlock(&registry->lock);
ret_val = ret;
goto error_push;
+ } else {
+ /*
+ * Metadata may have been concurrently pushed, since
+ * we're not holding the registry lock while pushing to
+ * consumer. This is handled by the fact that we send
+ * the metadata content, size, and the offset at which
+ * that metadata belongs. This may arrive out of order
+ * on the consumer side; the consumer supports
+ * overlapping fragments, which must be contiguous
+ * starting from offset 0. We keep the largest
+ * metadata_len_sent value of the concurrent sends.
+ */
+ registry->metadata_len_sent =
+ max_t(size_t, registry->metadata_len_sent,
+ new_metadata_len_sent);
}
-
free(metadata_str);
return len;
end:
error:
- pthread_mutex_unlock(&registry->lock);
+ if (ret_val) {
+ /*
+ * On error, flag the registry to indicate that the metadata
+ * is closed. We were unable to push anything and this
+ * means that either the consumer is not responding or
+ * the metadata cache has been destroyed on the
+ * consumer.
+ */
+ registry->metadata_closed = 1;
+ }
error_push:
free(metadata_str);
return ret_val;
}
/*
- * For a given application and session, push metadata to consumer. The session
- * lock MUST be acquired here before calling this.
+ * For a given application and session, push metadata to consumer.
* Either sock or consumer is required: if sock is NULL, the default
* socket to send the metadata is retrieved from consumer, if sock
* is not NULL we use it to send the metadata.
+ * RCU read-side lock must be held while calling this function,
+ * therefore ensuring the existence of the registry. It also ensures
+ * the existence of the socket throughout this function.
*
* Return 0 on success else a negative error.
+ * A return value of -EPIPE means we could not send the metadata,
+ * but this can be caused by recoverable errors (e.g. the application
+ * has terminated concurrently).
*/
static int push_metadata(struct ust_registry_session *registry,
struct consumer_output *consumer)
assert(registry);
assert(consumer);
- rcu_read_lock();
-
- /*
- * Means that no metadata was assigned to the session. This can happens if
- * no start has been done previously.
- */
- if (!registry->metadata_key) {
- ret_val = 0;
- goto end_rcu_unlock;
+ pthread_mutex_lock(&registry->lock);
+ if (registry->metadata_closed) {
+ ret_val = -EPIPE;
+ goto error;
}
/* Get consumer socket to use to push the metadata. */
consumer);
if (!socket) {
ret_val = -1;
- goto error_rcu_unlock;
+ goto error;
}
- /*
- * TODO: Currently, we hold the socket lock around sampling of the next
- * metadata segment to ensure we send metadata over the consumer socket in
- * the correct order. This makes the registry lock nest inside the socket
- * lock.
- *
- * Please note that this is a temporary measure: we should move this lock
- * back into ust_consumer_push_metadata() when the consumer gets the
- * ability to reorder the metadata it receives.
- */
- pthread_mutex_lock(socket->lock);
ret = ust_app_push_metadata(registry, socket, 0);
- pthread_mutex_unlock(socket->lock);
if (ret < 0) {
ret_val = ret;
- goto error_rcu_unlock;
+ goto error;
}
-
- rcu_read_unlock();
+ pthread_mutex_unlock(&registry->lock);
return 0;
-error_rcu_unlock:
- /*
- * On error, flag the registry that the metadata is closed. We were unable
- * to push anything and this means that either the consumer is not
- * responding or the metadata cache has been destroyed on the consumer.
- */
- registry->metadata_closed = 1;
-end_rcu_unlock:
- rcu_read_unlock();
+error:
+ pthread_mutex_unlock(&registry->lock);
return ret_val;
}
/*
* Send to the consumer a close metadata command for the given session. Once
* done, the metadata channel is deleted and the session metadata pointer is
- * nullified. The session lock MUST be acquired here unless the application is
+ * nullified. The session lock MUST be held unless the application is
* in the destroy path.
*
* Return 0 on success else a negative value.
rcu_read_lock();
+ pthread_mutex_lock(&registry->lock);
+
if (!registry->metadata_key || registry->metadata_closed) {
ret = 0;
goto end;
*/
registry->metadata_closed = 1;
end:
+ pthread_mutex_unlock(&registry->lock);
rcu_read_unlock();
return ret;
}
pthread_mutex_lock(&ua_sess->lock);
+ assert(!ua_sess->deleted);
+ ua_sess->deleted = true;
+
registry = get_session_registry(ua_sess);
- if (registry && !registry->metadata_closed) {
+ if (registry) {
/* Push metadata for application before freeing the application. */
(void) push_metadata(registry, ua_sess->consumer);
* previous push metadata could have flagged the metadata registry to
* close, so don't send a close command if it is already closed.
*/
- if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
- !registry->metadata_closed) {
+ if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
/* And ask to close it for this session registry. */
(void) close_metadata(registry, ua_sess->consumer);
}
}
if (ua_sess->handle != -1) {
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_release_handle(sock, ua_sess->handle);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app sock %d release session handle failed with ret %d",
sock, ret);
}
pthread_mutex_unlock(&ua_sess->lock);
+ consumer_output_put(ua_sess->consumer);
+
call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
*
* Return allocated filter or NULL on error.
*/
-static struct lttng_ust_filter_bytecode *alloc_copy_ust_app_filter(
- struct lttng_ust_filter_bytecode *orig_f)
+static struct lttng_filter_bytecode *copy_filter_bytecode(
+ struct lttng_filter_bytecode *orig_f)
{
- struct lttng_ust_filter_bytecode *filter = NULL;
+ struct lttng_filter_bytecode *filter = NULL;
/* Copy filter bytecode */
filter = zmalloc(sizeof(*filter) + orig_f->len);
if (!filter) {
- PERROR("zmalloc alloc ust app filter");
+ PERROR("zmalloc alloc filter bytecode");
goto error;
}
return filter;
}
+/*
+ * Create a liblttng-ust filter bytecode from given bytecode.
+ *
+ * Return allocated filter or NULL on error.
+ */
+static struct lttng_ust_filter_bytecode *create_ust_bytecode_from_bytecode(
+ struct lttng_filter_bytecode *orig_f)
+{
+ struct lttng_ust_filter_bytecode *filter = NULL;
+
+ /* Copy filter bytecode */
+ filter = zmalloc(sizeof(*filter) + orig_f->len);
+ if (!filter) {
+ PERROR("zmalloc alloc ust filter bytecode");
+ goto error;
+ }
+
+ assert(sizeof(struct lttng_filter_bytecode) ==
+ sizeof(struct lttng_ust_filter_bytecode));
+ memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
+error:
+ return filter;
+}
+
/*
* Find an ust_app using the sock and return it. RCU read side lock must be
* held before calling this helper function.
* Return an ust_app_event object or NULL on error.
*/
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
- char *name, struct lttng_ust_filter_bytecode *filter, int loglevel,
+ char *name, struct lttng_filter_bytecode *filter,
+ int loglevel_value,
const struct lttng_event_exclusion *exclusion)
{
struct lttng_ht_iter iter;
/* Setup key for event lookup. */
key.name = name;
key.filter = filter;
- key.loglevel = loglevel;
+ key.loglevel_type = loglevel_value;
/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
- key.exclusion = (struct lttng_ust_event_exclusion *)exclusion;
+ key.exclusion = exclusion;
/* Lookup using the event name as hash and a custom match fct. */
cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
health_code_update();
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
ua_chan->obj, &ua_ctx->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app create channel context failed for app (pid: %d) "
struct ust_app *app)
{
int ret;
+ struct lttng_ust_filter_bytecode *ust_bytecode = NULL;
health_code_update();
goto error;
}
- ret = ustctl_set_filter(app->sock, ua_event->filter,
+ ust_bytecode = create_ust_bytecode_from_bytecode(ua_event->filter);
+ if (!ust_bytecode) {
+ ret = -LTTNG_ERR_NOMEM;
+ goto error;
+ }
+ pthread_mutex_lock(&app->sock_lock);
+ ret = ustctl_set_filter(app->sock, ust_bytecode,
ua_event->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app event %s filter failed for app (pid: %d) "
error:
health_code_update();
+ free(ust_bytecode);
return ret;
}
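+/*
+ * Create a liblttng-ust event exclusion from the given exclusion.
+ *
+ * Return allocated exclusion or NULL on error.
+ */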
+static
+struct lttng_ust_event_exclusion *create_ust_exclusion_from_exclusion(
+ struct lttng_event_exclusion *exclusion)
+{
+ struct lttng_ust_event_exclusion *ust_exclusion = NULL;
+ size_t exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
+ LTTNG_UST_SYM_NAME_LEN * exclusion->count;
+
+ ust_exclusion = zmalloc(exclusion_alloc_size);
+ if (!ust_exclusion) {
+ PERROR("malloc");
+ goto end;
+ }
+
+ assert(sizeof(struct lttng_event_exclusion) ==
+ sizeof(struct lttng_ust_event_exclusion));
+ memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
+end:
+ return ust_exclusion;
+}
+
/*
* Set event exclusions on the tracer.
*/
struct ust_app *app)
{
int ret;
+ struct lttng_ust_event_exclusion *ust_exclusion = NULL;
health_code_update();
goto error;
}
- ret = ustctl_set_exclusion(app->sock, ua_event->exclusion,
- ua_event->obj);
+ ust_exclusion = create_ust_exclusion_from_exclusion(
+ ua_event->exclusion);
+ if (!ust_exclusion) {
+ ret = -LTTNG_ERR_NOMEM;
+ goto error;
+ }
+ pthread_mutex_lock(&app->sock_lock);
+ ret = ustctl_set_exclusion(app->sock, ust_exclusion, ua_event->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app event %s exclusions failed for app (pid: %d) "
error:
health_code_update();
+ free(ust_exclusion);
return ret;
}
health_code_update();
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_disable(app->sock, ua_event->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app event %s disable failed for app (pid: %d) "
health_code_update();
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_disable(app->sock, ua_chan->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app channel %s disable failed for app (pid: %d) "
health_code_update();
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_enable(app->sock, ua_chan->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app channel %s enable failed for app (pid: %d) "
health_code_update();
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_enable(app->sock, ua_event->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app event %s enable failed for app (pid: %d) "
/* Send channel to the application. */
ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
- if (ret < 0) {
+ if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+ ret = -ENOTCONN; /* Caused by app exiting. */
+ goto error;
+ } else if (ret < 0) {
goto error;
}
/* Send all streams to application. */
cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
- if (ret < 0) {
+ if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+ ret = -ENOTCONN; /* Caused by app exiting. */
+ goto error;
+ } else if (ret < 0) {
goto error;
}
/* We don't need the stream anymore once sent to the tracer. */
cds_list_del(&stream->list);
- delete_ust_app_stream(-1, stream);
+ delete_ust_app_stream(-1, stream, app);
}
/* Flag the channel that it is sent to the application. */
ua_chan->is_sent = 1;
health_code_update();
/* Create UST event on tracer */
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
&ua_event->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("Error ustctl create event %s for app pid: %d with ret %d",
}
/* If event not enabled, disable it on the tracer */
- if (ua_event->enabled == 0) {
- ret = disable_ust_event(app, ua_sess, ua_event);
+ if (ua_event->enabled) {
+ /*
+ * We now need to explicitly enable the event, since it
+ * is now disabled at creation.
+ */
+ ret = enable_ust_event(app, ua_sess, ua_event);
if (ret < 0) {
/*
- * If we hit an EPERM, something is wrong with our disable call. If
+ * If we hit an EPERM, something is wrong with our enable call. If
* we get an EEXIST, there is a problem on the tracer side since we
* just created it.
*/
/* Copy filter bytecode */
if (uevent->filter) {
- ua_event->filter = alloc_copy_ust_app_filter(uevent->filter);
+ ua_event->filter = copy_filter_bytecode(uevent->filter);
/* Filter might be NULL here in case of ENOMEM. */
}
/* Copy exclusion data */
if (uevent->exclusion) {
- exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
+ exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
ua_event->exclusion = zmalloc(exclusion_alloc_size);
if (ua_event->exclusion == NULL) {
struct tm *timeinfo;
char datetime[16];
int ret;
+ char tmp_shm_path[PATH_MAX];
/* Get date and time for unique app path */
time(&rawtime);
ua_sess->egid = usess->gid;
ua_sess->buffer_type = usess->buffer_type;
ua_sess->bits_per_long = app->bits_per_long;
+
/* There is only one consumer object per session possible. */
+ consumer_output_get(usess->consumer);
ua_sess->consumer = usess->consumer;
+
ua_sess->output_traces = usess->output_traces;
ua_sess->live_timer_interval = usess->live_timer_interval;
copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
goto error;
}
+ strncpy(ua_sess->root_shm_path, usess->root_shm_path,
+ sizeof(ua_sess->root_shm_path));
+ ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
+ strncpy(ua_sess->shm_path, usess->shm_path,
+ sizeof(ua_sess->shm_path));
+ ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
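+ /*
+ * Append a sub-directory, based on the buffer type (per-PID or
+ * per-UID), to the user-provided shm path.
+ */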
+ if (ua_sess->shm_path[0]) {
+ switch (ua_sess->buffer_type) {
+ case LTTNG_BUFFER_PER_PID:
+ ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
+ DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
+ app->name, app->pid, datetime);
+ break;
+ case LTTNG_BUFFER_PER_UID:
+ ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
+ DEFAULT_UST_TRACE_UID_PATH,
+ app->uid, app->bits_per_long);
+ break;
+ default:
+ assert(0);
+ goto error;
+ }
+ if (ret < 0) {
+ PERROR("sprintf UST shadow copy session");
+ assert(0);
+ goto error;
+ }
+ strncat(ua_sess->shm_path, tmp_shm_path,
+ sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
+ ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
+ }
+
/* Iterate over all channels in global domain. */
cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
uchan, node.node) {
lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
}
+ return;
error:
- return;
+ consumer_output_put(ua_sess->consumer);
}
/*
* This is the create channel path meaning that if there is NO
* registry available, we have to create one for this session.
*/
- ret = buffer_reg_pid_create(ua_sess->id, &reg_pid);
+ ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
+ ua_sess->root_shm_path, ua_sess->shm_path);
if (ret < 0) {
goto error;
}
- buffer_reg_pid_add(reg_pid);
} else {
goto end;
}
app->uint16_t_alignment, app->uint32_t_alignment,
app->uint64_t_alignment, app->long_alignment,
app->byte_order, app->version.major,
- app->version.minor);
+ app->version.minor, reg_pid->root_shm_path,
+ reg_pid->shm_path,
+ ua_sess->euid, ua_sess->egid);
if (ret < 0) {
+ /*
+ * reg_pid->registry->reg.ust is NULL upon error, so we need to
+ * destroy the buffer registry, because it is always expected
+ * that if the buffer registry can be found, its ust registry is
+ * non-NULL.
+ */
+ buffer_reg_pid_destroy(reg_pid);
goto error;
}
+ buffer_reg_pid_add(reg_pid);
+
DBG3("UST app buffer registry per PID created successfully");
end:
* Return 0 on success or else a negative value.
*/
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
+ struct ust_app_session *ua_sess,
struct ust_app *app, struct buffer_reg_uid **regp)
{
int ret = 0;
* registry available, we have to create one for this session.
*/
ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
- LTTNG_DOMAIN_UST, &reg_uid);
+ LTTNG_DOMAIN_UST, &reg_uid,
+ ua_sess->root_shm_path, ua_sess->shm_path);
if (ret < 0) {
goto error;
}
- buffer_reg_uid_add(reg_uid);
} else {
goto end;
}
app->uint16_t_alignment, app->uint32_t_alignment,
app->uint64_t_alignment, app->long_alignment,
app->byte_order, app->version.major,
- app->version.minor);
+ app->version.minor, reg_uid->root_shm_path,
+ reg_uid->shm_path, usess->uid, usess->gid);
if (ret < 0) {
+ /*
+ * reg_uid->registry->reg.ust is NULL upon error, so we need to
+ * destroy the buffer registry, because it is always expected
+ * that if the buffer registry can be found, its ust registry is
+ * non-NULL.
+ */
+ buffer_reg_uid_destroy(reg_uid, NULL);
goto error;
}
/* Add node to teardown list of the session. */
cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
- DBG3("UST app buffer registry per UID created successfully");
+ buffer_reg_uid_add(reg_uid);
+ DBG3("UST app buffer registry per UID created successfully");
end:
if (regp) {
*regp = reg_uid;
/* Init local registry. */
ret = setup_buffer_reg_pid(ua_sess, app, NULL);
if (ret < 0) {
+ delete_ust_app_session(-1, ua_sess, app);
goto error;
}
break;
case LTTNG_BUFFER_PER_UID:
/* Look for a global registry. If none exists, create one. */
- ret = setup_buffer_reg_uid(usess, app, NULL);
+ ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
if (ret < 0) {
+ delete_ust_app_session(-1, ua_sess, app);
goto error;
}
break;
health_code_update();
if (ua_sess->handle == -1) {
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_create_session(app->sock);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("Creating session for app pid %d with ret %d",
return ret;
}
+/*
+ * Match function for a hash table lookup of ust_app_ctx.
+ *
+ * It matches an ust app context based on the context type and, in the case
+ * of perf counters, their name.
+ */
+static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
+{
+ struct ust_app_ctx *ctx;
+ const struct lttng_ust_context *key;
+
+ assert(node);
+ assert(_key);
+
+ ctx = caa_container_of(node, struct ust_app_ctx, node.node);
+ key = _key;
+
+ /* Context type */
+ if (ctx->ctx.ctx != key->ctx) {
+ goto no_match;
+ }
+
+ /* Check the name in the case of perf thread counters. */
+ if (key->ctx == LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER) {
+ if (strncmp(key->u.perf_counter.name,
+ ctx->ctx.u.perf_counter.name,
+ sizeof(key->u.perf_counter.name))) {
+ goto no_match;
+ }
+ }
+
+ /* Match. */
+ return 1;
+
+no_match:
+ return 0;
+}
+
+/*
+ * Lookup for an ust app context from an lttng_ust_context.
+ *
+ * Must be called while holding RCU read side lock.
+ * Return an ust_app_ctx object or NULL on error.
+ */
+static
+struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
+ struct lttng_ust_context *uctx)
+{
+ struct lttng_ht_iter iter;
+ struct lttng_ht_node_ulong *node;
+ struct ust_app_ctx *app_ctx = NULL;
+
+ assert(uctx);
+ assert(ht);
+
+ /* Lookup using the lttng_ust_context_type and a custom match fct. */
+ cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
+ ht_match_ust_app_ctx, uctx, &iter.iter);
+ node = lttng_ht_iter_get_node_ulong(&iter);
+ if (!node) {
+ goto end;
+ }
+
+ app_ctx = caa_container_of(node, struct ust_app_ctx, node);
+
+end:
+ return app_ctx;
+}
+
/*
* Create a context for the channel on the tracer.
*
struct ust_app *app)
{
int ret = 0;
- struct lttng_ht_iter iter;
- struct lttng_ht_node_ulong *node;
struct ust_app_ctx *ua_ctx;
DBG2("UST app adding context to channel %s", ua_chan->name);
- lttng_ht_lookup(ua_chan->ctx, (void *)((unsigned long)uctx->ctx), &iter);
- node = lttng_ht_iter_get_node_ulong(&iter);
- if (node != NULL) {
+ ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
+ if (ua_ctx) {
ret = -EEXIST;
goto error;
}
* Return 0 on success or else a negative value.
*/
static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
- struct ust_app_channel *ua_chan)
+ struct ust_app_channel *ua_chan,
+ struct ust_app *app)
{
int ret = 0;
struct ust_app_stream *stream, *stmp;
/* We don't need the streams anymore. */
cds_list_del(&stream->list);
- delete_ust_app_stream(-1, stream);
+ delete_ust_app_stream(-1, stream, app);
}
error:
assert(reg_chan);
reg_chan->consumer_key = ua_chan->key;
reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
+ reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
/* Create and add a channel registry to session. */
ret = ust_registry_channel_add(reg_sess->reg.ust,
* Return 0 on success else a negative value.
*/
static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
- struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan)
+ struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
+ struct ust_app *app)
{
int ret;
DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
/* Setup all streams for the registry. */
- ret = setup_buffer_reg_streams(reg_chan, ua_chan);
+ ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
if (ret < 0) {
goto error;
}
/* Send channel to the application. */
ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
- if (ret < 0) {
+ if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+ ret = -ENOTCONN; /* Caused by app exiting. */
+ goto error;
+ } else if (ret < 0) {
goto error;
}
ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
if (ret < 0) {
- (void) release_ust_app_stream(-1, &stream);
+ (void) release_ust_app_stream(-1, &stream, app);
+ if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+ ret = -ENOTCONN; /* Caused by app exiting. */
+ }
goto error_stream_unlock;
}
* The return value is not important here. This function will output an
* error if needed.
*/
- (void) release_ust_app_stream(-1, &stream);
+ (void) release_ust_app_stream(-1, &stream, app);
}
ua_chan->is_sent = 1;
/* Create the buffer registry channel object. */
ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
if (ret < 0) {
+ ERR("Error creating the UST channel \"%s\" registry instance",
+ ua_chan->name);
goto error;
}
assert(reg_chan);
ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
app->bits_per_long, reg_uid->registry->reg.ust);
if (ret < 0) {
+ ERR("Error creating UST channel \"%s\" on the consumer daemon",
+ ua_chan->name);
+
/*
* Let's remove the previously created buffer registry channel so
* it's not visible anymore in the session registry.
/*
* Setup the streams and add it to the session registry.
*/
- ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, reg_chan);
+ ret = setup_buffer_reg_channel(reg_uid->registry,
+ ua_chan, reg_chan, app);
if (ret < 0) {
+ ERR("Error setting up UST channel \"%s\"",
+ ua_chan->name);
goto error;
}
/* Send buffers to the application. */
ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
if (ret < 0) {
+ if (ret != -ENOTCONN) {
+ ERR("Error sending channel to application");
+ }
goto error;
}
/* Create and add a new channel registry to session. */
ret = ust_registry_channel_add(registry, ua_chan->key);
if (ret < 0) {
+ ERR("Error creating the UST channel \"%s\" registry instance",
+ ua_chan->name);
goto error;
}
ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
app->bits_per_long, registry);
if (ret < 0) {
+ ERR("Error creating UST channel \"%s\" on the consumer daemon",
+ ua_chan->name);
goto error;
}
ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
if (ret < 0) {
+ if (ret != -ENOTCONN) {
+ ERR("Error sending channel to application");
+ }
goto error;
}
* need and send it to the application. This MUST be called with a RCU read
* side lock acquired.
*
- * Return 0 on success or else a negative value.
+ * Return 0 on success or else a negative value. Returns -ENOTCONN if
+ * the application exited concurrently.
*/
static int do_create_channel(struct ust_app *app,
struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
*
* Called with UST app session lock and RCU read-side lock held.
*
- * Return 0 on success or else a negative value.
+ * Return 0 on success or else a negative value. Returns -ENOTCONN if
+ * the application exited concurrently.
*/
static int create_ust_app_channel(struct ust_app_session *ua_sess,
struct ltt_ust_channel *uchan, struct ust_app *app,
error:
/* Valid. Calling here is already in a read side lock */
- delete_ust_app_event(-1, ua_event);
+ delete_ust_app_event(-1, ua_event, app);
return ret;
}
registry = get_session_registry(ua_sess);
assert(registry);
+ pthread_mutex_lock(&registry->lock);
+
/* Metadata already exists for this registry or it was closed previously */
if (registry->metadata_key || registry->metadata_closed) {
ret = 0;
lttng_fd_put(LTTNG_FD_APPS, 1);
delete_ust_app_channel(-1, metadata, app);
error:
+ pthread_mutex_unlock(&registry->lock);
return ret;
}
lta->pid = msg->pid;
lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
lta->sock = sock;
+ pthread_mutex_init(&lta->sock_lock, NULL);
lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
CDS_INIT_LIST_HEAD(&lta->teardown_head);
-
error:
return lta;
}
assert(app);
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_tracer_version(app->sock, &app->version);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
ERR("UST app %d version failed with ret %d", app->sock, ret);
{
struct ust_app *lta;
struct lttng_ht_node_ulong *node;
+ struct lttng_ht_iter ust_app_sock_iter;
struct lttng_ht_iter iter;
struct ust_app_session *ua_sess;
int ret;
rcu_read_lock();
/* Get the node reference for a call_rcu */
- lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
- node = lttng_ht_iter_get_node_ulong(&iter);
+ lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
+ node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
assert(node);
lta = caa_container_of(node, struct ust_app, sock_n);
DBG("PID %d unregistering with sock %d", lta->pid, sock);
- /* Remove application from PID hash table */
- ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
- assert(!ret);
-
- /*
- * Remove application from notify hash table. The thread handling the
- * notify socket could have deleted the node so ignore on error because
- * either way it's valid. The close of that socket is handled by the other
- * thread.
- */
- iter.iter.node = &lta->notify_sock_n.node;
- (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
-
/*
- * Ignore return value since the node might have been removed before by an
- * add replace during app registration because the PID can be reassigned by
- * the OS.
+ * For per-PID buffers, perform "push metadata" and flush all
+ * application streams before removing the app from the hash tables,
+ * ensuring proper behavior of the data_pending check.
+ * Remove sessions so they are not visible during deletion.
*/
- iter.iter.node = &lta->pid_n.node;
- ret = lttng_ht_del(ust_app_ht, &iter);
- if (ret) {
- DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
- lta->pid);
- }
-
- /* Remove sessions so they are not visible during deletion.*/
cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
node.node) {
struct ust_registry_session *registry;
continue;
}
+ if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
+ (void) ust_app_flush_app_session(lta, ua_sess);
+ }
+
/*
* Add session to list for teardown. This is safe since at this point we
* are the only one using this list.
*/
pthread_mutex_lock(&ua_sess->lock);
+ if (ua_sess->deleted) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
+
/*
* Normally, this is done in the delete session process which is
* executed in the call rcu below. However, upon registration we can't
* session so the delete session will NOT push/close a second time.
*/
registry = get_session_registry(ua_sess);
- if (registry && !registry->metadata_closed) {
+ if (registry) {
/* Push metadata for application before freeing the application. */
(void) push_metadata(registry, ua_sess->consumer);
* previous push metadata could have flag the metadata registry to
* close so don't send a close command if closed.
*/
- if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
- !registry->metadata_closed) {
+ if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
/* And ask to close it for this session registry. */
(void) close_metadata(registry, ua_sess->consumer);
}
}
-
cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
+
pthread_mutex_unlock(&ua_sess->lock);
}
+ /* Remove application from PID hash table */
+ ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
+ assert(!ret);
+
+ /*
+ * Remove application from notify hash table. The thread handling the
+ * notify socket could have deleted the node so ignore on error because
+ * either way it's valid. The close of that socket is handled by the other
+ * thread.
+ */
+ iter.iter.node = &lta->notify_sock_n.node;
+ (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
+
+ /*
+ * Ignore the return value since the node might have been removed earlier
+ * by an add-replace during app registration, because the PID can be
+ * reassigned by the OS.
+ */
+ iter.iter.node = &lta->pid_n.node;
+ ret = lttng_ht_del(ust_app_ht, &iter);
+ if (ret) {
+ DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
+ lta->pid);
+ }
+
/* Free memory */
call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
*/
continue;
}
+ pthread_mutex_lock(&app->sock_lock);
handle = ustctl_tracepoint_list(app->sock);
if (handle < 0) {
if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
ERR("UST app list events getting handle failed for app pid %d",
app->pid);
}
+ pthread_mutex_unlock(&app->sock_lock);
continue;
}
&uiter)) != -LTTNG_UST_ERR_NOENT) {
/* Handle ustctl error. */
if (ret < 0) {
+ int release_ret;
+
if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
ERR("UST app tp list get failed for app %d with ret %d",
app->sock, ret);
break;
}
free(tmp_event);
+ release_ret = ustctl_release_handle(app->sock, handle);
+ if (release_ret < 0 &&
+ release_ret != -LTTNG_UST_ERR_EXITING &&
+ release_ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
+ }
+ pthread_mutex_unlock(&app->sock_lock);
goto rcu_error;
}
new_tmp_event = realloc(tmp_event,
new_nbmem * sizeof(struct lttng_event));
if (new_tmp_event == NULL) {
+ int release_ret;
+
PERROR("realloc ust app events");
free(tmp_event);
ret = -ENOMEM;
+ release_ret = ustctl_release_handle(app->sock, handle);
+ if (release_ret < 0 &&
+ release_ret != -LTTNG_UST_ERR_EXITING &&
+ release_ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
+ }
+ pthread_mutex_unlock(&app->sock_lock);
goto rcu_error;
}
/* Zero the new memory */
tmp_event[count].enabled = -1;
count++;
}
+ ret = ustctl_release_handle(app->sock, handle);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
+ }
}
ret = count;
*/
continue;
}
+ pthread_mutex_lock(&app->sock_lock);
handle = ustctl_tracepoint_field_list(app->sock);
if (handle < 0) {
if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
ERR("UST app list field getting handle failed for app pid %d",
app->pid);
}
+ pthread_mutex_unlock(&app->sock_lock);
continue;
}
&uiter)) != -LTTNG_UST_ERR_NOENT) {
/* Handle ustctl error. */
if (ret < 0) {
+ int release_ret;
+
if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
ERR("UST app tp list field failed for app %d with ret %d",
app->sock, ret);
break;
}
free(tmp_event);
+ release_ret = ustctl_release_handle(app->sock, handle);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (release_ret < 0 &&
+ release_ret != -LTTNG_UST_ERR_EXITING &&
+ release_ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
+ }
goto rcu_error;
}
new_tmp_event = realloc(tmp_event,
new_nbmem * sizeof(struct lttng_event_field));
if (new_tmp_event == NULL) {
+ int release_ret;
+
PERROR("realloc ust app event fields");
free(tmp_event);
ret = -ENOMEM;
+ release_ret = ustctl_release_handle(app->sock, handle);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (release_ret < 0 &&
+ release_ret != -LTTNG_UST_ERR_EXITING &&
+ release_ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
+ }
goto rcu_error;
}
/* Zero the new memory */
tmp_event[count].event.enabled = -1;
count++;
}
+ ret = ustctl_release_handle(app->sock, handle);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0 &&
+ ret != -LTTNG_UST_ERR_EXITING &&
+ ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
+ }
}
ret = count;
rcu_read_lock();
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ret = lttng_ht_del(ust_app_ht, &iter);
- assert(!ret);
- call_rcu(&app->pid_n.head, delete_ust_app_rcu);
+ if (ust_app_ht) {
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ret = lttng_ht_del(ust_app_ht, &iter);
+ assert(!ret);
+ call_rcu(&app->pid_n.head, delete_ust_app_rcu);
+ }
}
/* Cleanup socket hash table */
- cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
- sock_n.node) {
- ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
- assert(!ret);
+ if (ust_app_ht_by_sock) {
+ cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
+ sock_n.node) {
+ ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
+ assert(!ret);
+ }
}
/* Cleanup notify socket hash table */
- cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
- notify_sock_n.node) {
- ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
- assert(!ret);
+ if (ust_app_ht_by_notify_sock) {
+ cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
+ notify_sock_n.node) {
+ ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
+ assert(!ret);
+ }
}
rcu_read_unlock();
/* Destroy is done only when the ht is empty */
- ht_cleanup_push(ust_app_ht);
- ht_cleanup_push(ust_app_ht_by_sock);
- ht_cleanup_push(ust_app_ht_by_notify_sock);
+ if (ust_app_ht) {
+ ht_cleanup_push(ust_app_ht);
+ }
+ if (ust_app_ht_by_sock) {
+ ht_cleanup_push(ust_app_ht_by_sock);
+ }
+ if (ust_app_ht_by_notify_sock) {
+ ht_cleanup_push(ust_app_ht_by_notify_sock);
+ }
}
/*
* Init UST app hash tables. Return 0 on success, -1 on error.
*/
-void ust_app_ht_alloc(void)
+int ust_app_ht_alloc(void)
{
ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+ if (!ust_app_ht) {
+ return -1;
+ }
ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+ if (!ust_app_ht_by_sock) {
+ return -1;
+ }
ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+ if (!ust_app_ht_by_notify_sock) {
+ return -1;
+ }
+ return 0;
}
/*
{
int ret = 0;
struct lttng_ht_iter iter, uiter;
- struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
+ struct lttng_ht_node_str *ua_chan_node;
struct ust_app *app;
struct ust_app_session *ua_sess;
struct ust_app_channel *ua_chan;
}
ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
- lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &uiter);
- ua_event_node = lttng_ht_iter_get_node_str(&uiter);
- if (ua_event_node == NULL) {
+ ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
+ uevent->filter, uevent->attr.loglevel,
+ uevent->exclusion);
+ if (ua_event == NULL) {
DBG2("Event %s not found in channel %s for app pid %d."
"Skipping", uevent->attr.name, uchan->name, app->pid);
continue;
}
- ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
ret = disable_ust_app_event(ua_sess, ua_event, app);
if (ret < 0) {
*/
continue;
}
+ if (!trace_ust_pid_tracker_lookup(usess, app->pid)) {
+ /* Skip. */
+ continue;
+ }
+
/*
* Create session on the tracer side and add it to app session HT. Note
* that if the session exists, it will simply return a pointer to the ust
* or a timeout on it. We can't inform the caller that for a
* specific app, the session failed, so let's continue here.
*/
+ ret = 0; /* Not an error. */
continue;
case -ENOMEM:
default:
assert(ua_sess);
pthread_mutex_lock(&ua_sess->lock);
+
+ if (ua_sess->deleted) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
+
if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
sizeof(uchan->name))) {
copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &uchan->attr);
}
pthread_mutex_unlock(&ua_sess->lock);
if (ret < 0) {
- if (ret == -ENOMEM) {
- /* No more memory is a fatal error. Stop right now. */
- goto error_rcu_unlock;
- }
/* Cleanup the created session if it's the case. */
if (created) {
destroy_app_session(app, ua_sess);
}
+ switch (ret) {
+ case -ENOTCONN:
+ /*
+ * The application's socket is not valid. Either a bad socket
+ * or a timeout on it. We can't inform the caller that for a
+ * specific app, the session failed, so let's continue here.
+ */
+ ret = 0; /* Not an error. */
+ continue;
+ case -ENOMEM:
+ default:
+ goto error_rcu_unlock;
+ }
}
}
pthread_mutex_lock(&ua_sess->lock);
+ if (ua_sess->deleted) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
+
/* Lookup channel in the ust app session */
lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- /* If the channel is not found, there is a code flow error */
- assert(ua_chan_node);
+ /*
+ * It is possible that the channel cannot be found if
+ * the channel/event creation occurs concurrently with
+ * an application exit.
+ */
+ if (!ua_chan_node) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
}
pthread_mutex_lock(&ua_sess->lock);
+
+ if (ua_sess->deleted) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
+
/* Lookup channel in the ust app session */
lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
pthread_mutex_lock(&ua_sess->lock);
+ if (ua_sess->deleted) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ goto end;
+ }
+
/* Upon restart, we skip the setup, already done */
if (ua_sess->started) {
goto skip_setup;
ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
if (ret < 0) {
- if (ret != -EEXIST) {
+ if (errno != EEXIST) {
ERR("Trace directory creation error");
goto error_unlock;
}
skip_setup:
/* This start the UST tracing */
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_start_session(app->sock, ua_sess->handle);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("Error starting tracing for app pid: %d (ret: %d)",
health_code_update();
/* Quiescent wait after starting trace */
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_wait_quiescent(app->sock);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app wait quiescent failed for app pid %d ret %d",
app->pid, ret);
pthread_mutex_lock(&ua_sess->lock);
+ if (ua_sess->deleted) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ goto end_no_session;
+ }
+
/*
* If started = 0, it means that stop trace has been called for a session
* that was never started. It's possible since we can have a failed start
health_code_update();
/* This inhibits UST tracing */
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_stop_session(app->sock, ua_sess->handle);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("Error stopping tracing for app pid: %d (ret: %d)",
health_code_update();
/* Quiescent wait after stopping trace */
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_wait_quiescent(app->sock);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app wait quiescent failed for app pid %d ret %d",
app->pid, ret);
registry = get_session_registry(ua_sess);
assert(registry);
- if (!registry->metadata_closed) {
- /* Push metadata for application before freeing the application. */
- (void) push_metadata(registry, ua_sess->consumer);
- }
+ /* Push metadata for application before freeing the application. */
+ (void) push_metadata(registry, ua_sess->consumer);
end_unlock:
pthread_mutex_unlock(&ua_sess->lock);
return -1;
}
-/*
- * Flush buffers for a specific UST session and app.
- */
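+/*
+ * Flush buffers for a specific UST app session.
+ *
+ * Only per-PID buffers are handled here; per-UID buffers are flushed
+ * at the session level by ust_app_flush_session().
+ */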
static
-int ust_app_flush_trace(struct ltt_ust_session *usess, struct ust_app *app)
+int ust_app_flush_app_session(struct ust_app *app,
+ struct ust_app_session *ua_sess)
{
- int ret = 0;
+ int ret, retval = 0;
struct lttng_ht_iter iter;
- struct ust_app_session *ua_sess;
struct ust_app_channel *ua_chan;
+ struct consumer_socket *socket;
- DBG("Flushing buffers for ust app pid %d", app->pid);
+ DBG("Flushing app session buffers for ust app pid %d", app->pid);
rcu_read_lock();
if (!app->compatible) {
- goto end_no_session;
- }
-
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == NULL) {
- goto end_no_session;
+ goto end_not_compatible;
}
pthread_mutex_lock(&ua_sess->lock);
+ if (ua_sess->deleted) {
+ goto end_deleted;
+ }
+
health_code_update();
/* Flushing buffers */
- cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
- node.node) {
- health_code_update();
- assert(ua_chan->is_sent);
- ret = ustctl_sock_flush_buffer(app->sock, ua_chan->obj);
- if (ret < 0) {
- if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app PID %d channel %s flush failed with ret %d",
- app->pid, ua_chan->name, ret);
- } else {
- DBG3("UST app failed to flush %s. Application is dead.",
- ua_chan->name);
- /*
- * This is normal behavior, an application can die during the
- * creation process. Don't report an error so the execution can
- * continue normally.
- */
+ socket = consumer_find_socket_by_bitness(app->bits_per_long,
+ ua_sess->consumer);
+
+ /* Flush buffers and push metadata. */
+ switch (ua_sess->buffer_type) {
+ case LTTNG_BUFFER_PER_PID:
+ cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
+ node.node) {
+ health_code_update();
+ assert(ua_chan->is_sent);
+ ret = consumer_flush_channel(socket, ua_chan->key);
+ if (ret) {
+ ERR("Error flushing consumer channel");
+ retval = -1;
+ continue;
}
- /* Continuing flushing all buffers */
- continue;
}
+ break;
+ case LTTNG_BUFFER_PER_UID:
+ default:
+ assert(0);
+ break;
}
health_code_update();
+end_deleted:
pthread_mutex_unlock(&ua_sess->lock);
-end_no_session:
+
+end_not_compatible:
rcu_read_unlock();
health_code_update();
- return 0;
+ return retval;
+}
+
+/*
+ * Flush buffers for all applications for a specific UST session.
+ * Called with UST session lock held.
+ */
+static
+int ust_app_flush_session(struct ltt_ust_session *usess)
+{
+ int ret = 0;
+
+ DBG("Flushing session buffers for all ust apps");
+
+ rcu_read_lock();
+
+ /* Flush buffers and push metadata. */
+ switch (usess->buffer_type) {
+ case LTTNG_BUFFER_PER_UID:
+ {
+ struct buffer_reg_uid *reg;
+ struct lttng_ht_iter iter;
+
+ /* Flush all per UID buffers associated to that session. */
+ cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+ struct ust_registry_session *ust_session_reg;
+ struct buffer_reg_channel *reg_chan;
+ struct consumer_socket *socket;
+
+ /* Get consumer socket to use to push the metadata. */
+ socket = consumer_find_socket_by_bitness(reg->bits_per_long,
+ usess->consumer);
+ if (!socket) {
+ /* Ignore request if no consumer is found for the session. */
+ continue;
+ }
+
+ cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
+ reg_chan, node.node) {
+ /*
+ * The following call will print error values so the return
+ * code is of little importance because whatever happens, we
+ * have to try them all.
+ */
+ (void) consumer_flush_channel(socket, reg_chan->consumer_key);
+ }
+
+ ust_session_reg = reg->registry->reg.ust;
+ /* Push metadata. */
+ (void) push_metadata(ust_session_reg, usess->consumer);
+ }
+ break;
+ }
+ case LTTNG_BUFFER_PER_PID:
+ {
+ struct ust_app_session *ua_sess;
+ struct lttng_ht_iter iter;
+ struct ust_app *app;
+
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == NULL) {
+ continue;
+ }
+ (void) ust_app_flush_app_session(app, ua_sess);
+ }
+ break;
+ }
+ default:
+ ret = -1;
+ assert(0);
+ break;
+ }
+
+ rcu_read_unlock();
+ health_code_update();
+ return ret;
}
/*
health_code_update();
/* Quiescent wait after stopping trace */
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_wait_quiescent(app->sock);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app wait quiescent failed for app pid %d ret %d",
app->pid, ret);
/*
* Stop tracing for the UST session.
+ * Called with UST session lock held.
*/
int ust_app_stop_trace_all(struct ltt_ust_session *usess)
{
}
}
- /* Flush buffers and push metadata (for UID buffers). */
- switch (usess->buffer_type) {
- case LTTNG_BUFFER_PER_UID:
- {
- struct buffer_reg_uid *reg;
-
- /* Flush all per UID buffers associated to that session. */
- cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
- struct ust_registry_session *ust_session_reg;
- struct buffer_reg_channel *reg_chan;
- struct consumer_socket *socket;
-
- /* Get consumer socket to use to push the metadata.*/
- socket = consumer_find_socket_by_bitness(reg->bits_per_long,
- usess->consumer);
- if (!socket) {
- /* Ignore request if no consumer is found for the session. */
- continue;
- }
-
- cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
- reg_chan, node.node) {
- /*
- * The following call will print error values so the return
- * code is of little importance because whatever happens, we
- * have to try them all.
- */
- (void) consumer_flush_channel(socket, reg_chan->consumer_key);
- }
-
- ust_session_reg = reg->registry->reg.ust;
- if (!ust_session_reg->metadata_closed) {
- /* Push metadata. */
- (void) push_metadata(ust_session_reg, usess->consumer);
- }
- }
-
- break;
- }
- case LTTNG_BUFFER_PER_PID:
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ret = ust_app_flush_trace(usess, app);
- if (ret < 0) {
- /* Continue to next apps even on error */
- continue;
- }
- }
- break;
- default:
- assert(0);
- break;
- }
+ (void) ust_app_flush_session(usess);
rcu_read_unlock();
return 0;
}
-/*
- * Add channels/events from UST global domain to registered apps at sock.
- */
-void ust_app_global_update(struct ltt_ust_session *usess, int sock)
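+/*
+ * Create the application session and its channels, contexts and events
+ * on the tracer for a registered application, from the UST global domain.
+ */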
+static
+void ust_app_global_create(struct ltt_ust_session *usess, struct ust_app *app)
{
int ret = 0;
struct lttng_ht_iter iter, uiter;
- struct ust_app *app;
struct ust_app_session *ua_sess = NULL;
struct ust_app_channel *ua_chan;
struct ust_app_event *ua_event;
struct ust_app_ctx *ua_ctx;
+ int is_created = 0;
- assert(usess);
- assert(sock >= 0);
-
- DBG2("UST app global update for app sock %d for session id %" PRIu64, sock,
- usess->id);
-
- rcu_read_lock();
-
- app = ust_app_find_by_sock(sock);
- if (app == NULL) {
- /*
- * Application can be unregistered before so this is possible hence
- * simply stopping the update.
- */
- DBG3("UST app update failed to find app sock %d", sock);
- goto error;
- }
-
- if (!app->compatible) {
- goto error;
- }
-
- ret = create_ust_app_session(usess, app, &ua_sess, NULL);
+ ret = create_ust_app_session(usess, app, &ua_sess, &is_created);
if (ret < 0) {
/* Tracer is probably gone or ENOMEM. */
goto error;
}
+ if (!is_created) {
+ /* App session already created. */
+ goto end;
+ }
assert(ua_sess);
pthread_mutex_lock(&ua_sess->lock);
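+ /* The session may have been flagged deleted while we waited for the lock. */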
+ if (ua_sess->deleted) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ goto end;
+ }
+
/*
* We can iterate safely here over all UST app sessions since the create ust
* app session above made a shadow copy of the UST global domain from the
cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
node.node) {
ret = do_create_channel(app, usess, ua_sess, ua_chan);
- if (ret < 0) {
+ if (ret < 0 && ret != -ENOTCONN) {
/*
- * Stop everything. On error, the application failed, no more
- * file descriptor are available or ENOMEM so stopping here is
- * the only thing we can do for now.
+ * Stop everything. On error, the application
+ * failed: no more file descriptors are available
+ * or ENOMEM was hit, so stopping here is the only
+ * thing we can do for now. The only exception is
+ * -ENOTCONN, which indicates that the application
+ * has exited.
*/
goto error_unlock;
}
DBG2("UST trace started for app pid %d", app->pid);
}
-
+end:
/* Everything went well at this point. */
- rcu_read_unlock();
return;
error_unlock:
if (ua_sess) {
destroy_app_session(app, ua_sess);
}
- rcu_read_unlock();
return;
}
+static
+void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
+{
+ struct ust_app_session *ua_sess;
+
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == NULL) {
+ return;
+ }
+ destroy_app_session(app, ua_sess);
+}
+
+/*
+ * Add channels/events from the UST global domain to a registered app.
+ *
+ * Called with session lock held.
+ * Called with RCU read-side lock held.
+ */
+void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
+{
+ assert(usess);
+
+ DBG2("UST app global update for app sock %d for session id %" PRIu64,
+ app->sock, usess->id);
+
+ if (!app->compatible) {
+ return;
+ }
+
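+ /*
+ * Example (hypothetical PID): if only PID 1234 is tracked for this
+ * session, the lookup succeeds for that app alone and its channels
+ * and events are (re)created; every untracked app has its session
+ * torn down on its next update.
+ */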
+ if (trace_ust_pid_tracker_lookup(usess, app->pid)) {
+ ust_app_global_create(usess, app);
+ } else {
+ ust_app_global_destroy(usess, app);
+ }
+}
+
+/*
+ * Called with session lock held.
+ */
+void ust_app_global_update_all(struct ltt_ust_session *usess)
+{
+ struct lttng_ht_iter iter;
+ struct ust_app *app;
+
+ rcu_read_lock();
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ust_app_global_update(usess, app);
+ }
+ rcu_read_unlock();
+}
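+
+/*
+ * Usage sketch (hypothetical caller, not part of this patch), following
+ * the locking contract stated above. ust_app_global_update_all() takes
+ * the RCU read-side lock itself:
+ *
+ *	session_lock(sess);
+ *	ust_app_global_update_all(usess);
+ *	session_unlock(sess);
+ */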
+
/*
* Add context to a specific channel for global UST domain.
*/
}
pthread_mutex_lock(&ua_sess->lock);
+
+ if (ua_sess->deleted) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
+
/* Lookup channel in the ust app session */
lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
}
pthread_mutex_lock(&ua_sess->lock);
+
+ if (ua_sess->deleted) {
+ ret = 0;
+ goto end_unlock;
+ }
+
/* Lookup channel in the ust app session */
lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
ua_chan_node = lttng_ht_iter_get_node_str(&iter);
health_code_update();
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_calibrate(app->sock, calibrate);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
switch (ret) {
case -ENOSYS:
* On success 0 is returned else a negative value.
*/
static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
- char *sig, size_t nr_fields, struct ustctl_field *fields, int loglevel,
- char *model_emf_uri)
+ char *sig, size_t nr_fields, struct ustctl_field *fields,
+ int loglevel_value, char *model_emf_uri)
{
int ret, ret_code;
uint32_t event_id = 0;
* three variables MUST NOT be read/written after this.
*/
ret_code = ust_registry_create_event(registry, chan_reg_key,
- sobjd, cobjd, name, sig, nr_fields, fields, loglevel,
- model_emf_uri, ua_sess->buffer_type, &event_id,
- app);
+ sobjd, cobjd, name, sig, nr_fields, fields,
+ loglevel_value, model_emf_uri, ua_sess->buffer_type,
+ &event_id, app);
/*
* The return value is returned to ustctl so in case of an error, the
switch (cmd) {
case USTCTL_NOTIFY_CMD_EVENT:
{
- int sobjd, cobjd, loglevel;
+ int sobjd, cobjd, loglevel_value;
char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
size_t nr_fields;
struct ustctl_field *fields;
DBG2("UST app ustctl register event received");
- ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name, &loglevel,
- &sig, &nr_fields, &fields, &model_emf_uri);
+ ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name,
+ &loglevel_value, &sig, &nr_fields, &fields,
+ &model_emf_uri);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app recv event failed with ret %d", ret);
* to this function.
*/
ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
- fields, loglevel, model_emf_uri);
+ fields, loglevel_value, model_emf_uri);
if (ret < 0) {
goto error;
}
* Return 0 on success or else a negative value.
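+ * nb_packets_per_stream caps how many packets are grabbed per stream;
+ * 0 appears to mean "no cap" (it is what the metadata snapshot passes).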
*/
int ust_app_snapshot_record(struct ltt_ust_session *usess,
- struct snapshot_output *output, int wait, unsigned int nb_streams)
+ struct snapshot_output *output, int wait,
+ uint64_t nb_packets_per_stream)
{
int ret = 0;
unsigned int snapshot_done = 0;
struct lttng_ht_iter iter;
struct ust_app *app;
char pathname[PATH_MAX];
- uint64_t max_stream_size = 0;
assert(usess);
assert(output);
rcu_read_lock();
- /*
- * Compute the maximum size of a single stream if a max size is asked by
- * the caller.
- */
- if (output->max_size > 0 && nb_streams > 0) {
- max_stream_size = output->max_size / nb_streams;
- }
-
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
/* Add the UST default trace dir to path. */
cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
reg_chan, node.node) {
-
- /*
- * Make sure the maximum stream size is not lower than the
- * subbuffer size or else it's an error since we won't be able to
- * snapshot anything.
- */
- if (max_stream_size &&
- reg_chan->subbuf_size > max_stream_size) {
- ret = -EINVAL;
- DBG3("UST app snapshot record maximum stream size %" PRIu64
- " is smaller than subbuffer size of %zu",
- max_stream_size, reg_chan->subbuf_size);
- goto error;
- }
- ret = consumer_snapshot_channel(socket, reg_chan->consumer_key, output, 0,
- usess->uid, usess->gid, pathname, wait,
- max_stream_size);
+ ret = consumer_snapshot_channel(socket, reg_chan->consumer_key,
+ output, 0, usess->uid, usess->gid, pathname, wait,
+ nb_packets_per_stream);
if (ret < 0) {
goto error;
}
}
- ret = consumer_snapshot_channel(socket, reg->registry->reg.ust->metadata_key, output,
- 1, usess->uid, usess->gid, pathname, wait,
- max_stream_size);
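+ /* Metadata has no packet cap: pass 0 to dump it in full. */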
+ ret = consumer_snapshot_channel(socket,
+ reg->registry->reg.ust->metadata_key, output, 1,
+ usess->uid, usess->gid, pathname, wait, 0);
if (ret < 0) {
goto error;
}
cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
ua_chan, node.node) {
- /*
- * Make sure the maximum stream size is not lower than the
- * subbuffer size or else it's an error since we won't be able to
- * snapshot anything.
- */
- if (max_stream_size &&
- ua_chan->attr.subbuf_size > max_stream_size) {
- ret = -EINVAL;
- DBG3("UST app snapshot record maximum stream size %" PRIu64
- " is smaller than subbuffer size of %" PRIu64,
- max_stream_size, ua_chan->attr.subbuf_size);
- goto error;
- }
-
- ret = consumer_snapshot_channel(socket, ua_chan->key, output, 0,
- ua_sess->euid, ua_sess->egid, pathname, wait,
- max_stream_size);
+ ret = consumer_snapshot_channel(socket, ua_chan->key, output,
+ 0, ua_sess->euid, ua_sess->egid, pathname, wait,
+ nb_packets_per_stream);
if (ret < 0) {
goto error;
}
registry = get_session_registry(ua_sess);
assert(registry);
ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
- 1, ua_sess->euid, ua_sess->egid, pathname, wait,
- max_stream_size);
+ 1, ua_sess->euid, ua_sess->egid, pathname, wait, 0);
if (ret < 0) {
goto error;
}
}
/*
- * Return the number of streams for a UST session.
+ * Return the size taken by one more packet per stream.
*/
-unsigned int ust_app_get_nb_stream(struct ltt_ust_session *usess)
+uint64_t ust_app_get_size_one_more_packet_per_stream(struct ltt_ust_session *usess,
+ uint64_t cur_nr_packets)
{
- unsigned int ret = 0;
+ uint64_t tot_size = 0;
struct ust_app *app;
struct lttng_ht_iter iter;
cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
struct buffer_reg_channel *reg_chan;
+ rcu_read_lock();
cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
reg_chan, node.node) {
- ret += reg_chan->stream_count;
+ if (cur_nr_packets >= reg_chan->num_subbuf) {
+ /*
+ * Don't take the channel into account if we
+ * already grabbed all of its packets.
+ */
+ continue;
+ }
+ tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
}
+ rcu_read_unlock();
}
break;
}
cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
ua_chan, node.node) {
- ret += ua_chan->streams.count;
+ if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
+ /*
+ * Don't take the channel into account if we
+ * already grabbed all of its packets.
+ */
+ continue;
+ }
+ tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
}
}
rcu_read_unlock();
break;
}
- return ret;
+ return tot_size;
}
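+/*
+ * Worked example (made-up numbers): a channel with num_subbuf = 4,
+ * subbuf_size = 256 KiB and stream_count = 8 adds 8 * 256 KiB = 2 MiB
+ * to tot_size for each extra packet per stream, and stops contributing
+ * once cur_nr_packets reaches 4, i.e. once every sub-buffer is already
+ * grabbed.
+ */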