*/
#define _GNU_SOURCE
+#define _LGPL_SOURCE
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include "ust-ctl.h"
#include "utils.h"
+static
+int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
+
/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
/*
* Push metadata to consumer socket.
*
- * The socket lock MUST be acquired.
- * The ust app session lock MUST be acquired.
+ * RCU read-side lock must be held to guarantee the existence of the socket.
+ * Must be called with the ust app session lock held.
+ * Must be called with the registry lock held.
*
* On success, return the len of metadata pushed or else a negative value.
*/
assert(socket);
/*
- * On a push metadata error either the consumer is dead or the metadata
- * channel has been destroyed because its endpoint might have died (e.g:
- * relayd). If so, the metadata closed flag is set to 1 so we deny pushing
- * metadata again which is not valid anymore on the consumer side.
- *
- * The ust app session mutex locked allows us to make this check without
- * the registry lock.
+ * Means that no metadata was assigned to the session. This can
+ * happen if no start has been done previously.
+ */
+ if (!registry->metadata_key) {
+ return 0;
+ }
+
+ /*
+ * On a push metadata error either the consumer is dead or the
+ * metadata channel has been destroyed because its endpoint
+ * might have died (e.g., relayd). If so, the metadata closed
+ * flag is set to 1 so we refuse to push metadata again, since
+ * it is no longer valid on the consumer side.
*/
if (registry->metadata_closed) {
return -EPIPE;
}
- pthread_mutex_lock(&registry->lock);
-
offset = registry->metadata_len_sent;
len = registry->metadata_len - registry->metadata_len_sent;
if (len == 0) {
registry->metadata_len_sent += len;
push_data:
- pthread_mutex_unlock(&registry->lock);
ret = consumer_push_metadata(socket, registry->metadata_key,
metadata_str, len, offset);
if (ret < 0) {
/*
- * There is an acceptable race here between the registry metadata key
- * assignment and the creation on the consumer. The session daemon can
- * concurrently push metadata for this registry while being created on
- * the consumer since the metadata key of the registry is assigned
- * *before* it is setup to avoid the consumer to ask for metadata that
- * could possibly be not found in the session daemon.
+ * There is an acceptable race here between the registry
+ * metadata key assignment and the creation on the
+ * consumer. The session daemon can concurrently push
+ * metadata for this registry while it is being created on
+ * the consumer, since the metadata key of the registry is
+ * assigned *before* it is set up; this prevents the
+ * consumer from asking for metadata that might not be
+ * found in the session daemon.
*
- * The metadata will get pushed either by the session being stopped or
- * the consumer requesting metadata if that race is triggered.
+ * The metadata will get pushed either by the session
+ * being stopped or the consumer requesting metadata if
+ * that race is triggered.
*/
if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
ret = 0;
}
- /* Update back the actual metadata len sent since it failed here. */
- pthread_mutex_lock(&registry->lock);
+ /*
+ * Roll back the actual metadata len sent, since the
+ * push failed here.
+ */
registry->metadata_len_sent -= len;
- pthread_mutex_unlock(&registry->lock);
ret_val = ret;
goto error_push;
}
end:
error:
- pthread_mutex_unlock(&registry->lock);
+ if (ret_val) {
+ /*
+ * On error, flag the registry's metadata as closed.
+ * We were unable to push anything, which means that
+ * either the consumer is not responding or the
+ * metadata cache has been destroyed on the consumer.
+ */
+ registry->metadata_closed = 1;
+ }
error_push:
free(metadata_str);
return ret_val;
}
/*
- * For a given application and session, push metadata to consumer. The session
- * lock MUST be acquired here before calling this.
+ * For a given application and session, push metadata to consumer.
* Either sock or consumer is required : if sock is NULL, the default
* socket to send the metadata is retrieved from consumer, if sock
* is not NULL we use it to send the metadata.
+ * RCU read-side lock must be held while calling this function,
+ * therefore ensuring the existence of the registry. It also
+ * ensures the existence of the socket throughout this function.
*
* Return 0 on success else a negative error.
*/
assert(registry);
assert(consumer);
- rcu_read_lock();
-
- /*
- * Means that no metadata was assigned to the session. This can happens if
- * no start has been done previously.
- */
- if (!registry->metadata_key) {
- ret_val = 0;
- goto end_rcu_unlock;
+ pthread_mutex_lock(&registry->lock);
+ if (registry->metadata_closed) {
+ ret_val = -EPIPE;
+ goto error;
}
/* Get consumer socket to use to push the metadata.*/
consumer);
if (!socket) {
ret_val = -1;
- goto error_rcu_unlock;
+ goto error;
}
- /*
- * TODO: Currently, we hold the socket lock around sampling of the next
- * metadata segment to ensure we send metadata over the consumer socket in
- * the correct order. This makes the registry lock nest inside the socket
- * lock.
- *
- * Please note that this is a temporary measure: we should move this lock
- * back into ust_consumer_push_metadata() when the consumer gets the
- * ability to reorder the metadata it receives.
- */
- pthread_mutex_lock(socket->lock);
ret = ust_app_push_metadata(registry, socket, 0);
- pthread_mutex_unlock(socket->lock);
if (ret < 0) {
ret_val = ret;
- goto error_rcu_unlock;
+ goto error;
}
-
- rcu_read_unlock();
+ pthread_mutex_unlock(&registry->lock);
return 0;
-error_rcu_unlock:
- /*
- * On error, flag the registry that the metadata is closed. We were unable
- * to push anything and this means that either the consumer is not
- * responding or the metadata cache has been destroyed on the consumer.
- */
- registry->metadata_closed = 1;
-end_rcu_unlock:
- rcu_read_unlock();
+error:
+ pthread_mutex_unlock(&registry->lock);
return ret_val;
}
/*
* Send to the consumer a close metadata command for the given session. Once
* done, the metadata channel is deleted and the session metadata pointer is
- * nullified. The session lock MUST be acquired here unless the application is
+ * nullified. The session lock MUST be held unless the application is
* in the destroy path.
*
* Return 0 on success else a negative value.
rcu_read_lock();
+ pthread_mutex_lock(&registry->lock);
+
if (!registry->metadata_key || registry->metadata_closed) {
ret = 0;
goto end;
*/
registry->metadata_closed = 1;
end:
+ pthread_mutex_unlock(&registry->lock);
rcu_read_unlock();
return ret;
}
pthread_mutex_lock(&ua_sess->lock);
registry = get_session_registry(ua_sess);
- if (registry && !registry->metadata_closed) {
+ if (registry) {
/* Push metadata for application before freeing the application. */
(void) push_metadata(registry, ua_sess->consumer);
* previous push metadata could have flag the metadata registry to
* close so don't send a close command if closed.
*/
- if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
- !registry->metadata_closed) {
+ if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
/* And ask to close it for this session registry. */
(void) close_metadata(registry, ua_sess->consumer);
}
struct tm *timeinfo;
char datetime[16];
int ret;
+ char tmp_shm_path[PATH_MAX];
/* Get date and time for unique app path */
time(&rawtime);
goto error;
}
+ strncpy(ua_sess->root_shm_path, usess->root_shm_path,
+ sizeof(ua_sess->root_shm_path));
+ ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
+ strncpy(ua_sess->shm_path, usess->shm_path,
+ sizeof(ua_sess->shm_path));
+ ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
+ if (ua_sess->shm_path[0]) {
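+ /*
+ * Append a sub-directory specific to this tracing session:
+ * per-PID buffers get a "<name>-<pid>-<datetime>" component,
+ * per-UID buffers a per-UID and bitness component.
+ */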
+ switch (ua_sess->buffer_type) {
+ case LTTNG_BUFFER_PER_PID:
+ ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
+ DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
+ app->name, app->pid, datetime);
+ break;
+ case LTTNG_BUFFER_PER_UID:
+ ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
+ DEFAULT_UST_TRACE_UID_PATH,
+ app->uid, app->bits_per_long);
+ break;
+ default:
+ assert(0);
+ goto error;
+ }
+ if (ret < 0) {
+ PERROR("sprintf UST shadow copy session");
+ assert(0);
+ goto error;
+ }
+ strncat(ua_sess->shm_path, tmp_shm_path,
+ sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
+ ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
+ }
+
/* Iterate over all channels in global domain. */
cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
uchan, node.node) {
* This is the create channel path meaning that if there is NO
* registry available, we have to create one for this session.
*/
- ret = buffer_reg_pid_create(ua_sess->id, &reg_pid);
+ ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
+ ua_sess->root_shm_path, ua_sess->shm_path);
if (ret < 0) {
goto error;
}
- buffer_reg_pid_add(reg_pid);
} else {
goto end;
}
app->uint16_t_alignment, app->uint32_t_alignment,
app->uint64_t_alignment, app->long_alignment,
app->byte_order, app->version.major,
- app->version.minor);
+ app->version.minor, reg_pid->root_shm_path,
+ reg_pid->shm_path,
+ ua_sess->euid, ua_sess->egid);
if (ret < 0) {
+ /*
+ * reg_pid->registry->reg.ust is NULL upon error, so we need to
+ * destroy the buffer registry, because it is always expected
+ * that if the buffer registry can be found, its ust registry is
+ * non-NULL.
+ */
+ buffer_reg_pid_destroy(reg_pid);
goto error;
}
+ buffer_reg_pid_add(reg_pid);
+
DBG3("UST app buffer registry per PID created successfully");
end:
* Return 0 on success or else a negative value.
*/
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
+ struct ust_app_session *ua_sess,
struct ust_app *app, struct buffer_reg_uid **regp)
{
int ret = 0;
* registry available, we have to create one for this session.
*/
ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
- LTTNG_DOMAIN_UST, &reg_uid);
+ LTTNG_DOMAIN_UST, &reg_uid,
+ ua_sess->root_shm_path, ua_sess->shm_path);
if (ret < 0) {
goto error;
}
- buffer_reg_uid_add(reg_uid);
} else {
goto end;
}
app->uint16_t_alignment, app->uint32_t_alignment,
app->uint64_t_alignment, app->long_alignment,
app->byte_order, app->version.major,
- app->version.minor);
+ app->version.minor, reg_uid->root_shm_path,
+ reg_uid->shm_path, usess->uid, usess->gid);
if (ret < 0) {
+ /*
+ * reg_uid->registry->reg.ust is NULL upon error, so we need to
+ * destroy the buffer registry, because it is always expected
+ * that if the buffer registry can be found, its ust registry is
+ * non-NULL.
+ */
+ buffer_reg_uid_destroy(reg_uid, NULL);
goto error;
}
/* Add node to teardown list of the session. */
cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
- DBG3("UST app buffer registry per UID created successfully");
+ buffer_reg_uid_add(reg_uid);
+ DBG3("UST app buffer registry per UID created successfully");
end:
if (regp) {
*regp = reg_uid;
/* Init local registry. */
ret = setup_buffer_reg_pid(ua_sess, app, NULL);
if (ret < 0) {
+ delete_ust_app_session(-1, ua_sess, app);
goto error;
}
break;
case LTTNG_BUFFER_PER_UID:
/* Look for a global registry. If none exists, create one. */
- ret = setup_buffer_reg_uid(usess, app, NULL);
+ ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
if (ret < 0) {
+ delete_ust_app_session(-1, ua_sess, app);
goto error;
}
break;
/*
* Lookup for an ust app context from an lttng_ust_context.
*
+ * Must be called while holding RCU read side lock.
* Return an ust_app_ctx object or NULL on error.
*/
static
assert(reg_chan);
reg_chan->consumer_key = ua_chan->key;
reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
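+ /*
+ * Keep num_subbuf so ust_app_get_size_one_more_packet_per_stream()
+ * can tell when all of a channel's packets have been accounted for.
+ */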
+ reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
/* Create and add a channel registry to session. */
ret = ust_registry_channel_add(reg_sess->reg.ust,
registry = get_session_registry(ua_sess);
assert(registry);
+ pthread_mutex_lock(&registry->lock);
+
/* Metadata already exists for this registry or it was closed previously */
if (registry->metadata_key || registry->metadata_closed) {
ret = 0;
lttng_fd_put(LTTNG_FD_APPS, 1);
delete_ust_app_channel(-1, metadata, app);
error:
+ pthread_mutex_unlock(&registry->lock);
return ret;
}
{
struct ust_app *lta;
struct lttng_ht_node_ulong *node;
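+ /*
+ * A dedicated iterator is used for the sock hash table lookup:
+ * "iter" is reused below to walk this application's sessions.
+ */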
+ struct lttng_ht_iter ust_app_sock_iter;
struct lttng_ht_iter iter;
struct ust_app_session *ua_sess;
int ret;
rcu_read_lock();
/* Get the node reference for a call_rcu */
- lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
- node = lttng_ht_iter_get_node_ulong(&iter);
+ lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
+ node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
assert(node);
lta = caa_container_of(node, struct ust_app, sock_n);
DBG("PID %d unregistering with sock %d", lta->pid, sock);
- /* Remove application from PID hash table */
- ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
- assert(!ret);
-
- /*
- * Remove application from notify hash table. The thread handling the
- * notify socket could have deleted the node so ignore on error because
- * either way it's valid. The close of that socket is handled by the other
- * thread.
- */
- iter.iter.node = &lta->notify_sock_n.node;
- (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
-
/*
- * Ignore return value since the node might have been removed before by an
- * add replace during app registration because the PID can be reassigned by
- * the OS.
+ * For per-PID buffers, perform "push metadata" and flush all
+ * application streams before removing the app from the hash
+ * tables, so that the data_pending check behaves correctly.
+ * Remove sessions so they are not visible during deletion.
*/
- iter.iter.node = &lta->pid_n.node;
- ret = lttng_ht_del(ust_app_ht, &iter);
- if (ret) {
- DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
- lta->pid);
- }
-
- /* Remove sessions so they are not visible during deletion.*/
cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
node.node) {
struct ust_registry_session *registry;
continue;
}
+ if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
+ (void) ust_app_flush_app_session(lta, ua_sess);
+ }
+
/*
* Add session to list for teardown. This is safe since at this point we
* are the only one using this list.
* session so the delete session will NOT push/close a second time.
*/
registry = get_session_registry(ua_sess);
- if (registry && !registry->metadata_closed) {
+ if (registry) {
/* Push metadata for application before freeing the application. */
(void) push_metadata(registry, ua_sess->consumer);
* previous push metadata could have flag the metadata registry to
* close so don't send a close command if closed.
*/
- if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
- !registry->metadata_closed) {
+ if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
/* And ask to close it for this session registry. */
(void) close_metadata(registry, ua_sess->consumer);
}
}
-
cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
+
pthread_mutex_unlock(&ua_sess->lock);
}
+ /* Remove application from PID hash table */
+ ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
+ assert(!ret);
+
+ /*
+ * Remove application from notify hash table. The thread handling the
+ * notify socket could have deleted the node so ignore on error because
+ * either way it's valid. The close of that socket is handled by the other
+ * thread.
+ */
+ iter.iter.node = &lta->notify_sock_n.node;
+ (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
+
+ /*
+ * Ignore return value since the node might have been removed before by an
+ * add replace during app registration because the PID can be reassigned by
+ * the OS.
+ */
+ iter.iter.node = &lta->pid_n.node;
+ ret = lttng_ht_del(ust_app_ht, &iter);
+ if (ret) {
+ DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
+ lta->pid);
+ }
+
/* Free memory */
call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
rcu_read_lock();
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ret = lttng_ht_del(ust_app_ht, &iter);
- assert(!ret);
- call_rcu(&app->pid_n.head, delete_ust_app_rcu);
+ if (ust_app_ht) {
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ret = lttng_ht_del(ust_app_ht, &iter);
+ assert(!ret);
+ call_rcu(&app->pid_n.head, delete_ust_app_rcu);
+ }
}
/* Cleanup socket hash table */
- cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
- sock_n.node) {
- ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
- assert(!ret);
+ if (ust_app_ht_by_sock) {
+ cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
+ sock_n.node) {
+ ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
+ assert(!ret);
+ }
}
/* Cleanup notify socket hash table */
- cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
- notify_sock_n.node) {
- ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
- assert(!ret);
+ if (ust_app_ht_by_notify_sock) {
+ cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
+ notify_sock_n.node) {
+ ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
+ assert(!ret);
+ }
}
rcu_read_unlock();
/* Destroy is done only when the ht is empty */
- ht_cleanup_push(ust_app_ht);
- ht_cleanup_push(ust_app_ht_by_sock);
- ht_cleanup_push(ust_app_ht_by_notify_sock);
+ if (ust_app_ht) {
+ ht_cleanup_push(ust_app_ht);
+ }
+ if (ust_app_ht_by_sock) {
+ ht_cleanup_push(ust_app_ht_by_sock);
+ }
+ if (ust_app_ht_by_notify_sock) {
+ ht_cleanup_push(ust_app_ht_by_notify_sock);
+ }
}
/*
* Init UST app hash table.
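+ * Return 0 on success, or a negative value if any of the hash
+ * tables could not be allocated.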
*/
-void ust_app_ht_alloc(void)
+int ust_app_ht_alloc(void)
{
ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+ if (!ust_app_ht) {
+ return -1;
+ }
ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+ if (!ust_app_ht_by_sock) {
+ return -1;
+ }
ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+ if (!ust_app_ht_by_notify_sock) {
+ return -1;
+ }
+ return 0;
}
/*
*/
continue;
}
+ if (!trace_ust_pid_tracker_lookup(usess, app->pid)) {
+ /* Skip this app: its PID is not being tracked. */
+ continue;
+ }
+
/*
* Create session on the tracer side and add it to app session HT. Note
* that if session exist, it will simply return a pointer to the ust
registry = get_session_registry(ua_sess);
assert(registry);
- if (!registry->metadata_closed) {
- /* Push metadata for application before freeing the application. */
- (void) push_metadata(registry, ua_sess->consumer);
- }
+ /* Push metadata for application before freeing the application. */
+ (void) push_metadata(registry, ua_sess->consumer);
end_unlock:
pthread_mutex_unlock(&ua_sess->lock);
return -1;
}
-/*
- * Flush buffers for a specific UST session and app.
- */
static
-int ust_app_flush_trace(struct ltt_ust_session *usess, struct ust_app *app)
+int ust_app_flush_app_session(struct ust_app *app,
+ struct ust_app_session *ua_sess)
{
- int ret = 0;
+ int ret, retval = 0;
struct lttng_ht_iter iter;
- struct ust_app_session *ua_sess;
struct ust_app_channel *ua_chan;
+ struct consumer_socket *socket;
- DBG("Flushing buffers for ust app pid %d", app->pid);
+ DBG("Flushing app session buffers for ust app pid %d", app->pid);
rcu_read_lock();
if (!app->compatible) {
- goto end_no_session;
- }
-
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == NULL) {
- goto end_no_session;
+ goto end_not_compatible;
}
pthread_mutex_lock(&ua_sess->lock);
health_code_update();
/* Flushing buffers */
- cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
- node.node) {
- health_code_update();
- assert(ua_chan->is_sent);
- ret = ustctl_sock_flush_buffer(app->sock, ua_chan->obj);
- if (ret < 0) {
- if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app PID %d channel %s flush failed with ret %d",
- app->pid, ua_chan->name, ret);
- } else {
- DBG3("UST app failed to flush %s. Application is dead.",
- ua_chan->name);
- /*
- * This is normal behavior, an application can die during the
- * creation process. Don't report an error so the execution can
- * continue normally.
- */
+ socket = consumer_find_socket_by_bitness(app->bits_per_long,
+ ua_sess->consumer);
+
+ /* Flush buffers and push metadata. */
+ switch (ua_sess->buffer_type) {
+ case LTTNG_BUFFER_PER_PID:
+ cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
+ node.node) {
+ health_code_update();
+ assert(ua_chan->is_sent);
+ ret = consumer_flush_channel(socket, ua_chan->key);
+ if (ret) {
+ ERR("Error flushing consumer channel");
+ retval = -1;
+ continue;
}
- /* Continuing flushing all buffers */
- continue;
}
+ break;
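+ /*
+ * Per-UID buffers are flushed at the session level by
+ * ust_app_flush_session(), never per application.
+ */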
+ case LTTNG_BUFFER_PER_UID:
+ default:
+ assert(0);
+ break;
}
health_code_update();
pthread_mutex_unlock(&ua_sess->lock);
-end_no_session:
+
+end_not_compatible:
rcu_read_unlock();
health_code_update();
- return 0;
+ return retval;
+}
+
+/*
+ * Flush buffers for all applications for a specific UST session.
+ * Called with UST session lock held.
+ */
+static
+int ust_app_flush_session(struct ltt_ust_session *usess)
+
+{
+ int ret = 0;
+
+ DBG("Flushing session buffers for all ust apps");
+
+ rcu_read_lock();
+
+ /* Flush buffers and push metadata. */
+ switch (usess->buffer_type) {
+ case LTTNG_BUFFER_PER_UID:
+ {
+ struct buffer_reg_uid *reg;
+ struct lttng_ht_iter iter;
+
+ /* Flush all per UID buffers associated to that session. */
+ cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+ struct ust_registry_session *ust_session_reg;
+ struct buffer_reg_channel *reg_chan;
+ struct consumer_socket *socket;
+
+ /* Get consumer socket to use to push the metadata. */
+ socket = consumer_find_socket_by_bitness(reg->bits_per_long,
+ usess->consumer);
+ if (!socket) {
+ /* Ignore request if no consumer is found for the session. */
+ continue;
+ }
+
+ cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
+ reg_chan, node.node) {
+ /*
+ * The following call will print error values, so the return
+ * code is of little importance; whatever happens, we have to
+ * try flushing them all.
+ */
+ (void) consumer_flush_channel(socket, reg_chan->consumer_key);
+ }
+
+ ust_session_reg = reg->registry->reg.ust;
+ /* Push metadata. */
+ (void) push_metadata(ust_session_reg, usess->consumer);
+ }
+ break;
+ }
+ case LTTNG_BUFFER_PER_PID:
+ {
+ struct ust_app_session *ua_sess;
+ struct lttng_ht_iter iter;
+ struct ust_app *app;
+
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == NULL) {
+ continue;
+ }
+ (void) ust_app_flush_app_session(app, ua_sess);
+ }
+ break;
+ }
+ default:
+ ret = -1;
+ assert(0);
+ break;
+ }
+
+ rcu_read_unlock();
+ health_code_update();
+ return ret;
}
/*
* Stop tracing for the UST session.
+ * Called with UST session lock held.
*/
int ust_app_stop_trace_all(struct ltt_ust_session *usess)
{
}
}
- /* Flush buffers and push metadata (for UID buffers). */
- switch (usess->buffer_type) {
- case LTTNG_BUFFER_PER_UID:
- {
- struct buffer_reg_uid *reg;
-
- /* Flush all per UID buffers associated to that session. */
- cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
- struct ust_registry_session *ust_session_reg;
- struct buffer_reg_channel *reg_chan;
- struct consumer_socket *socket;
-
- /* Get consumer socket to use to push the metadata.*/
- socket = consumer_find_socket_by_bitness(reg->bits_per_long,
- usess->consumer);
- if (!socket) {
- /* Ignore request if no consumer is found for the session. */
- continue;
- }
-
- cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
- reg_chan, node.node) {
- /*
- * The following call will print error values so the return
- * code is of little importance because whatever happens, we
- * have to try them all.
- */
- (void) consumer_flush_channel(socket, reg_chan->consumer_key);
- }
-
- ust_session_reg = reg->registry->reg.ust;
- if (!ust_session_reg->metadata_closed) {
- /* Push metadata. */
- (void) push_metadata(ust_session_reg, usess->consumer);
- }
- }
-
- break;
- }
- case LTTNG_BUFFER_PER_PID:
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ret = ust_app_flush_trace(usess, app);
- if (ret < 0) {
- /* Continue to next apps even on error */
- continue;
- }
- }
- break;
- default:
- assert(0);
- break;
- }
+ (void) ust_app_flush_session(usess);
rcu_read_unlock();
return 0;
}
-/*
- * Add channels/events from UST global domain to registered apps at sock.
- */
-void ust_app_global_update(struct ltt_ust_session *usess, int sock)
+static
+void ust_app_global_create(struct ltt_ust_session *usess, struct ust_app *app)
{
int ret = 0;
struct lttng_ht_iter iter, uiter;
- struct ust_app *app;
struct ust_app_session *ua_sess = NULL;
struct ust_app_channel *ua_chan;
struct ust_app_event *ua_event;
struct ust_app_ctx *ua_ctx;
+ int is_created = 0;
- assert(usess);
- assert(sock >= 0);
-
- DBG2("UST app global update for app sock %d for session id %" PRIu64, sock,
- usess->id);
-
- rcu_read_lock();
-
- app = ust_app_find_by_sock(sock);
- if (app == NULL) {
- /*
- * Application can be unregistered before so this is possible hence
- * simply stopping the update.
- */
- DBG3("UST app update failed to find app sock %d", sock);
- goto error;
- }
-
- if (!app->compatible) {
- goto error;
- }
-
- ret = create_ust_app_session(usess, app, &ua_sess, NULL);
+ ret = create_ust_app_session(usess, app, &ua_sess, &is_created);
if (ret < 0) {
/* Tracer is probably gone or ENOMEM. */
goto error;
}
+ if (!is_created) {
+ /* App session already created. */
+ goto end;
+ }
assert(ua_sess);
pthread_mutex_lock(&ua_sess->lock);
DBG2("UST trace started for app pid %d", app->pid);
}
-
+end:
/* Everything went well at this point. */
- rcu_read_unlock();
return;
error_unlock:
if (ua_sess) {
destroy_app_session(app, ua_sess);
}
- rcu_read_unlock();
return;
}
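+/*
+ * Destroy the application session associated with the given UST
+ * session, if one exists.
+ */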
+static
+void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
+{
+ struct ust_app_session *ua_sess;
+
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == NULL) {
+ return;
+ }
+ destroy_app_session(app, ua_sess);
+}
+
+/*
+ * Add channels/events from the UST global domain to the registered app.
+ *
+ * Called with session lock held.
+ * Called with RCU read-side lock held.
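+ *
+ * The PID tracker decides whether this application's channels and
+ * events are created (tracked PID) or its app session is destroyed
+ * (untracked PID).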
+ */
+void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
+{
+ assert(usess);
+
+ DBG2("UST app global update for app sock %d for session id %" PRIu64,
+ app->sock, usess->id);
+
+ if (!app->compatible) {
+ return;
+ }
+
+ if (trace_ust_pid_tracker_lookup(usess, app->pid)) {
+ ust_app_global_create(usess, app);
+ } else {
+ ust_app_global_destroy(usess, app);
+ }
+}
+
+/*
+ * Called with session lock held.
+ */
+void ust_app_global_update_all(struct ltt_ust_session *usess)
+{
+ struct lttng_ht_iter iter;
+ struct ust_app *app;
+
+ rcu_read_lock();
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ust_app_global_update(usess, app);
+ }
+ rcu_read_unlock();
+}
+
/*
* Add context to a specific channel for global UST domain.
*/
* Return 0 on success or else a negative value.
*/
int ust_app_snapshot_record(struct ltt_ust_session *usess,
- struct snapshot_output *output, int wait, uint64_t max_stream_size)
+ struct snapshot_output *output, int wait,
+ uint64_t nb_packets_per_stream)
{
int ret = 0;
unsigned int snapshot_done = 0;
reg_chan, node.node) {
ret = consumer_snapshot_channel(socket, reg_chan->consumer_key,
output, 0, usess->uid, usess->gid, pathname, wait,
- max_stream_size);
+ nb_packets_per_stream);
if (ret < 0) {
goto error;
}
}
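+ /*
+ * Snapshot the metadata channel without limiting the number
+ * of packets per stream (0).
+ */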
ret = consumer_snapshot_channel(socket,
reg->registry->reg.ust->metadata_key, output, 1,
- usess->uid, usess->gid, pathname, wait, max_stream_size);
+ usess->uid, usess->gid, pathname, wait, 0);
if (ret < 0) {
goto error;
}
ua_chan, node.node) {
ret = consumer_snapshot_channel(socket, ua_chan->key, output,
0, ua_sess->euid, ua_sess->egid, pathname, wait,
- max_stream_size);
+ nb_packets_per_stream);
if (ret < 0) {
goto error;
}
registry = get_session_registry(ua_sess);
assert(registry);
ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
- 1, ua_sess->euid, ua_sess->egid, pathname, wait,
- max_stream_size);
+ 1, ua_sess->euid, ua_sess->egid, pathname, wait, 0);
if (ret < 0) {
goto error;
}
}
/*
- * Return the number of streams for a UST session.
+ * Return the size taken by one more packet per stream.
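+ *
+ * For each channel, account one extra sub-buffer (packet) per
+ * stream, i.e. subbuf_size * stream_count, skipping channels for
+ * which all packets have already been grabbed
+ * (cur_nr_packets >= num_subbuf).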
*/
-unsigned int ust_app_get_nb_stream(struct ltt_ust_session *usess)
+uint64_t ust_app_get_size_one_more_packet_per_stream(struct ltt_ust_session *usess,
+ uint64_t cur_nr_packets)
{
- unsigned int ret = 0;
+ uint64_t tot_size = 0;
struct ust_app *app;
struct lttng_ht_iter iter;
cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
struct buffer_reg_channel *reg_chan;
+ rcu_read_lock();
cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
reg_chan, node.node) {
- ret += reg_chan->stream_count;
+ if (cur_nr_packets >= reg_chan->num_subbuf) {
+ /*
+ * Don't take the channel into account if we have
+ * already grabbed all of its packets.
+ */
+ continue;
+ }
+ tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
}
+ rcu_read_unlock();
}
break;
}
cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
ua_chan, node.node) {
- ret += ua_chan->streams.count;
+ if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
+ /*
+ * Don't take the channel into account if we have
+ * already grabbed all of its packets.
+ */
+ continue;
+ }
+ tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
}
}
rcu_read_unlock();
break;
}
- return ret;
+ return tot_size;
}