#include "lttng-ust-ctl.hpp"
#include "lttng-ust-error.hpp"
#include "notification-thread-commands.hpp"
-#include "rotate.hpp"
#include "session.hpp"
#include "ust-app.hpp"
#include "ust-consumer.hpp"
#include <common/format.hpp>
#include <common/hashtable/utils.hpp>
#include <common/make-unique.hpp>
+#include <common/pthread-lock.hpp>
#include <common/sessiond-comm/sessiond-comm.hpp>
#include <common/urcu.hpp>
struct lttng_ht *ust_app_ht_by_sock;
struct lttng_ht *ust_app_ht_by_notify_sock;
-static int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
+static int ust_app_flush_app_session(ust_app& app, ust_app_session& ua_sess);
/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
{
struct ust_app_event *event;
const struct ust_app_ht_key *key;
- int ev_loglevel_value;
LTTNG_ASSERT(node);
LTTNG_ASSERT(_key);
event = caa_container_of(node, struct ust_app_event, node.node);
key = (ust_app_ht_key *) _key;
- ev_loglevel_value = event->attr.loglevel;
/* Match the 4 elements of the key: name, filter, loglevel, exclusions */
}
/* Event loglevel. */
- if (ev_loglevel_value != key->loglevel_type) {
- if (event->attr.loglevel_type == LTTNG_UST_ABI_LOGLEVEL_ALL &&
- key->loglevel_type == 0 && ev_loglevel_value == -1) {
- /*
- * Match is accepted. This is because on event creation, the
- * loglevel is set to -1 if the event loglevel type is ALL so 0 and
- * -1 are accepted for this loglevel type since 0 is the one set by
- * the API when receiving an enable event.
- */
- } else {
- goto no_match;
- }
+ if (!loglevels_match(event->attr.loglevel_type,
+ event->attr.loglevel,
+ key->loglevel_type,
+ key->loglevel_value,
+ LTTNG_UST_ABI_LOGLEVEL_ALL)) {
+ goto no_match;
}
/* One of the filters is NULL, fail. */
ht = ua_chan->events;
key.name = event->attr.name;
key.filter = event->filter;
- key.loglevel_type = (lttng_ust_abi_loglevel_type) event->attr.loglevel;
+ key.loglevel_type = (lttng_ust_abi_loglevel_type) event->attr.loglevel_type;
+ key.loglevel_value = event->attr.loglevel;
key.exclusion = event->exclusion;
node_ptr = cds_lfht_add_unique(ht->ht,
}
free(ua_ctx->obj);
}
+
+ if (ua_ctx->ctx.ctx == LTTNG_UST_ABI_CONTEXT_APP_CONTEXT) {
+ free(ua_ctx->ctx.u.app_ctx.provider_name);
+ free(ua_ctx->ctx.u.app_ctx.ctx_name);
+ }
+
free(ua_ctx);
}
return;
}
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
session = session_find_by_id(ua_chan->session->tracing_id);
if (!session || !session->ust_session) {
/*
uchan->per_pid_closed_app_lost += lost;
end:
- rcu_read_unlock();
if (session) {
session_put(session);
}
/* Wipe sessions */
cds_list_for_each_entry_safe (ua_sess, tmp_ua_sess, &app->teardown_head, teardown_node) {
/* Free every object in the session and the session. */
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
delete_ust_app_session(sock, ua_sess, app);
- rcu_read_unlock();
}
/* Remove the event notifier rules associated with this app. */
- rcu_read_lock();
- cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
- &iter.iter,
- event_notifier_rule,
- node.node) {
- ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
- LTTNG_ASSERT(!ret);
+ {
+ lttng::urcu::read_lock_guard read_lock;
- delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app);
- }
+ cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
+ &iter.iter,
+ event_notifier_rule,
+ node.node) {
+ ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
+ LTTNG_ASSERT(!ret);
- rcu_read_unlock();
+ delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app);
+ }
+ }
lttng_ht_destroy(app->sessions);
lttng_ht_destroy(app->ust_sessions_objd);
strncpy(ua_chan->name, name, sizeof(ua_chan->name));
ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
- ua_chan->enabled = 1;
+ ua_chan->enabled = true;
ua_chan->handle = -1;
ua_chan->session = ua_sess;
ua_chan->key = get_next_channel_key();
goto error;
}
- ua_event->enabled = 1;
+ ua_event->enabled = true;
strncpy(ua_event->name, name, sizeof(ua_event->name));
ua_event->name[sizeof(ua_event->name) - 1] = '\0';
lttng_ht_node_init_str(&ua_event->node, ua_event->name);
goto error;
}
- ua_event_notifier_rule->enabled = 1;
+ ua_event_notifier_rule->enabled = true;
ua_event_notifier_rule->token = lttng_trigger_get_tracer_token(trigger);
lttng_ht_node_init_u64(&ua_event_notifier_rule->node, ua_event_notifier_rule->token);
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
const char *name,
const struct lttng_bytecode *filter,
+ lttng_ust_abi_loglevel_type loglevel_type,
int loglevel_value,
const struct lttng_event_exclusion *exclusion)
{
/* Setup key for event lookup. */
key.name = name;
key.filter = filter;
- key.loglevel_type = (lttng_ust_abi_loglevel_type) loglevel_value;
+ key.loglevel_type = loglevel_type;
+ key.loglevel_value = loglevel_value;
/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
key.exclusion = exclusion;
goto error;
}
- ua_chan->enabled = 1;
+ ua_chan->enabled = true;
DBG2("UST app channel %s enabled successfully for app: pid = %d", ua_chan->name, app->pid);
LTTNG_ASSERT(ua_sess);
LTTNG_ASSERT(app);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
reg_pid = buffer_reg_pid_find(ua_sess->id);
if (!reg_pid) {
*regp = reg_pid;
}
error:
- rcu_read_unlock();
return ret;
}
LTTNG_ASSERT(usess);
LTTNG_ASSERT(app);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
reg_uid = buffer_reg_uid_find(usess->id, app->abi.bits_per_long, app->uid);
if (!reg_uid) {
*regp = reg_uid;
}
error:
- rcu_read_unlock();
return ret;
}
case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
if (strncmp(key->u.perf_counter.name,
ctx->ctx.u.perf_counter.name,
- sizeof(key->u.perf_counter.name))) {
+ sizeof(key->u.perf_counter.name)) != 0) {
goto no_match;
}
break;
case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
- if (strcmp(key->u.app_ctx.provider_name, ctx->ctx.u.app_ctx.provider_name) ||
- strcmp(key->u.app_ctx.ctx_name, ctx->ctx.u.app_ctx.ctx_name)) {
+ if (strcmp(key->u.app_ctx.provider_name, ctx->ctx.u.app_ctx.provider_name) != 0 ||
+ strcmp(key->u.app_ctx.ctx_name, ctx->ctx.u.app_ctx.ctx_name) != 0) {
goto no_match;
}
break;
goto error;
}
- ua_event->enabled = 1;
+ ua_event->enabled = true;
error:
return ret;
goto error;
}
- ua_event->enabled = 0;
+ ua_event->enabled = false;
error:
return ret;
goto error;
}
- ua_chan->enabled = 0;
+ ua_chan->enabled = false;
error:
return ret;
LTTNG_ASSERT(ua_chan);
LTTNG_ASSERT(registry);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
health_code_update();
/* Get the right consumer socket for the application. */
}
}
- rcu_read_unlock();
return 0;
error_destroy:
lttng_fd_put(LTTNG_FD_APPS, 1);
error:
health_code_update();
- rcu_read_unlock();
return ret;
}
DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
registry = get_session_registry(ua_sess);
/* The UST app session lock is held, registry shall not be null. */
}
}
error:
- rcu_read_unlock();
if (session) {
session_put(session);
}
goto error_free_pipe;
}
+	urcu_ref_init(&lta->ref);
+
lta->event_notifier_group.event_pipe = event_notifier_event_source_pipe;
lta->ppid = msg->ppid;
app->registration_time = time(nullptr);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
/*
* On a re-registration, we want to kick out the previous registration of
app->notify_sock,
app->v_major,
app->v_minor);
-
- rcu_read_unlock();
}
/*
return ret;
}
-/*
- * Unregister app by removing it from the global traceable app list and freeing
- * the data struct.
- *
- * The socket is already closed at this point so no close to sock.
- */
-void ust_app_unregister(int sock)
+static void ust_app_unregister(ust_app& app)
{
- struct ust_app *lta;
- struct lttng_ht_node_ulong *node;
- struct lttng_ht_iter ust_app_sock_iter;
struct lttng_ht_iter iter;
struct ust_app_session *ua_sess;
- int ret;
-
- rcu_read_lock();
-
- /* Get the node reference for a call_rcu */
- lttng_ht_lookup(ust_app_ht_by_sock, (void *) ((unsigned long) sock), &ust_app_sock_iter);
- node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
- LTTNG_ASSERT(node);
- lta = lttng::utils::container_of(node, &ust_app::sock_n);
- DBG("PID %d unregistering with sock %d", lta->pid, sock);
+ lttng::urcu::read_lock_guard read_lock;
/*
* For per-PID buffers, perform "push metadata" and flush all
* ensuring proper behavior of data_pending check.
* Remove sessions so they are not visible during deletion.
*/
- cds_lfht_for_each_entry (lta->sessions->ht, &iter.iter, ua_sess, node.node) {
- ret = lttng_ht_del(lta->sessions, &iter);
- if (ret) {
+ cds_lfht_for_each_entry (app.sessions->ht, &iter.iter, ua_sess, node.node) {
+ const auto del_ret = lttng_ht_del(app.sessions, &iter);
+ if (del_ret) {
/* The session was already removed so scheduled for teardown. */
continue;
}
if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
- (void) ust_app_flush_app_session(lta, ua_sess);
+ (void) ust_app_flush_app_session(app, *ua_sess);
}
/*
* Add session to list for teardown. This is safe since at this point we
* are the only one using this list.
*/
- pthread_mutex_lock(&ua_sess->lock);
+ lttng::pthread::lock_guard ust_app_session_lock(ua_sess->lock);
if (ua_sess->deleted) {
- pthread_mutex_unlock(&ua_sess->lock);
continue;
}
locked_registry.reset();
}
}
-		cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
- pthread_mutex_unlock(&ua_sess->lock);
+ cds_list_add(&ua_sess->teardown_node, &app.teardown_head);
}
- /* Remove application from PID hash table */
- ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
- LTTNG_ASSERT(!ret);
-
/*
* Remove application from notify hash table. The thread handling the
* notify socket could have deleted the node so ignore on error because
* either way it's valid. The close of that socket is handled by the
* apps_notify_thread.
*/
-	iter.iter.node = &lta->notify_sock_n.node;
+ iter.iter.node = &app.notify_sock_n.node;
(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
/*
* add replace during app registration because the PID can be reassigned by
* the OS.
*/
-	iter.iter.node = &lta->pid_n.node;
- ret = lttng_ht_del(ust_app_ht, &iter);
- if (ret) {
- DBG3("Unregister app by PID %d failed. This can happen on pid reuse", lta->pid);
+ iter.iter.node = &app.pid_n.node;
+ if (lttng_ht_del(ust_app_ht, &iter)) {
+ DBG3("Unregister app by PID %d failed. This can happen on pid reuse", app.pid);
}
+}
+
+/*
+ * Unregister app by removing it from the global traceable app list and freeing
+ * the data struct.
+ *
+ * The socket is already closed at this point, so there is no need to close it.
+ */
+/*
+ * Unregister an application using the socket that triggered its teardown.
+ *
+ * Looks the application up by socket fd, removes it from the socket hash
+ * table, and drops the reference held by the socket; the last ust_app_put()
+ * triggers the actual teardown.
+ */
+void ust_app_unregister_by_socket(int sock_fd)
+{
+	struct ust_app *app;
+	struct lttng_ht_node_ulong *node;
+	struct lttng_ht_iter ust_app_sock_iter;
+	int ret;
-	/* Free memory */
-	call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
+	lttng::urcu::read_lock_guard read_lock;
-	rcu_read_unlock();
-	return;
+	/* Get the node reference for a call_rcu */
+	lttng_ht_lookup(ust_app_ht_by_sock, (void *) ((unsigned long) sock_fd), &ust_app_sock_iter);
+	node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
+	LTTNG_ASSERT(node);
+
+	app = caa_container_of(node, struct ust_app, sock_n);
+
+	DBG_FMT("Application unregistering after socket activity: pid={}, socket_fd={}",
+		app->pid,
+		sock_fd);
+
+	/* Remove application from socket hash table */
+	ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
+	LTTNG_ASSERT(!ret);
+
+	/*
+	 * The socket is closed: release its reference to the application
+	 * to trigger its eventual teardown.
+	 */
+	ust_app_put(app);
 }
/*
goto error;
}
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- struct lttng_ust_abi_tracepoint_iter uiter;
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ struct lttng_ust_abi_tracepoint_iter uiter;
- health_code_update();
+ health_code_update();
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- pthread_mutex_lock(&app->sock_lock);
- handle = lttng_ust_ctl_tracepoint_list(app->sock);
- if (handle < 0) {
- if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app list events getting handle failed for app pid %d",
- app->pid);
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notice the caller of this error by
+ * telling him that this is a version error.
+ */
+ continue;
}
- pthread_mutex_unlock(&app->sock_lock);
- continue;
- }
-
- while ((ret = lttng_ust_ctl_tracepoint_list_get(app->sock, handle, &uiter)) !=
- -LTTNG_UST_ERR_NOENT) {
- /* Handle ustctl error. */
- if (ret < 0) {
- int release_ret;
- if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
- ERR("UST app tp list get failed for app %d with ret %d",
- app->sock,
- ret);
- } else {
- DBG3("UST app tp list get failed. Application is dead");
- break;
- }
- free(tmp_event);
- release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
- if (release_ret < 0 && release_ret != -LTTNG_UST_ERR_EXITING &&
- release_ret != -EPIPE) {
- ERR("Error releasing app handle for app %d with ret %d",
- app->sock,
- release_ret);
+ pthread_mutex_lock(&app->sock_lock);
+ handle = lttng_ust_ctl_tracepoint_list(app->sock);
+ if (handle < 0) {
+ if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
+ ERR("UST app list events getting handle failed for app pid %d",
+ app->pid);
}
pthread_mutex_unlock(&app->sock_lock);
- goto rcu_error;
+ continue;
}
- health_code_update();
- if (count >= nbmem) {
- /* In case the realloc fails, we free the memory */
- struct lttng_event *new_tmp_event;
- size_t new_nbmem;
-
- new_nbmem = nbmem << 1;
- DBG2("Reallocating event list from %zu to %zu entries",
- nbmem,
- new_nbmem);
- new_tmp_event = (lttng_event *) realloc(
- tmp_event, new_nbmem * sizeof(struct lttng_event));
- if (new_tmp_event == nullptr) {
+ while ((ret = lttng_ust_ctl_tracepoint_list_get(
+ app->sock, handle, &uiter)) != -LTTNG_UST_ERR_NOENT) {
+ /* Handle ustctl error. */
+ if (ret < 0) {
int release_ret;
- PERROR("realloc ust app events");
+ if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+ ERR("UST app tp list get failed for app %d with ret %d",
+ app->sock,
+ ret);
+ } else {
+ DBG3("UST app tp list get failed. Application is dead");
+ break;
+ }
+
free(tmp_event);
- ret = -ENOMEM;
release_ret =
lttng_ust_ctl_release_handle(app->sock, handle);
if (release_ret < 0 &&
app->sock,
release_ret);
}
+
pthread_mutex_unlock(&app->sock_lock);
goto rcu_error;
}
- /* Zero the new memory */
- memset(new_tmp_event + nbmem,
- 0,
- (new_nbmem - nbmem) * sizeof(struct lttng_event));
- nbmem = new_nbmem;
- tmp_event = new_tmp_event;
+
+ health_code_update();
+ if (count >= nbmem) {
+ /* In case the realloc fails, we free the memory */
+ struct lttng_event *new_tmp_event;
+ size_t new_nbmem;
+
+ new_nbmem = nbmem << 1;
+ DBG2("Reallocating event list from %zu to %zu entries",
+ nbmem,
+ new_nbmem);
+ new_tmp_event = (lttng_event *) realloc(
+ tmp_event, new_nbmem * sizeof(struct lttng_event));
+ if (new_tmp_event == nullptr) {
+ int release_ret;
+
+ PERROR("realloc ust app events");
+ free(tmp_event);
+ ret = -ENOMEM;
+ release_ret = lttng_ust_ctl_release_handle(
+ app->sock, handle);
+ if (release_ret < 0 &&
+ release_ret != -LTTNG_UST_ERR_EXITING &&
+ release_ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d",
+ app->sock,
+ release_ret);
+ }
+
+ pthread_mutex_unlock(&app->sock_lock);
+ goto rcu_error;
+ }
+ /* Zero the new memory */
+ memset(new_tmp_event + nbmem,
+ 0,
+ (new_nbmem - nbmem) * sizeof(struct lttng_event));
+ nbmem = new_nbmem;
+ tmp_event = new_tmp_event;
+ }
+
+ memcpy(tmp_event[count].name,
+ uiter.name,
+ LTTNG_UST_ABI_SYM_NAME_LEN);
+ tmp_event[count].loglevel = uiter.loglevel;
+ tmp_event[count].type =
+ (enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT;
+ tmp_event[count].pid = app->pid;
+ tmp_event[count].enabled = -1;
+ count++;
}
- memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_ABI_SYM_NAME_LEN);
- tmp_event[count].loglevel = uiter.loglevel;
- tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT;
- tmp_event[count].pid = app->pid;
- tmp_event[count].enabled = -1;
- count++;
- }
- ret = lttng_ust_ctl_release_handle(app->sock, handle);
- pthread_mutex_unlock(&app->sock_lock);
- if (ret < 0) {
- if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
- DBG3("Error releasing app handle. Application died: pid = %d, sock = %d",
- app->pid,
- app->sock);
- } else if (ret == -EAGAIN) {
- WARN("Error releasing app handle. Communication time out: pid = %d, sock = %d",
- app->pid,
- app->sock);
- } else {
- ERR("Error releasing app handle with ret %d: pid = %d, sock = %d",
- ret,
- app->pid,
- app->sock);
+
+ ret = lttng_ust_ctl_release_handle(app->sock, handle);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0) {
+ if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+ DBG3("Error releasing app handle. Application died: pid = %d, sock = %d",
+ app->pid,
+ app->sock);
+ } else if (ret == -EAGAIN) {
+ WARN("Error releasing app handle. Communication time out: pid = %d, sock = %d",
+ app->pid,
+ app->sock);
+ } else {
+ ERR("Error releasing app handle with ret %d: pid = %d, sock = %d",
+ ret,
+ app->pid,
+ app->sock);
+ }
}
}
}
DBG2("UST app list events done (%zu events)", count);
rcu_error:
- rcu_read_unlock();
error:
health_code_update();
return ret;
goto error;
}
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- struct lttng_ust_abi_field_iter uiter;
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ struct lttng_ust_abi_field_iter uiter;
- health_code_update();
+ health_code_update();
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- pthread_mutex_lock(&app->sock_lock);
- handle = lttng_ust_ctl_tracepoint_field_list(app->sock);
- if (handle < 0) {
- if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
- ERR("UST app list field getting handle failed for app pid %d",
- app->pid);
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notice the caller of this error by
+ * telling him that this is a version error.
+ */
+ continue;
}
- pthread_mutex_unlock(&app->sock_lock);
- continue;
- }
- while ((ret = lttng_ust_ctl_tracepoint_field_list_get(app->sock, handle, &uiter)) !=
- -LTTNG_UST_ERR_NOENT) {
- /* Handle ustctl error. */
- if (ret < 0) {
- int release_ret;
-
- if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
- ERR("UST app tp list field failed for app %d with ret %d",
- app->sock,
- ret);
- } else {
- DBG3("UST app tp list field failed. Application is dead");
- break;
+ pthread_mutex_lock(&app->sock_lock);
+ handle = lttng_ust_ctl_tracepoint_field_list(app->sock);
+ if (handle < 0) {
+ if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
+ ERR("UST app list field getting handle failed for app pid %d",
+ app->pid);
}
- free(tmp_event);
- release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
pthread_mutex_unlock(&app->sock_lock);
- if (release_ret < 0 && release_ret != -LTTNG_UST_ERR_EXITING &&
- release_ret != -EPIPE) {
- ERR("Error releasing app handle for app %d with ret %d",
- app->sock,
- release_ret);
- }
- goto rcu_error;
+ continue;
}
- health_code_update();
- if (count >= nbmem) {
- /* In case the realloc fails, we free the memory */
- struct lttng_event_field *new_tmp_event;
- size_t new_nbmem;
-
- new_nbmem = nbmem << 1;
- DBG2("Reallocating event field list from %zu to %zu entries",
- nbmem,
- new_nbmem);
- new_tmp_event = (lttng_event_field *) realloc(
- tmp_event, new_nbmem * sizeof(struct lttng_event_field));
- if (new_tmp_event == nullptr) {
+ while ((ret = lttng_ust_ctl_tracepoint_field_list_get(
+ app->sock, handle, &uiter)) != -LTTNG_UST_ERR_NOENT) {
+ /* Handle ustctl error. */
+ if (ret < 0) {
int release_ret;
- PERROR("realloc ust app event fields");
+ if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+ ERR("UST app tp list field failed for app %d with ret %d",
+ app->sock,
+ ret);
+ } else {
+ DBG3("UST app tp list field failed. Application is dead");
+ break;
+ }
+
free(tmp_event);
- ret = -ENOMEM;
release_ret =
lttng_ust_ctl_release_handle(app->sock, handle);
pthread_mutex_unlock(&app->sock_lock);
- if (release_ret && release_ret != -LTTNG_UST_ERR_EXITING &&
+ if (release_ret < 0 &&
+ release_ret != -LTTNG_UST_ERR_EXITING &&
release_ret != -EPIPE) {
ERR("Error releasing app handle for app %d with ret %d",
app->sock,
release_ret);
}
+
goto rcu_error;
}
- /* Zero the new memory */
- memset(new_tmp_event + nbmem,
- 0,
- (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
- nbmem = new_nbmem;
- tmp_event = new_tmp_event;
+
+ health_code_update();
+ if (count >= nbmem) {
+ /* In case the realloc fails, we free the memory */
+ struct lttng_event_field *new_tmp_event;
+ size_t new_nbmem;
+
+ new_nbmem = nbmem << 1;
+ DBG2("Reallocating event field list from %zu to %zu entries",
+ nbmem,
+ new_nbmem);
+ new_tmp_event = (lttng_event_field *) realloc(
+ tmp_event,
+ new_nbmem * sizeof(struct lttng_event_field));
+ if (new_tmp_event == nullptr) {
+ int release_ret;
+
+ PERROR("realloc ust app event fields");
+ free(tmp_event);
+ ret = -ENOMEM;
+ release_ret = lttng_ust_ctl_release_handle(
+ app->sock, handle);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (release_ret &&
+ release_ret != -LTTNG_UST_ERR_EXITING &&
+ release_ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d",
+ app->sock,
+ release_ret);
+ }
+
+ goto rcu_error;
+ }
+
+ /* Zero the new memory */
+ memset(new_tmp_event + nbmem,
+ 0,
+ (new_nbmem - nbmem) *
+ sizeof(struct lttng_event_field));
+ nbmem = new_nbmem;
+ tmp_event = new_tmp_event;
+ }
+
+ memcpy(tmp_event[count].field_name,
+ uiter.field_name,
+ LTTNG_UST_ABI_SYM_NAME_LEN);
+ /* Mapping between these enums matches 1 to 1. */
+ tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
+ tmp_event[count].nowrite = uiter.nowrite;
+
+ memcpy(tmp_event[count].event.name,
+ uiter.event_name,
+ LTTNG_UST_ABI_SYM_NAME_LEN);
+ tmp_event[count].event.loglevel = uiter.loglevel;
+ tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
+ tmp_event[count].event.pid = app->pid;
+ tmp_event[count].event.enabled = -1;
+ count++;
}
- memcpy(tmp_event[count].field_name,
- uiter.field_name,
- LTTNG_UST_ABI_SYM_NAME_LEN);
- /* Mapping between these enums matches 1 to 1. */
- tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
- tmp_event[count].nowrite = uiter.nowrite;
-
- memcpy(tmp_event[count].event.name,
- uiter.event_name,
- LTTNG_UST_ABI_SYM_NAME_LEN);
- tmp_event[count].event.loglevel = uiter.loglevel;
- tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
- tmp_event[count].event.pid = app->pid;
- tmp_event[count].event.enabled = -1;
- count++;
- }
- ret = lttng_ust_ctl_release_handle(app->sock, handle);
- pthread_mutex_unlock(&app->sock_lock);
- if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
- ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
+ ret = lttng_ust_ctl_release_handle(app->sock, handle);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d",
+ app->sock,
+ ret);
+ }
}
}
DBG2("UST app list event fields done (%zu events)", count);
rcu_error:
- rcu_read_unlock();
error:
health_code_update();
return ret;
DBG2("UST app cleaning registered apps hash table");
- rcu_read_lock();
-
/* Cleanup notify socket hash table */
if (ust_app_ht_by_notify_sock) {
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (
ust_app_ht_by_notify_sock->ht, &iter.iter, app, notify_sock_n.node) {
/*
* are unregistered prior to this clean-up.
*/
LTTNG_ASSERT(lttng_ht_get_count(app->token_to_event_notifier_rule_ht) == 0);
-
ust_app_notify_sock_unregister(app->notify_sock);
}
}
- if (ust_app_ht) {
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ret = lttng_ht_del(ust_app_ht, &iter);
- LTTNG_ASSERT(!ret);
- call_rcu(&app->pid_n.head, delete_ust_app_rcu);
- }
- }
-
/* Cleanup socket hash table */
if (ust_app_ht_by_sock) {
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (ust_app_ht_by_sock->ht, &iter.iter, app, sock_n.node) {
ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
LTTNG_ASSERT(!ret);
+ ust_app_put(app);
}
}
- rcu_read_unlock();
-
/* Destroy is done only when the ht is empty */
if (ust_app_ht) {
lttng_ht_destroy(ust_app_ht);
uchan->name,
usess->id);
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- /* For every registered applications */
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- struct lttng_ht_iter uiter;
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == nullptr) {
- continue;
- }
+ /* For every registered applications */
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ struct lttng_ht_iter uiter;
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notice the caller of this error by
+ * telling him that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == nullptr) {
+ continue;
+ }
- /* Get channel */
- lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- /* If the session if found for the app, the channel must be there */
- LTTNG_ASSERT(ua_chan_node);
+ /* Get channel */
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+			/* If the session is found for the app, the channel must be there */
+ LTTNG_ASSERT(ua_chan_node);
- ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
- /* The channel must not be already disabled */
- LTTNG_ASSERT(ua_chan->enabled == 1);
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+ /* The channel must not be already disabled */
+ LTTNG_ASSERT(ua_chan->enabled);
- /* Disable channel onto application */
- ret = disable_ust_app_channel(ua_sess, ua_chan, app);
- if (ret < 0) {
- /* XXX: We might want to report this error at some point... */
- continue;
+ /* Disable channel onto application */
+ ret = disable_ust_app_channel(ua_sess, ua_chan, app);
+ if (ret < 0) {
+ /* XXX: We might want to report this error at some point... */
+ continue;
+ }
}
}
- rcu_read_unlock();
return ret;
}
uchan->name,
usess->id);
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- /* For every registered applications */
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == nullptr) {
- continue;
- }
+ /* For every registered applications */
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notice the caller of this error by
+ * telling him that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == nullptr) {
+ continue;
+ }
- /* Enable channel onto application */
- ret = enable_ust_app_channel(ua_sess, uchan, app);
- if (ret < 0) {
- /* XXX: We might want to report this error at some point... */
- continue;
+ /* Enable channel onto application */
+ ret = enable_ust_app_channel(ua_sess, uchan, app);
+ if (ret < 0) {
+ /* XXX: We might want to report this error at some point... */
+ continue;
+ }
}
}
- rcu_read_unlock();
return ret;
}
uchan->name,
usess->id);
- rcu_read_lock();
-
- /* For all registered applications */
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == nullptr) {
- /* Next app */
- continue;
- }
+ {
+ lttng::urcu::read_lock_guard read_lock;
- /* Lookup channel in the ust app session */
- lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- if (ua_chan_node == nullptr) {
- DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
- "Skipping",
- uchan->name,
- usess->id,
- app->pid);
- continue;
- }
- ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+ /* For all registered applications */
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notice the caller of this error by
+ * telling him that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == nullptr) {
+ /* Next app */
+ continue;
+ }
- ua_event = find_ust_app_event(ua_chan->events,
- uevent->attr.name,
- uevent->filter,
- uevent->attr.loglevel,
- uevent->exclusion);
- if (ua_event == nullptr) {
- DBG2("Event %s not found in channel %s for app pid %d."
- "Skipping",
- uevent->attr.name,
- uchan->name,
- app->pid);
- continue;
- }
+ /* Lookup channel in the ust app session */
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+ if (ua_chan_node == nullptr) {
+ DBG2("Channel %s not found in session id %" PRIu64
+ " for app pid %d."
+ "Skipping",
+ uchan->name,
+ usess->id,
+ app->pid);
+ continue;
+ }
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+
+ ua_event = find_ust_app_event(
+ ua_chan->events,
+ uevent->attr.name,
+ uevent->filter,
+ (enum lttng_ust_abi_loglevel_type) uevent->attr.loglevel_type,
+ uevent->attr.loglevel,
+ uevent->exclusion);
+ if (ua_event == nullptr) {
+ DBG2("Event %s not found in channel %s for app pid %d."
+ "Skipping",
+ uevent->attr.name,
+ uchan->name,
+ app->pid);
+ continue;
+ }
- ret = disable_ust_app_event(ua_event, app);
- if (ret < 0) {
- /* XXX: Report error someday... */
- continue;
+ ret = disable_ust_app_event(ua_event, app);
+ if (ret < 0) {
+ /* XXX: Report error someday... */
+ continue;
+ }
}
}
- rcu_read_unlock();
return ret;
}
* tracer also.
*/
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- /* For all registered applications */
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (!ua_sess) {
- /* The application has problem or is probably dead. */
- continue;
- }
+ /* For all registered applications */
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notice the caller of this error by
+ * telling him that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = lookup_session_by_app(usess, app);
+ if (!ua_sess) {
+ /* The application has problem or is probably dead. */
+ continue;
+ }
- pthread_mutex_lock(&ua_sess->lock);
+ pthread_mutex_lock(&ua_sess->lock);
- if (ua_sess->deleted) {
- pthread_mutex_unlock(&ua_sess->lock);
- continue;
- }
+ if (ua_sess->deleted) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
- /* Lookup channel in the ust app session */
- lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- /*
- * It is possible that the channel cannot be found is
- * the channel/event creation occurs concurrently with
- * an application exit.
- */
- if (!ua_chan_node) {
- pthread_mutex_unlock(&ua_sess->lock);
- continue;
- }
+ /* Lookup channel in the ust app session */
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+ /*
+ * It is possible that the channel cannot be found if
+ * the channel/event creation occurs concurrently with
+ * an application exit.
+ */
+ if (!ua_chan_node) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
- ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+
+ /* Get event node */
+ ua_event = find_ust_app_event(
+ ua_chan->events,
+ uevent->attr.name,
+ uevent->filter,
+ (enum lttng_ust_abi_loglevel_type) uevent->attr.loglevel_type,
+ uevent->attr.loglevel,
+ uevent->exclusion);
+ if (ua_event == nullptr) {
+ DBG3("UST app enable event %s not found for app PID %d."
+ "Skipping app",
+ uevent->attr.name,
+ app->pid);
+ goto next_app;
+ }
- /* Get event node */
- ua_event = find_ust_app_event(ua_chan->events,
- uevent->attr.name,
- uevent->filter,
- uevent->attr.loglevel,
- uevent->exclusion);
- if (ua_event == nullptr) {
- DBG3("UST app enable event %s not found for app PID %d."
- "Skipping app",
- uevent->attr.name,
- app->pid);
- goto next_app;
- }
-
- ret = enable_ust_app_event(ua_event, app);
- if (ret < 0) {
+ ret = enable_ust_app_event(ua_event, app);
+ if (ret < 0) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ goto error;
+ }
+ next_app:
pthread_mutex_unlock(&ua_sess->lock);
- goto error;
}
- next_app:
- pthread_mutex_unlock(&ua_sess->lock);
}
-
error:
- rcu_read_unlock();
return ret;
}
uevent->attr.name,
usess->id);
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- /* For all registered applications */
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (!ua_sess) {
- /* The application has problem or is probably dead. */
- continue;
- }
+ /* For all registered applications */
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notify the caller of this error by
+ * telling them that this is a version error.
+ */
+ continue;
+ }
- pthread_mutex_lock(&ua_sess->lock);
+ ua_sess = lookup_session_by_app(usess, app);
+ if (!ua_sess) {
+ /* The application has a problem or is probably dead. */
+ continue;
+ }
- if (ua_sess->deleted) {
- pthread_mutex_unlock(&ua_sess->lock);
- continue;
- }
+ pthread_mutex_lock(&ua_sess->lock);
- /* Lookup channel in the ust app session */
- lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- /* If the channel is not found, there is a code flow error */
- LTTNG_ASSERT(ua_chan_node);
+ if (ua_sess->deleted) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
- ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+ /* Lookup channel in the ust app session */
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+ /* If the channel is not found, there is a code flow error */
+ LTTNG_ASSERT(ua_chan_node);
- ret = create_ust_app_event(ua_chan, uevent, app);
- pthread_mutex_unlock(&ua_sess->lock);
- if (ret < 0) {
- if (ret != -LTTNG_UST_ERR_EXIST) {
- /* Possible value at this point: -ENOMEM. If so, we stop! */
- break;
+ ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+
+ ret = create_ust_app_event(ua_chan, uevent, app);
+ pthread_mutex_unlock(&ua_sess->lock);
+ if (ret < 0) {
+ if (ret != -LTTNG_UST_ERR_EXIST) {
+ /* Possible value at this point: -ENOMEM. If so, we stop! */
+ break;
+ }
+
+ DBG2("UST app event %s already exist on app PID %d",
+ uevent->attr.name,
+ app->pid);
+ continue;
}
- DBG2("UST app event %s already exist on app PID %d",
- uevent->attr.name,
- app->pid);
- continue;
}
}
- rcu_read_unlock();
return ret;
}
DBG("Starting tracing for ust app pid %d", app->pid);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
if (!app->compatible) {
goto end;
}
/* Indicate that the session has been started once */
- ua_sess->started = 1;
- ua_sess->enabled = 1;
+ ua_sess->started = true;
+ ua_sess->enabled = true;
pthread_mutex_unlock(&ua_sess->lock);
}
end:
- rcu_read_unlock();
health_code_update();
return 0;
error_unlock:
pthread_mutex_unlock(&ua_sess->lock);
- rcu_read_unlock();
health_code_update();
return -1;
}
DBG("Stopping tracing for ust app pid %d", app->pid);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
if (!app->compatible) {
goto end_no_session;
}
health_code_update();
- ua_sess->enabled = 0;
+ ua_sess->enabled = false;
/* Quiescent wait after stopping trace */
pthread_mutex_lock(&app->sock_lock);
end_unlock:
pthread_mutex_unlock(&ua_sess->lock);
end_no_session:
- rcu_read_unlock();
health_code_update();
return 0;
error_rcu_unlock:
pthread_mutex_unlock(&ua_sess->lock);
- rcu_read_unlock();
health_code_update();
return -1;
}
-static int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess)
+static int ust_app_flush_app_session(ust_app& app, ust_app_session& ua_sess)
{
int ret, retval = 0;
struct lttng_ht_iter iter;
struct ust_app_channel *ua_chan;
struct consumer_socket *socket;
- DBG("Flushing app session buffers for ust app pid %d", app->pid);
-
- rcu_read_lock();
+ DBG("Flushing app session buffers for ust app pid %d", app.pid);
- if (!app->compatible) {
+ if (!app.compatible) {
goto end_not_compatible;
}
- pthread_mutex_lock(&ua_sess->lock);
+ pthread_mutex_lock(&ua_sess.lock);
- if (ua_sess->deleted) {
+ if (ua_sess.deleted) {
goto end_deleted;
}
health_code_update();
/* Flushing buffers */
- socket = consumer_find_socket_by_bitness(app->abi.bits_per_long, ua_sess->consumer);
+ socket = consumer_find_socket_by_bitness(app.abi.bits_per_long, ua_sess.consumer);
/* Flush buffers and push metadata. */
- switch (ua_sess->buffer_type) {
+ switch (ua_sess.buffer_type) {
case LTTNG_BUFFER_PER_PID:
- cds_lfht_for_each_entry (ua_sess->channels->ht, &iter.iter, ua_chan, node.node) {
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (ua_sess.channels->ht, &iter.iter, ua_chan, node.node) {
health_code_update();
ret = consumer_flush_channel(socket, ua_chan->key);
if (ret) {
continue;
}
}
+
break;
+ }
case LTTNG_BUFFER_PER_UID:
default:
abort();
health_code_update();
end_deleted:
- pthread_mutex_unlock(&ua_sess->lock);
+ pthread_mutex_unlock(&ua_sess.lock);
end_not_compatible:
- rcu_read_unlock();
health_code_update();
return retval;
}
DBG("Flushing session buffers for all ust apps");
- rcu_read_lock();
-
/* Flush buffers and push metadata. */
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
/* Flush all per UID buffers associated to that session. */
cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
+ lttng::urcu::read_lock_guard read_lock;
lsu::registry_session *ust_session_reg;
struct buffer_reg_channel *buf_reg_chan;
struct consumer_socket *socket;
auto locked_registry = ust_session_reg->lock();
(void) push_metadata(locked_registry, usess->consumer);
}
+
break;
}
case LTTNG_BUFFER_PER_PID:
struct ust_app_session *ua_sess;
struct lttng_ht_iter iter;
struct ust_app *app;
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
ua_sess = lookup_session_by_app(usess, app);
if (ua_sess == nullptr) {
continue;
}
- (void) ust_app_flush_app_session(app, ua_sess);
+
+ (void) ust_app_flush_app_session(*app, *ua_sess);
}
+
break;
}
default:
break;
}
- rcu_read_unlock();
health_code_update();
return ret;
}
DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
if (!app->compatible) {
goto end_not_compatible;
pthread_mutex_unlock(&ua_sess->lock);
end_not_compatible:
- rcu_read_unlock();
health_code_update();
return ret;
}
DBG("Clearing stream quiescent state for all ust apps");
- rcu_read_lock();
-
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
struct consumer_socket *socket;
struct buffer_reg_channel *buf_reg_chan;
+ lttng::urcu::read_lock_guard read_lock;
/* Get associated consumer socket.*/
socket = consumer_find_socket_by_bitness(reg->bits_per_long,
buf_reg_chan->consumer_key);
}
}
+
break;
}
case LTTNG_BUFFER_PER_PID:
struct ust_app_session *ua_sess;
struct lttng_ht_iter iter;
struct ust_app *app;
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
ua_sess = lookup_session_by_app(usess, app);
}
(void) ust_app_clear_quiescent_app_session(app, ua_sess);
}
+
break;
}
default:
break;
}
- rcu_read_unlock();
health_code_update();
return ret;
}
DBG("Destroy tracing for ust app pid %d", app->pid);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
if (!app->compatible) {
goto end;
}
}
end:
- rcu_read_unlock();
health_code_update();
return 0;
}
* Even though the start trace might fail, flag this session active so
* other application coming in are started by default.
*/
- usess->active = 1;
-
- rcu_read_lock();
+ usess->active = true;
/*
* In a start-stop-start use-case, we need to clear the quiescent state
*/
(void) ust_app_clear_quiescent_session(usess);
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ust_app_global_update(usess, app);
- }
+ {
+ lttng::urcu::read_lock_guard read_lock;
- rcu_read_unlock();
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ust_app_global_update(usess, app);
+ }
+ }
return 0;
}
* Even though the stop trace might fail, flag this session inactive so
* other application coming in are not started by default.
*/
- usess->active = 0;
+ usess->active = false;
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ret = ust_app_stop_trace(usess, app);
- if (ret < 0) {
- /* Continue to next apps even on error */
- continue;
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ret = ust_app_stop_trace(usess, app);
+ if (ret < 0) {
+ /* Continue to next apps even on error */
+ continue;
+ }
}
}
(void) ust_app_flush_session(usess);
- rcu_read_unlock();
-
return 0;
}
DBG("Destroy all UST traces");
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ret = destroy_trace(usess, app);
- if (ret < 0) {
- /* Continue to next apps even on error */
- continue;
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ret = destroy_trace(usess, app);
+ if (ret < 0) {
+ /* Continue to next apps even on error */
+ continue;
+ }
}
}
- rcu_read_unlock();
-
return 0;
}
ua_event = find_ust_app_event(ua_chan->events,
uevent->attr.name,
uevent->filter,
+ (enum lttng_ust_abi_loglevel_type) uevent->attr.loglevel_type,
uevent->attr.loglevel,
uevent->exclusion);
if (!ua_event) {
}
}
- rcu_read_lock();
- /* Remove all unknown event sources from the app. */
- cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
- &app_trigger_iter.iter,
- event_notifier_rule,
- node.node) {
- const uint64_t app_token = event_notifier_rule->token;
- bool found = false;
+ {
+ lttng::urcu::read_lock_guard read_lock;
- /*
- * Check if the app event trigger still exists on the
- * notification side.
- */
- for (i = 0; i < count; i++) {
- uint64_t notification_thread_token;
- const struct lttng_trigger *trigger =
- lttng_triggers_get_at_index(triggers, i);
+ /* Remove all unknown event sources from the app. */
+ cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
+ &app_trigger_iter.iter,
+ event_notifier_rule,
+ node.node) {
+ const uint64_t app_token = event_notifier_rule->token;
+ bool found = false;
+
+ /*
+ * Check if the app event trigger still exists on the
+ * notification side.
+ */
+ for (i = 0; i < count; i++) {
+ uint64_t notification_thread_token;
+ const struct lttng_trigger *trigger =
+ lttng_triggers_get_at_index(triggers, i);
- LTTNG_ASSERT(trigger);
+ LTTNG_ASSERT(trigger);
- notification_thread_token = lttng_trigger_get_tracer_token(trigger);
+ notification_thread_token = lttng_trigger_get_tracer_token(trigger);
- if (notification_thread_token == app_token) {
- found = true;
- break;
+ if (notification_thread_token == app_token) {
+ found = true;
+ break;
+ }
}
- }
- if (found) {
- /* Still valid. */
- continue;
- }
+ if (found) {
+ /* Still valid. */
+ continue;
+ }
- /*
- * This trigger was unregistered, disable it on the tracer's
- * side.
- */
- ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &app_trigger_iter);
- LTTNG_ASSERT(ret == 0);
+ /*
+ * This trigger was unregistered, disable it on the tracer's
+ * side.
+ */
+ ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &app_trigger_iter);
+ LTTNG_ASSERT(ret == 0);
- /* Callee logs errors. */
- (void) disable_ust_object(app, event_notifier_rule->obj);
+ /* Callee logs errors. */
+ (void) disable_ust_object(app, event_notifier_rule->obj);
- delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app);
+ delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app);
+ }
}
- rcu_read_unlock();
-
end:
lttng_triggers_destroy(triggers);
return;
ret = find_or_create_ust_app_session(usess, app, &ua_sess, nullptr);
if (ret < 0) {
/* Tracer is probably gone or ENOMEM. */
- if (ua_sess) {
- destroy_app_session(app, ua_sess);
- }
goto end;
}
+
LTTNG_ASSERT(ua_sess);
pthread_mutex_lock(&ua_sess->lock);
goto deleted_session;
}
- rcu_read_lock();
+ {
+ lttng::urcu::read_lock_guard read_lock;
- ust_app_synchronize_all_channels(usess, ua_sess, app);
+ ust_app_synchronize_all_channels(usess, ua_sess, app);
- /*
- * Create the metadata for the application. This returns gracefully if a
- * metadata was already set for the session.
- *
- * The metadata channel must be created after the data channels as the
- * consumer daemon assumes this ordering. When interacting with a relay
- * daemon, the consumer will use this assumption to send the
- * "STREAMS_SENT" message to the relay daemon.
- */
- ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
- if (ret < 0) {
- ERR("Metadata creation failed for app sock %d for session id %" PRIu64,
- app->sock,
- usess->id);
+ /*
+ * Create the metadata for the application. This returns gracefully if a
+ * metadata was already set for the session.
+ *
+ * The metadata channel must be created after the data channels as the
+ * consumer daemon assumes this ordering. When interacting with a relay
+ * daemon, the consumer will use this assumption to send the
+ * "STREAMS_SENT" message to the relay daemon.
+ */
+ ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
+ if (ret < 0) {
+ ERR("Metadata creation failed for app sock %d for session id %" PRIu64,
+ app->sock,
+ usess->id);
+ }
}
- rcu_read_unlock();
-
deleted_session:
pthread_mutex_unlock(&ua_sess->lock);
end:
struct lttng_ht_iter iter;
struct ust_app *app;
- rcu_read_lock();
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- ust_app_global_update(usess, app);
+ {
+ lttng::urcu::read_lock_guard read_lock;
+
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ust_app_global_update(usess, app);
+ }
}
- rcu_read_unlock();
}
void ust_app_global_update_all_event_notifier_rules()
struct lttng_ht_iter iter;
struct ust_app *app;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
ust_app_global_update_event_notifier_rules(app);
}
-
- rcu_read_unlock();
}
/*
LTTNG_ASSERT(usess->active);
- rcu_read_lock();
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
- ua_sess = lookup_session_by_app(usess, app);
- if (ua_sess == nullptr) {
- continue;
- }
+ {
+ lttng::urcu::read_lock_guard read_lock;
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notify the caller of this error by
+ * telling them that this is a version error.
+ */
+ continue;
+ }
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == nullptr) {
+ continue;
+ }
- pthread_mutex_lock(&ua_sess->lock);
+ pthread_mutex_lock(&ua_sess->lock);
- if (ua_sess->deleted) {
- pthread_mutex_unlock(&ua_sess->lock);
- continue;
- }
+ if (ua_sess->deleted) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
- /* Lookup channel in the ust app session */
- lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
- ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- if (ua_chan_node == nullptr) {
- goto next_app;
- }
- ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
- ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
- if (ret < 0) {
- goto next_app;
+ /* Lookup channel in the ust app session */
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+ if (ua_chan_node == nullptr) {
+ goto next_app;
+ }
+ ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+ ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
+ if (ret < 0) {
+ goto next_app;
+ }
+ next_app:
+ pthread_mutex_unlock(&ua_sess->lock);
}
- next_app:
- pthread_mutex_unlock(&ua_sess->lock);
}
- rcu_read_unlock();
return ret;
}
goto reply;
}
}
- } catch (std::exception& ex) {
+ } catch (const std::exception& ex) {
ERR("Failed to handle application context: %s", ex.what());
ret_code = -EINVAL;
goto reply;
application_reply_code = 0;
} catch (const std::exception& ex) {
ERR("%s: %s",
- fmt::format(
+ lttng::format(
"Failed to create or find enumeration provided by application: app = {}, enumeration name = {}",
*app,
name)
LTTNG_ASSERT(sock >= 0);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
obj = zmalloc<ust_app_notify_sock_obj>();
if (!obj) {
(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
close_socket:
- rcu_read_unlock();
/*
* Close socket after a grace period to avoid for the socket to be reused
/*
* Destroy a ust app data structure and free its memory.
*/
-void ust_app_destroy(struct ust_app *app)
+static void ust_app_destroy(ust_app& app)
{
- if (!app) {
- return;
- }
-
- call_rcu(&app->pid_n.head, delete_ust_app_rcu);
+ call_rcu(&app.pid_n.head, delete_ust_app_rcu);
}
/*
LTTNG_ASSERT(usess);
LTTNG_ASSERT(output);
- rcu_read_lock();
-
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
struct buffer_reg_uid *reg;
+ lttng::urcu::read_lock_guard read_lock;
+
cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
struct buffer_reg_channel *buf_reg_chan;
struct consumer_socket *socket;
goto error;
}
}
+
break;
}
case LTTNG_BUFFER_PER_PID:
{
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
struct consumer_socket *socket;
struct lttng_ht_iter chan_iter;
error:
free(trace_path);
- rcu_read_unlock();
return status;
}
cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
struct buffer_reg_channel *buf_reg_chan;
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (
reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
if (cur_nr_packets >= buf_reg_chan->num_subbuf) {
}
tot_size += buf_reg_chan->subbuf_size * buf_reg_chan->stream_count;
}
- rcu_read_unlock();
}
break;
}
case LTTNG_BUFFER_PER_PID:
{
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
struct ust_app_channel *ua_chan;
struct ust_app_session *ua_sess;
tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
}
}
- rcu_read_unlock();
break;
}
default:
*discarded = 0;
*lost = 0;
- rcu_read_lock();
/*
* Iterate over every registered applications. Sum counters for
* all applications containing requested session and channel.
*/
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
struct lttng_ht_iter uiter;
}
}
- rcu_read_unlock();
return ret;
}
DBG("Regenerating the metadata for ust app pid %d", app->pid);
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
ua_sess = lookup_session_by_app(usess, app);
if (ua_sess == nullptr) {
pthread_mutex_unlock(&ua_sess->lock);
end:
- rcu_read_unlock();
health_code_update();
return ret;
}
DBG("Regenerating the metadata for all UST apps");
- rcu_read_lock();
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
if (!app->compatible) {
}
}
- rcu_read_unlock();
-
return 0;
}
int ret;
enum lttng_error_code cmd_ret = LTTNG_OK;
struct lttng_ht_iter iter;
- struct ust_app *app;
struct ltt_ust_session *usess = session->ust_session;
LTTNG_ASSERT(usess);
- rcu_read_lock();
-
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
struct buffer_reg_channel *buf_reg_chan;
struct consumer_socket *socket;
+ lttng::urcu::read_lock_guard read_lock;
/* Get consumer socket to use to push the metadata.*/
socket = consumer_find_socket_by_bitness(reg->bits_per_long,
}
case LTTNG_BUFFER_PER_PID:
{
- cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ lttng::urcu::read_lock_guard read_lock;
+ ust_app *raw_app;
+
+ cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, raw_app, pid_n.node) {
struct consumer_socket *socket;
struct lttng_ht_iter chan_iter;
struct ust_app_channel *ua_chan;
struct ust_app_session *ua_sess;
lsu::registry_session *registry;
+ bool app_reference_taken;
- ua_sess = lookup_session_by_app(usess, app);
+ app_reference_taken = ust_app_get(*raw_app);
+ if (!app_reference_taken) {
+ /* Application unregistered concurrently, skip it. */
+ DBG("Could not get application reference as it is being torn down; skipping application");
+ continue;
+ }
+
+ ust_app_reference app(raw_app);
+ raw_app = nullptr;
+
+ ua_sess = lookup_session_by_app(usess, app.get());
if (!ua_sess) {
/* Session not associated with this app. */
continue;
}
registry = get_session_registry(ua_sess);
- if (!registry) {
- DBG("Application session is being torn down. Skip application.");
- continue;
- }
+ LTTNG_ASSERT(registry);
/* Rotate the data channels. */
cds_lfht_for_each_entry (
ua_sess->consumer,
/* is_metadata_channel */ false);
if (ret < 0) {
- /* Per-PID buffer and application going away. */
- if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
- continue;
cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
goto error;
}
(void) push_metadata(locked_registry, usess->consumer);
}
+
ret = consumer_rotate_channel(socket,
registry->_metadata_key,
ua_sess->consumer,
/* is_metadata_channel */ true);
if (ret < 0) {
- /* Per-PID buffer and application going away. */
- if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
- continue;
cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
goto error;
}
}
+
break;
}
default:
cmd_ret = LTTNG_OK;
error:
- rcu_read_unlock();
return cmd_ret;
}
int fmt_ret;
LTTNG_ASSERT(usess->current_trace_chunk);
- rcu_read_lock();
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
struct buffer_reg_uid *reg;
+ lttng::urcu::read_lock_guard read_lock;
cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
fmt_ret = asprintf(&pathname_index,
case LTTNG_BUFFER_PER_PID:
{
struct ust_app *app;
+ lttng::urcu::read_lock_guard read_lock;
/*
* Create the toplevel ust/ directory in case no apps are running.
ret = LTTNG_OK;
error:
- rcu_read_unlock();
return ret;
}
LTTNG_ASSERT(usess);
- rcu_read_lock();
-
if (usess->active) {
ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
cmd_ret = LTTNG_ERR_FATAL;
case LTTNG_BUFFER_PER_UID:
{
struct buffer_reg_uid *reg;
+ lttng::urcu::read_lock_guard read_lock;
cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
struct buffer_reg_channel *buf_reg_chan;
}
case LTTNG_BUFFER_PER_PID:
{
+ lttng::urcu::read_lock_guard read_lock;
+
cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
struct consumer_socket *socket;
struct lttng_ht_iter chan_iter;
error_socket:
end:
- rcu_read_unlock();
return cmd_ret;
}
LTTNG_ASSERT(usess);
- rcu_read_lock();
-
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
struct buffer_reg_channel *buf_reg_chan;
struct consumer_socket *socket;
+ lttng::urcu::read_lock_guard read_lock;
socket = consumer_find_socket_by_bitness(reg->bits_per_long,
usess->consumer);
case LTTNG_BUFFER_PER_PID:
{
struct ust_app *app;
+ lttng::urcu::read_lock_guard read_lock;
cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
struct consumer_socket *socket;
}
error:
- rcu_read_unlock();
return ret;
}
*/
return v_major <= 9 ? lsu::ctl_field_quirks::UNDERSCORE_PREFIXED_VARIANT_TAG_MAPPINGS :
lsu::ctl_field_quirks::NONE;
-}
\ No newline at end of file
+}
+
+/*
+ * urcu_ref release callback, invoked when the last reference on an
+ * application is dropped (see ust_app_put). Unregisters the application
+ * and schedules its destruction after an RCU grace period via
+ * ust_app_destroy's call_rcu.
+ */
+static void ust_app_release(urcu_ref *ref)
+{
+ auto& app = *lttng::utils::container_of(ref, &ust_app::ref);
+
+ ust_app_unregister(app);
+ ust_app_destroy(app);
+}
+
+/*
+ * Acquire a reference on an application.
+ *
+ * Returns false when the application's reference count has already
+ * reached zero (it is concurrently being released); in that case the
+ * caller must not use the application.
+ */
+bool ust_app_get(ust_app& app)
+{
+ return urcu_ref_get_unless_zero(&app.ref);
+}
+
+/*
+ * Release a reference on an application. NULL-safe.
+ *
+ * When the last reference is dropped, ust_app_release() unregisters the
+ * application and frees it after an RCU grace period.
+ */
+void ust_app_put(struct ust_app *app)
+{
+ if (!app) {
+ return;
+ }
+
+ urcu_ref_put(&app->ref, ust_app_release);
+}