+ assert(app);
+ assert(usess);
+ assert(ua_sess);
+ assert(ua_chan);
+
+ /* Handle buffer type before sending the channel to the application. */
+ switch (usess->buffer_type) {
+ case LTTNG_BUFFER_PER_UID:
+ {
+ ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
+ if (ret < 0) {
+ goto error;
+ }
+ break;
+ }
+ case LTTNG_BUFFER_PER_PID:
+ {
+ ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
+ if (ret < 0) {
+ goto error;
+ }
+ break;
+ }
+ default:
+ assert(0);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* Initialize ust objd object using the received handle and add it. */
+ lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
+ lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
+
+ /* If channel is not enabled, disable it on the tracer */
+ if (!ua_chan->enabled) {
+ ret = disable_ust_channel(app, ua_sess, ua_chan);
+ if (ret < 0) {
+ goto error;
+ }
+ }
+
+error:
+ return ret;
+}
+
+/*
+ * Create UST app channel and create it on the tracer. Set ua_chanp of the
+ * newly created channel if not NULL.
+ *
+ * Called with UST app session lock and RCU read-side lock held.
+ *
+ * Return 0 on success or else a negative value. Returns -ENOTCONN if
+ * the application exited concurrently.
+ */
+static int create_ust_app_channel(struct ust_app_session *ua_sess,
+        struct ltt_ust_channel *uchan, struct ust_app *app,
+        enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
+        struct ust_app_channel **ua_chanp)
+{
+    int ret = 0;
+    struct lttng_ht_iter iter;
+    struct lttng_ht_node_str *chan_node;
+    struct ust_app_channel *chan;
+
+    /* Reuse the channel if it already exists in the app session. */
+    lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
+    chan_node = lttng_ht_iter_get_node_str(&iter);
+    if (chan_node) {
+        chan = caa_container_of(chan_node, struct ust_app_channel, node);
+        goto end;
+    }
+
+    chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
+    if (!chan) {
+        /* Only malloc can fail here. */
+        ret = -ENOMEM;
+        goto error_alloc;
+    }
+    shadow_copy_channel(chan, uchan);
+
+    /* Set channel type. */
+    chan->attr.type = type;
+
+    ret = do_create_channel(app, usess, ua_sess, chan);
+    if (ret < 0) {
+        goto error;
+    }
+
+    DBG2("UST app create channel %s for PID %d completed", chan->name,
+            app->pid);
+
+    /* Only add the channel if successful on the tracer side. */
+    lttng_ht_add_unique_str(ua_sess->channels, &chan->node);
+
+end:
+    if (ua_chanp) {
+        *ua_chanp = chan;
+    }
+
+    /* Everything went well. */
+    return 0;
+
+error:
+    /* Close the tracer-side object only if it was actually sent. */
+    delete_ust_app_channel(chan->is_sent ? app->sock : -1, chan, app);
+error_alloc:
+    return ret;
+}
+
+/*
+ * Create UST app event and create it on the tracer side.
+ *
+ * Called with ust app session mutex held.
+ *
+ * Returns 0 on success, -EEXIST if the event is already known to this
+ * channel, or another negative value on error.
+ */
+static
+int create_ust_app_event(struct ust_app_session *ua_sess,
+        struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
+        struct ust_app *app)
+{
+    int ret = 0;
+    struct ust_app_event *event;
+
+    /* Refuse duplicates: same name, filter, loglevel and exclusion. */
+    event = find_ust_app_event(ua_chan->events, uevent->attr.name,
+            uevent->filter, uevent->attr.loglevel, uevent->exclusion);
+    if (event) {
+        ret = -EEXIST;
+        goto end;
+    }
+
+    /* Does not exist so create one. */
+    event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
+    if (!event) {
+        /* Only malloc can fail so something is really wrong. */
+        ret = -ENOMEM;
+        goto end;
+    }
+    shadow_copy_event(event, uevent);
+
+    /* Create it on the tracer side. */
+    ret = create_ust_event(app, ua_sess, ua_chan, event);
+    if (ret < 0) {
+        /* Not found previously means that it does not exist on the tracer. */
+        assert(ret != -LTTNG_UST_ERR_EXIST);
+        goto error;
+    }
+
+    add_unique_ust_app_event(ua_chan, event);
+
+    DBG2("UST app create event %s for PID %d completed", event->name,
+            app->pid);
+
+end:
+    return ret;
+
+error:
+    /* Valid. Calling here is already in a read side lock. */
+    delete_ust_app_event(-1, event, app);
+    return ret;
+}
+
+/*
+ * Create UST metadata and open it on the tracer side.
+ *
+ * Called with UST app session lock held and RCU read side lock.
+ *
+ * Return 0 on success (or if metadata already exists/was closed), else a
+ * negative value.
+ */
+static int create_ust_app_metadata(struct ust_app_session *ua_sess,
+        struct ust_app *app, struct consumer_output *consumer)
+{
+    int ret = 0;
+    struct ust_app_channel *metadata;
+    struct consumer_socket *socket;
+    struct ust_registry_session *registry;
+
+    assert(ua_sess);
+    assert(app);
+    assert(consumer);
+
+    registry = get_session_registry(ua_sess);
+    assert(registry);
+
+    /* Fixed mojibake: "&registry" had been mangled into "(R)istry". */
+    pthread_mutex_lock(&registry->lock);
+
+    /* Metadata already exists for this registry or it was closed previously */
+    if (registry->metadata_key || registry->metadata_closed) {
+        ret = 0;
+        goto error;
+    }
+
+    /* Allocate UST metadata */
+    metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
+    if (!metadata) {
+        /* malloc() failed */
+        ret = -ENOMEM;
+        goto error;
+    }
+
+    memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));
+
+    /* Need one fd for the channel. */
+    ret = lttng_fd_get(LTTNG_FD_APPS, 1);
+    if (ret < 0) {
+        ERR("Exhausted number of available FD upon create metadata");
+        goto error;
+    }
+
+    /* Get the right consumer socket for the application. */
+    socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
+    if (!socket) {
+        ret = -EINVAL;
+        goto error_consumer;
+    }
+
+    /*
+     * Keep metadata key so we can identify it on the consumer side. Assign it
+     * to the registry *before* we ask the consumer so we avoid the race of the
+     * consumer requesting the metadata and the ask_channel call on our side
+     * did not returned yet.
+     */
+    registry->metadata_key = metadata->key;
+
+    /*
+     * Ask the metadata channel creation to the consumer. The metadata object
+     * will be created by the consumer and kept their. However, the stream is
+     * never added or monitored until we do a first push metadata to the
+     * consumer.
+     */
+    ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
+            registry);
+    if (ret < 0) {
+        /* Nullify the metadata key so we don't try to close it later on. */
+        registry->metadata_key = 0;
+        goto error_consumer;
+    }
+
+    /*
+     * The setup command will make the metadata stream be sent to the relayd,
+     * if applicable, and the thread managing the metadatas. This is important
+     * because after this point, if an error occurs, the only way the stream
+     * can be deleted is to be monitored in the consumer.
+     */
+    ret = consumer_setup_metadata(socket, metadata->key);
+    if (ret < 0) {
+        /* Nullify the metadata key so we don't try to close it later on. */
+        registry->metadata_key = 0;
+        goto error_consumer;
+    }
+
+    DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
+            metadata->key, app->pid);
+
+    /*
+     * The success path intentionally falls through: the consumer keeps the
+     * channel object (see ask_channel comment above), so the local fd and
+     * metadata object are released in every case.
+     */
+error_consumer:
+    lttng_fd_put(LTTNG_FD_APPS, 1);
+    delete_ust_app_channel(-1, metadata, app);
+error:
+    pthread_mutex_unlock(&registry->lock);
+    return ret;
+}
+
+/*
+ * Return ust app pointer or NULL if not found. RCU read side lock MUST be
+ * acquired before calling this function.
+ */
+struct ust_app *ust_app_find_by_pid(pid_t pid)
+{
+    struct ust_app *app = NULL;
+    struct lttng_ht_node_ulong *node;
+    struct lttng_ht_iter iter;
+
+    lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
+    node = lttng_ht_iter_get_node_ulong(&iter);
+    if (node == NULL) {
+        /* Fixed log message grammar ("no found" -> "not found"). */
+        DBG2("UST app not found with pid %d", pid);
+        goto error;
+    }
+
+    DBG2("Found UST app by pid %d", pid);
+
+    app = caa_container_of(node, struct ust_app, pid_n);
+
+error:
+    return app;
+}
+
+/*
+ * Allocate and init an UST app object using the registration information and
+ * the command socket. This is called when the command socket connects to the
+ * session daemon.
+ *
+ * The object is returned on success or else NULL.
+ */
+struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
+{
+    struct ust_app *lta = NULL;
+
+    assert(msg);
+    assert(sock >= 0);
+
+    DBG3("UST app creating application for socket %d", sock);
+
+    /* Refuse apps whose bitness has no matching consumerd available. */
+    if ((msg->bits_per_long == 64 &&
+                (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
+            || (msg->bits_per_long == 32 &&
+                (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
+        ERR("Registration failed: application \"%s\" (pid: %d) has "
+                "%d-bit long, but no consumerd for this size is available.\n",
+                msg->name, msg->pid, msg->bits_per_long);
+        goto error;
+    }
+
+    lta = zmalloc(sizeof(struct ust_app));
+    if (lta == NULL) {
+        PERROR("malloc");
+        goto error;
+    }
+
+    lta->ppid = msg->ppid;
+    lta->uid = msg->uid;
+    lta->gid = msg->gid;
+
+    lta->bits_per_long = msg->bits_per_long;
+    lta->uint8_t_alignment = msg->uint8_t_alignment;
+    lta->uint16_t_alignment = msg->uint16_t_alignment;
+    lta->uint32_t_alignment = msg->uint32_t_alignment;
+    lta->uint64_t_alignment = msg->uint64_t_alignment;
+    lta->long_alignment = msg->long_alignment;
+    lta->byte_order = msg->byte_order;
+
+    lta->v_major = msg->major;
+    lta->v_minor = msg->minor;
+    /* NOTE(review): lttng_ht_new() results are not NULL-checked here —
+     * confirm callers treat a failed registration as fatal. */
+    lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
+    lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+    lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+    lta->notify_sock = -1;
+
+    /* Copy name and make sure it's NULL terminated. */
+    strncpy(lta->name, msg->name, sizeof(lta->name));
+    lta->name[UST_APP_PROCNAME_LEN] = '\0';
+
+    /*
+     * Before this can be called, when receiving the registration information,
+     * the application compatibility is checked. So, at this point, the
+     * application can work with this session daemon.
+     */
+    lta->compatible = 1;
+
+    lta->pid = msg->pid;
+    /* Fixed mojibake: "&lta" had been mangled into "<a" on these lines. */
+    lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
+    lta->sock = sock;
+    pthread_mutex_init(&lta->sock_lock, NULL);
+    lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
+
+    CDS_INIT_LIST_HEAD(&lta->teardown_head);
+error:
+    return lta;
+}
+
+/*
+ * For a given application object, add it to every hash table.
+ */
+void ust_app_add(struct ust_app *app)
+{
+    assert(app);
+    assert(app->notify_sock >= 0);
+
+    /* Initializing the node does not touch shared state; do it up front. */
+    lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
+
+    rcu_read_lock();
+
+    /*
+     * On a re-registration, we want to kick out the previous registration of
+     * that pid
+     */
+    lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
+
+    /*
+     * The socket _should_ be unique until _we_ call close. So, a add_unique
+     * for the ust_app_ht_by_sock is used which asserts fail if the entry was
+     * already in the table.
+     */
+    lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
+
+    /* Add application to the notify socket hash table. */
+    lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
+
+    DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
+            "notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
+            app->gid, app->sock, app->name, app->notify_sock, app->v_major,
+            app->v_minor);
+
+    rcu_read_unlock();
+}
+
+/*
+ * Set the application version into the object.
+ *
+ * Return 0 on success else a negative value either an errno code or a
+ * LTTng-UST error code.
+ */
+int ust_app_version(struct ust_app *app)
+{
+    int ret;
+
+    assert(app);
+
+    /* The command socket is shared; serialize access to it. */
+    pthread_mutex_lock(&app->sock_lock);
+    ret = ustctl_tracer_version(app->sock, &app->version);
+    pthread_mutex_unlock(&app->sock_lock);
+    if (ret < 0) {
+        if (ret == -LTTNG_UST_ERR_EXITING || ret == -EPIPE) {
+            /* Application death is expected; log quietly. */
+            DBG3("UST app %d version failed. Application is dead", app->sock);
+        } else {
+            ERR("UST app %d version failed with ret %d", app->sock, ret);
+        }
+    }
+
+    return ret;
+}
+
+/*
+ * Unregister app by removing it from the global traceable app list and freeing
+ * the data struct.
+ *
+ * The socket is already closed at this point so no close to sock.
+ */
+void ust_app_unregister(int sock)
+{
+    struct ust_app *lta;
+    struct lttng_ht_node_ulong *node;
+    struct lttng_ht_iter ust_app_sock_iter;
+    struct lttng_ht_iter iter;
+    struct ust_app_session *ua_sess;
+    int ret;
+
+    rcu_read_lock();
+
+    /* Get the node reference for a call_rcu */
+    lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
+    node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
+    assert(node);
+
+    lta = caa_container_of(node, struct ust_app, sock_n);
+    DBG("PID %d unregistering with sock %d", lta->pid, sock);
+
+    /*
+     * For per-PID buffers, perform "push metadata" and flush all
+     * application streams before removing app from hash tables,
+     * ensuring proper behavior of data_pending check.
+     * Remove sessions so they are not visible during deletion.
+     */
+    cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
+            node.node) {
+        struct ust_registry_session *registry;
+
+        ret = lttng_ht_del(lta->sessions, &iter);
+        if (ret) {
+            /* The session was already removed so scheduled for teardown. */
+            continue;
+        }
+
+        if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
+            (void) ust_app_flush_app_session(lta, ua_sess);
+        }
+
+        /*
+         * Add session to list for teardown. This is safe since at this point we
+         * are the only one using this list.
+         */
+        pthread_mutex_lock(&ua_sess->lock);
+
+        if (ua_sess->deleted) {
+            pthread_mutex_unlock(&ua_sess->lock);
+            continue;
+        }
+
+        /*
+         * Normally, this is done in the delete session process which is
+         * executed in the call rcu below. However, upon registration we can't
+         * afford to wait for the grace period before pushing data or else the
+         * data pending feature can race between the unregistration and stop
+         * command where the data pending command is sent *before* the grace
+         * period ended.
+         *
+         * The close metadata below nullifies the metadata pointer in the
+         * session so the delete session will NOT push/close a second time.
+         */
+        registry = get_session_registry(ua_sess);
+        if (registry) {
+            /* Push metadata for application before freeing the application. */
+            (void) push_metadata(registry, ua_sess->consumer);
+
+            /*
+             * Don't ask to close metadata for global per UID buffers. Close
+             * metadata only on destroy trace session in this case. Also, the
+             * previous push metadata could have flag the metadata registry to
+             * close so don't send a close command if closed.
+             */
+            if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
+                /* And ask to close it for this session registry. */
+                (void) close_metadata(registry, ua_sess->consumer);
+            }
+        }
+        /* Fixed mojibake: "&lta" had been mangled into "<a" here and below. */
+        cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
+
+        pthread_mutex_unlock(&ua_sess->lock);
+    }
+
+    /* Remove application from PID hash table */
+    ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
+    assert(!ret);
+
+    /*
+     * Remove application from notify hash table. The thread handling the
+     * notify socket could have deleted the node so ignore on error because
+     * either way it's valid. The close of that socket is handled by the other
+     * thread.
+     */
+    iter.iter.node = &lta->notify_sock_n.node;
+    (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
+
+    /*
+     * Ignore return value since the node might have been removed before by an
+     * add replace during app registration because the PID can be reassigned by
+     * the OS.
+     */
+    iter.iter.node = &lta->pid_n.node;
+    ret = lttng_ht_del(ust_app_ht, &iter);
+    if (ret) {
+        DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
+                lta->pid);
+    }
+
+    /* Free memory */
+    call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
+
+    rcu_read_unlock();
+    return;
+}
+
+/*
+ * Fill events array with all events name of all registered apps.
+ *
+ * On success, returns the number of events and stores a heap-allocated
+ * array in *events (caller must free it). On error, returns a negative
+ * errno-style value.
+ */
+int ust_app_list_events(struct lttng_event **events)
+{
+ int ret, handle;
+ size_t nbmem, count = 0;
+ struct lttng_ht_iter iter;
+ struct ust_app *app;
+ struct lttng_event *tmp_event;
+
+ /* Start with a fixed-size buffer; it is doubled below when it fills up. */
+ nbmem = UST_APP_EVENT_LIST_SIZE;
+ tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
+ if (tmp_event == NULL) {
+ PERROR("zmalloc ust app events");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ rcu_read_lock();
+
+ /* Walk every registered application in the RCU-protected hash table. */
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ struct lttng_ust_tracepoint_iter uiter;
+
+ health_code_update();
+
+ if (!app->compatible) {
+ /*
+ * TODO: In time, we should notice the caller of this error by
+ * telling him that this is a version error.
+ */
+ continue;
+ }
+ /* Serialize all use of this app's command socket. */
+ pthread_mutex_lock(&app->sock_lock);
+ handle = ustctl_tracepoint_list(app->sock);
+ if (handle < 0) {
+ /* Silently skip dead apps (EPIPE/EXITING); report anything else. */
+ if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
+ ERR("UST app list events getting handle failed for app pid %d",
+ app->pid);
+ }
+ pthread_mutex_unlock(&app->sock_lock);
+ continue;
+ }
+
+ /* Iterate the tracepoint list until UST reports no more entries. */
+ while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
+ &uiter)) != -LTTNG_UST_ERR_NOENT) {
+ /* Handle ustctl error. */
+ if (ret < 0) {
+ int release_ret;
+
+ if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+ ERR("UST app tp list get failed for app %d with ret %d",
+ app->sock, ret);
+ } else {
+ DBG3("UST app tp list get failed. Application is dead");
+ /*
+ * This is normal behavior, an application can die during the
+ * creation process. Don't report an error so the execution can
+ * continue normally. Continue normal execution.
+ */
+ break;
+ }
+ /* Unexpected error: drop the buffer and bail out entirely. */
+ free(tmp_event);
+ release_ret = ustctl_release_handle(app->sock, handle);
+ if (release_ret < 0 &&
+ release_ret != -LTTNG_UST_ERR_EXITING &&
+ release_ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
+ }
+ pthread_mutex_unlock(&app->sock_lock);
+ goto rcu_error;
+ }
+
+ health_code_update();
+ if (count >= nbmem) {
+ /* In case the realloc fails, we free the memory */
+ struct lttng_event *new_tmp_event;
+ size_t new_nbmem;
+
+ /* Grow geometrically (doubling) to amortize realloc cost. */
+ new_nbmem = nbmem << 1;
+ DBG2("Reallocating event list from %zu to %zu entries",
+ nbmem, new_nbmem);
+ new_tmp_event = realloc(tmp_event,
+ new_nbmem * sizeof(struct lttng_event));
+ if (new_tmp_event == NULL) {
+ int release_ret;
+
+ PERROR("realloc ust app events");
+ free(tmp_event);
+ ret = -ENOMEM;
+ release_ret = ustctl_release_handle(app->sock, handle);
+ if (release_ret < 0 &&
+ release_ret != -LTTNG_UST_ERR_EXITING &&
+ release_ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
+ }
+ pthread_mutex_unlock(&app->sock_lock);
+ goto rcu_error;
+ }
+ /* Zero the new memory */
+ memset(new_tmp_event + nbmem, 0,
+ (new_nbmem - nbmem) * sizeof(struct lttng_event));
+ nbmem = new_nbmem;
+ tmp_event = new_tmp_event;
+ }
+ /* Record this tracepoint as one event entry for the caller. */
+ memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
+ tmp_event[count].loglevel = uiter.loglevel;
+ tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
+ tmp_event[count].pid = app->pid;
+ tmp_event[count].enabled = -1;
+ count++;
+ }
+ /* Normal end of iteration for this app: release the list handle. */
+ ret = ustctl_release_handle(app->sock, handle);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
+ }
+ }
+
+ /* Success: hand the accumulated array and its element count back. */
+ ret = count;
+ *events = tmp_event;
+
+ DBG2("UST app list events done (%zu events)", count);
+
+rcu_error:
+ rcu_read_unlock();
+error:
+ health_code_update();
+ return ret;
+}
+
+/*
+ * Fill events array with all events name of all registered apps.
+ *
+ * On success, returns the number of event fields and stores a
+ * heap-allocated array in *fields (caller must free it). On error,
+ * returns a negative errno-style value.
+ */
+int ust_app_list_event_fields(struct lttng_event_field **fields)
+{
+    int ret, handle;
+    size_t nbmem, count = 0;
+    struct lttng_ht_iter iter;
+    struct ust_app *app;
+    struct lttng_event_field *tmp_event;
+
+    /* Start with a fixed-size buffer; doubled below when it fills up. */
+    nbmem = UST_APP_EVENT_LIST_SIZE;
+    tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
+    if (tmp_event == NULL) {
+        PERROR("zmalloc ust app event fields");
+        ret = -ENOMEM;
+        goto error;
+    }
+
+    rcu_read_lock();
+
+    /* Walk every registered application in the RCU-protected hash table. */
+    cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+        struct lttng_ust_field_iter uiter;
+
+        health_code_update();
+
+        if (!app->compatible) {
+            /*
+             * TODO: In time, we should notice the caller of this error by
+             * telling him that this is a version error.
+             */
+            continue;
+        }
+        /* Serialize all use of this app's command socket. */
+        pthread_mutex_lock(&app->sock_lock);
+        handle = ustctl_tracepoint_field_list(app->sock);
+        if (handle < 0) {
+            /* Silently skip dead apps; report anything else. */
+            if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
+                ERR("UST app list field getting handle failed for app pid %d",
+                        app->pid);
+            }
+            pthread_mutex_unlock(&app->sock_lock);
+            continue;
+        }
+
+        /* Iterate the field list until UST reports no more entries. */
+        while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
+                &uiter)) != -LTTNG_UST_ERR_NOENT) {
+            /* Handle ustctl error. */
+            if (ret < 0) {
+                int release_ret;
+
+                if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+                    ERR("UST app tp list field failed for app %d with ret %d",
+                            app->sock, ret);
+                } else {
+                    DBG3("UST app tp list field failed. Application is dead");
+                    /*
+                     * This is normal behavior, an application can die during the
+                     * creation process. Don't report an error so the execution can
+                     * continue normally. Reset list and count for next app.
+                     */
+                    break;
+                }
+                free(tmp_event);
+                release_ret = ustctl_release_handle(app->sock, handle);
+                pthread_mutex_unlock(&app->sock_lock);
+                if (release_ret < 0 &&
+                        release_ret != -LTTNG_UST_ERR_EXITING &&
+                        release_ret != -EPIPE) {
+                    ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
+                }
+                goto rcu_error;
+            }
+
+            health_code_update();
+            if (count >= nbmem) {
+                /* In case the realloc fails, we free the memory */
+                struct lttng_event_field *new_tmp_event;
+                size_t new_nbmem;
+
+                /* Grow geometrically (doubling) to amortize realloc cost. */
+                new_nbmem = nbmem << 1;
+                DBG2("Reallocating event field list from %zu to %zu entries",
+                        nbmem, new_nbmem);
+                new_tmp_event = realloc(tmp_event,
+                        new_nbmem * sizeof(struct lttng_event_field));
+                if (new_tmp_event == NULL) {
+                    int release_ret;
+
+                    PERROR("realloc ust app event fields");
+                    free(tmp_event);
+                    ret = -ENOMEM;
+                    release_ret = ustctl_release_handle(app->sock, handle);
+                    pthread_mutex_unlock(&app->sock_lock);
+                    /*
+                     * Fixed: test release_ret < 0 instead of mere non-zero,
+                     * matching every other release error check in this file;
+                     * a positive return is not an error.
+                     */
+                    if (release_ret < 0 &&
+                            release_ret != -LTTNG_UST_ERR_EXITING &&
+                            release_ret != -EPIPE) {
+                        ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
+                    }
+                    goto rcu_error;
+                }
+                /* Zero the new memory */
+                memset(new_tmp_event + nbmem, 0,
+                        (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
+                nbmem = new_nbmem;
+                tmp_event = new_tmp_event;
+            }
+
+            /* Record this field plus its owning event for the caller. */
+            memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
+            /* Mapping between these enums matches 1 to 1. */
+            tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
+            tmp_event[count].nowrite = uiter.nowrite;
+
+            memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
+            tmp_event[count].event.loglevel = uiter.loglevel;
+            tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
+            tmp_event[count].event.pid = app->pid;
+            tmp_event[count].event.enabled = -1;
+            count++;
+        }
+        /* Normal end of iteration for this app: release the list handle. */
+        ret = ustctl_release_handle(app->sock, handle);
+        pthread_mutex_unlock(&app->sock_lock);
+        if (ret < 0 &&
+                ret != -LTTNG_UST_ERR_EXITING &&
+                ret != -EPIPE) {
+            ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
+        }
+    }
+
+    /* Success: hand the accumulated array and its element count back. */
+    ret = count;
+    *fields = tmp_event;
+
+    DBG2("UST app list event fields done (%zu events)", count);
+
+rcu_error:
+    rcu_read_unlock();
+error:
+    health_code_update();
+    return ret;
+}
+
+/*
+ * Free and clean all traceable apps of the global list.
+ *
+ * Should _NOT_ be called with RCU read-side lock held.
+ */
+void ust_app_clean_list(void)
+{
+    int ret;
+    struct ust_app *app;
+    struct lttng_ht_iter iter;
+
+    DBG2("UST app cleaning registered apps hash table");
+
+    rcu_read_lock();
+
+    /* Empty the PID table; each app is reclaimed after a grace period. */
+    if (ust_app_ht) {
+        cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+            ret = lttng_ht_del(ust_app_ht, &iter);
+            assert(ret == 0);
+            call_rcu(&app->pid_n.head, delete_ust_app_rcu);
+        }
+    }
+
+    /* Empty the socket table; nodes only, apps freed via the PID table. */
+    if (ust_app_ht_by_sock) {
+        cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
+                sock_n.node) {
+            ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
+            assert(ret == 0);
+        }
+    }
+
+    /* Empty the notify socket table the same way. */
+    if (ust_app_ht_by_notify_sock) {
+        cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
+                notify_sock_n.node) {
+            ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
+            assert(ret == 0);
+        }
+    }
+    rcu_read_unlock();
+
+    /* Destroy is done only when the ht is empty */
+    if (ust_app_ht) {
+        ht_cleanup_push(ust_app_ht);
+    }
+    if (ust_app_ht_by_sock) {
+        ht_cleanup_push(ust_app_ht_by_sock);
+    }
+    if (ust_app_ht_by_notify_sock) {
+        ht_cleanup_push(ust_app_ht_by_notify_sock);
+    }
+}
+
+/*
+ * Init UST app hash table.
+ *
+ * Returns 0 on success, -1 if any table allocation fails.
+ */
+int ust_app_ht_alloc(void)
+{
+    /* Allocate each global table in turn, stopping at the first failure. */
+    ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+    if (ust_app_ht == NULL) {
+        return -1;
+    }
+
+    ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+    if (ust_app_ht_by_sock == NULL) {
+        return -1;
+    }
+
+    ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+    if (ust_app_ht_by_notify_sock == NULL) {
+        return -1;
+    }
+
+    return 0;
+}
+
+/*
+ * For a specific UST session, disable the channel for all registered apps.
+ */
+int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
+        struct ltt_ust_channel *uchan)
+{
+    int ret = 0;
+    struct lttng_ht_iter iter;
+    struct lttng_ht_node_str *chan_node;
+    struct ust_app *app;
+    struct ust_app_session *ua_sess;
+    struct ust_app_channel *ua_chan;
+
+    if (usess == NULL || uchan == NULL) {
+        ERR("Disabling UST global channel with NULL values");
+        ret = -1;
+        goto error;
+    }
+
+    DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
+            uchan->name, usess->id);
+
+    rcu_read_lock();
+
+    /* For every registered applications */
+    cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+        struct lttng_ht_iter uiter;
+
+        if (!app->compatible) {
+            /*
+             * TODO: In time, we should notice the caller of this error by
+             * telling him that this is a version error.
+             */
+            continue;
+        }
+        /* Apps without a session for this trace session are untouched. */
+        ua_sess = lookup_session_by_app(usess, app);
+        if (ua_sess == NULL) {
+            continue;
+        }
+
+        /* Get channel */
+        lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
+        chan_node = lttng_ht_iter_get_node_str(&uiter);
+        /* If the session if found for the app, the channel must be there */
+        assert(chan_node);
+
+        ua_chan = caa_container_of(chan_node, struct ust_app_channel, node);
+        /* The channel must not be already disabled */
+        assert(ua_chan->enabled == 1);
+
+        /* Disable channel onto application */
+        ret = disable_ust_app_channel(ua_sess, ua_chan, app);
+        if (ret < 0) {
+            /* XXX: We might want to report this error at some point... */
+            continue;
+        }
+    }
+
+    rcu_read_unlock();
+
+error:
+    return ret;
+}
+
+/*
+ * For a specific UST session, enable the channel for all registered apps.
+ */
+int ust_app_enable_channel_glb(struct ltt_ust_session *usess,