+ case USTCTL_NOTIFY_CMD_ENUM:
+ {
+ int sobjd;
+ char name[LTTNG_UST_SYM_NAME_LEN];
+ size_t nr_entries;
+ struct ustctl_enum_entry *entries;
+
+ DBG2("UST app ustctl register enum received");
+
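+ /* Receive the enum's object descriptor, name and entries from the app. */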
+ ret = ustctl_recv_register_enum(sock, &sobjd, name,
+ &entries, &nr_entries);
+ if (ret < 0) {
+ if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+ ERR("UST app recv enum failed with ret %d", ret);
+ } else {
+ DBG3("UST app recv enum failed. Application died");
+ }
+ goto error;
+ }
+
+ /* Callee assumes ownership of entries. */
+ ret = add_enum_ust_registry(sock, sobjd, name,
+ entries, nr_entries);
+ if (ret < 0) {
+ goto error;
+ }
+
+ break;
+ }
+ default:
+ /* Should NEVER happen. */
+ assert(0);
+ }
+
+error:
+ return ret;
+}
+
+/*
+ * Once the notify socket hangs up, this is called. First, it tries to find the
+ * corresponding application. On failure, the call_rcu to close the socket is
+ * executed. If an application is found, it tries to delete it from the notify
+ * socket hash table. Whathever the result, it proceeds to the call_rcu.
+ *
+ * Note that an object needs to be allocated here so on ENOMEM failure, the
+ * call RCU is not done but the rest of the cleanup is.
+ */
+void ust_app_notify_sock_unregister(int sock)
+{
+ int err_enomem = 0;
+ struct lttng_ht_iter iter;
+ struct ust_app *app;
+ struct ust_app_notify_sock_obj *obj;
+
+ assert(sock >= 0);
+
+ rcu_read_lock();
+
+ obj = zmalloc(sizeof(*obj));
+ if (!obj) {
+ /*
+ * An ENOMEM is kind of uncool. If this strikes, we continue the
+ * procedure but the call_rcu will not be done. In this case, we
+ * accept the fd leak rather than possibly creating an unsynchronized
+ * state between threads.
+ *
+ * TODO: The notify object should be created once the notify socket is
+ * registered and stored independently from the ust app object. The
+ * tricky part is to synchronize the teardown of the application with
+ * that of this notify object. Let's keep that in mind so we can avoid
+ * this kind of shenanigans with ENOMEM in the teardown path.
+ */
+ err_enomem = 1;
+ } else {
+ obj->fd = sock;
+ }
+
+ DBG("UST app notify socket unregister %d", sock);
+
+ /*
+ * Lookup application by notify socket. If this fails, this means that the
+ * hash table delete has already been done by the application
+ * unregistration process so we can safely close the notify socket in a
+ * call RCU.
+ */
+ app = find_app_by_notify_sock(sock);
+ if (!app) {
+ goto close_socket;
+ }
+
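+ /*
+ * Point the iterator directly at the app's notify socket node so
+ * lttng_ht_del() can remove it without a prior lookup.
+ */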
+ iter.iter.node = &app->notify_sock_n.node;
+
+ /*
+ * Whether the deletion below succeeds or fails, we must close the
+ * socket after a grace period, hence the call_rcu. If the deletion is
+ * successful, the application is no longer visible to other threads;
+ * if it fails, it means the node was already removed from the hash
+ * table. Either way, we just have to close the socket.
+ */
+ */
+ (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
+
+close_socket:
+ rcu_read_unlock();
+
+ /*
+ * Close the socket after a grace period to avoid it being reused
+ * before the application object is freed, which would create a
+ * potential race between threads trying to add a unique node to the
+ * global hash table.
+ */
+ if (!err_enomem) {
+ call_rcu(&obj->head, close_notify_sock_rcu);
+ }
+}
+
+/*
+ * Destroy a ust app data structure and free its memory.
+ */
+void ust_app_destroy(struct ust_app *app)
+{
+ if (!app) {
+ return;
+ }
+
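+ /*
+ * Defer the teardown to after an RCU grace period so that concurrent
+ * readers still holding a reference can finish safely.
+ */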
+ call_rcu(&app->pid_n.head, delete_ust_app_rcu);
+}
+
+/*
+ * Take a snapshot for a given UST session. The snapshot is sent to the given
+ * output.
+ *
+ * Return 0 on success or else a negative value.
+ */
+int ust_app_snapshot_record(struct ltt_ust_session *usess,
+ struct snapshot_output *output, int wait,
+ uint64_t nb_packets_per_stream)
+{
+ int ret = 0;
+ struct lttng_ht_iter iter;
+ struct ust_app *app;
+ char pathname[PATH_MAX];
+
+ assert(usess);
+ assert(output);
+
+ rcu_read_lock();
+
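+ /*
+ * Buffers are grouped either per UID or per PID; walk the matching
+ * registries and snapshot each of their channels.
+ */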
+ switch (usess->buffer_type) {
+ case LTTNG_BUFFER_PER_UID:
+ {
+ struct buffer_reg_uid *reg;
+
+ cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+ struct buffer_reg_channel *reg_chan;
+ struct consumer_socket *socket;
+
+ /* Get the consumer socket to use for the snapshot commands. */
+ socket = consumer_find_socket_by_bitness(reg->bits_per_long,
+ usess->consumer);
+ if (!socket) {
+ ret = -EINVAL;
+ goto error;
+ }
+
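+ /* Add the UST default trace dir to path. */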
+ memset(pathname, 0, sizeof(pathname));
+ ret = snprintf(pathname, sizeof(pathname),
+ DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
+ reg->uid, reg->bits_per_long);
+ if (ret < 0) {
+ PERROR("snprintf snapshot path");
+ goto error;
+ }
+
+ /* Snapshot all channels of this UID's buffer registry. */
+ cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
+ reg_chan, node.node) {
+ ret = consumer_snapshot_channel(socket, reg_chan->consumer_key,
+ output, 0, usess->uid, usess->gid, pathname, wait,
+ nb_packets_per_stream);
+ if (ret < 0) {
+ goto error;
+ }
+ }
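+ /* Also snapshot the metadata channel of this registry. */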
+ ret = consumer_snapshot_channel(socket,
+ reg->registry->reg.ust->metadata_key, output, 1,
+ usess->uid, usess->gid, pathname, wait, 0);
+ if (ret < 0) {
+ goto error;
+ }
+ }
+ break;
+ }
+ case LTTNG_BUFFER_PER_PID:
+ {
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ struct consumer_socket *socket;
+ struct lttng_ht_iter chan_iter;
+ struct ust_app_channel *ua_chan;
+ struct ust_app_session *ua_sess;
+ struct ust_registry_session *registry;
+
+ ua_sess = lookup_session_by_app(usess, app);
+ if (!ua_sess) {
+ /* Session not associated with this app. */
+ continue;
+ }
+
+ /* Get the right consumer socket for the application. */
+ socket = consumer_find_socket_by_bitness(app->bits_per_long,
+ output->consumer);
+ if (!socket) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* Add the UST default trace dir to path. */
+ memset(pathname, 0, sizeof(pathname));
+ ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
+ ua_sess->path);
+ if (ret < 0) {
+ PERROR("snprintf snapshot path");
+ goto error;
+ }
+
+ cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
+ ua_chan, node.node) {
+ ret = consumer_snapshot_channel(socket, ua_chan->key, output,
+ 0, ua_sess->euid, ua_sess->egid, pathname, wait,
+ nb_packets_per_stream);
+ if (ret < 0) {
+ goto error;
+ }
+ }
+
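+ /* The session registry holds the key of the metadata channel. */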
+ registry = get_session_registry(ua_sess);
+ if (!registry) {
+ DBG("Application session is being torn down. Abort snapshot record.");
+ ret = -1;
+ goto error;
+ }
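+ /* Also snapshot the application's metadata channel. */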
+ ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
+ 1, ua_sess->euid, ua_sess->egid, pathname, wait, 0);
+ if (ret < 0) {
+ goto error;
+ }
+ }
+ break;
+ }
+ default:
+ assert(0);
+ break;
+ }
+
+error:
+ rcu_read_unlock();
+ return ret;
+}
+
+/*
+ * Return the size taken by one more packet per stream, summed over all
+ * channels of the session. Channels whose packets are already all
+ * accounted for by cur_nr_packets are skipped.
+ */
+uint64_t ust_app_get_size_one_more_packet_per_stream(struct ltt_ust_session *usess,
+ uint64_t cur_nr_packets)
+{
+ uint64_t tot_size = 0;
+ struct ust_app *app;
+ struct lttng_ht_iter iter;
+
+ assert(usess);
+
+ switch (usess->buffer_type) {
+ case LTTNG_BUFFER_PER_UID:
+ {
+ struct buffer_reg_uid *reg;
+
+ cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+ struct buffer_reg_channel *reg_chan;
+
+ rcu_read_lock();
+ cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
+ reg_chan, node.node) {
+ if (cur_nr_packets >= reg_chan->num_subbuf) {
+ /*
+ * Don't take the channel into account if we have
+ * already grabbed all of its packets.
+ */
+ continue;
+ }
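+ /* One more packet per stream costs one sub-buffer per stream. */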
+ tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
+ }
+ rcu_read_unlock();
+ }
+ break;
+ }
+ case LTTNG_BUFFER_PER_PID:
+ {
+ rcu_read_lock();
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ struct ust_app_channel *ua_chan;
+ struct ust_app_session *ua_sess;
+ struct lttng_ht_iter chan_iter;
+
+ ua_sess = lookup_session_by_app(usess, app);
+ if (!ua_sess) {
+ /* Session not associated with this app. */
+ continue;
+ }
+
+ cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
+ ua_chan, node.node) {
+ if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
+ /*
+ * Don't take the channel into account if we have
+ * already grabbed all of its packets.
+ */
+ continue;
+ }
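+ /* Same cost accounting as the per-UID case above. */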
+ tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
+ }
+ }
+ rcu_read_unlock();
+ break;
+ }