X-Git-Url: https://git.lttng.org/?p=lttng-tools.git;a=blobdiff_plain;f=src%2Fbin%2Flttng-sessiond%2Fust-app.c;h=b30089dbc2fd6029f590adf8c05752ddda2d3225;hp=8c314ec1c9688d5df77c1eefc8fa047988e0d474;hb=da8734126339713603c25799dcd1dd72726730de;hpb=88ebf5a7a2c9b4d56953a6daa66715e4043f049c diff --git a/src/bin/lttng-sessiond/ust-app.c b/src/bin/lttng-sessiond/ust-app.c index 8c314ec1c..b30089dbc 100644 --- a/src/bin/lttng-sessiond/ust-app.c +++ b/src/bin/lttng-sessiond/ust-app.c @@ -7,7 +7,6 @@ */ #define _LGPL_SOURCE -#include #include #include #include @@ -19,6 +18,7 @@ #include #include +#include #include #include @@ -245,7 +245,7 @@ static struct ust_registry_session *get_session_registry( { struct buffer_reg_uid *reg_uid = buffer_reg_uid_find( ua_sess->tracing_id, ua_sess->bits_per_long, - ua_sess->real_credentials.uid); + lttng_credentials_get_uid(&ua_sess->real_credentials)); if (!reg_uid) { goto error; } @@ -921,6 +921,28 @@ void delete_ust_app(struct ust_app *app) ht_cleanup_push(app->ust_sessions_objd); ht_cleanup_push(app->ust_objd); + /* + * This could be NULL if the event notifier setup failed (e.g the app + * was killed or the tracer does not support this feature). + */ + if (app->event_notifier_group.object) { + enum lttng_error_code ret_code; + const int event_notifier_read_fd = lttng_pipe_get_readfd( + app->event_notifier_group.event_pipe); + + ret_code = notification_thread_command_remove_tracer_event_source( + notification_thread_handle, + event_notifier_read_fd); + if (ret_code != LTTNG_OK) { + ERR("Failed to remove application tracer event source from notification thread"); + } + + ustctl_release_object(sock, app->event_notifier_group.object); + free(app->event_notifier_group.object); + } + + lttng_pipe_destroy(app->event_notifier_group.event_pipe); + /* * Wait until we have deleted the application from the sock hash table * before closing this socket, otherwise an application could re-use the @@ -1143,7 +1165,7 @@ struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx) if (uctx) { memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx)); if (uctx->ctx == LTTNG_UST_CONTEXT_APP_CONTEXT) { - char *provider_name = NULL, *ctx_name = NULL; + char *provider_name = NULL, *ctx_name = NULL; provider_name = strdup(uctx->u.app_ctx.provider_name); ctx_name = strdup(uctx->u.app_ctx.ctx_name); @@ -1194,7 +1216,7 @@ error: * Return allocated filter or NULL on error. */ static struct lttng_ust_filter_bytecode *create_ust_bytecode_from_bytecode( - struct lttng_filter_bytecode *orig_f) + const struct lttng_filter_bytecode *orig_f) { struct lttng_ust_filter_bytecode *filter = NULL; @@ -1343,33 +1365,28 @@ error: /* * Set the filter on the tracer. 
*/ -static -int set_ust_event_filter(struct ust_app_event *ua_event, - struct ust_app *app) +static int set_ust_object_filter(struct ust_app *app, + const struct lttng_filter_bytecode *bytecode, + struct lttng_ust_object_data *ust_object) { int ret; struct lttng_ust_filter_bytecode *ust_bytecode = NULL; health_code_update(); - if (!ua_event->filter) { - ret = 0; - goto error; - } - - ust_bytecode = create_ust_bytecode_from_bytecode(ua_event->filter); + ust_bytecode = create_ust_bytecode_from_bytecode(bytecode); if (!ust_bytecode) { ret = -LTTNG_ERR_NOMEM; goto error; } pthread_mutex_lock(&app->sock_lock); ret = ustctl_set_filter(app->sock, ust_bytecode, - ua_event->obj); + ust_object); pthread_mutex_unlock(&app->sock_lock); if (ret < 0) { if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) { - ERR("UST app event %s filter failed for app (pid: %d) " - "with ret %d", ua_event->attr.name, app->pid, ret); + ERR("UST app set object filter failed for object %p of app (pid: %d) " + "with ret %d", ust_object, app->pid, ret); } else { /* * This is normal behavior, an application can die during the @@ -1377,12 +1394,12 @@ int set_ust_event_filter(struct ust_app_event *ua_event, * continue normally. */ ret = 0; - DBG3("UST app filter event failed. Application is dead."); + DBG3("Failed to set UST app object filter. Application is dead."); } goto error; } - DBG2("UST filter set successfully for event %s", ua_event->name); + DBG2("UST filter successfully set for object %p", ust_object); error: health_code_update(); @@ -1392,7 +1409,7 @@ error: static struct lttng_ust_event_exclusion *create_ust_exclusion_from_exclusion( - struct lttng_event_exclusion *exclusion) + const struct lttng_event_exclusion *exclusion) { struct lttng_ust_event_exclusion *ust_exclusion = NULL; size_t exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) + @@ -1414,33 +1431,30 @@ end: /* * Set event exclusions on the tracer. */ -static -int set_ust_event_exclusion(struct ust_app_event *ua_event, - struct ust_app *app) +static int set_ust_object_exclusions(struct ust_app *app, + const struct lttng_event_exclusion *exclusions, + struct lttng_ust_object_data *ust_object) { int ret; - struct lttng_ust_event_exclusion *ust_exclusion = NULL; + struct lttng_ust_event_exclusion *ust_exclusions = NULL; - health_code_update(); + assert(exclusions && exclusions->count > 0); - if (!ua_event->exclusion || !ua_event->exclusion->count) { - ret = 0; - goto error; - } + health_code_update(); - ust_exclusion = create_ust_exclusion_from_exclusion( - ua_event->exclusion); - if (!ust_exclusion) { + ust_exclusions = create_ust_exclusion_from_exclusion( + exclusions); + if (!ust_exclusions) { ret = -LTTNG_ERR_NOMEM; goto error; } pthread_mutex_lock(&app->sock_lock); - ret = ustctl_set_exclusion(app->sock, ust_exclusion, ua_event->obj); + ret = ustctl_set_exclusion(app->sock, ust_exclusions, ust_object); pthread_mutex_unlock(&app->sock_lock); if (ret < 0) { if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) { - ERR("UST app event %s exclusions failed for app (pid: %d) " - "with ret %d", ua_event->attr.name, app->pid, ret); + ERR("Failed to set UST app exclusions for object %p of app (pid: %d) " + "with ret %d", ust_object, app->pid, ret); } else { /* * This is normal behavior, an application can die during the @@ -1448,37 +1462,36 @@ int set_ust_event_exclusion(struct ust_app_event *ua_event, * continue normally. */ ret = 0; - DBG3("UST app event exclusion failed. Application is dead."); + DBG3("Failed to set UST app object exclusions. 
Application is dead."); } goto error; } - DBG2("UST exclusion set successfully for event %s", ua_event->name); + DBG2("UST exclusions set successfully for object %p", ust_object); error: health_code_update(); - free(ust_exclusion); + free(ust_exclusions); return ret; } /* * Disable the specified event on to UST tracer for the UST session. */ -static int disable_ust_event(struct ust_app *app, - struct ust_app_session *ua_sess, struct ust_app_event *ua_event) +static int disable_ust_object(struct ust_app *app, + struct lttng_ust_object_data *object) { int ret; health_code_update(); pthread_mutex_lock(&app->sock_lock); - ret = ustctl_disable(app->sock, ua_event->obj); + ret = ustctl_disable(app->sock, object); pthread_mutex_unlock(&app->sock_lock); if (ret < 0) { if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) { - ERR("UST app event %s disable failed for app (pid: %d) " - "and session handle %d with ret %d", - ua_event->attr.name, app->pid, ua_sess->handle, ret); + ERR("Failed to disable UST app object %p app (pid: %d) with ret %d", + object, app->pid, ret); } else { /* * This is normal behavior, an application can die during the @@ -1486,13 +1499,13 @@ static int disable_ust_event(struct ust_app *app, * continue normally. */ ret = 0; - DBG3("UST app disable event failed. Application is dead."); + DBG3("Failed to disable UST app object. Application is dead."); } goto error; } - DBG2("UST app event %s disabled successfully for app (pid: %d)", - ua_event->attr.name, app->pid); + DBG2("UST app object %p disabled successfully for app (pid: %d)", + object, app->pid); error: health_code_update(); @@ -1580,21 +1593,20 @@ error: /* * Enable the specified event on to UST tracer for the UST session. */ -static int enable_ust_event(struct ust_app *app, - struct ust_app_session *ua_sess, struct ust_app_event *ua_event) +static int enable_ust_object( + struct ust_app *app, struct lttng_ust_object_data *ust_object) { int ret; health_code_update(); pthread_mutex_lock(&app->sock_lock); - ret = ustctl_enable(app->sock, ua_event->obj); + ret = ustctl_enable(app->sock, ust_object); pthread_mutex_unlock(&app->sock_lock); if (ret < 0) { if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) { - ERR("UST app event %s enable failed for app (pid: %d) " - "and session handle %d with ret %d", - ua_event->attr.name, app->pid, ua_sess->handle, ret); + ERR("UST app enable failed for object %p app (pid: %d) with ret %d", + ust_object, app->pid, ret); } else { /* * This is normal behavior, an application can die during the @@ -1602,13 +1614,13 @@ static int enable_ust_event(struct ust_app *app, * continue normally. */ ret = 0; - DBG3("UST app enable event failed. Application is dead."); + DBG3("Failed to enable UST app object. Application is dead."); } goto error; } - DBG2("UST app event %s enabled successfully for app (pid: %d)", - ua_event->attr.name, app->pid); + DBG2("UST app object %p enabled successfully for app (pid: %d)", + ust_object, app->pid); error: health_code_update(); @@ -1704,14 +1716,14 @@ int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess, ua_event->handle = ua_event->obj->handle; - DBG2("UST app event %s created successfully for pid:%d", - ua_event->attr.name, app->pid); + DBG2("UST app event %s created successfully for pid:%d object: %p", + ua_event->attr.name, app->pid, ua_event->obj); health_code_update(); /* Set filter if one is present. 
*/ if (ua_event->filter) { - ret = set_ust_event_filter(ua_event, app); + ret = set_ust_object_filter(app, ua_event->filter, ua_event->obj); if (ret < 0) { goto error; } @@ -1719,7 +1731,7 @@ int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess, /* Set exclusions for the event */ if (ua_event->exclusion) { - ret = set_ust_event_exclusion(ua_event, app); + ret = set_ust_object_exclusions(app, ua_event->exclusion, ua_event->obj); if (ret < 0) { goto error; } @@ -1731,7 +1743,7 @@ int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess, * We now need to explicitly enable the event, since it * is now disabled at creation. */ - ret = enable_ust_event(app, ua_sess, ua_event); + ret = enable_ust_object(app, ua_event->obj); if (ret < 0) { /* * If we hit an EPERM, something is wrong with our enable call. If @@ -1847,10 +1859,10 @@ static void shadow_copy_session(struct ust_app_session *ua_sess, ua_sess->tracing_id = usess->id; ua_sess->id = get_next_session_id(); - ua_sess->real_credentials.uid = app->uid; - ua_sess->real_credentials.gid = app->gid; - ua_sess->effective_credentials.uid = usess->uid; - ua_sess->effective_credentials.gid = usess->gid; + LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid); + LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid); + LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid); + LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid); ua_sess->buffer_type = usess->buffer_type; ua_sess->bits_per_long = app->bits_per_long; @@ -1872,7 +1884,7 @@ static void shadow_copy_session(struct ust_app_session *ua_sess, case LTTNG_BUFFER_PER_UID: ret = snprintf(ua_sess->path, sizeof(ua_sess->path), DEFAULT_UST_TRACE_UID_PATH, - ua_sess->real_credentials.uid, + lttng_credentials_get_uid(&ua_sess->real_credentials), app->bits_per_long); break; default: @@ -1995,8 +2007,9 @@ static int setup_buffer_reg_pid(struct ust_app_session *ua_sess, app->uint64_t_alignment, app->long_alignment, app->byte_order, app->version.major, app->version.minor, reg_pid->root_shm_path, reg_pid->shm_path, - ua_sess->effective_credentials.uid, - ua_sess->effective_credentials.gid, ua_sess->tracing_id, + lttng_credentials_get_uid(&ua_sess->effective_credentials), + lttng_credentials_get_gid(&ua_sess->effective_credentials), + ua_sess->tracing_id, app->uid); if (ret < 0) { /* @@ -2296,7 +2309,7 @@ end: */ static int create_ust_app_channel_context(struct ust_app_channel *ua_chan, - struct lttng_ust_context_attr *uctx, + struct lttng_ust_context_attr *uctx, struct ust_app *app) { int ret = 0; @@ -2341,7 +2354,7 @@ int enable_ust_app_event(struct ust_app_session *ua_sess, { int ret; - ret = enable_ust_event(app, ua_sess, ua_event); + ret = enable_ust_object(app, ua_event->obj); if (ret < 0) { goto error; } @@ -2360,7 +2373,7 @@ static int disable_ust_app_event(struct ust_app_session *ua_sess, { int ret; - ret = disable_ust_event(app, ua_sess, ua_event); + ret = disable_ust_object(app, ua_event->obj); if (ret < 0) { goto error; } @@ -2887,8 +2900,9 @@ static int create_channel_per_uid(struct ust_app *app, notification_ret = notification_thread_command_add_channel( notification_thread_handle, session->name, - ua_sess->effective_credentials.uid, - ua_sess->effective_credentials.gid, ua_chan->name, + lttng_credentials_get_uid(&ua_sess->effective_credentials), + lttng_credentials_get_gid(&ua_sess->effective_credentials), + ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST, ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf); if 
(notification_ret != LTTNG_OK) { @@ -2987,8 +3001,9 @@ static int create_channel_per_pid(struct ust_app *app, cmd_ret = notification_thread_command_add_channel( notification_thread_handle, session->name, - ua_sess->effective_credentials.uid, - ua_sess->effective_credentials.gid, ua_chan->name, + lttng_credentials_get_uid(&ua_sess->effective_credentials), + lttng_credentials_get_gid(&ua_sess->effective_credentials), + ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST, ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf); if (cmd_ret != LTTNG_OK) { @@ -3319,6 +3334,7 @@ error: struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock) { struct ust_app *lta = NULL; + struct lttng_pipe *event_notifier_event_source_pipe = NULL; assert(msg); assert(sock >= 0); @@ -3335,12 +3351,21 @@ struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock) goto error; } + event_notifier_event_source_pipe = lttng_pipe_open(FD_CLOEXEC); + if (!event_notifier_event_source_pipe) { + PERROR("Failed to open application event source pipe: '%s' (ppid = %d)", + msg->name, msg->ppid); + goto error; + } + lta = zmalloc(sizeof(struct ust_app)); if (lta == NULL) { PERROR("malloc"); - goto error; + goto error_free_pipe; } + lta->event_notifier_group.event_pipe = event_notifier_event_source_pipe; + lta->ppid = msg->ppid; lta->uid = msg->uid; lta->gid = msg->gid; @@ -3378,8 +3403,12 @@ struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock) lttng_ht_node_init_ulong(<a->sock_n, (unsigned long) lta->sock); CDS_INIT_LIST_HEAD(<a->teardown_head); -error: return lta; + +error_free_pipe: + lttng_pipe_destroy(event_notifier_event_source_pipe); +error: + return NULL; } /* @@ -3445,6 +3474,61 @@ int ust_app_version(struct ust_app *app) return ret; } +/* + * Setup the base event notifier group. + * + * Return 0 on success else a negative value either an errno code or a + * LTTng-UST error code. + */ +int ust_app_setup_event_notifier_group(struct ust_app *app) +{ + int ret; + int event_pipe_write_fd; + struct lttng_ust_object_data *event_notifier_group = NULL; + enum lttng_error_code lttng_ret; + + assert(app); + + /* Get the write side of the pipe. */ + event_pipe_write_fd = lttng_pipe_get_writefd( + app->event_notifier_group.event_pipe); + + pthread_mutex_lock(&app->sock_lock); + ret = ustctl_create_event_notifier_group(app->sock, + event_pipe_write_fd, &event_notifier_group); + pthread_mutex_unlock(&app->sock_lock); + if (ret < 0) { + if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) { + ERR("Failed to create application event notifier group: ret = %d, app socket fd = %d, event_pipe_write_fd = %d", + ret, app->sock, event_pipe_write_fd); + } else { + DBG("Failed to create application event notifier group (application is dead): app socket fd = %d", + app->sock); + } + + goto error; + } + + lttng_ret = notification_thread_command_add_tracer_event_source( + notification_thread_handle, + lttng_pipe_get_readfd(app->event_notifier_group.event_pipe), + LTTNG_DOMAIN_UST); + if (lttng_ret != LTTNG_OK) { + ERR("Failed to add tracer event source to notification thread"); + ret = - 1; + goto error; + } + + /* Assign handle only when the complete setup is valid. */ + app->event_notifier_group.object = event_notifier_group; + return ret; + +error: + ustctl_release_object(app->sock, app->event_notifier_group.object); + free(app->event_notifier_group.object); + return ret; +} + /* * Unregister app by removing it from the global traceable app list and freeing * the data struct. 
@@ -4356,15 +4440,6 @@ int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app) goto skip_setup; } - /* - * Create the metadata for the application. This returns gracefully if a - * metadata was already set for the session. - */ - ret = create_ust_app_metadata(ua_sess, app, usess->consumer); - if (ret < 0) { - goto error_unlock; - } - health_code_update(); skip_setup: @@ -5032,6 +5107,7 @@ void ust_app_synchronize(struct ltt_ust_session *usess, } rcu_read_lock(); + cds_lfht_for_each_entry(usess->domain_global.channels->ht, &uchan_iter, uchan, node.node) { struct ust_app_channel *ua_chan; @@ -5045,7 +5121,7 @@ void ust_app_synchronize(struct ltt_ust_session *usess, * allocated (if necessary) and sent to the application, and * all enabled contexts will be added to the channel. */ - ret = find_or_create_ust_app_channel(usess, ua_sess, + ret = find_or_create_ust_app_channel(usess, ua_sess, app, uchan, &ua_chan); if (ret) { /* Tracer is probably gone or ENOMEM. */ @@ -5075,6 +5151,21 @@ void ust_app_synchronize(struct ltt_ust_session *usess, } } } + + /* + * Create the metadata for the application. This returns gracefully if a + * metadata was already set for the session. + * + * The metadata channel must be created after the data channels as the + * consumer daemon assumes this ordering. When interacting with a relay + * daemon, the consumer will use this assumption to send the + * "STREAMS_SENT" message to the relay daemon. + */ + ret = create_ust_app_metadata(ua_sess, app, usess->consumer); + if (ret < 0) { + goto error_unlock; + } + rcu_read_unlock(); end: @@ -5897,7 +5988,7 @@ enum lttng_error_code ust_app_snapshot_record( status = LTTNG_ERR_INVALID; goto error; } - /* Add the UST default trace dir to path. */ + /* Add the UST default trace dir to path. 
*/ cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter, reg_chan, node.node) { status = consumer_snapshot_channel(socket, @@ -5961,14 +6052,12 @@ enum lttng_error_code ust_app_snapshot_record( status = LTTNG_ERR_INVALID; goto error; } - cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter, + cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter, ua_chan, node.node) { status = consumer_snapshot_channel(socket, ua_chan->key, output, 0, - ua_sess->effective_credentials - .uid, - ua_sess->effective_credentials - .gid, + lttng_credentials_get_uid(&ua_sess->effective_credentials), + lttng_credentials_get_gid(&ua_sess->effective_credentials), &trace_path[consumer_path_offset], wait, nb_packets_per_stream); switch (status) { @@ -5988,8 +6077,8 @@ enum lttng_error_code ust_app_snapshot_record( } status = consumer_snapshot_channel(socket, registry->metadata_key, output, 1, - ua_sess->effective_credentials.uid, - ua_sess->effective_credentials.gid, + lttng_credentials_get_uid(&ua_sess->effective_credentials), + lttng_credentials_get_gid(&ua_sess->effective_credentials), &trace_path[consumer_path_offset], wait, 0); switch (status) { case LTTNG_OK: @@ -6345,10 +6434,8 @@ enum lttng_error_code ust_app_rotate_session(struct ltt_session *session) ua_chan, node.node) { ret = consumer_rotate_channel(socket, ua_chan->key, - ua_sess->effective_credentials - .uid, - ua_sess->effective_credentials - .gid, + lttng_credentials_get_uid(&ua_sess->effective_credentials), + lttng_credentials_get_gid(&ua_sess->effective_credentials), ua_sess->consumer, /* is_metadata_channel */ false); if (ret < 0) { @@ -6364,8 +6451,8 @@ enum lttng_error_code ust_app_rotate_session(struct ltt_session *session) (void) push_metadata(registry, usess->consumer); ret = consumer_rotate_channel(socket, registry->metadata_key, - ua_sess->effective_credentials.uid, - ua_sess->effective_credentials.gid, + lttng_credentials_get_uid(&ua_sess->effective_credentials), + lttng_credentials_get_gid(&ua_sess->effective_credentials), ua_sess->consumer, /* is_metadata_channel */ true); if (ret < 0) { @@ -6644,3 +6731,126 @@ end: rcu_read_unlock(); return cmd_ret; } + +/* + * This function skips the metadata channel as the begin/end timestamps of a + * metadata packet are useless. + * + * Moreover, opening a packet after a "clear" will cause problems for live + * sessions as it will introduce padding that was not part of the first trace + * chunk. The relay daemon expects the content of the metadata stream of + * successive metadata trace chunks to be strict supersets of one another. + * + * For example, flushing a packet at the beginning of the metadata stream of + * a trace chunk resulting from a "clear" session command will cause the + * size of the metadata stream of the new trace chunk to not match the size of + * the metadata stream of the original chunk. This will confuse the relay + * daemon as the same "offset" in a metadata stream will no longer point + * to the same content. 
+ */ +enum lttng_error_code ust_app_open_packets(struct ltt_session *session) +{ + enum lttng_error_code ret = LTTNG_OK; + struct lttng_ht_iter iter; + struct ltt_ust_session *usess = session->ust_session; + + assert(usess); + + rcu_read_lock(); + + switch (usess->buffer_type) { + case LTTNG_BUFFER_PER_UID: + { + struct buffer_reg_uid *reg; + + cds_list_for_each_entry ( + reg, &usess->buffer_reg_uid_list, lnode) { + struct buffer_reg_channel *reg_chan; + struct consumer_socket *socket; + + socket = consumer_find_socket_by_bitness( + reg->bits_per_long, usess->consumer); + if (!socket) { + ret = LTTNG_ERR_FATAL; + goto error; + } + + cds_lfht_for_each_entry(reg->registry->channels->ht, + &iter.iter, reg_chan, node.node) { + const int open_ret = + consumer_open_channel_packets( + socket, + reg_chan->consumer_key); + + if (open_ret < 0) { + ret = LTTNG_ERR_UNK; + goto error; + } + } + } + break; + } + case LTTNG_BUFFER_PER_PID: + { + struct ust_app *app; + + cds_lfht_for_each_entry ( + ust_app_ht->ht, &iter.iter, app, pid_n.node) { + struct consumer_socket *socket; + struct lttng_ht_iter chan_iter; + struct ust_app_channel *ua_chan; + struct ust_app_session *ua_sess; + struct ust_registry_session *registry; + + ua_sess = lookup_session_by_app(usess, app); + if (!ua_sess) { + /* Session not associated with this app. */ + continue; + } + + /* Get the right consumer socket for the application. */ + socket = consumer_find_socket_by_bitness( + app->bits_per_long, usess->consumer); + if (!socket) { + ret = LTTNG_ERR_FATAL; + goto error; + } + + registry = get_session_registry(ua_sess); + if (!registry) { + DBG("Application session is being torn down. Skip application."); + continue; + } + + cds_lfht_for_each_entry(ua_sess->channels->ht, + &chan_iter.iter, ua_chan, node.node) { + const int open_ret = + consumer_open_channel_packets( + socket, + ua_chan->key); + + if (open_ret < 0) { + /* + * Per-PID buffer and application going + * away. + */ + if (open_ret == -LTTNG_ERR_CHAN_NOT_FOUND) { + continue; + } + + ret = LTTNG_ERR_UNK; + goto error; + } + } + } + break; + } + default: + abort(); + break; + } + +error: + rcu_read_unlock(); + return ret; +}