+
+/*
+ * Clear every stream of a monitored channel.
+ *
+ * Streams of a monitored channel are found through the global
+ * stream-per-channel-id hash table, using the channel key as the lookup
+ * key (duplicates iteration: one entry per stream of the channel).
+ *
+ * Returns LTTCOMM_CONSUMERD_SUCCESS on success, or the non-zero value
+ * returned by consumer_clear_stream() on the first stream that fails.
+ */
+static
+int consumer_clear_monitored_channel(struct lttng_consumer_channel *channel)
+{
+ struct lttng_ht *ht;
+ struct lttng_consumer_stream *stream;
+ struct lttng_ht_iter iter;
+ int ret;
+
+ ht = the_consumer_data.stream_per_chan_id_ht;
+
+ /* RCU read-side lock protects the hash table traversal. */
+ rcu_read_lock();
+ cds_lfht_for_each_entry_duplicate(ht->ht,
+ ht->hash_fct(&channel->key, lttng_ht_seed),
+ ht->match_fct, &channel->key,
+ &iter.iter, stream, node_channel_id.node) {
+ /*
+ * Protect against teardown with mutex.
+ */
+ pthread_mutex_lock(&stream->lock);
+ /*
+ * Re-check under the lock: a stream whose node was already
+ * removed from the hash table is being torn down; skip it.
+ */
+ if (cds_lfht_is_node_deleted(&stream->node.node)) {
+ goto next;
+ }
+ ret = consumer_clear_stream(stream);
+ if (ret) {
+ /* Abort the iteration, propagating the stream's error code. */
+ goto error_unlock;
+ }
+ next:
+ pthread_mutex_unlock(&stream->lock);
+ }
+ rcu_read_unlock();
+ return LTTCOMM_CONSUMERD_SUCCESS;
+
+error_unlock:
+ /* Release the lock of the stream that failed, then the RCU lock. */
+ pthread_mutex_unlock(&stream->lock);
+ rcu_read_unlock();
+ return ret;
+}
+
+/*
+ * Clear a channel ("clear" command entry point on the consumer side).
+ *
+ * Metadata channels are a no-op: their regeneration is handled by the
+ * snapshot mechanism, so only data streams need clearing. Otherwise,
+ * dispatch on the channel's monitor flag to the appropriate helper.
+ *
+ * Returns an lttcomm_return_code value (LTTCOMM_CONSUMERD_SUCCESS on
+ * success, an error code from the invoked helper otherwise).
+ */
+int lttng_consumer_clear_channel(struct lttng_consumer_channel *channel)
+{
+ int ret;
+
+ DBG("Consumer clear channel %" PRIu64, channel->key);
+
+ if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) {
+ /*
+ * Nothing to do for the metadata channel/stream.
+ * Snapshot mechanism already take care of the metadata
+ * handling/generation, and monitored channels only need to
+ * have their data stream cleared..
+ */
+ ret = LTTCOMM_CONSUMERD_SUCCESS;
+ goto end;
+ }
+
+ if (!channel->monitor) {
+ ret = consumer_clear_unmonitored_channel(channel);
+ } else {
+ ret = consumer_clear_monitored_channel(channel);
+ }
+end:
+ return ret;
+}
+
+/*
+ * Open a packet in every stream of a channel.
+ *
+ * Rejected on metadata channels (a set metadata_stream indicates one).
+ * Each stream is visited under the RCU read lock and its own mutex;
+ * streams whose hash table node was already deleted (concurrent
+ * teardown) are skipped.
+ *
+ * A stream in which a packet was opened has its
+ * opened_packet_in_current_trace_chunk flag set. A NO_SPACE status is
+ * not treated as an error: the stream is simply left as-is.
+ *
+ * Returns LTTCOMM_CONSUMERD_SUCCESS on success,
+ * LTTCOMM_CONSUMERD_INVALID_PARAMETERS for a metadata channel, or
+ * LTTCOMM_CONSUMERD_UNKNOWN_ERROR if opening a packet fails.
+ */
+enum lttcomm_return_code lttng_consumer_open_channel_packets(
+ struct lttng_consumer_channel *channel)
+{
+ struct lttng_consumer_stream *stream;
+ enum lttcomm_return_code ret = LTTCOMM_CONSUMERD_SUCCESS;
+
+ if (channel->metadata_stream) {
+ ERR("Open channel packets command attempted on a metadata channel");
+ ret = LTTCOMM_CONSUMERD_INVALID_PARAMETERS;
+ goto end;
+ }
+
+ rcu_read_lock();
+ cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
+ enum consumer_stream_open_packet_status status;
+
+ /* Serialize against concurrent stream teardown. */
+ pthread_mutex_lock(&stream->lock);
+ if (cds_lfht_is_node_deleted(&stream->node.node)) {
+ goto next;
+ }
+
+ status = consumer_stream_open_packet(stream);
+ switch (status) {
+ case CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED:
+ DBG("Opened a packet in \"open channel packets\" command: stream id = %" PRIu64
+ ", channel name = %s, session id = %" PRIu64,
+ stream->key, stream->chan->name,
+ stream->chan->session_id);
+ stream->opened_packet_in_current_trace_chunk = true;
+ break;
+ case CONSUMER_STREAM_OPEN_PACKET_STATUS_NO_SPACE:
+ /* Best-effort: no room in the buffer is not a failure. */
+ DBG("No space left to open a packet in \"open channel packets\" command: stream id = %" PRIu64
+ ", channel name = %s, session id = %" PRIu64,
+ stream->key, stream->chan->name,
+ stream->chan->session_id);
+ break;
+ case CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR:
+ /*
+ * Only unexpected internal errors can lead to this
+ * failing. Report an unknown error.
+ */
+ ERR("Failed to flush empty buffer in \"open channel packets\" command: stream id = %" PRIu64
+ ", channel id = %" PRIu64
+ ", channel name = %s"
+ ", session id = %" PRIu64,
+ stream->key, channel->key,
+ channel->name, channel->session_id);
+ ret = LTTCOMM_CONSUMERD_UNKNOWN_ERROR;
+ goto error_unlock;
+ default:
+ abort();
+ }
+
+ next:
+ pthread_mutex_unlock(&stream->lock);
+ }
+
+end_rcu_unlock:
+ rcu_read_unlock();
+end:
+ return ret;
+
+error_unlock:
+ /* The failing stream's lock is still held; release it first. */
+ pthread_mutex_unlock(&stream->lock);
+ goto end_rcu_unlock;
+}
+
+/*
+ * SIGBUS handling entry point: forward the faulting address to the
+ * user space (UST) consumer's SIGBUS handler.
+ */
+void lttng_consumer_sigbus_handle(void *addr)
+{
+ lttng_ustconsumer_sigbus_handle(addr);
+}