}
static void channel_free(struct channel *chan,
- struct lttng_ust_shm_handle *handle)
+ struct lttng_ust_shm_handle *handle,
+ int consumer)
{
channel_backend_free(&chan->backend, handle);
/* chan is freed by shm teardown */
- shm_object_table_destroy(handle->table);
+ shm_object_table_destroy(handle->table, consumer);
free(handle);
}
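For orientation, here is a deliberately simplified, hypothetical sketch of the one-bit ownership split the new consumer flag threads through teardown. The real shm_object_table_destroy() implementation lives in the shm code, not in this hunk; the stand-in types and the close() policy below are illustrative assumptions, not part of this patch.

#include <stddef.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

/* Minimal stand-in types for illustration only. */
struct shm_object {
        void *memory_map;
        size_t memory_map_size;
        int shm_fd;
};

struct shm_object_table {
        size_t allocated_len;
        struct shm_object objects[];
};

/* Hypothetical sketch -- not the actual lttng-ust implementation. */
void shm_object_table_destroy(struct shm_object_table *table, int consumer)
{
        size_t i;

        for (i = 0; i < table->allocated_len; i++) {
                struct shm_object *obj = &table->objects[i];

                /* Both ends unmap their own view of the shm area. */
                (void) munmap(obj->memory_map, obj->memory_map_size);
                /*
                 * Assumption for illustration: only the consumer end
                 * owns the shared file descriptors, so only it closes
                 * them; the application end leaves them open.
                 */
                if (consumer)
                        (void) close(obj->shm_fd);
        }
        free(table);
}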
error_backend_init:
error_append:
- shm_object_table_destroy(handle->table);
+ shm_object_table_destroy(handle->table, 1);
error_table_alloc:
free(handle);
return NULL;
return handle;
error_table_object:
- shm_object_table_destroy(handle->table);
+ shm_object_table_destroy(handle->table, 0);
error_table_alloc:
free(handle);
return NULL;
}
static
-void channel_release(struct channel *chan, struct lttng_ust_shm_handle *handle)
+void channel_release(struct channel *chan, struct lttng_ust_shm_handle *handle,
+ int consumer)
{
- channel_free(chan, handle);
+ channel_free(chan, handle, consumer);
}
/**
* sessiond/consumer are keeping a reference on the shm file
* descriptor directly. No need to refcount.
*/
- channel_release(chan, handle);
+ channel_release(chan, handle, consumer);
return;
}
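Usage-wise, the flag records which end of the shared-memory channel is tearing down: the error paths above pass 1 on the consumer-side creation path and 0 on what appears to be the application-side mapping path. A hedged sketch of the two resulting call sites; channel_destroy() forwarding the flag is inferred from the channel_release() call above, not shown in this hunk.

/* Consumer daemon side: created the channel, owns the shm resources. */
channel_destroy(chan, handle, 1);

/* Application side: merely mapped an existing channel. */
channel_destroy(chan, handle, 0);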
 * Force a sub-buffer switch. This operation is completely reentrant: can be
* called while tracing is active with absolutely no lock held.
*
- * Note, however, that as a v_cmpxchg is used for some atomic
- * operations, this function must be called from the CPU which owns the buffer
- * for a ACTIVE flush.
+ * For RING_BUFFER_SYNC_PER_CPU ring buffers, as a v_cmpxchg is used for
+ * some atomic operations, this function must be called from the CPU
+ * which owns the buffer for an ACTIVE flush. However, for
+ * RING_BUFFER_SYNC_GLOBAL ring buffers, this function can be called
+ * from any CPU.
*/
void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
struct lttng_ust_shm_handle *handle)
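To make the constraint above concrete: for a RING_BUFFER_SYNC_PER_CPU channel, an ACTIVE flush must execute on the CPU that owns the buffer. Below is a minimal sketch of one way a caller could satisfy that from an arbitrary thread, by pinning itself first. The helper name, the use of pthread_setaffinity_np(), and the lack of affinity save/restore and error handling are all assumptions for illustration, not lttng-ust API.

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

/* Illustrative helper: perform an ACTIVE flush of a per-CPU buffer
 * from the CPU that owns it, by pinning the calling thread there. */
static void flush_cpu_buffer(struct lttng_ust_lib_ring_buffer *buf, int cpu,
                struct lttng_ust_shm_handle *handle)
{
        cpu_set_t mask;

        CPU_ZERO(&mask);
        CPU_SET(cpu, &mask);
        /* From here on the thread runs only on 'cpu'; a real caller
         * would save and restore the previous affinity. */
        (void) pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask);
        lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE, handle);
}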