-#ifndef _LINUX_RING_BUFFER_VATOMIC_H
-#define _LINUX_RING_BUFFER_VATOMIC_H
-
/*
- * linux/ringbuffer/vatomic.h
- *
- * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * SPDX-License-Identifier: MIT
*
- * Dual LGPL v2.1/GPL v2 license.
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*/
+#ifndef _LTTNG_RING_BUFFER_VATOMIC_H
+#define _LTTNG_RING_BUFFER_VATOMIC_H
+
#include <assert.h>
#include <urcu/uatomic.h>

/*
 * Same data type (long) accessed differently depending on configuration.
 * v field is for non-atomic access (protected by mutual exclusion).
 * In the fast-path, the ring_buffer_config structure is constant, so the
 * compiler can statically select the appropriate branch.
 */
union v_atomic {
	long a;	/* accessed through uatomic */
	long v;
};
static inline
-long v_read(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
+long v_read(const struct lttng_ust_lib_ring_buffer_config *config, union v_atomic *v_a)
{
assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
return uatomic_read(&v_a->a);
}
static inline
-void v_set(const struct lib_ring_buffer_config *config, union v_atomic *v_a,
+void v_set(const struct lttng_ust_lib_ring_buffer_config *config, union v_atomic *v_a,
long v)
{
	assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
	uatomic_set(&v_a->a, v);
}
static inline
-void v_add(const struct lib_ring_buffer_config *config, long v, union v_atomic *v_a)
+void v_add(const struct lttng_ust_lib_ring_buffer_config *config, long v, union v_atomic *v_a)
{
assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
uatomic_add(&v_a->a, v);
}
static inline
-void v_inc(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
+void v_inc(const struct lttng_ust_lib_ring_buffer_config *config, union v_atomic *v_a)
{
assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
uatomic_inc(&v_a->a);
}

/*
 * Non-atomic decrement. Only used by reader, apply to reader-owned subbuffer.
*/
static inline
-void _v_dec(const struct lib_ring_buffer_config *config, union v_atomic *v_a)
+void _v_dec(const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)), union v_atomic *v_a)
{
--v_a->v;
}
static inline
-long v_cmpxchg(const struct lib_ring_buffer_config *config, union v_atomic *v_a,
+long v_cmpxchg(const struct lttng_ust_lib_ring_buffer_config *config, union v_atomic *v_a,
long old, long _new)
{
assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
return uatomic_cmpxchg(&v_a->a, old, _new);
}
-#endif /* _LINUX_RING_BUFFER_VATOMIC_H */
+#endif /* _LTTNG_RING_BUFFER_VATOMIC_H */