#include <linux/bitops.h>
#include <counter/counter.h>
#include <counter/counter-internal.h>
+#include <wrapper/compiler_attributes.h>
#include <wrapper/limits.h>
/*
 * Using unsigned arithmetic for the additions because unsigned overflow
 * wraps with well-defined behavior, whereas signed integer overflow is
 * undefined behavior in C.
 */
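/*
 * A minimal illustration of the idiom used by the per-CPU cases below:
 * with 8-bit counters,
 *   (int8_t) ((uint8_t) old + (uint8_t) v)
 * computes 127 + 1 as -128, wrapping modulo 2^8 on the two's complement
 * targets the kernel supports, instead of performing a signed addition
 * that could overflow.
 */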
-static inline int __lttng_counter_add(const struct lib_counter_config *config,
+static __always_inline int __lttng_counter_add(const struct lib_counter_config *config,
enum lib_counter_config_alloc alloc,
enum lib_counter_config_sync sync,
struct lib_counter *counter,
int8_t global_sum_step = counter->global_sum_step.s8;
res = *int_p;
- switch (sync) {
- case COUNTER_SYNC_PER_CPU:
+ switch (alloc) {
+ case COUNTER_ALLOC_PER_CPU:
{
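/* Retry the compare-and-swap until no concurrent update raced with the read. */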
do {
move_sum = 0;
old = res;
n = (int8_t) ((uint8_t) old + (uint8_t) v);
- if (unlikely(n > (int8_t) global_sum_step))
- move_sum = (int8_t) global_sum_step / 2;
- else if (unlikely(n < -(int8_t) global_sum_step))
- move_sum = -((int8_t) global_sum_step / 2);
- n -= move_sum;
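+ /*
+  * Only move counts to the global counter when a global_sum_step is
+  * configured: once the per-CPU value crosses ±global_sum_step, carve
+  * half a step off into move_sum; the caller adds move_sum to the
+  * global counter.
+  */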
+ if (unlikely(global_sum_step)) {
+ if (unlikely(n > (int8_t) global_sum_step))
+ move_sum = (int8_t) global_sum_step / 2;
+ else if (unlikely(n < -(int8_t) global_sum_step))
+ move_sum = -((int8_t) global_sum_step / 2);
+ n -= move_sum;
+ }
res = cmpxchg_local(int_p, old, n);
} while (old != res);
break;
}
- case COUNTER_SYNC_GLOBAL:
+ case COUNTER_ALLOC_GLOBAL:
{
do {
old = res;
int16_t global_sum_step = counter->global_sum_step.s16;
res = *int_p;
- switch (sync) {
- case COUNTER_SYNC_PER_CPU:
+ switch (alloc) {
+ case COUNTER_ALLOC_PER_CPU:
{
do {
move_sum = 0;
old = res;
n = (int16_t) ((uint16_t) old + (uint16_t) v);
- if (unlikely(n > (int16_t) global_sum_step))
- move_sum = (int16_t) global_sum_step / 2;
- else if (unlikely(n < -(int16_t) global_sum_step))
- move_sum = -((int16_t) global_sum_step / 2);
- n -= move_sum;
+ if (unlikely(global_sum_step)) {
+ if (unlikely(n > (int16_t) global_sum_step))
+ move_sum = (int16_t) global_sum_step / 2;
+ else if (unlikely(n < -(int16_t) global_sum_step))
+ move_sum = -((int16_t) global_sum_step / 2);
+ n -= move_sum;
+ }
res = cmpxchg_local(int_p, old, n);
} while (old != res);
break;
}
- case COUNTER_SYNC_GLOBAL:
+ case COUNTER_ALLOC_GLOBAL:
{
do {
old = res;
int32_t global_sum_step = counter->global_sum_step.s32;
res = *int_p;
- switch (sync) {
- case COUNTER_SYNC_PER_CPU:
+ switch (alloc) {
+ case COUNTER_ALLOC_PER_CPU:
{
do {
move_sum = 0;
old = res;
n = (int32_t) ((uint32_t) old + (uint32_t) v);
- if (unlikely(n > (int32_t) global_sum_step))
- move_sum = (int32_t) global_sum_step / 2;
- else if (unlikely(n < -(int32_t) global_sum_step))
- move_sum = -((int32_t) global_sum_step / 2);
- n -= move_sum;
+ if (unlikely(global_sum_step)) {
+ if (unlikely(n > (int32_t) global_sum_step))
+ move_sum = (int32_t) global_sum_step / 2;
+ else if (unlikely(n < -(int32_t) global_sum_step))
+ move_sum = -((int32_t) global_sum_step / 2);
+ n -= move_sum;
+ }
res = cmpxchg_local(int_p, old, n);
} while (old != res);
break;
}
- case COUNTER_SYNC_GLOBAL:
+ case COUNTER_ALLOC_GLOBAL:
{
do {
old = res;
int64_t global_sum_step = counter->global_sum_step.s64;
res = *int_p;
- switch (sync) {
- case COUNTER_SYNC_PER_CPU:
+ switch (alloc) {
+ case COUNTER_ALLOC_PER_CPU:
{
do {
move_sum = 0;
old = res;
n = (int64_t) ((uint64_t) old + (uint64_t) v);
- if (unlikely(n > (int64_t) global_sum_step))
- move_sum = (int64_t) global_sum_step / 2;
- else if (unlikely(n < -(int64_t) global_sum_step))
- move_sum = -((int64_t) global_sum_step / 2);
- n -= move_sum;
+ if (unlikely(global_sum_step)) {
+ if (unlikely(n > (int64_t) global_sum_step))
+ move_sum = (int64_t) global_sum_step / 2;
+ else if (unlikely(n < -(int64_t) global_sum_step))
+ move_sum = -((int64_t) global_sum_step / 2);
+ n -= move_sum;
+ }
res = cmpxchg_local(int_p, old, n);
} while (old != res);
break;
}
- case COUNTER_SYNC_GLOBAL:
+ case COUNTER_ALLOC_GLOBAL:
{
do {
old = res;
return 0;
}
-static inline int __lttng_counter_add_percpu(const struct lib_counter_config *config,
+static __always_inline int __lttng_counter_add_percpu(const struct lib_counter_config *config,
struct lib_counter *counter,
const size_t *dimension_indexes, int64_t v)
{
return 0;
}
-static inline int __lttng_counter_add_global(const struct lib_counter_config *config,
+static __always_inline int __lttng_counter_add_global(const struct lib_counter_config *config,
struct lib_counter *counter,
const size_t *dimension_indexes, int64_t v)
{
dimension_indexes, v, NULL);
}
-static inline int lttng_counter_add(const struct lib_counter_config *config,
+static __always_inline int lttng_counter_add(const struct lib_counter_config *config,
struct lib_counter *counter,
const size_t *dimension_indexes, int64_t v)
{
switch (config->alloc) {
- case COUNTER_ALLOC_PER_CPU: /* Fallthrough */
+ case COUNTER_ALLOC_PER_CPU:
+ lttng_fallthrough;
case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
return __lttng_counter_add_percpu(config, counter, dimension_indexes, v);
case COUNTER_ALLOC_GLOBAL:
}
}
-static inline int lttng_counter_inc(const struct lib_counter_config *config,
+static __always_inline int lttng_counter_inc(const struct lib_counter_config *config,
struct lib_counter *counter,
const size_t *dimension_indexes)
{
return lttng_counter_add(config, counter, dimension_indexes, 1);
}
-static inline int lttng_counter_dec(const struct lib_counter_config *config,
+static __always_inline int lttng_counter_dec(const struct lib_counter_config *config,
struct lib_counter *counter,
const size_t *dimension_indexes)
{