Implement lib counter
author     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
           Wed, 15 Jul 2020 18:13:24 +0000 (14:13 -0400)
committer  Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
           Wed, 25 Nov 2020 19:16:57 +0000 (14:16 -0500)
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Change-Id: I47424817ca874357d5ae4349a048cce889988a76

12 files changed:
include/counter/config.h [new file with mode: 0644]
include/counter/counter-api.h [new file with mode: 0644]
include/counter/counter-internal.h [new file with mode: 0644]
include/counter/counter-types.h [new file with mode: 0644]
include/counter/counter.h [new file with mode: 0644]
include/lttng/events.h
src/Kbuild
src/lib/Kbuild
src/lib/counter/counter.c [new file with mode: 0644]
src/lttng-counter-client-percpu-32-modular.c [new file with mode: 0644]
src/lttng-counter-client-percpu-64-modular.c [new file with mode: 0644]
src/lttng-events.c

diff --git a/include/counter/config.h b/include/counter/config.h
new file mode 100644
index 0000000..845d941
--- /dev/null
+++ b/include/counter/config.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * counter/config.h
+ *
+ * LTTng Counters Configuration
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_COUNTER_CONFIG_H
+#define _LTTNG_COUNTER_CONFIG_H
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+
+enum lib_counter_config_alloc {
+       COUNTER_ALLOC_PER_CPU   = (1 << 0),
+       COUNTER_ALLOC_GLOBAL    = (1 << 1),
+};
+
+enum lib_counter_config_sync {
+       COUNTER_SYNC_PER_CPU,
+       COUNTER_SYNC_GLOBAL,
+};
+
+struct lib_counter_config {
+       u32 alloc;      /* enum lib_counter_config_alloc flags */
+       enum lib_counter_config_sync sync;
+       enum {
+               COUNTER_ARITHMETIC_MODULAR,
+               COUNTER_ARITHMETIC_SATURATE,    /* TODO */
+       } arithmetic;
+       enum {
+               COUNTER_SIZE_8_BIT      = 1,
+               COUNTER_SIZE_16_BIT     = 2,
+               COUNTER_SIZE_32_BIT     = 4,
+               COUNTER_SIZE_64_BIT     = 8,
+       } counter_size;
+};
+
+#endif /* _LTTNG_COUNTER_CONFIG_H */
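
The alloc field is a bitmask, so a client can request per-cpu counters, a global counter, or both. As a hypothetical illustration (this exact configuration is not part of the patch; only the per-cpu modular clients below are shipped), a combined per-cpu/global configuration would look like:

#include <counter/config.h>

/*
 * Hypothetical example: per-cpu 32-bit modular counters backed by a
 * global aggregation counter. Not shipped in this patch.
 */
static const struct lib_counter_config example_combined_config = {
	.alloc = COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL,
	.sync = COUNTER_SYNC_PER_CPU,
	.arithmetic = COUNTER_ARITHMETIC_MODULAR,
	.counter_size = COUNTER_SIZE_32_BIT,
};

With this combination, a non-zero global_sum_step at counter creation lets the fast path spill per-cpu increments into the global counter, as implemented in counter-api.h below.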
diff --git a/include/counter/counter-api.h b/include/counter/counter-api.h
new file mode 100644
index 0000000..f2829fc
--- /dev/null
+++ b/include/counter/counter-api.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * counter/counter-api.h
+ *
+ * LTTng Counters API, requiring counter/config.h
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_COUNTER_API_H
+#define _LTTNG_COUNTER_API_H
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+#include <linux/bitops.h>
+#include <counter/counter.h>
+#include <counter/counter-internal.h>
+
+/*
+ * Use unsigned arithmetic: unsigned overflow has well-defined
+ * (wrap-around) semantics, unlike signed overflow.
+ */
+static inline int __lttng_counter_add(const struct lib_counter_config *config,
+                                      enum lib_counter_config_alloc alloc,
+                                      enum lib_counter_config_sync sync,
+                                      struct lib_counter *counter,
+                                      const size_t *dimension_indexes, int64_t v,
+                                      int64_t *remainder)
+{
+       size_t index;
+       bool overflow = false, underflow = false;
+       struct lib_counter_layout *layout;
+       int64_t move_sum = 0;
+
+       if (unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
+               return -EOVERFLOW;
+       index = lttng_counter_get_index(config, counter, dimension_indexes);
+
+       switch (alloc) {
+       case COUNTER_ALLOC_PER_CPU:
+               layout = per_cpu_ptr(counter->percpu_counters, smp_processor_id());
+               break;
+       case COUNTER_ALLOC_GLOBAL:
+               layout = &counter->global_counters;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       switch (config->counter_size) {
+       case COUNTER_SIZE_8_BIT:
+       {
+               int8_t *int_p = (int8_t *) layout->counters + index;
+               int8_t old, n, res;
+               int8_t global_sum_step = counter->global_sum_step.s8;
+
+               res = *int_p;
+               switch (sync) {
+               case COUNTER_SYNC_PER_CPU:
+               {
+                       do {
+                               move_sum = 0;
+                               old = res;
+                               n = (int8_t) ((uint8_t) old + (uint8_t) v);
+                               if (unlikely(n > (int8_t) global_sum_step))
+                                       move_sum = (int8_t) global_sum_step / 2;
+                               else if (unlikely(n < -(int8_t) global_sum_step))
+                                       move_sum = -((int8_t) global_sum_step / 2);
+                               n -= move_sum;
+                               res = cmpxchg_local(int_p, old, n);
+                       } while (old != res);
+                       break;
+               }
+               case COUNTER_SYNC_GLOBAL:
+               {
+                       do {
+                               old = res;
+                               n = (int8_t) ((uint8_t) old + (uint8_t) v);
+                               res = cmpxchg(int_p, old, n);
+                       } while (old != res);
+                       break;
+               }
+               }
+               if (v > 0 && (v >= U8_MAX || n < old))
+                       overflow = true;
+               else if (v < 0 && (v <= -U8_MAX || n > old))
+                       underflow = true;
+               break;
+       }
+       case COUNTER_SIZE_16_BIT:
+       {
+               int16_t *int_p = (int16_t *) layout->counters + index;
+               int16_t old, n, res;
+               int16_t global_sum_step = counter->global_sum_step.s16;
+
+               res = *int_p;
+               switch (sync) {
+               case COUNTER_SYNC_PER_CPU:
+               {
+                       do {
+                               move_sum = 0;
+                               old = res;
+                               n = (int16_t) ((uint16_t) old + (uint16_t) v);
+                               if (unlikely(n > (int16_t) global_sum_step))
+                                       move_sum = (int16_t) global_sum_step / 2;
+                               else if (unlikely(n < -(int16_t) global_sum_step))
+                                       move_sum = -((int16_t) global_sum_step / 2);
+                               n -= move_sum;
+                               res = cmpxchg_local(int_p, old, n);
+                       } while (old != res);
+                       break;
+               }
+               case COUNTER_SYNC_GLOBAL:
+               {
+                       do {
+                               old = res;
+                               n = (int16_t) ((uint16_t) old + (uint16_t) v);
+                               res = cmpxchg(int_p, old, n);
+                       } while (old != res);
+                       break;
+               }
+               }
+               if (v > 0 && (v >= U16_MAX || n < old))
+                       overflow = true;
+               else if (v < 0 && (v <= -U16_MAX || n > old))
+                       underflow = true;
+               break;
+       }
+       case COUNTER_SIZE_32_BIT:
+       {
+               int32_t *int_p = (int32_t *) layout->counters + index;
+               int32_t old, n, res;
+               int32_t global_sum_step = counter->global_sum_step.s32;
+
+               res = *int_p;
+               switch (sync) {
+               case COUNTER_SYNC_PER_CPU:
+               {
+                       do {
+                               move_sum = 0;
+                               old = res;
+                               n = (int32_t) ((uint32_t) old + (uint32_t) v);
+                               if (unlikely(n > (int32_t) global_sum_step))
+                                       move_sum = (int32_t) global_sum_step / 2;
+                               else if (unlikely(n < -(int32_t) global_sum_step))
+                                       move_sum = -((int32_t) global_sum_step / 2);
+                               n -= move_sum;
+                               res = cmpxchg_local(int_p, old, n);
+                       } while (old != res);
+                       break;
+               }
+               case COUNTER_SYNC_GLOBAL:
+               {
+                       do {
+                               old = res;
+                               n = (int32_t) ((uint32_t) old + (uint32_t) v);
+                               res = cmpxchg(int_p, old, n);
+                       } while (old != res);
+                       break;
+               }
+               }
+               if (v > 0 && (v >= U32_MAX || n < old))
+                       overflow = true;
+               else if (v < 0 && (v <= -U32_MAX || n > old))
+                       underflow = true;
+               break;
+       }
+#if BITS_PER_LONG == 64
+       case COUNTER_SIZE_64_BIT:
+       {
+               int64_t *int_p = (int64_t *) layout->counters + index;
+               int64_t old, n, res;
+               int64_t global_sum_step = counter->global_sum_step.s64;
+
+               res = *int_p;
+               switch (sync) {
+               case COUNTER_SYNC_PER_CPU:
+               {
+                       do {
+                               move_sum = 0;
+                               old = res;
+                               n = (int64_t) ((uint64_t) old + (uint64_t) v);
+                               if (unlikely(n > (int64_t) global_sum_step))
+                                       move_sum = (int64_t) global_sum_step / 2;
+                               else if (unlikely(n < -(int64_t) global_sum_step))
+                                       move_sum = -((int64_t) global_sum_step / 2);
+                               n -= move_sum;
+                               res = cmpxchg_local(int_p, old, n);
+                       } while (old != res);
+                       break;
+               }
+               case COUNTER_SYNC_GLOBAL:
+               {
+                       do {
+                               old = res;
+                               n = (int64_t) ((uint64_t) old + (uint64_t) v);
+                               res = cmpxchg(int_p, old, n);
+                       } while (old != res);
+                       break;
+               }
+               }
+               if (v > 0 && n < old)
+                       overflow = true;
+               else if (v < 0 && n > old)
+                       underflow = true;
+               break;
+       }
+#endif
+       default:
+               return -EINVAL;
+       }
+       if (unlikely(overflow && !test_bit(index, layout->overflow_bitmap)))
+               set_bit(index, layout->overflow_bitmap);
+       else if (unlikely(underflow && !test_bit(index, layout->underflow_bitmap)))
+               set_bit(index, layout->underflow_bitmap);
+       if (remainder)
+               *remainder = move_sum;
+       return 0;
+}
+
+static inline int __lttng_counter_add_percpu(const struct lib_counter_config *config,
+                                    struct lib_counter *counter,
+                                    const size_t *dimension_indexes, int64_t v)
+{
+       int64_t move_sum;
+       int ret;
+
+       ret = __lttng_counter_add(config, COUNTER_ALLOC_PER_CPU, config->sync,
+                                      counter, dimension_indexes, v, &move_sum);
+       if (unlikely(ret))
+               return ret;
+       if (unlikely(move_sum))
+               return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, COUNTER_SYNC_GLOBAL,
+                                          counter, dimension_indexes, move_sum, NULL);
+       return 0;
+}
+
+static inline int __lttng_counter_add_global(const struct lib_counter_config *config,
+                                    struct lib_counter *counter,
+                                    const size_t *dimension_indexes, int64_t v)
+{
+       return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, config->sync, counter,
+                                  dimension_indexes, v, NULL);
+}
+
+static inline int lttng_counter_add(const struct lib_counter_config *config,
+                                   struct lib_counter *counter,
+                                   const size_t *dimension_indexes, int64_t v)
+{
+       switch (config->alloc) {
+       case COUNTER_ALLOC_PER_CPU:     /* Fallthrough */
+       case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+               return __lttng_counter_add_percpu(config, counter, dimension_indexes, v);
+       case COUNTER_ALLOC_GLOBAL:
+               return __lttng_counter_add_global(config, counter, dimension_indexes, v);
+       default:
+               return -EINVAL;
+       }
+}
+
+static inline int lttng_counter_inc(const struct lib_counter_config *config,
+                                    struct lib_counter *counter,
+                                    const size_t *dimension_indexes)
+{
+       return lttng_counter_add(config, counter, dimension_indexes, 1);
+}
+
+static inline int lttng_counter_dec(const struct lib_counter_config *config,
+                                   struct lib_counter *counter,
+                                   const size_t *dimension_indexes)
+{
+       return lttng_counter_add(config, counter, dimension_indexes, -1);
+}
+
+#endif /* _LTTNG_COUNTER_API_H */
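
Taken together, lttng_counter_add() dispatches on the allocation mode: per-cpu updates use cmpxchg_local(), and when a per-cpu value strays beyond ±global_sum_step, half a step is moved into the global counter with a full cmpxchg(). A minimal usage sketch (hypothetical caller; assumes a counter already created with a matching config):

#include <counter/counter.h>
#include <counter/counter-api.h>

/*
 * Hypothetical fast-path caller: count a hit in a 1-D counter.
 * Returns -EOVERFLOW if the bucket index is out of range.
 */
static int example_count_hit(const struct lib_counter_config *config,
			     struct lib_counter *counter, size_t bucket)
{
	const size_t dimension_indexes[1] = { bucket };

	return lttng_counter_inc(config, counter, dimension_indexes);
}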
diff --git a/include/counter/counter-internal.h b/include/counter/counter-internal.h
new file mode 100644
index 0000000..839b92c
--- /dev/null
+++ b/include/counter/counter-internal.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * counter/counter-internal.h
+ *
+ * LTTng Counters Internal Header
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_COUNTER_INTERNAL_H
+#define _LTTNG_COUNTER_INTERNAL_H
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+#include <counter/counter-types.h>
+#include <counter/config.h>
+
+static inline int lttng_counter_validate_indexes(const struct lib_counter_config *config,
+                                                struct lib_counter *counter,
+                                                const size_t *dimension_indexes)
+{
+       size_t nr_dimensions = counter->nr_dimensions, i;
+
+       for (i = 0; i < nr_dimensions; i++) {
+               if (unlikely(dimension_indexes[i] >= counter->dimensions[i].max_nr_elem))
+                       return -EOVERFLOW;
+       }
+       return 0;
+}
+
+static inline size_t lttng_counter_get_index(const struct lib_counter_config *config,
+                                            struct lib_counter *counter,
+                                            const size_t *dimension_indexes)
+{
+       size_t nr_dimensions = counter->nr_dimensions, i;
+       size_t index = 0;
+
+       for (i = 0; i < nr_dimensions; i++) {
+               struct lib_counter_dimension *dimension = &counter->dimensions[i];
+               const size_t *dimension_index = &dimension_indexes[i];
+
+               index += *dimension_index * dimension->stride;
+       }
+       return index;
+}
+
+#endif /* _LTTNG_COUNTER_INTERNAL_H */
diff --git a/include/counter/counter-types.h b/include/counter/counter-types.h
new file mode 100644
index 0000000..5ae0184
--- /dev/null
+++ b/include/counter/counter-types.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * counter/counter-types.h
+ *
+ * LTTng Counters Types
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_COUNTER_TYPES_H
+#define _LTTNG_COUNTER_TYPES_H
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+#include <counter/config.h>
+
+struct lib_counter_dimension {
+       /*
+        * Max. number of indexable elements.
+        */
+       size_t max_nr_elem;
+       /*
+        * The stride of a dimension is the multiplier applied to its
+        * index to account for the elements of the dimensions nested
+        * within it.
+        */
+       size_t stride;
+};
+
+struct lib_counter_layout {
+       void *counters;
+       unsigned long *underflow_bitmap;
+       unsigned long *overflow_bitmap;
+};
+
+enum lib_counter_arithmetic {
+       LIB_COUNTER_ARITHMETIC_MODULAR,
+       LIB_COUNTER_ARITHMETIC_SATURATE,
+};
+
+struct lib_counter {
+       size_t nr_dimensions;
+       int64_t allocated_elem;
+       struct lib_counter_dimension *dimensions;
+       enum lib_counter_arithmetic arithmetic;
+       union {
+               struct {
+                       int32_t max, min;
+               } limits_32_bit;
+               struct {
+                       int64_t max, min;
+               } limits_64_bit;
+       } saturation;
+       union {
+               int8_t s8;
+               int16_t s16;
+               int32_t s32;
+               int64_t s64;
+       } global_sum_step;              /* 0 if unused */
+       struct lib_counter_config config;
+
+       struct lib_counter_layout global_counters;
+       struct lib_counter_layout __percpu *percpu_counters;
+};
+
+#endif /* _LTTNG_COUNTER_TYPES_H */
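
To make the stride definition concrete: for a two-dimensional counter with max_nr_elem = {4, 8}, lttng_counter_init_stride() (in counter.c below) assigns strides {8, 1}, so element (i, j) lives at flattened index i * 8 + j. A sketch of that mapping, illustrative only, mirroring lttng_counter_get_index():

#include <linux/types.h>

/* Illustrative 2-D index flattening for dimensions {4, 8}. */
static size_t example_flat_index(size_t i, size_t j)
{
	const size_t stride[2] = { 8, 1 };	/* innermost dimension has stride 1 */

	return i * stride[0] + j * stride[1];	/* i * 8 + j */
}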
diff --git a/include/counter/counter.h b/include/counter/counter.h
new file mode 100644
index 0000000..b244fa2
--- /dev/null
+++ b/include/counter/counter.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * counter/counter.h
+ *
+ * LTTng Counters API
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_COUNTER_H
+#define _LTTNG_COUNTER_H
+
+#include <linux/types.h>
+#include <linux/percpu.h>
+#include <counter/config.h>
+#include <counter/counter-types.h>
+
+/* max_nr_elem is for each dimension. */
+struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
+                                        size_t nr_dimensions,
+                                        const size_t *max_nr_elem,
+                                        int64_t global_sum_step);
+void lttng_counter_destroy(struct lib_counter *counter);
+
+int lttng_counter_get_nr_dimensions(const struct lib_counter_config *config,
+                                   struct lib_counter *counter,
+                                   size_t *nr_dimensions);
+int lttng_counter_get_max_nr_elem(const struct lib_counter_config *config,
+                                 struct lib_counter *counter,
+                                 size_t *max_nr_elem); /* array of size nr_dimensions */
+
+int lttng_counter_read(const struct lib_counter_config *config,
+                     struct lib_counter *counter,
+                     const size_t *dimension_indexes,
+                     int cpu, int64_t *value,
+                     bool *overflow, bool *underflow);
+int lttng_counter_aggregate(const struct lib_counter_config *config,
+                           struct lib_counter *counter,
+                           const size_t *dimension_indexes,
+                           int64_t *value,
+                           bool *overflow, bool *underflow);
+int lttng_counter_clear(const struct lib_counter_config *config,
+                       struct lib_counter *counter,
+                       const size_t *dimension_indexes);
+
+#endif /* _LTTNG_COUNTER_H */
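
A minimal lifecycle sketch against this API (hypothetical; error handling trimmed, and the 1-D shape of 256 slots is an assumption for illustration):

#include <linux/printk.h>
#include <counter/counter.h>
#include <counter/counter-api.h>	/* lttng_counter_add() fast path */

static void example_counter_lifecycle(const struct lib_counter_config *config)
{
	const size_t max_nr_elem[1] = { 256 };	/* one dimension, 256 slots */
	const size_t idx[1] = { 42 };
	struct lib_counter *counter;
	bool overflow, underflow;
	int64_t value;

	counter = lttng_counter_create(config, 1, max_nr_elem, 0);
	if (!counter)
		return;
	(void) lttng_counter_add(config, counter, idx, 1);
	if (!lttng_counter_aggregate(config, counter, idx, &value,
				     &overflow, &underflow))
		pr_info("slot 42 = %lld\n", (long long) value);
	lttng_counter_destroy(counter);
}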
diff --git a/include/lttng/events.h b/include/lttng/events.h
index 80de505056fe8a265d06c02f43717c92934fc0ea..7b3b18571184fb0ee1d9717277800d42f20ce06e 100644
--- a/include/lttng/events.h
+++ b/include/lttng/events.h
@@ -512,6 +512,28 @@ struct lttng_channel_ops {
                        uint64_t *id);
 };
 
+struct lttng_counter_ops {
+       struct lib_counter *(*counter_create)(size_t nr_dimensions,
+                       const size_t *max_nr_elem,      /* for each dimension */
+                       int64_t global_sum_step);
+       void (*counter_destroy)(struct lib_counter *counter);
+       int (*counter_add)(struct lib_counter *counter, const size_t *dimension_indexes,
+                       int64_t v);
+       /*
+        * counter_read reads a specific cpu's counter if @cpu >= 0, or
+        * the global aggregation counter if @cpu == -1.
+        */
+       int (*counter_read)(struct lib_counter *counter, const size_t *dimension_indexes, int cpu,
+                        int64_t *value, bool *overflow, bool *underflow);
+       /*
+        * counter_aggregate returns the total sum of all per-cpu counters and
+        * the global aggregation counter.
+        */
+       int (*counter_aggregate)(struct lib_counter *counter, const size_t *dimension_indexes,
+                       int64_t *value, bool *overflow, bool *underflow);
+       int (*counter_clear)(struct lib_counter *counter, const size_t *dimension_indexes);
+};
+
 struct lttng_transport {
        char *name;
        struct module *owner;
@@ -519,6 +541,13 @@ struct lttng_transport {
        struct lttng_channel_ops ops;
 };
 
+struct lttng_counter_transport {
+       char *name;
+       struct module *owner;
+       struct list_head node;
+       struct lttng_counter_ops ops;
+};
+
 struct lttng_syscall_filter;
 
 #define LTTNG_EVENT_HT_BITS            12
@@ -789,6 +818,9 @@ int lttng_event_notifier_disable(struct lttng_event_notifier *event_notifier);
 void lttng_transport_register(struct lttng_transport *transport);
 void lttng_transport_unregister(struct lttng_transport *transport);
 
+void lttng_counter_transport_register(struct lttng_counter_transport *transport);
+void lttng_counter_transport_unregister(struct lttng_counter_transport *transport);
+
 void synchronize_trace(void);
 int lttng_abi_init(void);
 int lttng_abi_compat_old_init(void);
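
This patch adds the transport registry but no consumer of it yet; presumably a later change resolves a transport by name and allocates counters through its ops, along these lines (hypothetical sketch; such a helper would live alongside the static lttng_counter_transport_find() added to lttng-events.c below):

#include <linux/module.h>
#include <lttng/events.h>

/*
 * Hypothetical consumer of the counter transport registry: look up a
 * transport by name, pin its module, and create a counter through ops.
 */
static struct lib_counter *example_counter_create(const char *transport_name,
		size_t nr_dimensions, const size_t *max_nr_elem)
{
	struct lttng_counter_transport *transport;

	transport = lttng_counter_transport_find(transport_name);
	if (!transport)
		return NULL;
	if (!try_module_get(transport->owner))
		return NULL;
	return transport->ops.counter_create(nr_dimensions, max_nr_elem, 0);
}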
diff --git a/src/Kbuild b/src/Kbuild
index 7f67791a523b301d5c8f30019529a86405a8d4c2..e3896e8125b212fc5eab6754011aeea3261a08b6 100644
--- a/src/Kbuild
+++ b/src/Kbuild
@@ -13,6 +13,12 @@ obj-$(CONFIG_LTTNG) += lttng-ring-buffer-client-mmap-discard.o
 obj-$(CONFIG_LTTNG) += lttng-ring-buffer-client-mmap-overwrite.o
 obj-$(CONFIG_LTTNG) += lttng-ring-buffer-metadata-mmap-client.o
 obj-$(CONFIG_LTTNG) += lttng-ring-buffer-event-notifier-client.o
+
+obj-$(CONFIG_LTTNG) += lttng-counter-client-percpu-32-modular.o
+ifneq ($(CONFIG_64BIT),)
+       obj-$(CONFIG_LTTNG) += lttng-counter-client-percpu-64-modular.o
+endif # CONFIG_64BIT
+
 obj-$(CONFIG_LTTNG) += lttng-clock.o
 
 obj-$(CONFIG_LTTNG) += lttng-tracer.o
diff --git a/src/lib/Kbuild b/src/lib/Kbuild
index 82049e9df91208a0eab24d55ed56725bb101bfda..b0f49b6b704ad8093ffab4cba21911e9c4682ff9 100644
--- a/src/lib/Kbuild
+++ b/src/lib/Kbuild
@@ -18,4 +18,9 @@ lttng-lib-ring-buffer-objs := \
   prio_heap/lttng_prio_heap.o \
   ../wrapper/splice.o
 
+obj-$(CONFIG_LTTNG) += lttng-counter.o
+
+lttng-counter-objs := \
+  counter/counter.o
+
 # vim:syntax=make
diff --git a/src/lib/counter/counter.c b/src/lib/counter/counter.c
new file mode 100644
index 0000000..bd5c459
--- /dev/null
+++ b/src/lib/counter/counter.c
@@ -0,0 +1,507 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * counter.c
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <lttng/tracer.h>
+#include <linux/cpumask.h>
+#include <counter/counter.h>
+#include <counter/counter-internal.h>
+#include <wrapper/vmalloc.h>
+
+static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
+{
+       return dimension->max_nr_elem;
+}
+
+static int lttng_counter_init_stride(const struct lib_counter_config *config,
+                                    struct lib_counter *counter)
+{
+       size_t nr_dimensions = counter->nr_dimensions;
+       size_t stride = 1;
+       ssize_t i;
+
+       for (i = nr_dimensions - 1; i >= 0; i--) {
+               struct lib_counter_dimension *dimension = &counter->dimensions[i];
+               size_t nr_elem;
+
+               nr_elem = lttng_counter_get_dimension_nr_elements(dimension);
+               dimension->stride = stride;
+               /* Each dimension must have at least one element. */
+               if (!nr_elem)
+                       return -EINVAL;
+               /* Reject stride overflow before multiplying. */
+               if (stride > SIZE_MAX / nr_elem)
+                       return -EINVAL;
+               stride *= nr_elem;
+       }
+       return 0;
+}
+
+static int lttng_counter_layout_init(struct lib_counter *counter, int cpu)
+{
+       struct lib_counter_layout *layout;
+       size_t counter_size;
+       size_t nr_elem = counter->allocated_elem;
+
+       if (cpu == -1)
+               layout = &counter->global_counters;
+       else
+               layout = per_cpu_ptr(counter->percpu_counters, cpu);
+       switch (counter->config.counter_size) {
+       case COUNTER_SIZE_8_BIT:
+       case COUNTER_SIZE_16_BIT:
+       case COUNTER_SIZE_32_BIT:
+       case COUNTER_SIZE_64_BIT:
+               counter_size = (size_t) counter->config.counter_size;
+               break;
+       default:
+               return -EINVAL;
+       }
+       layout->counters = lttng_kvzalloc_node(ALIGN(counter_size * nr_elem,
+                                                    1 << INTERNODE_CACHE_SHIFT),
+                                              GFP_KERNEL | __GFP_NOWARN,
+                                              cpu_to_node(max(cpu, 0)));
+       if (!layout->counters)
+               return -ENOMEM;
+       layout->overflow_bitmap = lttng_kvzalloc_node(ALIGN(ALIGN(nr_elem, 8) / 8,
+                                                    1 << INTERNODE_CACHE_SHIFT),
+                                              GFP_KERNEL | __GFP_NOWARN,
+                                              cpu_to_node(max(cpu, 0)));
+       if (!layout->overflow_bitmap)
+               return -ENOMEM;
+       layout->underflow_bitmap = lttng_kvzalloc_node(ALIGN(ALIGN(nr_elem, 8) / 8,
+                                                    1 << INTERNODE_CACHE_SHIFT),
+                                              GFP_KERNEL | __GFP_NOWARN,
+                                              cpu_to_node(max(cpu, 0)));
+       if (!layout->underflow_bitmap)
+               return -ENOMEM;
+       return 0;
+}
+
+static void lttng_counter_layout_fini(struct lib_counter *counter, int cpu)
+{
+       struct lib_counter_layout *layout;
+
+       if (cpu == -1)
+               layout = &counter->global_counters;
+       else
+               layout = per_cpu_ptr(counter->percpu_counters, cpu);
+
+       lttng_kvfree(layout->counters);
+       lttng_kvfree(layout->overflow_bitmap);
+       lttng_kvfree(layout->underflow_bitmap);
+}
+
+static
+int lttng_counter_set_global_sum_step(struct lib_counter *counter,
+                                     int64_t global_sum_step)
+{
+       if (global_sum_step < 0)
+               return -EINVAL;
+
+       switch (counter->config.counter_size) {
+       case COUNTER_SIZE_8_BIT:
+               if (global_sum_step > S8_MAX)
+                       return -EINVAL;
+               counter->global_sum_step.s8 = (int8_t) global_sum_step;
+               break;
+       case COUNTER_SIZE_16_BIT:
+               if (global_sum_step > S16_MAX)
+                       return -EINVAL;
+               counter->global_sum_step.s16 = (int16_t) global_sum_step;
+               break;
+       case COUNTER_SIZE_32_BIT:
+               if (global_sum_step > S32_MAX)
+                       return -EINVAL;
+               counter->global_sum_step.s32 = (int32_t) global_sum_step;
+               break;
+       case COUNTER_SIZE_64_BIT:
+               counter->global_sum_step.s64 = global_sum_step;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static
+int validate_args(const struct lib_counter_config *config,
+       size_t nr_dimensions,
+       const size_t *max_nr_elem,
+       int64_t global_sum_step)
+{
+       if (BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
+               WARN_ON_ONCE(1);
+               return -1;
+       }
+       if (!max_nr_elem)
+               return -1;
+       /*
+        * A global sum step is only useful when allocating both per-cpu
+        * and global counters.
+        */
+       if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) ||
+                       !(config->alloc & COUNTER_ALLOC_PER_CPU)))
+               return -1;
+       return 0;
+}
+
+struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
+                                        size_t nr_dimensions,
+                                        const size_t *max_nr_elem,
+                                        int64_t global_sum_step)
+{
+       struct lib_counter *counter;
+       size_t dimension, nr_elem = 1;
+       int cpu, ret;
+
+       if (validate_args(config, nr_dimensions, max_nr_elem, global_sum_step))
+               return NULL;
+       counter = kzalloc(sizeof(struct lib_counter), GFP_KERNEL);
+       if (!counter)
+               return NULL;
+       counter->config = *config;
+       if (lttng_counter_set_global_sum_step(counter, global_sum_step))
+               goto error_sum_step;
+       counter->nr_dimensions = nr_dimensions;
+       counter->dimensions = kzalloc(nr_dimensions * sizeof(*counter->dimensions), GFP_KERNEL);
+       if (!counter->dimensions)
+               goto error_dimensions;
+       for (dimension = 0; dimension < nr_dimensions; dimension++)
+               counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
+       if (config->alloc & COUNTER_ALLOC_PER_CPU) {
+               counter->percpu_counters = alloc_percpu(struct lib_counter_layout);
+               if (!counter->percpu_counters)
+                       goto error_alloc_percpu;
+       }
+
+       if (lttng_counter_init_stride(config, counter))
+               goto error_init_stride;
+       //TODO saturation values.
+       for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
+               nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
+       counter->allocated_elem = nr_elem;
+       if (config->alloc & COUNTER_ALLOC_GLOBAL) {
+               ret = lttng_counter_layout_init(counter, -1);   /* global */
+               if (ret)
+                       goto layout_init_error;
+       }
+       if (config->alloc & COUNTER_ALLOC_PER_CPU) {
+               //TODO: integrate with CPU hotplug and online cpus
+               for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
+                       ret = lttng_counter_layout_init(counter, cpu);
+                       if (ret)
+                               goto layout_init_error;
+               }
+       }
+       return counter;
+
+layout_init_error:
+       if (config->alloc & COUNTER_ALLOC_PER_CPU) {
+               for (cpu = 0; cpu < num_possible_cpus(); cpu++)
+                       lttng_counter_layout_fini(counter, cpu);
+       }
+       if (config->alloc & COUNTER_ALLOC_GLOBAL)
+               lttng_counter_layout_fini(counter, -1);
+error_init_stride:
+       free_percpu(counter->percpu_counters);
+error_alloc_percpu:
+       kfree(counter->dimensions);
+error_dimensions:
+error_sum_step:
+       kfree(counter);
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(lttng_counter_create);
+
+void lttng_counter_destroy(struct lib_counter *counter)
+{
+       struct lib_counter_config *config = &counter->config;
+       int cpu;
+
+       if (config->alloc & COUNTER_ALLOC_PER_CPU) {
+               for (cpu = 0; cpu < num_possible_cpus(); cpu++)
+                       lttng_counter_layout_fini(counter, cpu);
+               free_percpu(counter->percpu_counters);
+       }
+       if (config->alloc & COUNTER_ALLOC_GLOBAL)
+               lttng_counter_layout_fini(counter, -1);
+       kfree(counter->dimensions);
+       kfree(counter);
+}
+EXPORT_SYMBOL_GPL(lttng_counter_destroy);
+
+int lttng_counter_read(const struct lib_counter_config *config,
+                      struct lib_counter *counter,
+                      const size_t *dimension_indexes,
+                      int cpu, int64_t *value, bool *overflow,
+                      bool *underflow)
+{
+       struct lib_counter_layout *layout;
+       size_t index;
+
+       if (unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
+               return -EOVERFLOW;
+       index = lttng_counter_get_index(config, counter, dimension_indexes);
+
+       switch (config->alloc) {
+       case COUNTER_ALLOC_PER_CPU:
+               if (cpu < 0 || cpu >= num_possible_cpus())
+                       return -EINVAL;
+               layout = per_cpu_ptr(counter->percpu_counters, cpu);
+               break;
+       case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+               if (cpu >= 0) {
+                       if (cpu >= num_possible_cpus())
+                               return -EINVAL;
+                       layout = per_cpu_ptr(counter->percpu_counters, cpu);
+               } else {
+                       layout = &counter->global_counters;
+               }
+               break;
+       case COUNTER_ALLOC_GLOBAL:
+               if (cpu >= 0)
+                       return -EINVAL;
+               layout = &counter->global_counters;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       switch (config->counter_size) {
+       case COUNTER_SIZE_8_BIT:
+       {
+               int8_t *int_p = (int8_t *) layout->counters + index;
+               *value = (int64_t) READ_ONCE(*int_p);
+               break;
+       }
+       case COUNTER_SIZE_16_BIT:
+       {
+               int16_t *int_p = (int16_t *) layout->counters + index;
+               *value = (int64_t) READ_ONCE(*int_p);
+               break;
+       }
+       case COUNTER_SIZE_32_BIT:
+       {
+               int32_t *int_p = (int32_t *) layout->counters + index;
+               *value = (int64_t) READ_ONCE(*int_p);
+               break;
+       }
+#if BITS_PER_LONG == 64
+       case COUNTER_SIZE_64_BIT:
+       {
+               int64_t *int_p = (int64_t *) layout->counters + index;
+               *value = READ_ONCE(*int_p);
+               break;
+       }
+#endif
+       default:
+               WARN_ON_ONCE(1);
+       }
+       *overflow = test_bit(index, layout->overflow_bitmap);
+       *underflow = test_bit(index, layout->underflow_bitmap);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_counter_read);
+
+int lttng_counter_aggregate(const struct lib_counter_config *config,
+                           struct lib_counter *counter,
+                           const size_t *dimension_indexes,
+                           int64_t *value, bool *overflow,
+                           bool *underflow)
+{
+       int cpu, ret;
+       int64_t v, sum = 0;
+       bool of, uf;
+
+       *overflow = false;
+       *underflow = false;
+
+       switch (config->alloc) {
+       case COUNTER_ALLOC_GLOBAL:      /* Fallthrough */
+       case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+               /* Read global counter. */
+               ret = lttng_counter_read(config, counter, dimension_indexes,
+                                        -1, &v, &of, &uf);
+               if (ret < 0)
+                       return ret;
+               sum += v;
+               *overflow |= of;
+               *underflow |= uf;
+               break;
+       case COUNTER_ALLOC_PER_CPU:
+               break;
+       }
+
+       switch (config->alloc) {
+       case COUNTER_ALLOC_GLOBAL:
+               break;
+       case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:      /* Fallthrough */
+       case COUNTER_ALLOC_PER_CPU:
+               //TODO: integrate with CPU hotplug and online cpus
+               for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
+                       int64_t old = sum;
+
+                       ret = lttng_counter_read(config, counter, dimension_indexes,
+                                                cpu, &v, &of, &uf);
+                       if (ret < 0)
+                               return ret;
+                       *overflow |= of;
+                       *underflow |= uf;
+                       /* Overflow is defined on unsigned types. */
+                       sum = (int64_t) ((uint64_t) old + (uint64_t) v);
+                       if (v > 0 && sum < old)
+                               *overflow = true;
+                       else if (v < 0 && sum > old)
+                               *underflow = true;
+               }
+               break;
+       default:
+               return -EINVAL;
+       }
+       *value = sum;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_counter_aggregate);
+
+static
+int lttng_counter_clear_cpu(const struct lib_counter_config *config,
+                           struct lib_counter *counter,
+                           const size_t *dimension_indexes,
+                           int cpu)
+{
+       struct lib_counter_layout *layout;
+       size_t index;
+
+       if (unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
+               return -EOVERFLOW;
+       index = lttng_counter_get_index(config, counter, dimension_indexes);
+
+       switch (config->alloc) {
+       case COUNTER_ALLOC_PER_CPU:
+               if (cpu < 0 || cpu >= num_possible_cpus())
+                       return -EINVAL;
+               layout = per_cpu_ptr(counter->percpu_counters, cpu);
+               break;
+       case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+               if (cpu >= 0) {
+                       if (cpu >= num_possible_cpus())
+                               return -EINVAL;
+                       layout = per_cpu_ptr(counter->percpu_counters, cpu);
+               } else {
+                       layout = &counter->global_counters;
+               }
+               break;
+       case COUNTER_ALLOC_GLOBAL:
+               if (cpu >= 0)
+                       return -EINVAL;
+               layout = &counter->global_counters;
+               break;
+       default:
+               return -EINVAL;
+       }
+       switch (config->counter_size) {
+       case COUNTER_SIZE_8_BIT:
+       {
+               int8_t *int_p = (int8_t *) layout->counters + index;
+               WRITE_ONCE(*int_p, 0);
+               break;
+       }
+       case COUNTER_SIZE_16_BIT:
+       {
+               int16_t *int_p = (int16_t *) layout->counters + index;
+               WRITE_ONCE(*int_p, 0);
+               break;
+       }
+       case COUNTER_SIZE_32_BIT:
+       {
+               int32_t *int_p = (int32_t *) layout->counters + index;
+               WRITE_ONCE(*int_p, 0);
+               break;
+       }
+#if BITS_PER_LONG == 64
+       case COUNTER_SIZE_64_BIT:
+       {
+               int64_t *int_p = (int64_t *) layout->counters + index;
+               WRITE_ONCE(*int_p, 0);
+               break;
+       }
+#endif
+       default:
+               WARN_ON_ONCE(1);
+       }
+       clear_bit(index, layout->overflow_bitmap);
+       clear_bit(index, layout->underflow_bitmap);
+       return 0;
+}
+
+int lttng_counter_clear(const struct lib_counter_config *config,
+                       struct lib_counter *counter,
+                       const size_t *dimension_indexes)
+{
+       int cpu, ret;
+
+       switch (config->alloc) {
+       case COUNTER_ALLOC_GLOBAL:      /* Fallthrough */
+       case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+               /* Clear global counter. */
+               ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
+               if (ret < 0)
+                       return ret;
+               break;
+       case COUNTER_ALLOC_PER_CPU:
+               break;
+       }
+
+       switch (config->alloc) {
+       case COUNTER_ALLOC_GLOBAL:
+               break;
+       case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:      /* Fallthrough */
+       case COUNTER_ALLOC_PER_CPU:
+               //TODO: integrate with CPU hotplug and online cpus
+               for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
+                       ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
+                       if (ret < 0)
+                               return ret;
+               }
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_counter_clear);
+
+int lttng_counter_get_nr_dimensions(const struct lib_counter_config *config,
+                                   struct lib_counter *counter,
+                                   size_t *nr_dimensions)
+{
+       *nr_dimensions = counter->nr_dimensions;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_counter_get_nr_dimensions);
+
+int lttng_counter_get_max_nr_elem(const struct lib_counter_config *config,
+                                 struct lib_counter *counter,
+                                 size_t *max_nr_elem)  /* array of size nr_dimensions */
+{
+       size_t dimension;
+
+       for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
+               max_nr_elem[dimension] = lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_counter_get_max_nr_elem);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng counter library");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
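
As a worked example of the per-cpu rebalancing in __lttng_counter_add() (illustrative numbers, not code from the patch): with an 8-bit counter and global_sum_step = 64, a per-cpu value reaching 65 exceeds the step, so move_sum = 64 / 2 = 32 is subtracted locally, leaving 33 per-cpu, and added to the global counter under COUNTER_SYNC_GLOBAL. Per-cpu values therefore stay near ±global_sum_step while the global counter absorbs the bulk of the sum:

#include <linux/types.h>

/*
 * Illustrative only: one iteration of the 8-bit per-cpu path of
 * __lttng_counter_add() with global_sum_step = 64.
 */
static int8_t example_move_sum(int8_t old, int8_t v, int64_t *move_sum)
{
	const int8_t step = 64;
	int8_t n = (int8_t) ((uint8_t) old + (uint8_t) v);

	*move_sum = 0;
	if (n > step)
		*move_sum = step / 2;		/* e.g. old = 64, v = 1 -> move 32 */
	else if (n < -step)
		*move_sum = -(step / 2);
	return n - (int8_t) *move_sum;		/* value the per-cpu slot keeps */
}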
diff --git a/src/lttng-counter-client-percpu-32-modular.c b/src/lttng-counter-client-percpu-32-modular.c
new file mode 100644
index 0000000..3894386
--- /dev/null
+++ b/src/lttng-counter-client-percpu-32-modular.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * lttng-counter-client-percpu-32-modular.c
+ *
+ * LTTng lib counter client. Per-cpu 32-bit counters in overflow
+ * arithmetic.
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+#include <counter/counter.h>
+#include <counter/counter-api.h>
+
+static const struct lib_counter_config client_config = {
+       .alloc = COUNTER_ALLOC_PER_CPU,
+       .sync = COUNTER_SYNC_PER_CPU,
+       .arithmetic = COUNTER_ARITHMETIC_MODULAR,
+       .counter_size = COUNTER_SIZE_32_BIT,
+};
+
+static struct lib_counter *counter_create(size_t nr_dimensions,
+                                         const size_t *max_nr_elem,
+                                         int64_t global_sum_step)
+{
+       return lttng_counter_create(&client_config, nr_dimensions, max_nr_elem,
+                                   global_sum_step);
+}
+
+static void counter_destroy(struct lib_counter *counter)
+{
+       return lttng_counter_destroy(counter);
+}
+
+static int counter_add(struct lib_counter *counter, const size_t *dimension_indexes, int64_t v)
+{
+       return lttng_counter_add(&client_config, counter, dimension_indexes, v);
+}
+
+static int counter_read(struct lib_counter *counter, const size_t *dimension_indexes, int cpu,
+                       int64_t *value, bool *overflow, bool *underflow)
+{
+       return lttng_counter_read(&client_config, counter, dimension_indexes, cpu, value,
+                                 overflow, underflow);
+}
+
+static int counter_aggregate(struct lib_counter *counter, const size_t *dimension_indexes,
+                            int64_t *value, bool *overflow, bool *underflow)
+{
+       return lttng_counter_aggregate(&client_config, counter, dimension_indexes, value,
+                                      overflow, underflow);
+}
+
+static int counter_clear(struct lib_counter *counter, const size_t *dimension_indexes)
+{
+       return lttng_counter_clear(&client_config, counter, dimension_indexes);
+}
+
+static struct lttng_counter_transport lttng_counter_transport = {
+       .name = "counter-per-cpu-32-modular",
+       .owner = THIS_MODULE,
+       .ops = {
+               .counter_create = counter_create,
+               .counter_destroy = counter_destroy,
+               .counter_add = counter_add,
+               .counter_read = counter_read,
+               .counter_aggregate = counter_aggregate,
+               .counter_clear = counter_clear,
+       },
+};
+
+static int __init lttng_counter_client_init(void)
+{
+       /*
+        * This vmalloc sync-all also takes care of the lib counter's
+        * vmalloc'd module pages when it is built as a module into LTTng.
+        */
+       wrapper_vmalloc_sync_mappings();
+       lttng_counter_transport_register(&lttng_counter_transport);
+       return 0;
+}
+
+module_init(lttng_counter_client_init);
+
+static void __exit lttng_counter_client_exit(void)
+{
+       lttng_counter_transport_unregister(&lttng_counter_transport);
+}
+
+module_exit(lttng_counter_client_exit);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng counter per-cpu 32-bit overflow client");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/lttng-counter-client-percpu-64-modular.c b/src/lttng-counter-client-percpu-64-modular.c
new file mode 100644
index 0000000..8a40f3c
--- /dev/null
+++ b/src/lttng-counter-client-percpu-64-modular.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * lttng-counter-client-percpu-64-modular.c
+ *
+ * LTTng lib counter client. Per-cpu 64-bit counters in overflow
+ * arithmetic.
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <linux/module.h>
+#include <lttng/tracer.h>
+#include <counter/counter.h>
+#include <counter/counter-api.h>
+
+static const struct lib_counter_config client_config = {
+       .alloc = COUNTER_ALLOC_PER_CPU,
+       .sync = COUNTER_SYNC_PER_CPU,
+       .arithmetic = COUNTER_ARITHMETIC_MODULAR,
+       .counter_size = COUNTER_SIZE_64_BIT,
+};
+
+static struct lib_counter *counter_create(size_t nr_dimensions,
+                                         const size_t *max_nr_elem,
+                                         int64_t global_sum_step)
+{
+       return lttng_counter_create(&client_config, nr_dimensions, max_nr_elem,
+                                   global_sum_step);
+}
+
+static void counter_destroy(struct lib_counter *counter)
+{
+       return lttng_counter_destroy(counter);
+}
+
+static int counter_add(struct lib_counter *counter, const size_t *dimension_indexes, int64_t v)
+{
+       return lttng_counter_add(&client_config, counter, dimension_indexes, v);
+}
+
+static int counter_read(struct lib_counter *counter, const size_t *dimension_indexes, int cpu,
+                       int64_t *value, bool *overflow, bool *underflow)
+{
+       return lttng_counter_read(&client_config, counter, dimension_indexes, cpu, value,
+                                 overflow, underflow);
+}
+
+static int counter_aggregate(struct lib_counter *counter, const size_t *dimension_indexes,
+                            int64_t *value, bool *overflow, bool *underflow)
+{
+       return lttng_counter_aggregate(&client_config, counter, dimension_indexes, value,
+                                      overflow, underflow);
+}
+
+static int counter_clear(struct lib_counter *counter, const size_t *dimension_indexes)
+{
+       return lttng_counter_clear(&client_config, counter, dimension_indexes);
+}
+
+static struct lttng_counter_transport lttng_counter_transport = {
+       .name = "counter-per-cpu-64-modular",
+       .owner = THIS_MODULE,
+       .ops = {
+               .counter_create = counter_create,
+               .counter_destroy = counter_destroy,
+               .counter_add = counter_add,
+               .counter_read = counter_read,
+               .counter_aggregate = counter_aggregate,
+               .counter_clear = counter_clear,
+       },
+};
+
+static int __init lttng_counter_client_init(void)
+{
+       /*
+        * This vmalloc sync-all also takes care of the lib counter's
+        * vmalloc'd module pages when it is built as a module into LTTng.
+        */
+       wrapper_vmalloc_sync_mappings();
+       lttng_counter_transport_register(&lttng_counter_transport);
+       return 0;
+}
+
+module_init(lttng_counter_client_init);
+
+static void __exit lttng_counter_client_exit(void)
+{
+       lttng_counter_transport_unregister(&lttng_counter_transport);
+}
+
+module_exit(lttng_counter_client_exit);
+
+MODULE_LICENSE("GPL and additional rights");
+MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
+MODULE_DESCRIPTION("LTTng counter per-cpu 32-bit overflow client");
+MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
+       __stringify(LTTNG_MODULES_MINOR_VERSION) "."
+       __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+       LTTNG_MODULES_EXTRAVERSION);
diff --git a/src/lttng-events.c b/src/lttng-events.c
index 75a193045b1d381a3bf531542e763d741b095d8a..012f2245a353f4b7c43c689985b30628d65063d1 100644
--- a/src/lttng-events.c
+++ b/src/lttng-events.c
@@ -52,6 +52,7 @@
 static LIST_HEAD(sessions);
 static LIST_HEAD(event_notifier_groups);
 static LIST_HEAD(lttng_transport_list);
+static LIST_HEAD(lttng_counter_transport_list);
 /*
  * Protect the sessions and metadata caches.
  */
@@ -759,6 +760,18 @@ void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
        wake_up_interruptible(&stream->read_wait);
 }
 
+static
+struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
+{
+       struct lttng_counter_transport *transport;
+
+       list_for_each_entry(transport, &lttng_counter_transport_list, node) {
+               if (!strcmp(transport->name, name))
+                       return transport;
+       }
+       return NULL;
+}
+
 /*
  * Supports event creation while tracing session is active.
  * Needs to be called with sessions mutex held.
@@ -3878,6 +3891,29 @@ void lttng_transport_unregister(struct lttng_transport *transport)
 }
 EXPORT_SYMBOL_GPL(lttng_transport_unregister);
 
+void lttng_counter_transport_register(struct lttng_counter_transport *transport)
+{
+       /*
+        * Make sure no page fault can be triggered by the module about to be
+        * registered. We deal with this here so we don't have to call
+        * vmalloc_sync_mappings() in each module's init.
+        */
+       wrapper_vmalloc_sync_mappings();
+
+       mutex_lock(&sessions_mutex);
+       list_add_tail(&transport->node, &lttng_counter_transport_list);
+       mutex_unlock(&sessions_mutex);
+}
+EXPORT_SYMBOL_GPL(lttng_counter_transport_register);
+
+void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
+{
+       mutex_lock(&sessions_mutex);
+       list_del(&transport->node);
+       mutex_unlock(&sessions_mutex);
+}
+EXPORT_SYMBOL_GPL(lttng_counter_transport_unregister);
+
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
 
 enum cpuhp_state lttng_hp_prepare;