Tracepoint loglevel: setup all loglevel information at build time
[lttng-ust.git] / liblttng-ust / lttng-ust-abi.c
index ea0a12483940def937212bc152f518cef90e9c81..6be716be6a26403b4c3a2917d1ee2097a96a3b32 100644 (file)
 #include <urcu/compiler.h>
 #include <urcu/list.h>
 #include <lttng/ust-events.h>
-#include <lttng/usterr-signal-safe.h>
-#include "lttng/core.h"
+#include <lttng/ust-version.h>
+#include <usterr-signal-safe.h>
+#include <helper.h>
 #include "ltt-tracer.h"
+#include "tracepoint-internal.h"
+
+/*
+ * Iterator state used to stream the tracepoint list to the session
+ * daemon, one entry per LIST_GET command.
+ */
+struct ltt_tracepoint_list {
+       struct tracepoint_iter iter;    /* current iteration position */
+       int got_first;  /* set once the first GET has started the iterator */
+};
+
+/*
+ * Iterator state used to stream the loglevel list, one entry per
+ * LTTNG_UST_LOGLEVEL_LIST_GET command.
+ * NOTE(review): unlike ltt_tracepoint_list, "iter" is a pointer here;
+ * the disabled code in ltt_loglevel_list_get() accesses it
+ * inconsistently — confirm intended ownership before enabling.
+ */
+struct ltt_loglevel_list {
+       struct loglevel_iter *iter;     /* current iteration position */
+       int got_first;  /* set once the first GET has started the iterator */
+};
+
+/*
+ * Non-zero while lttng_ust_abi_exit() tears down the object table, so
+ * that lttng_rb_release() skips buffer_read_close() (see comment there).
+ */
+static int lttng_ust_abi_close_in_progress;
 
 static
 int lttng_abi_tracepoint_list(void);
 
+static
+int lttng_abi_loglevel_list(void);
+
 /*
  * Object descriptor table. Should be protected from concurrent access
  * by the caller.
@@ -201,6 +218,7 @@ static const struct lttng_ust_objd_ops lttng_metadata_ops;
 static const struct lttng_ust_objd_ops lttng_event_ops;
 static const struct lttng_ust_objd_ops lib_ring_buffer_objd_ops;
 static const struct lttng_ust_objd_ops lttng_tracepoint_list_ops;
+static const struct lttng_ust_objd_ops lttng_loglevel_list_ops;
 
 enum channel_type {
        PER_CPU_CHANNEL,
@@ -241,9 +259,9 @@ static
 long lttng_abi_tracer_version(int objd,
        struct lttng_ust_tracer_version *v)
 {
-       v->version = LTTNG_UST_VERSION;
-       v->patchlevel = LTTNG_UST_PATCHLEVEL;
-       v->sublevel = LTTNG_UST_SUBLEVEL;
+       v->major = LTTNG_UST_MAJOR_VERSION;
+       v->minor = LTTNG_UST_MINOR_VERSION;
+       v->patchlevel = LTTNG_UST_PATCHLEVEL_VERSION;
        return 0;
 }
 
@@ -285,6 +303,8 @@ long lttng_abi_add_context(int objd,
  *             Returns a file descriptor listing available tracepoints
  *     LTTNG_UST_WAIT_QUIESCENT
  *             Returns after all previously running probes have completed
+ *     LTTNG_UST_LOGLEVEL_LIST
+ *             Returns a file descriptor listing available loglevels
  *
  * The returned session will be deleted when its file descriptor is closed.
  */
@@ -302,6 +322,8 @@ long lttng_cmd(int objd, unsigned int cmd, unsigned long arg)
        case LTTNG_UST_WAIT_QUIESCENT:
                synchronize_trace();
                return 0;
+       case LTTNG_UST_LOGLEVEL_LIST:
+               return lttng_abi_loglevel_list();
        default:
                return -EINVAL;
        }
@@ -322,16 +344,17 @@ void lttng_metadata_create_events(int channel_objd)
        struct ltt_channel *channel = objd_private(channel_objd);
        static struct lttng_ust_event metadata_params = {
                .instrumentation = LTTNG_UST_TRACEPOINT,
-               .name = "lttng:metadata",
+               .name = "lttng_ust:metadata",
        };
        struct ltt_event *event;
+       int ret;
 
        /*
         * We tolerate no failure path after event creation. It will stay
         * invariant for the rest of the session.
         */
-       event = ltt_event_create(channel, &metadata_params, NULL);
-       if (!event) {
+       ret = ltt_event_create(channel, &metadata_params, NULL, &event);
+       if (ret < 0) {
                goto create_error;
        }
        return;
@@ -513,7 +536,8 @@ copy:
        if (!list->iter.tracepoint) {
                tp_list_entry[0] = '\0';        /* end of list */
        } else {
-               if (!strcmp((*list->iter.tracepoint)->name, "lttng:metadata"))
+               if (!strcmp((*list->iter.tracepoint)->name,
+                               "lttng_ust:metadata"))
                        goto next;
                memcpy(tp_list_entry, (*list->iter.tracepoint)->name,
                        LTTNG_UST_SYM_NAME_LEN);
@@ -587,6 +611,112 @@ static const struct lttng_ust_objd_ops lttng_tracepoint_list_ops = {
        .cmd = lttng_tracepoint_list_cmd,
 };
 
+/*
+ * beware: we don't keep the mutex over the send, but we must walk the
+ * whole list each time we are called again. So sending one loglevel
+ * entry at a time means this is O(n^2). TODO: do as in the kernel and
+ * send multiple loglevel entries for each call to amortize this cost.
+ */
+/*
+ * Fetch the next loglevel list entry into the caller-provided buffers.
+ * Currently a stub: the iteration below is compiled out (#if 0) until
+ * the loglevel iterator API is available, so this function is a no-op.
+ */
+static
+void ltt_loglevel_list_get(struct ltt_loglevel_list *list,
+               const char *loglevel_provider,
+               const char *loglevel,
+               long *value)
+{
+/*
+ * NOTE(review): loglevel_provider and loglevel are written through
+ * memcpy() below, so they should be "char *", not "const char *",
+ * once this code is enabled.
+ */
+#if 0
+next:
+       if (!list->got_first) {
+               //tp_loglevel_iter_start(&list->iter);
+               list->got_first = 1;
+               goto copy;
+       }
+       //tp_loglevel_iter_next(&list->iter);
+copy:
+       /*
+        * NOTE(review): list->iter is declared as a pointer
+        * (struct loglevel_iter *), but the accesses below mix "->"
+        * and "." — fix member access before enabling this code.
+        */
+       if (!list->iter->desc.provider) {
+               loglevel_provider[0] = '\0';    /* end of list */
+       } else {
+               memcpy(loglevel_provider, list->iter->desc.provider,
+                       LTTNG_UST_SYM_NAME_LEN);
+               memcpy(loglevel, list->iter.loglevel,
+                       LTTNG_UST_SYM_NAME_LEN);
+               *value = list->iter.value;
+       }
+#endif
+}
+
+/*
+ * Command handler for a loglevel list object descriptor.
+ *
+ * LTTNG_UST_LOGLEVEL_LIST_GET is meant to copy one entry into the
+ * userspace-provided struct lttng_ust_loglevel at "arg"; the body is
+ * commented out until ltt_loglevel_list_get() is implemented, so it
+ * currently always returns 0. All other commands return -EINVAL.
+ */
+static
+long lttng_loglevel_list_cmd(int objd, unsigned int cmd, unsigned long arg)
+{
+       struct ltt_loglevel_list *list = objd_private(objd);
+       struct lttng_ust_loglevel *loglevel_list_entry =
+               (struct lttng_ust_loglevel *) arg;
+
+       switch (cmd) {
+       case LTTNG_UST_LOGLEVEL_LIST_GET:
+/*
+               ltt_loglevel_list_get(list,
+                       loglevel_list_entry->provider,
+                       loglevel_list_entry->loglevel,
+                       &loglevel_list_entry->value);
+               if (loglevel_list_entry->provider[0] == '\0')
+                       return -ENOENT;
+*/
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+/*
+ * Create a loglevel list object descriptor.
+ *
+ * Allocates an objd bound to lttng_loglevel_list_ops and attaches a
+ * zeroed ltt_loglevel_list as its private data. Returns the object
+ * descriptor on success, or a negative error code. On allocation
+ * failure, the objd is released via lttng_ust_objd_unref() before
+ * returning (goto-based unwind).
+ */
+static
+int lttng_abi_loglevel_list(void)
+{
+       int list_objd, ret;
+       struct ltt_loglevel_list *list;
+
+       list_objd = objd_alloc(NULL, &lttng_loglevel_list_ops);
+       if (list_objd < 0) {
+               ret = list_objd;
+               goto objd_error;
+       }
+       list = zmalloc(sizeof(*list));
+       if (!list) {
+               ret = -ENOMEM;
+               goto alloc_error;
+       }
+       objd_set_private(list_objd, list);
+
+       return list_objd;
+
+alloc_error:
+       {
+               int err;
+
+               err = lttng_ust_objd_unref(list_objd);
+               assert(!err);
+       }
+objd_error:
+       return ret;
+}
+
+/*
+ * Release hook for a loglevel list objd: free the private iterator
+ * state. Returns 0 on success, -EINVAL if no private data was attached
+ * (e.g. objd created but zmalloc failed before objd_set_private).
+ * The iterator stop call stays commented out until the loglevel
+ * iterator API exists.
+ */
+static
+int lttng_release_loglevel_list(int objd)
+{
+       struct ltt_loglevel_list *list = objd_private(objd);
+
+       if (list) {
+               //tp_loglevel_iter_stop(&list->iter);
+               free(list);
+               return 0;
+       } else {
+               return -EINVAL;
+       }
+}
+
+/* Object-descriptor operations for the loglevel list objd. */
+static const struct lttng_ust_objd_ops lttng_loglevel_list_ops = {
+       .release = lttng_release_loglevel_list,
+       .cmd = lttng_loglevel_list_cmd,
+};
+
 struct stream_priv_data {
        struct lttng_ust_lib_ring_buffer *buf;
        struct ltt_channel *ltt_chan;
@@ -646,9 +776,8 @@ int lttng_abi_create_event(int channel_objd,
         * We tolerate no failure path after event creation. It will stay
         * invariant for the rest of the session.
         */
-       event = ltt_event_create(channel, event_param, NULL);
-       if (!event) {
-               ret = -EINVAL;
+       ret = ltt_event_create(channel, event_param, NULL, &event);
+       if (ret < 0) {
                goto event_error;
        }
        objd_set_private(event_objd, event);
@@ -835,7 +964,21 @@ int lttng_rb_release(int objd)
                buf = priv->buf;
                channel = priv->ltt_chan;
                free(priv);
-               channel->ops->buffer_read_close(buf, channel->handle);
+               /*
+                * If we are at ABI exit, we don't want to close the
+                * buffer opened for read: it is being shared between
+                * the parent and child (right after fork), and we don't
+                * want the child to close it for the parent. For a real
+                * exit, we don't care about marking it as closed, as
+                * the consumer daemon (if there is one) will do fine
+                * even if we don't mark it as "closed" for reading on
+                * our side.
+                * We only mark it as closed if it is being explicitly
+                * released by the session daemon with an explicit
+                * release command.
+                */
+               if (!lttng_ust_abi_close_in_progress)
+                       channel->ops->buffer_read_close(buf, channel->handle);
 
                return lttng_ust_objd_unref(channel->objd);
        }
@@ -899,5 +1042,7 @@ static const struct lttng_ust_objd_ops lttng_event_ops = {
 
 void lttng_ust_abi_exit(void)
 {
+       /*
+        * Flag the teardown so lttng_rb_release() skips
+        * buffer_read_close() for read-side buffers shared with a forked
+        * child (see the comment in lttng_rb_release).
+        */
+       lttng_ust_abi_close_in_progress = 1;
        objd_table_destroy();
+       lttng_ust_abi_close_in_progress = 0;
 }
This page took 0.026401 seconds and 4 git commands to generate.