#include <urcu/compiler.h>
#include <urcu/list.h>
#include <lttng/ust-events.h>
-#include <lttng/usterr-signal-safe.h>
-#include "lttng/core.h"
+#include <lttng/ust-version.h>
+#include <usterr-signal-safe.h>
+#include <helper.h>
#include "ltt-tracer.h"
+#include "tracepoint-internal.h"
+
+/*
+ * Per-descriptor state kept across TRACEPOINT_LIST_GET commands:
+ * the tracepoint iterator plus a flag recording whether iteration
+ * has been started yet.
+ */
+struct ltt_tracepoint_list {
+ struct tracepoint_iter iter;
+ int got_first;
+};
+
+/*
+ * Same pattern for the loglevel listing. NOTE(review): iter is held
+ * by pointer here, unlike the embedded tracepoint iterator above —
+ * confirm this asymmetry is intended once the iterator API lands.
+ */
+struct ltt_loglevel_list {
+ struct loglevel_iter *iter;
+ int got_first;
+};
static int lttng_ust_abi_close_in_progress;
static
int lttng_abi_tracepoint_list(void);
+static
+int lttng_abi_loglevel_list(void);
+
/*
* Object descriptor table. Should be protected from concurrent access
* by the caller.
static const struct lttng_ust_objd_ops lttng_event_ops;
static const struct lttng_ust_objd_ops lib_ring_buffer_objd_ops;
static const struct lttng_ust_objd_ops lttng_tracepoint_list_ops;
+static const struct lttng_ust_objd_ops lttng_loglevel_list_ops;
enum channel_type {
PER_CPU_CHANNEL,
long lttng_abi_tracer_version(int objd,
 struct lttng_ust_tracer_version *v)
{
-	v->version = LTTNG_UST_VERSION;
-	v->patchlevel = LTTNG_UST_PATCHLEVEL;
-	v->sublevel = LTTNG_UST_SUBLEVEL;
+	/* Report the tracer version as a major.minor.patchlevel triplet. */
+	v->major = LTTNG_UST_MAJOR_VERSION;
+	v->minor = LTTNG_UST_MINOR_VERSION;
+	v->patchlevel = LTTNG_UST_PATCHLEVEL_VERSION;
 return 0;
}
* Returns a file descriptor listing available tracepoints
* LTTNG_UST_WAIT_QUIESCENT
* Returns after all previously running probes have completed
+ * LTTNG_UST_LOGLEVEL_LIST
+ * Returns a file descriptor listing available loglevels
*
* The returned session will be deleted when its file descriptor is closed.
*/
case LTTNG_UST_WAIT_QUIESCENT:
synchronize_trace();
return 0;
+ case LTTNG_UST_LOGLEVEL_LIST:
+ return lttng_abi_loglevel_list();
default:
return -EINVAL;
}
.name = "lttng_ust:metadata",
};
struct ltt_event *event;
+ int ret;
/*
* We tolerate no failure path after event creation. It will stay
* invariant for the rest of the session.
*/
- event = ltt_event_create(channel, &metadata_params, NULL);
- if (!event) {
+ ret = ltt_event_create(channel, &metadata_params, NULL, &event);
+ if (ret < 0) {
goto create_error;
}
return;
.cmd = lttng_tracepoint_list_cmd,
};
+/*
+ * beware: we don't keep the mutex over the send, but we must walk the
+ * whole list each time we are called again. So sending one loglevel
+ * entry at a time means this is O(n^2). TODO: do as in the kernel and
+ * send multiple tracepoints for each call to amortize this cost.
+ */
+static
+void ltt_loglevel_list_get(struct ltt_loglevel_list *list,
+		char *loglevel_provider,
+		char *loglevel,
+		long *value)
+{
+	/*
+	 * Copy the next loglevel entry into the caller-supplied buffers.
+	 * The output buffers must NOT be const-qualified: they are written
+	 * with memcpy below. An empty provider string marks end of list.
+	 *
+	 * Still stubbed out pending the loglevel iterator API.
+	 */
+#if 0
+next:
+	if (!list->got_first) {
+		//tp_loglevel_iter_start(&list->iter);
+		list->got_first = 1;
+		goto copy;
+	}
+	//tp_loglevel_iter_next(&list->iter);
+copy:
+	if (!list->iter->desc.provider) {
+		loglevel_provider[0] = '\0';	/* end of list */
+	} else {
+		memcpy(loglevel_provider, list->iter->desc.provider,
+			LTTNG_UST_SYM_NAME_LEN);
+		/* iter is a pointer: use -> consistently, not '.'. */
+		memcpy(loglevel, list->iter->loglevel,
+			LTTNG_UST_SYM_NAME_LEN);
+		*value = list->iter->value;
+	}
+#endif
+}
+
+/*
+ * Command handler for a loglevel-list object descriptor. Only
+ * LTTNG_UST_LOGLEVEL_LIST_GET is supported; the copy-out logic is
+ * still stubbed (depends on the loglevel iterator API) and currently
+ * always reports success without filling the entry.
+ */
+static
+long lttng_loglevel_list_cmd(int objd, unsigned int cmd, unsigned long arg)
+{
+ struct ltt_loglevel_list *list = objd_private(objd);
+ struct lttng_ust_loglevel *loglevel_list_entry =
+ (struct lttng_ust_loglevel *) arg;
+
+ switch (cmd) {
+ case LTTNG_UST_LOGLEVEL_LIST_GET:
+/*
+ ltt_loglevel_list_get(list,
+ loglevel_list_entry->provider,
+ loglevel_list_entry->loglevel,
+ &loglevel_list_entry->value);
+ if (loglevel_list_entry->provider[0] == '\0')
+ return -ENOENT;
+*/
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Allocate an object descriptor exposing the available loglevels,
+ * with a zero-initialized iterator as its private data.
+ *
+ * Returns the new object descriptor (>= 0) on success, or a negative
+ * errno-style code on error. The descriptor owns the list state and
+ * frees it in lttng_release_loglevel_list().
+ */
+static
+int lttng_abi_loglevel_list(void)
+{
+	int list_objd, ret;
+	struct ltt_loglevel_list *list;
+
+	list_objd = objd_alloc(NULL, &lttng_loglevel_list_ops);
+	if (list_objd < 0) {
+		ret = list_objd;
+		goto objd_error;
+	}
+	list = zmalloc(sizeof(*list));
+	if (!list) {
+		ret = -ENOMEM;
+		goto alloc_error;
+	}
+	objd_set_private(list_objd, list);
+
+	return list_objd;
+
+alloc_error:
+	{
+		int err;
+
+		err = lttng_ust_objd_unref(list_objd);
+		assert(!err);
+	}
+objd_error:
+	return ret;
+}
+
+/*
+ * Release hook for a loglevel-list descriptor: frees the private
+ * iterator state. Returns 0, or -EINVAL if no state was attached.
+ */
+static
+int lttng_release_loglevel_list(int objd)
+{
+	struct ltt_loglevel_list *list = objd_private(objd);
+
+	if (!list)
+		return -EINVAL;
+	//tp_loglevel_iter_stop(&list->iter);
+	free(list);
+	return 0;
+}
+
+/* Object-descriptor operations backing LTTNG_UST_LOGLEVEL_LIST fds. */
+static const struct lttng_ust_objd_ops lttng_loglevel_list_ops = {
+ .release = lttng_release_loglevel_list,
+ .cmd = lttng_loglevel_list_cmd,
+};
+
struct stream_priv_data {
struct lttng_ust_lib_ring_buffer *buf;
struct ltt_channel *ltt_chan;
* We tolerate no failure path after event creation. It will stay
* invariant for the rest of the session.
*/
- event = ltt_event_create(channel, event_param, NULL);
- if (!event) {
- ret = -EINVAL;
+ ret = ltt_event_create(channel, event_param, NULL, &event);
+ if (ret < 0) {
goto event_error;
}
objd_set_private(event_objd, event);