#include <urcu/compiler.h>
#include <urcu/list.h>
#include <lttng/ust-events.h>
-#include <lttng/usterr-signal-safe.h>
-#include "lttng/core.h"
+#include <lttng/ust-version.h>
+#include <usterr-signal-safe.h>
+#include <helper.h>
#include "ltt-tracer.h"
+#include "tracepoint-internal.h"
+
+/*
+ * Per-descriptor iterator state used to stream the registered
+ * tracepoints to the session daemon, one entry per GET command.
+ */
+struct ltt_tracepoint_list {
+ struct tracepoint_iter iter;
+ int got_first; /* 0 until the iterator has been started */
+};
+
+/* Set while objd_table_destroy() runs in lttng_ust_abi_exit(). */
+static int lttng_ust_abi_close_in_progress;
+
+/* Forward declaration: defined after lttng_tracepoint_list_ops below. */
+static
+int lttng_abi_tracepoint_list(void);
/*
* Object descriptor table. Should be protected from concurrent access
static const struct lttng_ust_objd_ops lttng_metadata_ops;
static const struct lttng_ust_objd_ops lttng_event_ops;
static const struct lttng_ust_objd_ops lib_ring_buffer_objd_ops;
+static const struct lttng_ust_objd_ops lttng_tracepoint_list_ops;
enum channel_type {
PER_CPU_CHANNEL,
return ret;
}
-#if 0
-static
-int lttng_abi_tracepoint_list(void)
-{
- int list_objd, ret;
-
- /* TODO: Create list private data */
- list_objd = objd_alloc(NULL, &lttng_tracepoint_list_ops);
- if (list_objd < 0) {
- ret = list_objd;
- goto objd_error;
- }
-
- return list_objd;
-
-objd_error:
- return ret;
-}
-#endif //0
-
-
static
long lttng_abi_tracer_version(int objd,
struct lttng_ust_tracer_version *v)
{
- v->version = LTTNG_UST_VERSION;
- v->patchlevel = LTTNG_UST_PATCHLEVEL;
- v->sublevel = LTTNG_UST_SUBLEVEL;
+ /* Report the tracer version via the new MAJOR/MINOR/PATCHLEVEL macros. */
+ v->major = LTTNG_UST_MAJOR_VERSION;
+ v->minor = LTTNG_UST_MINOR_VERSION;
+ v->patchlevel = LTTNG_UST_PATCHLEVEL_VERSION;
return 0;
}
return lttng_abi_tracer_version(objd,
(struct lttng_ust_tracer_version *) arg);
case LTTNG_UST_TRACEPOINT_LIST:
- return -ENOSYS; //TODO
- //return lttng_abi_tracepoint_list();
+ return lttng_abi_tracepoint_list();
case LTTNG_UST_WAIT_QUIESCENT:
synchronize_trace();
return 0;
struct ltt_channel *channel = objd_private(channel_objd);
static struct lttng_ust_event metadata_params = {
.instrumentation = LTTNG_UST_TRACEPOINT,
- .name = "lttng_metadata",
+ .name = "lttng_ust:metadata",
};
struct ltt_event *event;
int ret;
* We tolerate no failure path after event creation. It will stay
* invariant for the rest of the session.
*/
- event = ltt_event_create(channel, &metadata_params, NULL);
- if (!event) {
- ret = -EINVAL;
+ ret = ltt_event_create(channel, &metadata_params, NULL, &event);
+ if (ret < 0) {
goto create_error;
}
return;
int ret = 0;
struct ltt_channel chan_priv_init;
- chan_objd = objd_alloc(NULL, &lttng_channel_ops);
- if (chan_objd < 0) {
- ret = chan_objd;
- goto objd_error;
- }
switch (channel_type) {
case PER_CPU_CHANNEL:
if (chan_param->output == LTTNG_UST_MMAP) {
break;
default:
transport_name = "<unknown>";
- break;
+ return -EINVAL;
+ }
+ chan_objd = objd_alloc(NULL, ops);
+ if (chan_objd < 0) {
+ ret = chan_objd;
+ goto objd_error;
}
memset(&chan_priv_init, 0, sizeof(chan_priv_init));
/* Copy of session UUID for consumer (availability through shm) */
.cmd = lttng_session_cmd,
};
+/*
+ * beware: we don't keep the mutex over the send, but we must walk the
+ * whole list each time we are called again. So sending one tracepoint
+ * at a time means this is O(n^2). TODO: do as in the kernel and send
+ * multiple tracepoints for each call to amortize this cost.
+ */
+static
+void ltt_tracepoint_list_get(struct ltt_tracepoint_list *list,
+ struct lttng_ust_tracepoint_iter *tracepoint)
+{
+next:
+ /* Start the iterator on the first call; advance it afterwards. */
+ if (!list->got_first) {
+ tracepoint_iter_start(&list->iter);
+ list->got_first = 1;
+ goto copy;
+ }
+ tracepoint_iter_next(&list->iter);
+copy:
+ if (!list->iter.tracepoint) {
+ tracepoint->name[0] = '\0'; /* end of list */
+ } else {
+ /* Hide the internal metadata event from the listing. */
+ if (!strcmp((*list->iter.tracepoint)->name,
+ "lttng_ust:metadata"))
+ goto next;
+ memcpy(tracepoint->name, (*list->iter.tracepoint)->name,
+ LTTNG_UST_SYM_NAME_LEN);
+#if 0
+ if ((*list->iter.tracepoint)->loglevel) {
+ memcpy(tracepoint->loglevel,
+ (*list->iter.tracepoint)->loglevel->identifier,
+ LTTNG_UST_SYM_NAME_LEN);
+ tracepoint->loglevel_value =
+ (*list->iter.tracepoint)->loglevel->value;
+ } else {
+#endif
+ /* Loglevel reporting not implemented yet (see #if 0 above). */
+ tracepoint->loglevel[0] = '\0';
+ tracepoint->loglevel_value = 0;
+#if 0
+ }
+#endif
+ }
+}
+
+/*
+ * Command handler for the tracepoint-list object descriptor.
+ * LTTNG_UST_TRACEPOINT_LIST_GET copies the next tracepoint entry into
+ * *arg; returns -ENOENT once the iteration is exhausted.
+ */
+static
+long lttng_tracepoint_list_cmd(int objd, unsigned int cmd, unsigned long arg)
+{
+ struct ltt_tracepoint_list *list = objd_private(objd);
+ struct lttng_ust_tracepoint_iter *tp =
+ (struct lttng_ust_tracepoint_iter *) arg;
+
+ switch (cmd) {
+ case LTTNG_UST_TRACEPOINT_LIST_GET:
+ ltt_tracepoint_list_get(list, tp);
+ /* Empty name marks end of list (set by _list_get). */
+ if (tp->name[0] == '\0')
+ return -ENOENT;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Create a tracepoint-list object descriptor with its private iterator
+ * state. Returns the new objd on success, a negative errno on failure.
+ * On allocation failure, the freshly-created objd is released again.
+ */
+static
+int lttng_abi_tracepoint_list(void)
+{
+ int list_objd, ret;
+ struct ltt_tracepoint_list *list;
+
+ list_objd = objd_alloc(NULL, &lttng_tracepoint_list_ops);
+ if (list_objd < 0) {
+ ret = list_objd;
+ goto objd_error;
+ }
+ list = zmalloc(sizeof(*list));
+ if (!list) {
+ ret = -ENOMEM;
+ goto alloc_error;
+ }
+ objd_set_private(list_objd, list);
+
+ return list_objd;
+
+alloc_error:
+ {
+ int err;
+
+ /* Drop the objd we just created; release must succeed here. */
+ err = lttng_ust_objd_unref(list_objd);
+ assert(!err);
+ }
+objd_error:
+ return ret;
+}
+
+/*
+ * Release handler for the tracepoint-list objd: stop the iterator and
+ * free the private state. Returns -EINVAL if no private data was set
+ * (e.g. release after a failed creation).
+ */
+static
+int lttng_release_tracepoint_list(int objd)
+{
+ struct ltt_tracepoint_list *list = objd_private(objd);
+
+ if (list) {
+ tracepoint_iter_stop(&list->iter);
+ free(list);
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
+/* Object-descriptor operations for the tracepoint-list objd. */
+static const struct lttng_ust_objd_ops lttng_tracepoint_list_ops = {
+ .release = lttng_release_tracepoint_list,
+ .cmd = lttng_tracepoint_list_cmd,
+};
+
struct stream_priv_data {
struct lttng_ust_lib_ring_buffer *buf;
struct ltt_channel *ltt_chan;
* We tolerate no failure path after event creation. It will stay
* invariant for the rest of the session.
*/
- event = ltt_event_create(channel, event_param, NULL);
- if (!event) {
- ret = -EINVAL;
+ ret = ltt_event_create(channel, event_param, NULL, &event);
+ if (ret < 0) {
goto event_error;
}
objd_set_private(event_objd, event);
buf = priv->buf;
channel = priv->ltt_chan;
free(priv);
- channel->ops->buffer_read_close(buf, channel->handle);
+ /*
+ * If we are at ABI exit, we don't want to close the
+ * buffer opened for read: it is being shared between
+ * the parent and child (right after fork), and we don't
+ * want the child to close it for the parent. For a real
+ * exit, we don't care about marking it as closed, as
+ * the consumer daemon (if there is one) will do fine
+ * even if we don't mark it as "closed" for reading on
+ * our side.
+ * We only mark it as closed if it is being explicitely
+ * released by the session daemon with an explicit
+ * release command.
+ */
+ if (!lttng_ust_abi_close_in_progress)
+ channel->ops->buffer_read_close(buf, channel->handle);
return lttng_ust_objd_unref(channel->objd);
}
void lttng_ust_abi_exit(void)
{
+ /*
+ * Flag teardown so the stream release path skips
+ * buffer_read_close(): after fork, the buffer fd may still be
+ * shared with the parent (see comment in the stream release).
+ */
+ lttng_ust_abi_close_in_progress = 1;
objd_table_destroy();
+ lttng_ust_abi_close_in_progress = 0;
}