* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-//ust// #include <linux/module.h>
-//ust// #include <linux/mutex.h>
-//ust// #include <linux/types.h>
-//#include "jhash.h"
-//#include "list.h"
-//#include "rcupdate.h"
-//ust// #include <linux/marker.h>
-#include <errno.h>
-//ust// #include <linux/slab.h>
-//ust// #include <linux/immediate.h>
-//ust// #include <linux/sched.h>
-//ust// #include <linux/uaccess.h>
-//ust// #include <linux/user_marker.h>
-//ust// #include <linux/ltt-tracer.h>
+#include <stdlib.h>
+#include <errno.h>
#define _LGPL_SOURCE
-#include <urcu.h>
+#include <urcu-bp.h>
+#include <urcu/rculist.h>
+#include <urcu/hlist.h>
-#include "kernelcompat.h"
+#include <ust/core.h>
+#include <ust/marker.h>
+#include <ust/tracepoint.h>
-#include "marker.h"
#include "usterr.h"
#include "channels.h"
#include "tracercore.h"
#include "tracer.h"
+__thread long ust_reg_stack[500];
+volatile __thread long *ust_reg_stack_ptr = (long *) 0;
+
extern struct marker __start___markers[] __attribute__((visibility("hidden")));
extern struct marker __stop___markers[] __attribute__((visibility("hidden")));
*/
static DEFINE_MUTEX(markers_mutex);
+static CDS_LIST_HEAD(libs);
+
+
void lock_markers(void)
{
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
}
void unlock_markers(void)
{
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
}
/*
*/
#define MARKER_HASH_BITS 6
#define MARKER_TABLE_SIZE (1 << MARKER_HASH_BITS)
-static struct hlist_head marker_table[MARKER_TABLE_SIZE];
+static struct cds_hlist_head marker_table[MARKER_TABLE_SIZE];
/*
* Note about RCU :
* marker entries modifications are protected by the markers_mutex.
*/
struct marker_entry {
- struct hlist_node hlist;
+ struct cds_hlist_node hlist;
char *format;
char *name;
/* Probe wrapper */
- void (*call)(const struct marker *mdata, void *call_private, ...);
+ void (*call)(const struct marker *mdata, void *call_private, struct registers *regs, ...);
struct marker_probe_closure single;
struct marker_probe_closure *multi;
int refcount; /* Number of times armed. 0 if disarmed. */
* operations that modifies the execution flow of preemptible code.
*/
notrace void __mark_empty_function(const struct marker *mdata,
- void *probe_private, void *call_private, const char *fmt, va_list *args)
+ void *probe_private, struct registers *regs, void *call_private, const char *fmt, va_list *args)
{
}
//ust// EXPORT_SYMBOL_GPL(__mark_empty_function);
* @...: Variable argument list.
*
* Since we do not use "typical" pointer based RCU in the 1 argument case, we
- * need to put a full smp_rmb() in this branch. This is why we do not use
+ * need to put a full cmm_smp_rmb() in this branch. This is why we do not use
* rcu_dereference() for the pointer read.
*/
notrace void marker_probe_cb(const struct marker *mdata,
- void *call_private, ...)
+ void *call_private, struct registers *regs, ...)
{
va_list args;
char ptype;
if (likely(!ptype)) {
marker_probe_func *func;
/* Must read the ptype before ptr. They are not data dependant,
- * so we put an explicit smp_rmb() here. */
- smp_rmb();
+ * so we put an explicit cmm_smp_rmb() here. */
+ cmm_smp_rmb();
func = mdata->single.func;
/* Must read the ptr before private data. They are not data
- * dependant, so we put an explicit smp_rmb() here. */
- smp_rmb();
- va_start(args, call_private);
- func(mdata, mdata->single.probe_private, call_private,
+ * dependant, so we put an explicit cmm_smp_rmb() here. */
+ cmm_smp_rmb();
+ va_start(args, regs);
+ func(mdata, mdata->single.probe_private, regs, call_private,
mdata->format, &args);
va_end(args);
} else {
/*
* Read mdata->ptype before mdata->multi.
*/
- smp_rmb();
+ cmm_smp_rmb();
multi = mdata->multi;
/*
* multi points to an array, therefore accessing the array
* depends on reading multi. However, even in this case,
* we must insure that the pointer is read _before_ the array
- * data. Same as rcu_dereference, but we need a full smp_rmb()
- * in the fast path, so put the explicit barrier here.
+ * data. Same as rcu_dereference, but we need a full cmm_smp_rmb()
+ * in the fast path, so put the explicit barrier here.
*/
- smp_read_barrier_depends();
+ cmm_smp_read_barrier_depends();
for (i = 0; multi[i].func; i++) {
- va_start(args, call_private);
+ va_start(args, regs);
multi[i].func(mdata, multi[i].probe_private,
- call_private, mdata->format, &args);
+ regs, call_private, mdata->format, &args);
va_end(args);
}
}
* Should be connected to markers "MARK_NOARGS".
*/
static notrace void marker_probe_cb_noarg(const struct marker *mdata,
- void *call_private, ...)
+ void *call_private, struct registers *regs, ...)
{
va_list args; /* not initialized */
char ptype;
if (likely(!ptype)) {
marker_probe_func *func;
/* Must read the ptype before ptr. They are not data dependant,
- * so we put an explicit smp_rmb() here. */
- smp_rmb();
+ * so we put an explicit cmm_smp_rmb() here. */
+ cmm_smp_rmb();
func = mdata->single.func;
/* Must read the ptr before private data. They are not data
- * dependant, so we put an explicit smp_rmb() here. */
- smp_rmb();
- func(mdata, mdata->single.probe_private, call_private,
+ * dependant, so we put an explicit cmm_smp_rmb() here. */
+ cmm_smp_rmb();
+ func(mdata, mdata->single.probe_private, regs, call_private,
mdata->format, &args);
} else {
struct marker_probe_closure *multi;
/*
* Read mdata->ptype before mdata->multi.
*/
- smp_rmb();
+ cmm_smp_rmb();
multi = mdata->multi;
/*
* multi points to an array, therefore accessing the array
* depends on reading multi. However, even in this case,
* we must insure that the pointer is read _before_ the array
- * data. Same as rcu_dereference, but we need a full smp_rmb()
- * in the fast path, so put the explicit barrier here.
+ * data. Same as rcu_dereference, but we need a full cmm_smp_rmb()
+ * in the fast path, so put the explicit barrier here.
*/
- smp_read_barrier_depends();
+ cmm_smp_read_barrier_depends();
for (i = 0; multi[i].func; i++)
- multi[i].func(mdata, multi[i].probe_private,
+ multi[i].func(mdata, multi[i].probe_private, regs,
call_private, mdata->format, &args);
}
//ust// rcu_read_unlock_sched_notrace();
static void free_old_closure(struct rcu_head *head)
{
- struct marker_entry *entry = container_of(head,
+ struct marker_entry *entry = _ust_container_of(head,
struct marker_entry, rcu);
- kfree(entry->oldptr);
+ free(entry->oldptr);
/* Make sure we free the data before setting the pending flag to 0 */
- smp_wmb();
+ cmm_smp_wmb();
entry->rcu_pending = 0;
}
return;
if (!entry->ptype) {
- printk(KERN_DEBUG "Single probe : %p %p\n",
+ DBG("Single probe : %p %p",
entry->single.func,
entry->single.probe_private);
} else {
for (i = 0; entry->multi[i].func; i++)
- printk(KERN_DEBUG "Multi probe %d : %p %p\n", i,
+ DBG("Multi probe %d : %p %p", i,
entry->multi[i].func,
entry->multi[i].probe_private);
}
return ERR_PTR(-EBUSY);
}
/* + 2 : one for new probe, one for NULL func */
- new = kzalloc((nr_probes + 2) * sizeof(struct marker_probe_closure),
- GFP_KERNEL);
+ new = zmalloc((nr_probes + 2) * sizeof(struct marker_probe_closure));
if (new == NULL)
return ERR_PTR(-ENOMEM);
if (!old)
int j = 0;
/* N -> M, (N > 1, M > 1) */
/* + 1 for NULL */
- new = kzalloc((nr_probes - nr_del + 1)
- * sizeof(struct marker_probe_closure), GFP_KERNEL);
+ new = zmalloc((nr_probes - nr_del + 1) * sizeof(struct marker_probe_closure));
if (new == NULL)
return ERR_PTR(-ENOMEM);
for (i = 0; old[i].func; i++)
*/
static struct marker_entry *get_marker(const char *channel, const char *name)
{
- struct hlist_head *head;
- struct hlist_node *node;
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
struct marker_entry *e;
size_t channel_len = strlen(channel) + 1;
size_t name_len = strlen(name) + 1;
hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
- hlist_for_each_entry(e, node, head, hlist) {
+ cds_hlist_for_each_entry(e, node, head, hlist) {
if (!strcmp(channel, e->channel) && !strcmp(name, e->name))
return e;
}
static struct marker_entry *add_marker(const char *channel, const char *name,
const char *format)
{
- struct hlist_head *head;
- struct hlist_node *node;
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
struct marker_entry *e;
size_t channel_len = strlen(channel) + 1;
size_t name_len = strlen(name) + 1;
if (format)
format_len = strlen(format) + 1;
head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
- hlist_for_each_entry(e, node, head, hlist) {
+ cds_hlist_for_each_entry(e, node, head, hlist) {
if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
- printk(KERN_NOTICE
- "Marker %s.%s busy\n", channel, name);
+ DBG("Marker %s.%s busy", channel, name);
return ERR_PTR(-EBUSY); /* Already there */
}
}
/*
- * Using kmalloc here to allocate a variable length element. Could
+ * Using zmalloc here to allocate a variable length element. Could
* cause some memory fragmentation if overused.
*/
- e = kmalloc(sizeof(struct marker_entry)
- + channel_len + name_len + format_len,
- GFP_KERNEL);
+ e = zmalloc(sizeof(struct marker_entry)
+ + channel_len + name_len + format_len);
if (!e)
return ERR_PTR(-ENOMEM);
memcpy(e->channel, channel, channel_len);
e->name = &e->channel[channel_len];
memcpy(e->name, name, name_len);
if (format) {
- e->format = &e->name[channel_len + name_len];
+ e->format = &e->name[name_len];
memcpy(e->format, format, format_len);
if (strcmp(e->format, MARK_NOARGS) == 0)
e->call = marker_probe_cb_noarg;
e->format_allocated = 0;
e->refcount = 0;
e->rcu_pending = 0;
- hlist_add_head(&e->hlist, head);
+ cds_hlist_add_head(&e->hlist, head);
return e;
}
*/
static int remove_marker(const char *channel, const char *name)
{
- struct hlist_head *head;
- struct hlist_node *node;
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
struct marker_entry *e;
int found = 0;
size_t channel_len = strlen(channel) + 1;
hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
- hlist_for_each_entry(e, node, head, hlist) {
+ cds_hlist_for_each_entry(e, node, head, hlist) {
if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
found = 1;
break;
return -ENOENT;
if (e->single.func != __mark_empty_function)
return -EBUSY;
- hlist_del(&e->hlist);
+ cds_hlist_del(&e->hlist);
if (e->format_allocated)
- kfree(e->format);
+ free(e->format);
ret = ltt_channels_unregister(e->channel);
WARN_ON(ret);
/* Make sure the call_rcu has been executed */
//ust// if (e->rcu_pending)
-//ust// rcu_barrier_sched();
- kfree(e);
+//ust// rcu_barrier_sched();
+ free(e);
return 0;
}
*/
static int marker_set_format(struct marker_entry *entry, const char *format)
{
- entry->format = kstrdup(format, GFP_KERNEL);
+ entry->format = strdup(format);
if (!entry->format)
return -ENOMEM;
entry->format_allocated = 1;
if (entry->format) {
if (strcmp(entry->format, elem->format) != 0) {
- printk(KERN_NOTICE
- "Format mismatch for probe %s "
- "(%s), marker (%s)\n",
+ ERR("Format mismatch for probe %s (%s), marker (%s)",
entry->name,
entry->format,
elem->format);
* Make sure the private data is valid when we update the
* single probe ptr.
*/
- smp_wmb();
+ cmm_smp_wmb();
elem->single.func = entry->single.func;
/*
* We also make sure that the new probe callbacks array is consistent
* Update the function or multi probe array pointer before setting the
* ptype.
*/
- smp_wmb();
+ cmm_smp_wmb();
elem->ptype = entry->ptype;
-//ust// if (elem->tp_name && (active ^ _imv_read(elem->state))) {
-//ust// WARN_ON(!elem->tp_cb);
-//ust// /*
-//ust// * It is ok to directly call the probe registration because type
-//ust// * checking has been done in the __trace_mark_tp() macro.
-//ust// */
-//ust//
-//ust// if (active) {
-//ust// /*
-//ust// * try_module_get should always succeed because we hold
-//ust// * markers_mutex to get the tp_cb address.
-//ust// */
+ if (elem->tp_name && (active ^ _imv_read(elem->state))) {
+ WARN_ON(!elem->tp_cb);
+ /*
+ * It is ok to directly call the probe registration because type
+ * checking has been done in the __trace_mark_tp() macro.
+ */
+
+ if (active) {
+ /*
+ * try_module_get should always succeed because we hold
+ * markers_mutex to get the tp_cb address.
+ */
//ust// ret = try_module_get(__module_text_address(
//ust// (unsigned long)elem->tp_cb));
//ust// BUG_ON(!ret);
-//ust// ret = tracepoint_probe_register_noupdate(
-//ust// elem->tp_name,
-//ust// elem->tp_cb);
-//ust// } else {
-//ust// ret = tracepoint_probe_unregister_noupdate(
-//ust// elem->tp_name,
-//ust// elem->tp_cb);
-//ust// /*
-//ust// * tracepoint_probe_update_all() must be called
-//ust// * before the module containing tp_cb is unloaded.
-//ust// */
+ ret = tracepoint_probe_register_noupdate(
+ elem->tp_name,
+ elem->tp_cb, NULL);
+ } else {
+ ret = tracepoint_probe_unregister_noupdate(
+ elem->tp_name,
+ elem->tp_cb, NULL);
+ /*
+ * tracepoint_probe_update_all() must be called
+ * before the module containing tp_cb is unloaded.
+ */
//ust// module_put(__module_text_address(
//ust// (unsigned long)elem->tp_cb));
-//ust// }
-//ust// }
+ }
+ }
elem->state__imv = active;
return ret;
int ret;
/* leave "call" as is. It is known statically. */
-//ust// if (elem->tp_name && _imv_read(elem->state)) {
-//ust// WARN_ON(!elem->tp_cb);
-//ust// /*
-//ust// * It is ok to directly call the probe registration because type
-//ust// * checking has been done in the __trace_mark_tp() macro.
-//ust// */
-//ust// ret = tracepoint_probe_unregister_noupdate(elem->tp_name,
-//ust// elem->tp_cb);
-//ust// WARN_ON(ret);
-//ust// /*
-//ust// * tracepoint_probe_update_all() must be called
-//ust// * before the module containing tp_cb is unloaded.
-//ust// */
+ if (elem->tp_name && _imv_read(elem->state)) {
+ WARN_ON(!elem->tp_cb);
+ /*
+ * It is ok to directly call the probe registration because type
+ * checking has been done in the __trace_mark_tp() macro.
+ */
+ ret = tracepoint_probe_unregister_noupdate(elem->tp_name,
+ elem->tp_cb, NULL);
+ WARN_ON(ret);
+ /*
+ * tracepoint_probe_update_all() must be called
+ * before the module containing tp_cb is unloaded.
+ */
//ust// module_put(__module_text_address((unsigned long)elem->tp_cb));
-//ust// }
+ }
elem->state__imv = 0;
elem->single.func = __mark_empty_function;
/* Update the function before setting the ptype */
- smp_wmb();
+ cmm_smp_wmb();
elem->ptype = 0; /* single probe */
/*
* Leave the private data and channel_id/event_id there, because removal
*/
}
+/*
+ * is_marker_enabled - Check if a marker is enabled
+ * @channel: channel name
+ * @name: marker name
+ *
+ * Returns 1 if the marker is enabled, 0 if disabled.
+ */
+int is_marker_enabled(const char *channel, const char *name)
+{
+ struct marker_entry *entry;
+
+ pthread_mutex_lock(&markers_mutex);
+ entry = get_marker(channel, name);
+ pthread_mutex_unlock(&markers_mutex);
+
+ return entry && !!entry->refcount;
+}
+
/**
* marker_update_probe_range - Update a probe range
* @begin: beginning of the range
struct marker *iter;
struct marker_entry *mark_entry;
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
for (iter = begin; iter < end; iter++) {
mark_entry = get_marker(iter->channel, iter->name);
if (mark_entry) {
disable_marker(iter);
}
}
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
+}
+
+static void lib_update_markers(void)
+{
+ struct lib *lib;
+
+ /* FIXME: we should probably take a mutex here on libs */
+//ust// pthread_mutex_lock(&module_mutex);
+ cds_list_for_each_entry(lib, &libs, list)
+ marker_update_probe_range(lib->markers_start,
+ lib->markers_start + lib->markers_count);
+//ust// pthread_mutex_unlock(&module_mutex);
}
/*
/* Markers in modules. */
//ust// module_update_markers();
lib_update_markers();
-//ust// tracepoint_probe_update_all();
+ tracepoint_probe_update_all();
/* Update immediate values */
core_imv_update();
//ust// module_imv_update(); /* FIXME: need to port for libs? */
struct marker_probe_closure *old;
int first_probe = 0;
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker(channel, name);
if (!entry) {
first_probe = 1;
* make sure it's executed now.
*/
//ust// if (entry->rcu_pending)
-//ust// rcu_barrier_sched();
+//ust// rcu_barrier_sched();
old = marker_entry_add_probe(entry, probe, probe_private);
if (IS_ERR(old)) {
ret = PTR_ERR(old);
else
goto end;
}
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
+ /* Activate marker if necessary */
marker_update_probes();
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker(channel, name);
if (!entry)
goto end;
//ust// if (entry->rcu_pending)
-//ust// rcu_barrier_sched();
+//ust// rcu_barrier_sched();
entry->oldptr = old;
entry->rcu_pending = 1;
/* write rcu_pending before calling the RCU callback */
- smp_wmb();
+ cmm_smp_wmb();
//ust// call_rcu_sched(&entry->rcu, free_old_closure);
synchronize_rcu(); free_old_closure(&entry->rcu);
goto end;
ret_err = remove_marker(channel, name);
WARN_ON(ret_err);
end:
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
return ret;
}
//ust// EXPORT_SYMBOL_GPL(marker_probe_register);
struct marker_probe_closure *old;
int ret = -ENOENT;
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker(channel, name);
if (!entry)
goto end;
//ust// if (entry->rcu_pending)
-//ust// rcu_barrier_sched();
+//ust// rcu_barrier_sched();
old = marker_entry_remove_probe(entry, probe, probe_private);
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
marker_update_probes();
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker(channel, name);
if (!entry)
goto end;
//ust// if (entry->rcu_pending)
-//ust// rcu_barrier_sched();
+//ust// rcu_barrier_sched();
entry->oldptr = old;
entry->rcu_pending = 1;
/* write rcu_pending before calling the RCU callback */
- smp_wmb();
+ cmm_smp_wmb();
//ust// call_rcu_sched(&entry->rcu, free_old_closure);
synchronize_rcu(); free_old_closure(&entry->rcu);
remove_marker(channel, name); /* Ignore busy error message */
ret = 0;
end:
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
return ret;
}
//ust// EXPORT_SYMBOL_GPL(marker_probe_unregister);
{
struct marker_entry *entry;
unsigned int i;
- struct hlist_head *head;
- struct hlist_node *node;
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
for (i = 0; i < MARKER_TABLE_SIZE; i++) {
head = &marker_table[i];
- hlist_for_each_entry(entry, node, head, hlist) {
+ cds_hlist_for_each_entry(entry, node, head, hlist) {
if (!entry->ptype) {
if (entry->single.func == probe
&& entry->single.probe_private
struct marker_entry *entry;
int ret = 0;
struct marker_probe_closure *old;
- const char *channel = NULL, *name = NULL;
+ char *channel = NULL, *name = NULL;
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker_from_private_data(probe, probe_private);
if (!entry) {
ret = -ENOENT;
goto end;
}
//ust// if (entry->rcu_pending)
-//ust// rcu_barrier_sched();
+//ust// rcu_barrier_sched();
old = marker_entry_remove_probe(entry, NULL, probe_private);
- channel = kstrdup(entry->channel, GFP_KERNEL);
- name = kstrdup(entry->name, GFP_KERNEL);
- mutex_unlock(&markers_mutex);
+ channel = strdup(entry->channel);
+ name = strdup(entry->name);
+ pthread_mutex_unlock(&markers_mutex);
marker_update_probes();
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker(channel, name);
if (!entry)
goto end;
//ust// if (entry->rcu_pending)
-//ust// rcu_barrier_sched();
+//ust// rcu_barrier_sched();
entry->oldptr = old;
entry->rcu_pending = 1;
/* write rcu_pending before calling the RCU callback */
- smp_wmb();
+ cmm_smp_wmb();
//ust// call_rcu_sched(&entry->rcu, free_old_closure);
synchronize_rcu(); free_old_closure(&entry->rcu);
/* Ignore busy error message */
remove_marker(channel, name);
end:
- mutex_unlock(&markers_mutex);
- kfree(channel);
- kfree(name);
+ pthread_mutex_unlock(&markers_mutex);
+ free(channel);
+ free(name);
return ret;
}
//ust// EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data);
void *marker_get_private_data(const char *channel, const char *name,
marker_probe_func *probe, int num)
{
- struct hlist_head *head;
- struct hlist_node *node;
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
struct marker_entry *e;
size_t channel_len = strlen(channel) + 1;
size_t name_len = strlen(name) + 1;
hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
- hlist_for_each_entry(e, node, head, hlist) {
+ cds_hlist_for_each_entry(e, node, head, hlist) {
if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
if (!e->ptype) {
if (num == 0 && e->single.func == probe)
//ust//#ifdef CONFIG_MODULES
+/*
+ * Returns 0 if current not found.
+ * Returns 1 if current found.
+ */
+int lib_get_iter_markers(struct marker_iter *iter)
+{
+ struct lib *iter_lib;
+ int found = 0;
+
+//ust// pthread_mutex_lock(&module_mutex);
+ cds_list_for_each_entry(iter_lib, &libs, list) {
+ if (iter_lib < iter->lib)
+ continue;
+ else if (iter_lib > iter->lib)
+ iter->marker = NULL;
+ found = marker_get_iter_range(&iter->marker,
+ iter_lib->markers_start,
+ iter_lib->markers_start + iter_lib->markers_count);
+ if (found) {
+ iter->lib = iter_lib;
+ break;
+ }
+ }
+//ust// pthread_mutex_unlock(&module_mutex);
+ return found;
+}
+
/**
* marker_get_iter_range - Get a next marker iterator given a range.
* @marker: current markers (in), next marker (out)
/*
* must be called with current->user_markers_mutex held
*/
-static void free_user_marker(char __user *state, struct hlist_head *head)
+static void free_user_marker(char __user *state, struct cds_hlist_head *head)
{
struct user_marker *umark;
- struct hlist_node *pos, *n;
+ struct cds_hlist_node *pos, *n;
- hlist_for_each_entry_safe(umark, pos, n, head, hlist) {
+ cds_hlist_for_each_entry_safe(umark, pos, n, head, hlist) {
if (umark->state == state) {
- hlist_del(&umark->hlist);
- kfree(umark);
+ cds_hlist_del(&umark->hlist);
+ free(umark);
}
}
}
-//ust// asmlinkage long sys_marker(char __user *name, char __user *format,
-//ust// char __user *state, int reg)
+/*
+ * Update current process.
+ * Note that we have to wait a whole scheduler period before we are sure that
+ * every running userspace threads have their markers updated.
+ * (synchronize_sched() can be used to insure this).
+ */
+//ust// void marker_update_process(void)
//ust// {
//ust// struct user_marker *umark;
-//ust// long len;
+//ust// struct hlist_node *pos;
//ust// struct marker_entry *entry;
-//ust// int ret = 0;
//ust//
-//ust// printk(KERN_DEBUG "Program %s %s marker [%p, %p]\n",
-//ust// current->comm, reg ? "registers" : "unregisters",
-//ust// name, state);
-//ust// if (reg) {
-//ust// umark = kmalloc(sizeof(struct user_marker), GFP_KERNEL);
-//ust// umark->name[MAX_USER_MARKER_NAME_LEN - 1] = '\0';
-//ust// umark->format[MAX_USER_MARKER_FORMAT_LEN - 1] = '\0';
-//ust// umark->state = state;
-//ust// len = strncpy_from_user(umark->name, name,
-//ust// MAX_USER_MARKER_NAME_LEN - 1);
-//ust// if (len < 0) {
-//ust// ret = -EFAULT;
-//ust// goto error;
-//ust// }
-//ust// len = strncpy_from_user(umark->format, format,
-//ust// MAX_USER_MARKER_FORMAT_LEN - 1);
-//ust// if (len < 0) {
-//ust// ret = -EFAULT;
-//ust// goto error;
-//ust// }
-//ust// printk(KERN_DEBUG "Marker name : %s, format : %s", umark->name,
-//ust// umark->format);
-//ust// mutex_lock(&markers_mutex);
+//ust// pthread_mutex_lock(&markers_mutex);
+//ust// pthread_mutex_lock(¤t->group_leader->user_markers_mutex);
+//ust// if (strcmp(current->comm, "testprog") == 0)
+//ust// DBG("do update pending for testprog");
+//ust// hlist_for_each_entry(umark, pos,
+//ust// ¤t->group_leader->user_markers, hlist) {
+//ust// DBG("Updating marker %s in %s", umark->name, current->comm);
//ust// entry = get_marker("userspace", umark->name);
//ust// if (entry) {
//ust// if (entry->format &&
//ust// strcmp(entry->format, umark->format) != 0) {
-//ust// printk(" error, wrong format in process %s",
+//ust// WARN("error, wrong format in process %s",
//ust// current->comm);
-//ust// ret = -EPERM;
-//ust// goto error_unlock;
+//ust// break;
//ust// }
-//ust// printk(" %s", !!entry->refcount
-//ust// ? "enabled" : "disabled");
-//ust// if (put_user(!!entry->refcount, state)) {
-//ust// ret = -EFAULT;
-//ust// goto error_unlock;
+//ust// if (put_user(!!entry->refcount, umark->state)) {
+//ust// WARN("Marker in %s caused a fault",
+//ust// current->comm);
+//ust// break;
//ust// }
-//ust// printk("\n");
//ust// } else {
-//ust// printk(" disabled\n");
//ust// if (put_user(0, umark->state)) {
-//ust// printk(KERN_WARNING
-//ust// "Marker in %s caused a fault\n",
-//ust// current->comm);
-//ust// goto error_unlock;
+//ust// WARN("Marker in %s caused a fault", current->comm);
+//ust// break;
//ust// }
//ust// }
-//ust// mutex_lock(¤t->group_leader->user_markers_mutex);
-//ust// hlist_add_head(&umark->hlist,
-//ust// ¤t->group_leader->user_markers);
-//ust// current->group_leader->user_markers_sequence++;
-//ust// mutex_unlock(¤t->group_leader->user_markers_mutex);
-//ust// mutex_unlock(&markers_mutex);
-//ust// } else {
-//ust// mutex_lock(¤t->group_leader->user_markers_mutex);
-//ust// free_user_marker(state,
-//ust// ¤t->group_leader->user_markers);
-//ust// current->group_leader->user_markers_sequence++;
-//ust// mutex_unlock(¤t->group_leader->user_markers_mutex);
//ust// }
-//ust// goto end;
-//ust// error_unlock:
-//ust// mutex_unlock(&markers_mutex);
-//ust// error:
-//ust// kfree(umark);
-//ust// end:
-//ust// return ret;
+//ust// clear_thread_flag(TIF_MARKER_PENDING);
+//ust// pthread_mutex_unlock(¤t->group_leader->user_markers_mutex);
+//ust// pthread_mutex_unlock(&markers_mutex);
//ust// }
-//ust//
-//ust// /*
-//ust// * Types :
-//ust// * string : 0
-//ust// */
-//ust// asmlinkage long sys_trace(int type, uint16_t id,
-//ust// char __user *ubuf)
-//ust// {
-//ust// long ret = -EPERM;
-//ust// char *page;
-//ust// int len;
-//ust//
-//ust// switch (type) {
-//ust// case 0: /* String */
-//ust// ret = -ENOMEM;
-//ust// page = (char *)__get_free_page(GFP_TEMPORARY);
-//ust// if (!page)
-//ust// goto string_out;
-//ust// len = strncpy_from_user(page, ubuf, PAGE_SIZE);
-//ust// if (len < 0) {
-//ust// ret = -EFAULT;
-//ust// goto string_err;
-//ust// }
-//ust// trace_mark(userspace, string, "string %s", page);
-//ust// string_err:
-//ust// free_page((unsigned long) page);
-//ust// string_out:
-//ust// break;
-//ust// default:
-//ust// break;
-//ust// }
-//ust// return ret;
-//ust// }
-
-//ust// static void marker_update_processes(void)
-//ust// {
-//ust// struct task_struct *g, *t;
-//ust//
-//ust// /*
-//ust// * markers_mutex is taken to protect the p->user_markers read.
-//ust// */
-//ust// mutex_lock(&markers_mutex);
-//ust// read_lock(&tasklist_lock);
-//ust// for_each_process(g) {
-//ust// WARN_ON(!thread_group_leader(g));
-//ust// if (hlist_empty(&g->user_markers))
-//ust// continue;
-//ust// if (strcmp(g->comm, "testprog") == 0)
-//ust// printk(KERN_DEBUG "set update pending for testprog\n");
-//ust// t = g;
-//ust// do {
-//ust// /* TODO : implement this thread flag in each arch. */
-//ust// set_tsk_thread_flag(t, TIF_MARKER_PENDING);
-//ust// } while ((t = next_thread(t)) != g);
-//ust// }
-//ust// read_unlock(&tasklist_lock);
-//ust// mutex_unlock(&markers_mutex);
-//ust// }
-
-/*
- * Update current process.
- * Note that we have to wait a whole scheduler period before we are sure that
- * every running userspace threads have their markers updated.
- * (synchronize_sched() can be used to insure this).
- */
-void marker_update_process(void)
-{
- struct user_marker *umark;
- struct hlist_node *pos;
- struct marker_entry *entry;
-
- mutex_lock(&markers_mutex);
- mutex_lock(¤t->group_leader->user_markers_mutex);
- if (strcmp(current->comm, "testprog") == 0)
- printk(KERN_DEBUG "do update pending for testprog\n");
- hlist_for_each_entry(umark, pos,
- ¤t->group_leader->user_markers, hlist) {
- printk(KERN_DEBUG "Updating marker %s in %s\n",
- umark->name, current->comm);
- entry = get_marker("userspace", umark->name);
- if (entry) {
- if (entry->format &&
- strcmp(entry->format, umark->format) != 0) {
- printk(KERN_WARNING
- " error, wrong format in process %s\n",
- current->comm);
- break;
- }
- if (put_user(!!entry->refcount, umark->state)) {
- printk(KERN_WARNING
- "Marker in %s caused a fault\n",
- current->comm);
- break;
- }
- } else {
- if (put_user(0, umark->state)) {
- printk(KERN_WARNING
- "Marker in %s caused a fault\n",
- current->comm);
- break;
- }
- }
- }
- clear_thread_flag(TIF_MARKER_PENDING);
- mutex_unlock(¤t->group_leader->user_markers_mutex);
- mutex_unlock(&markers_mutex);
-}
/*
* Called at process exit and upon do_execve().
void exit_user_markers(struct task_struct *p)
{
struct user_marker *umark;
- struct hlist_node *pos, *n;
+ struct cds_hlist_node *pos, *n;
if (thread_group_leader(p)) {
- mutex_lock(&markers_mutex);
- mutex_lock(&p->user_markers_mutex);
- hlist_for_each_entry_safe(umark, pos, n, &p->user_markers,
+ pthread_mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&p->user_markers_mutex);
+ cds_hlist_for_each_entry_safe(umark, pos, n, &p->user_markers,
hlist)
- kfree(umark);
+ free(umark);
INIT_HLIST_HEAD(&p->user_markers);
p->user_markers_sequence++;
- mutex_unlock(&p->user_markers_mutex);
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&p->user_markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
}
}
{
struct marker_entry *entry;
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker(channel, name);
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
return entry && !!entry->refcount;
}
#endif /* CONFIG_MODULES */
-void ltt_dump_marker_state(struct ltt_trace_struct *trace)
+void ltt_dump_marker_state(struct ust_trace *trace)
{
struct marker_entry *entry;
struct ltt_probe_private_data call_data;
- struct hlist_head *head;
- struct hlist_node *node;
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
unsigned int i;
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
call_data.trace = trace;
call_data.serializer = NULL;
for (i = 0; i < MARKER_TABLE_SIZE; i++) {
head = &marker_table[i];
- hlist_for_each_entry(entry, node, head, hlist) {
+ cds_hlist_for_each_entry(entry, node, head, hlist) {
__trace_mark(0, metadata, core_marker_id,
&call_data,
"channel %s name %s event_id %hu "
entry->format);
}
}
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
}
//ust// EXPORT_SYMBOL_GPL(ltt_dump_marker_state);
-
-static LIST_HEAD(libs);
-
-/*
- * Returns 0 if current not found.
- * Returns 1 if current found.
- */
-int lib_get_iter_markers(struct marker_iter *iter)
-{
- struct lib *iter_lib;
- int found = 0;
-
-//ust// mutex_lock(&module_mutex);
- list_for_each_entry(iter_lib, &libs, list) {
- if (iter_lib < iter->lib)
- continue;
- else if (iter_lib > iter->lib)
- iter->marker = NULL;
- found = marker_get_iter_range(&iter->marker,
- iter_lib->markers_start,
- iter_lib->markers_start + iter_lib->markers_count);
- if (found) {
- iter->lib = iter_lib;
- break;
- }
- }
-//ust// mutex_unlock(&module_mutex);
- return found;
-}
-
-void lib_update_markers(void)
-{
- struct lib *lib;
-
-//ust// mutex_lock(&module_mutex);
- list_for_each_entry(lib, &libs, list)
- marker_update_probe_range(lib->markers_start,
- lib->markers_start + lib->markers_count);
-//ust// mutex_unlock(&module_mutex);
-}
-
static void (*new_marker_cb)(struct marker *) = NULL;
void marker_set_new_marker_cb(void (*cb)(struct marker *))
{
struct lib *pl;
- pl = (struct lib *) malloc(sizeof(struct lib));
+ pl = (struct lib *) zmalloc(sizeof(struct lib));
pl->markers_start = markers_start;
pl->markers_count = markers_count;
/* FIXME: maybe protect this with its own mutex? */
lock_markers();
- list_add(&pl->list, &libs);
+ cds_list_add(&pl->list, &libs);
unlock_markers();
new_markers(markers_start, markers_start + markers_count);
return 0;
}
-int marker_unregister_lib(struct marker *markers_start, int markers_count)
+/*
+ * marker_unregister_lib - remove a marker section previously registered
+ * with marker_register_lib().
+ * @markers_start: start address of the marker section (lookup key).
+ *
+ * Removes and frees the matching libs-list entry, if any.
+ * Always returns 0 (a missing entry is not an error).
+ */
+int marker_unregister_lib(struct marker *markers_start)
 {
+	struct lib *lib;
+
-	/*FIXME: implement; but before implementing, marker_register_lib must
-	   have appropriate locking. */
+	/* FIXME: lock_markers() serializes against marker updates, but the
+	   libs list itself should eventually get its own mutex (shared with
+	   marker_register_lib()). */
+	lock_markers();
+
+//ust//	pthread_mutex_lock(&module_mutex);
+	cds_list_for_each_entry(lib, &libs, list) {
+		if (lib->markers_start == markers_start) {
+			cds_list_del(&lib->list);
+			free(lib);
+			break;
+		}
+	}
+//ust//	pthread_mutex_unlock(&module_mutex);
+
+	unlock_markers();
+
 	return 0;
 }
{
if(!initialized) {
marker_register_lib(__start___markers, (((long)__stop___markers)-((long)__start___markers))/sizeof(struct marker));
- printf("markers_start: %p, markers_stop: %p\n", __start___markers, __stop___markers);
initialized = 1;
}
}
+
+/*
+ * Library teardown: unregister this executable's marker section on
+ * unload.  This must be a destructor -- declaring it a constructor
+ * would run it at load time and immediately undo the registration
+ * performed by init_markers().
+ */
+void __attribute__((destructor)) destroy_markers(void)
+{
+	marker_unregister_lib(__start___markers);
+}