* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-//ust// #include <linux/module.h>
-//ust// #include <linux/mutex.h>
-//ust// #include <linux/types.h>
-//#include "jhash.h"
-//#include "list.h"
-//#include "rcupdate.h"
-//ust// #include <linux/marker.h>
-#include <errno.h>
-//ust// #include <linux/slab.h>
-//ust// #include <linux/immediate.h>
-//ust// #include <linux/sched.h>
-//ust// #include <linux/uaccess.h>
-//ust// #include <linux/user_marker.h>
-//ust// #include <linux/ltt-tracer.h>
+#include <stdlib.h>
+#include <errno.h>
#define _LGPL_SOURCE
#include <urcu-bp.h>
+#include <urcu/rculist.h>
+#include <urcu/hlist.h>
-#include <ust/kernelcompat.h>
-
+#include <ust/core.h>
#include <ust/marker.h>
+#include <ust/tracepoint.h>
+
#include "usterr.h"
#include "channels.h"
#include "tracercore.h"
__thread long ust_reg_stack[500];
volatile __thread long *ust_reg_stack_ptr = (long *) 0;
-extern struct marker __start___markers[] __attribute__((visibility("hidden")));
-extern struct marker __stop___markers[] __attribute__((visibility("hidden")));
+extern struct marker * const __start___markers_ptrs[] __attribute__((visibility("hidden")));
+extern struct marker * const __stop___markers_ptrs[] __attribute__((visibility("hidden")));
/* Set to 1 to enable marker debug output */
static const int marker_debug;
*/
static DEFINE_MUTEX(markers_mutex);
-static LIST_HEAD(libs);
+static CDS_LIST_HEAD(libs);
void lock_markers(void)
{
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
}
void unlock_markers(void)
{
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
}
/*
*/
#define MARKER_HASH_BITS 6
#define MARKER_TABLE_SIZE (1 << MARKER_HASH_BITS)
-static struct hlist_head marker_table[MARKER_TABLE_SIZE];
+static struct cds_hlist_head marker_table[MARKER_TABLE_SIZE];
/*
* Note about RCU :
* marker entries modifications are protected by the markers_mutex.
*/
struct marker_entry {
- struct hlist_node hlist;
+ struct cds_hlist_node hlist;
char *format;
char *name;
/* Probe wrapper */
* @...: Variable argument list.
*
* Since we do not use "typical" pointer based RCU in the 1 argument case, we
- * need to put a full smp_rmb() in this branch. This is why we do not use
+ * need to put a full cmm_smp_rmb() in this branch. This is why we do not use
* rcu_dereference() for the pointer read.
*/
notrace void marker_probe_cb(const struct marker *mdata,
if (likely(!ptype)) {
marker_probe_func *func;
/* Must read the ptype before ptr. They are not data dependant,
- * so we put an explicit smp_rmb() here. */
- smp_rmb();
+ * so we put an explicit cmm_smp_rmb() here. */
+ cmm_smp_rmb();
func = mdata->single.func;
/* Must read the ptr before private data. They are not data
- * dependant, so we put an explicit smp_rmb() here. */
- smp_rmb();
+ * dependant, so we put an explicit cmm_smp_rmb() here. */
+ cmm_smp_rmb();
va_start(args, regs);
func(mdata, mdata->single.probe_private, regs, call_private,
mdata->format, &args);
/*
* Read mdata->ptype before mdata->multi.
*/
- smp_rmb();
+ cmm_smp_rmb();
multi = mdata->multi;
/*
* multi points to an array, therefore accessing the array
* depends on reading multi. However, even in this case,
* we must insure that the pointer is read _before_ the array
- * data. Same as rcu_dereference, but we need a full smp_rmb()
- * in the fast path, so put the explicit barrier here.
+ * data. Same as rcu_dereference, but we need a full cmm_smp_rmb()
+ * in the fast path, so put the explicit barrier here.
*/
- smp_read_barrier_depends();
+ cmm_smp_read_barrier_depends();
for (i = 0; multi[i].func; i++) {
va_start(args, regs);
multi[i].func(mdata, multi[i].probe_private,
if (likely(!ptype)) {
marker_probe_func *func;
/* Must read the ptype before ptr. They are not data dependant,
- * so we put an explicit smp_rmb() here. */
- smp_rmb();
+ * so we put an explicit cmm_smp_rmb() here. */
+ cmm_smp_rmb();
func = mdata->single.func;
/* Must read the ptr before private data. They are not data
- * dependant, so we put an explicit smp_rmb() here. */
- smp_rmb();
+ * dependant, so we put an explicit cmm_smp_rmb() here. */
+ cmm_smp_rmb();
func(mdata, mdata->single.probe_private, regs, call_private,
mdata->format, &args);
} else {
/*
* Read mdata->ptype before mdata->multi.
*/
- smp_rmb();
+ cmm_smp_rmb();
multi = mdata->multi;
/*
* multi points to an array, therefore accessing the array
* depends on reading multi. However, even in this case,
* we must insure that the pointer is read _before_ the array
- * data. Same as rcu_dereference, but we need a full smp_rmb()
- * in the fast path, so put the explicit barrier here.
+ * data. Same as rcu_dereference, but we need a full cmm_smp_rmb()
+ * in the fast path, so put the explicit barrier here.
*/
- smp_read_barrier_depends();
+ cmm_smp_read_barrier_depends();
for (i = 0; multi[i].func; i++)
multi[i].func(mdata, multi[i].probe_private, regs,
call_private, mdata->format, &args);
static void free_old_closure(struct rcu_head *head)
{
- struct marker_entry *entry = container_of(head,
+ struct marker_entry *entry = _ust_container_of(head,
struct marker_entry, rcu);
- kfree(entry->oldptr);
+ free(entry->oldptr);
/* Make sure we free the data before setting the pending flag to 0 */
- smp_wmb();
+ cmm_smp_wmb();
entry->rcu_pending = 0;
}
return ERR_PTR(-EBUSY);
}
/* + 2 : one for new probe, one for NULL func */
- new = kzalloc((nr_probes + 2) * sizeof(struct marker_probe_closure),
- GFP_KERNEL);
+ new = zmalloc((nr_probes + 2) * sizeof(struct marker_probe_closure));
if (new == NULL)
return ERR_PTR(-ENOMEM);
if (!old)
int j = 0;
/* N -> M, (N > 1, M > 1) */
/* + 1 for NULL */
- new = kzalloc((nr_probes - nr_del + 1)
- * sizeof(struct marker_probe_closure), GFP_KERNEL);
+ new = zmalloc((nr_probes - nr_del + 1) * sizeof(struct marker_probe_closure));
if (new == NULL)
return ERR_PTR(-ENOMEM);
for (i = 0; old[i].func; i++)
*/
static struct marker_entry *get_marker(const char *channel, const char *name)
{
- struct hlist_head *head;
- struct hlist_node *node;
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
struct marker_entry *e;
size_t channel_len = strlen(channel) + 1;
size_t name_len = strlen(name) + 1;
hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
- hlist_for_each_entry(e, node, head, hlist) {
+ cds_hlist_for_each_entry(e, node, head, hlist) {
if (!strcmp(channel, e->channel) && !strcmp(name, e->name))
return e;
}
static struct marker_entry *add_marker(const char *channel, const char *name,
const char *format)
{
- struct hlist_head *head;
- struct hlist_node *node;
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
struct marker_entry *e;
size_t channel_len = strlen(channel) + 1;
size_t name_len = strlen(name) + 1;
if (format)
format_len = strlen(format) + 1;
head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
- hlist_for_each_entry(e, node, head, hlist) {
+ cds_hlist_for_each_entry(e, node, head, hlist) {
if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
DBG("Marker %s.%s busy", channel, name);
return ERR_PTR(-EBUSY); /* Already there */
}
}
/*
- * Using kmalloc here to allocate a variable length element. Could
+ * Using zmalloc here to allocate a variable length element. Could
* cause some memory fragmentation if overused.
*/
- e = kmalloc(sizeof(struct marker_entry)
- + channel_len + name_len + format_len,
- GFP_KERNEL);
+ e = zmalloc(sizeof(struct marker_entry)
+ + channel_len + name_len + format_len);
if (!e)
return ERR_PTR(-ENOMEM);
memcpy(e->channel, channel, channel_len);
e->name = &e->channel[channel_len];
memcpy(e->name, name, name_len);
if (format) {
- e->format = &e->name[channel_len + name_len];
+ e->format = &e->name[name_len];
memcpy(e->format, format, format_len);
if (strcmp(e->format, MARK_NOARGS) == 0)
e->call = marker_probe_cb_noarg;
e->format_allocated = 0;
e->refcount = 0;
e->rcu_pending = 0;
- hlist_add_head(&e->hlist, head);
+ cds_hlist_add_head(&e->hlist, head);
return e;
}
*/
static int remove_marker(const char *channel, const char *name)
{
- struct hlist_head *head;
- struct hlist_node *node;
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
struct marker_entry *e;
int found = 0;
size_t channel_len = strlen(channel) + 1;
hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
- hlist_for_each_entry(e, node, head, hlist) {
+ cds_hlist_for_each_entry(e, node, head, hlist) {
if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
found = 1;
break;
return -ENOENT;
if (e->single.func != __mark_empty_function)
return -EBUSY;
- hlist_del(&e->hlist);
+ cds_hlist_del(&e->hlist);
if (e->format_allocated)
- kfree(e->format);
+ free(e->format);
ret = ltt_channels_unregister(e->channel);
WARN_ON(ret);
/* Make sure the call_rcu has been executed */
//ust// if (e->rcu_pending)
-//ust// rcu_barrier_sched();
- kfree(e);
+//ust// rcu_barrier_sched();
+ free(e);
return 0;
}
*/
static int marker_set_format(struct marker_entry *entry, const char *format)
{
- entry->format = kstrdup(format, GFP_KERNEL);
+ entry->format = strdup(format);
if (!entry->format)
return -ENOMEM;
entry->format_allocated = 1;
if (entry->format) {
if (strcmp(entry->format, elem->format) != 0) {
- DBG("Format mismatch for probe %s (%s), marker (%s)",
+ ERR("Format mismatch for probe %s (%s), marker (%s)",
entry->name,
entry->format,
elem->format);
* Make sure the private data is valid when we update the
* single probe ptr.
*/
- smp_wmb();
+ cmm_smp_wmb();
elem->single.func = entry->single.func;
/*
* We also make sure that the new probe callbacks array is consistent
* Update the function or multi probe array pointer before setting the
* ptype.
*/
- smp_wmb();
+ cmm_smp_wmb();
elem->ptype = entry->ptype;
-//ust// if (elem->tp_name && (active ^ _imv_read(elem->state))) {
-//ust// WARN_ON(!elem->tp_cb);
-//ust// /*
-//ust// * It is ok to directly call the probe registration because type
-//ust// * checking has been done in the __trace_mark_tp() macro.
-//ust// */
-//ust//
-//ust// if (active) {
-//ust// /*
-//ust// * try_module_get should always succeed because we hold
-//ust// * markers_mutex to get the tp_cb address.
-//ust// */
+ if (elem->tp_name && (active ^ _imv_read(elem->state))) {
+ WARN_ON(!elem->tp_cb);
+ /*
+ * It is ok to directly call the probe registration because type
+ * checking has been done in the __trace_mark_tp() macro.
+ */
+
+ if (active) {
+ /*
+ * try_module_get should always succeed because we hold
+ * markers_mutex to get the tp_cb address.
+ */
//ust// ret = try_module_get(__module_text_address(
//ust// (unsigned long)elem->tp_cb));
//ust// BUG_ON(!ret);
-//ust// ret = tracepoint_probe_register_noupdate(
-//ust// elem->tp_name,
-//ust// elem->tp_cb);
-//ust// } else {
-//ust// ret = tracepoint_probe_unregister_noupdate(
-//ust// elem->tp_name,
-//ust// elem->tp_cb);
-//ust// /*
-//ust// * tracepoint_probe_update_all() must be called
-//ust// * before the module containing tp_cb is unloaded.
-//ust// */
+ ret = tracepoint_probe_register_noupdate(
+ elem->tp_name,
+ elem->tp_cb, NULL);
+ } else {
+ ret = tracepoint_probe_unregister_noupdate(
+ elem->tp_name,
+ elem->tp_cb, NULL);
+ /*
+ * tracepoint_probe_update_all() must be called
+ * before the module containing tp_cb is unloaded.
+ */
//ust// module_put(__module_text_address(
//ust// (unsigned long)elem->tp_cb));
-//ust// }
-//ust// }
+ }
+ }
elem->state__imv = active;
return ret;
*/
static void disable_marker(struct marker *elem)
{
-//ust// int ret;
-//ust//
-//ust// /* leave "call" as is. It is known statically. */
-//ust// if (elem->tp_name && _imv_read(elem->state)) {
-//ust// WARN_ON(!elem->tp_cb);
-//ust// /*
-//ust// * It is ok to directly call the probe registration because type
-//ust// * checking has been done in the __trace_mark_tp() macro.
-//ust// */
-//ust// ret = tracepoint_probe_unregister_noupdate(elem->tp_name,
-//ust// elem->tp_cb);
-//ust// WARN_ON(ret);
-//ust// /*
-//ust// * tracepoint_probe_update_all() must be called
-//ust// * before the module containing tp_cb is unloaded.
-//ust// */
+ int ret;
+
+ /* leave "call" as is. It is known statically. */
+ if (elem->tp_name && _imv_read(elem->state)) {
+ WARN_ON(!elem->tp_cb);
+ /*
+ * It is ok to directly call the probe registration because type
+ * checking has been done in the __trace_mark_tp() macro.
+ */
+ ret = tracepoint_probe_unregister_noupdate(elem->tp_name,
+ elem->tp_cb, NULL);
+ WARN_ON(ret);
+ /*
+ * tracepoint_probe_update_all() must be called
+ * before the module containing tp_cb is unloaded.
+ */
//ust// module_put(__module_text_address((unsigned long)elem->tp_cb));
-//ust// }
+ }
elem->state__imv = 0;
elem->single.func = __mark_empty_function;
/* Update the function before setting the ptype */
- smp_wmb();
+ cmm_smp_wmb();
elem->ptype = 0; /* single probe */
/*
* Leave the private data and channel_id/event_id there, because removal
{
struct marker_entry *entry;
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker(channel, name);
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
return entry && !!entry->refcount;
}
*
* Updates the probe callback corresponding to a range of markers.
*/
-void marker_update_probe_range(struct marker *begin,
- struct marker *end)
+void marker_update_probe_range(struct marker * const *begin,
+ struct marker * const *end)
{
- struct marker *iter;
+ struct marker * const *iter;
struct marker_entry *mark_entry;
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
for (iter = begin; iter < end; iter++) {
- mark_entry = get_marker(iter->channel, iter->name);
+ mark_entry = get_marker((*iter)->channel, (*iter)->name);
if (mark_entry) {
- set_marker(mark_entry, iter, !!mark_entry->refcount);
+ set_marker(mark_entry, *iter, !!mark_entry->refcount);
/*
* ignore error, continue
*/
"channel %s name %s event_id %hu "
"int #1u%zu long #1u%zu pointer #1u%zu "
"size_t #1u%zu alignment #1u%u",
- iter->channel, iter->name, mark_entry->event_id,
+ (*iter)->channel, (*iter)->name, mark_entry->event_id,
sizeof(int), sizeof(long), sizeof(void *),
sizeof(size_t), ltt_get_alignment());
} else {
- disable_marker(iter);
+ disable_marker(*iter);
}
}
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
}
static void lib_update_markers(void)
struct lib *lib;
/* FIXME: we should probably take a mutex here on libs */
-//ust// mutex_lock(&module_mutex);
- list_for_each_entry(lib, &libs, list)
+//ust// pthread_mutex_lock(&module_mutex);
+ cds_list_for_each_entry(lib, &libs, list)
marker_update_probe_range(lib->markers_start,
lib->markers_start + lib->markers_count);
-//ust// mutex_unlock(&module_mutex);
+//ust// pthread_mutex_unlock(&module_mutex);
}
/*
*/
static void marker_update_probes(void)
{
- /* Core kernel markers */
-//ust// marker_update_probe_range(__start___markers, __stop___markers);
- /* Markers in modules. */
-//ust// module_update_markers();
lib_update_markers();
-//ust// tracepoint_probe_update_all();
+ tracepoint_probe_update_all();
/* Update immediate values */
core_imv_update();
//ust// module_imv_update(); /* FIXME: need to port for libs? */
struct marker_probe_closure *old;
int first_probe = 0;
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker(channel, name);
if (!entry) {
first_probe = 1;
* make sure it's executed now.
*/
//ust// if (entry->rcu_pending)
-//ust// rcu_barrier_sched();
+//ust// rcu_barrier_sched();
old = marker_entry_add_probe(entry, probe, probe_private);
if (IS_ERR(old)) {
ret = PTR_ERR(old);
else
goto end;
}
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
/* Activate marker if necessary */
marker_update_probes();
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker(channel, name);
if (!entry)
goto end;
//ust// if (entry->rcu_pending)
-//ust// rcu_barrier_sched();
+//ust// rcu_barrier_sched();
entry->oldptr = old;
entry->rcu_pending = 1;
/* write rcu_pending before calling the RCU callback */
- smp_wmb();
+ cmm_smp_wmb();
//ust// call_rcu_sched(&entry->rcu, free_old_closure);
synchronize_rcu(); free_old_closure(&entry->rcu);
goto end;
ret_err = remove_marker(channel, name);
WARN_ON(ret_err);
end:
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
return ret;
}
//ust// EXPORT_SYMBOL_GPL(marker_probe_register);
struct marker_probe_closure *old;
int ret = -ENOENT;
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker(channel, name);
if (!entry)
goto end;
//ust// if (entry->rcu_pending)
-//ust// rcu_barrier_sched();
+//ust// rcu_barrier_sched();
old = marker_entry_remove_probe(entry, probe, probe_private);
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
marker_update_probes();
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker(channel, name);
if (!entry)
goto end;
//ust// if (entry->rcu_pending)
-//ust// rcu_barrier_sched();
+//ust// rcu_barrier_sched();
entry->oldptr = old;
entry->rcu_pending = 1;
/* write rcu_pending before calling the RCU callback */
- smp_wmb();
+ cmm_smp_wmb();
//ust// call_rcu_sched(&entry->rcu, free_old_closure);
synchronize_rcu(); free_old_closure(&entry->rcu);
remove_marker(channel, name); /* Ignore busy error message */
ret = 0;
end:
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
return ret;
}
//ust// EXPORT_SYMBOL_GPL(marker_probe_unregister);
{
struct marker_entry *entry;
unsigned int i;
- struct hlist_head *head;
- struct hlist_node *node;
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
for (i = 0; i < MARKER_TABLE_SIZE; i++) {
head = &marker_table[i];
- hlist_for_each_entry(entry, node, head, hlist) {
+ cds_hlist_for_each_entry(entry, node, head, hlist) {
if (!entry->ptype) {
if (entry->single.func == probe
&& entry->single.probe_private
struct marker_entry *entry;
int ret = 0;
struct marker_probe_closure *old;
- const char *channel = NULL, *name = NULL;
+ char *channel = NULL, *name = NULL;
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker_from_private_data(probe, probe_private);
if (!entry) {
ret = -ENOENT;
goto end;
}
//ust// if (entry->rcu_pending)
-//ust// rcu_barrier_sched();
+//ust// rcu_barrier_sched();
old = marker_entry_remove_probe(entry, NULL, probe_private);
- channel = kstrdup(entry->channel, GFP_KERNEL);
- name = kstrdup(entry->name, GFP_KERNEL);
- mutex_unlock(&markers_mutex);
+ channel = strdup(entry->channel);
+ name = strdup(entry->name);
+ pthread_mutex_unlock(&markers_mutex);
marker_update_probes();
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker(channel, name);
if (!entry)
goto end;
//ust// if (entry->rcu_pending)
-//ust// rcu_barrier_sched();
+//ust// rcu_barrier_sched();
entry->oldptr = old;
entry->rcu_pending = 1;
/* write rcu_pending before calling the RCU callback */
- smp_wmb();
+ cmm_smp_wmb();
//ust// call_rcu_sched(&entry->rcu, free_old_closure);
synchronize_rcu(); free_old_closure(&entry->rcu);
/* Ignore busy error message */
remove_marker(channel, name);
end:
- mutex_unlock(&markers_mutex);
- kfree(channel);
- kfree(name);
+ pthread_mutex_unlock(&markers_mutex);
+ free(channel);
+ free(name);
return ret;
}
//ust// EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data);
void *marker_get_private_data(const char *channel, const char *name,
marker_probe_func *probe, int num)
{
- struct hlist_head *head;
- struct hlist_node *node;
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
struct marker_entry *e;
size_t channel_len = strlen(channel) + 1;
size_t name_len = strlen(name) + 1;
hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
- hlist_for_each_entry(e, node, head, hlist) {
+ cds_hlist_for_each_entry(e, node, head, hlist) {
if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
if (!e->ptype) {
if (num == 0 && e->single.func == probe)
struct lib *iter_lib;
int found = 0;
-//ust// mutex_lock(&module_mutex);
- list_for_each_entry(iter_lib, &libs, list) {
+//ust// pthread_mutex_lock(&module_mutex);
+ cds_list_for_each_entry(iter_lib, &libs, list) {
if (iter_lib < iter->lib)
continue;
else if (iter_lib > iter->lib)
break;
}
}
-//ust// mutex_unlock(&module_mutex);
+//ust// pthread_mutex_unlock(&module_mutex);
return found;
}
* Returns whether a next marker has been found (1) or not (0).
* Will return the first marker in the range if the input marker is NULL.
*/
-int marker_get_iter_range(struct marker **marker, struct marker *begin,
- struct marker *end)
+int marker_get_iter_range(struct marker * const **marker,
+ struct marker * const *begin,
+ struct marker * const *end)
{
if (!*marker && begin != end) {
*marker = begin;
{
int found = 0;
- /* Core kernel markers */
- if (!iter->lib) {
- /* ust FIXME: how come we cannot disable the following line? we shouldn't need core stuff */
- found = marker_get_iter_range(&iter->marker,
- __start___markers, __stop___markers);
- if (found)
- goto end;
- }
- /* Markers in modules. */
found = lib_get_iter_markers(iter);
-end:
if (!found)
marker_iter_reset(iter);
}
/*
* must be called with current->user_markers_mutex held
*/
-static void free_user_marker(char __user *state, struct hlist_head *head)
+static void free_user_marker(char __user *state, struct cds_hlist_head *head)
{
struct user_marker *umark;
- struct hlist_node *pos, *n;
+ struct cds_hlist_node *pos, *n;
- hlist_for_each_entry_safe(umark, pos, n, head, hlist) {
+ cds_hlist_for_each_entry_safe(umark, pos, n, head, hlist) {
if (umark->state == state) {
- hlist_del(&umark->hlist);
- kfree(umark);
+ cds_hlist_del(&umark->hlist);
+ free(umark);
}
}
}
//ust// struct hlist_node *pos;
//ust// struct marker_entry *entry;
//ust//
-//ust// mutex_lock(&markers_mutex);
-//ust// mutex_lock(¤t->group_leader->user_markers_mutex);
+//ust// pthread_mutex_lock(&markers_mutex);
+//ust// pthread_mutex_lock(¤t->group_leader->user_markers_mutex);
//ust// if (strcmp(current->comm, "testprog") == 0)
//ust// DBG("do update pending for testprog");
//ust// hlist_for_each_entry(umark, pos,
//ust// }
//ust// }
//ust// clear_thread_flag(TIF_MARKER_PENDING);
-//ust// mutex_unlock(¤t->group_leader->user_markers_mutex);
-//ust// mutex_unlock(&markers_mutex);
+//ust// pthread_mutex_unlock(¤t->group_leader->user_markers_mutex);
+//ust// pthread_mutex_unlock(&markers_mutex);
//ust// }
/*
void exit_user_markers(struct task_struct *p)
{
struct user_marker *umark;
- struct hlist_node *pos, *n;
+ struct cds_hlist_node *pos, *n;
if (thread_group_leader(p)) {
- mutex_lock(&markers_mutex);
- mutex_lock(&p->user_markers_mutex);
- hlist_for_each_entry_safe(umark, pos, n, &p->user_markers,
+ pthread_mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&p->user_markers_mutex);
+ cds_hlist_for_each_entry_safe(umark, pos, n, &p->user_markers,
hlist)
- kfree(umark);
+ free(umark);
INIT_HLIST_HEAD(&p->user_markers);
p->user_markers_sequence++;
- mutex_unlock(&p->user_markers_mutex);
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&p->user_markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
}
}
{
struct marker_entry *entry;
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
entry = get_marker(channel, name);
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
return entry && !!entry->refcount;
}
{
struct marker_entry *entry;
struct ltt_probe_private_data call_data;
- struct hlist_head *head;
- struct hlist_node *node;
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
unsigned int i;
- mutex_lock(&markers_mutex);
+ pthread_mutex_lock(&markers_mutex);
call_data.trace = trace;
call_data.serializer = NULL;
for (i = 0; i < MARKER_TABLE_SIZE; i++) {
head = &marker_table[i];
- hlist_for_each_entry(entry, node, head, hlist) {
+ cds_hlist_for_each_entry(entry, node, head, hlist) {
__trace_mark(0, metadata, core_marker_id,
&call_data,
"channel %s name %s event_id %hu "
entry->format);
}
}
- mutex_unlock(&markers_mutex);
+ pthread_mutex_unlock(&markers_mutex);
}
//ust// EXPORT_SYMBOL_GPL(ltt_dump_marker_state);
new_marker_cb = cb;
}
-static void new_markers(struct marker *start, struct marker *end)
+static void new_markers(struct marker * const *start, struct marker * const *end)
{
- if(new_marker_cb) {
- struct marker *m;
+ if (new_marker_cb) {
+ struct marker * const *m;
for(m=start; m < end; m++) {
- new_marker_cb(m);
+ new_marker_cb(*m);
}
}
}
-int marker_register_lib(struct marker *markers_start, int markers_count)
+int marker_register_lib(struct marker * const *markers_start, int markers_count)
{
struct lib *pl;
- struct marker_addr *addr;
- pl = (struct lib *) malloc(sizeof(struct lib));
+ pl = (struct lib *) zmalloc(sizeof(struct lib));
pl->markers_start = markers_start;
pl->markers_count = markers_count;
/* FIXME: maybe protect this with its own mutex? */
lock_markers();
- list_add(&pl->list, &libs);
+ cds_list_add(&pl->list, &libs);
unlock_markers();
new_markers(markers_start, markers_start + markers_count);
return 0;
}
-int marker_unregister_lib(struct marker *markers_start, int markers_count)
+int marker_unregister_lib(struct marker * const *markers_start)
{
+ struct lib *lib;
+
/*FIXME: implement; but before implementing, marker_register_lib must
have appropriate locking. */
+ lock_markers();
+
+ /* FIXME: we should probably take a mutex here on libs */
+//ust// pthread_mutex_lock(&module_mutex);
+ cds_list_for_each_entry(lib, &libs, list) {
+ if(lib->markers_start == markers_start) {
+ struct lib *lib2free = lib;
+ cds_list_del(&lib->list);
+ free(lib2free);
+ break;
+ }
+ }
+
+ unlock_markers();
+
return 0;
}
void __attribute__((constructor)) init_markers(void)
{
- if(!initialized) {
- marker_register_lib(__start___markers, (((long)__stop___markers)-((long)__start___markers))/sizeof(struct marker));
- //DBG("markers_start: %p, markers_stop: %p\n", __start___markers, __stop___markers);
+ if (!initialized) {
+ marker_register_lib(__start___markers_ptrs,
+ __stop___markers_ptrs
+ - __start___markers_ptrs);
initialized = 1;
}
}
+
+/* Run at library unload: drop this lib's marker section from the libs list.
+ * Must be a destructor (teardown), not a constructor — as a constructor it
+ * would race init_markers() with unspecified ordering and unregister the
+ * section at startup. */
+void __attribute__((destructor)) destroy_markers(void)
+{
+ marker_unregister_lib(__start___markers_ptrs);
+}