projects
/
ust.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Fix up all use of /dev/stderr for portability to busybox /bin/sh
[ust.git]
/
libust
/
channels.c
diff --git
a/libust/channels.c
b/libust/channels.c
index b7f35439a86e189413e7fa8ef81374c30a945d12..13178e620c4c6eac9bf7c70e2378049b706ed3ca 100644
(file)
--- a/
libust/channels.c
+++ b/
libust/channels.c
@@ -23,37 +23,35 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-//ust// #include <linux/module.h>
-//ust// #include <linux/ltt-channels.h>
-//ust// #include <linux/mutex.h>
-//ust// #include <linux/vmalloc.h>
-
-#include <ust/kernelcompat.h>
-#include "channels.h"
-#include "usterr.h"
+#include <stdlib.h>
#include <ust/marker.h>
#include <ust/marker.h>
+#include "channels.h"
+#include "usterr_signal_safe.h"
/*
* ltt_channel_mutex may be nested inside the LTT trace mutex.
* ltt_channel_mutex mutex may be nested inside markers mutex.
*/
static DEFINE_MUTEX(ltt_channel_mutex);
/*
* ltt_channel_mutex may be nested inside the LTT trace mutex.
* ltt_channel_mutex mutex may be nested inside markers mutex.
*/
static DEFINE_MUTEX(ltt_channel_mutex);
-static LIST_HEAD(ltt_channels);
+static CDS_LIST_HEAD(ltt_channels);
/*
* Index of next channel in array. Makes sure that as long as a trace channel is
* allocated, no array index will be re-used when a channel is freed and then
* another channel is allocated. This index is cleared and the array indexeds
/*
* Index of next channel in array. Makes sure that as long as a trace channel is
* allocated, no array index will be re-used when a channel is freed and then
* another channel is allocated. This index is cleared and the array indexeds
- * get reassigned when the index_kref goes back to 0, which indicates that no
+ * get reassigned when the index_urcu_ref goes back to 0, which indicates that no
* more trace channels are allocated.
*/
static unsigned int free_index;
* more trace channels are allocated.
*/
static unsigned int free_index;
-static struct kref index_kref; /* Keeps track of allocated trace channels */
+static struct urcu_ref index_urcu_ref; /* Keeps track of allocated trace channels */
+
+int ust_channels_overwrite_by_default = 0;
+int ust_channels_request_collection_by_default = 1;
static struct ltt_channel_setting *lookup_channel(const char *name)
{
struct ltt_channel_setting *iter;
static struct ltt_channel_setting *lookup_channel(const char *name)
{
struct ltt_channel_setting *iter;
- list_for_each_entry(iter, &ltt_channels, list)
+ cds_list_for_each_entry(iter, &ltt_channels, list)
if (strcmp(name, iter->name) == 0)
return iter;
return NULL;
if (strcmp(name, iter->name) == 0)
return iter;
return NULL;
@@ -66,22 +64,23 @@ static struct ltt_channel_setting *lookup_channel(const char *name)
*
* Called with lock_markers() and channels mutex held.
*/
*
* Called with lock_markers() and channels mutex held.
*/
-static void release_channel_setting(struct kref *kref)
+static void release_channel_setting(struct urcu_ref *urcu_ref)
{
{
- struct ltt_channel_setting *setting = container_of(kref,
- struct ltt_channel_setting, kref);
+ struct ltt_channel_setting *setting = _ust_container_of(urcu_ref,
+ struct ltt_channel_setting, urcu_ref);
struct ltt_channel_setting *iter;
struct ltt_channel_setting *iter;
- if (uatomic_read(&index_kref.refcount) == 0
- && uatomic_read(&setting->kref.refcount) == 0) {
- list_del(&setting->list);
- kfree(setting);
+ if (uatomic_read(&index_urcu_ref.refcount) == 0
+ && uatomic_read(&setting->urcu_ref.refcount) == 0) {
+ cds_list_del(&setting->list);
+ free(setting);
free_index = 0;
free_index = 0;
- list_for_each_entry(iter, &ltt_channels, list) {
+ cds_list_for_each_entry(iter, &ltt_channels, list) {
iter->index = free_index++;
iter->free_event_id = 0;
}
iter->index = free_index++;
iter->free_event_id = 0;
}
+ /* FIXME: why not run this? */
//ust// markers_compact_event_ids();
}
}
//ust// markers_compact_event_ids();
}
}
@@ -91,12 +90,12 @@ static void release_channel_setting(struct kref *kref)
*
* Called with lock_markers() and channels mutex held.
*/
*
* Called with lock_markers() and channels mutex held.
*/
-static void release_trace_channel(struct kref *kref)
+static void release_trace_channel(struct urcu_ref *urcu_ref)
{
struct ltt_channel_setting *iter, *n;
{
struct ltt_channel_setting *iter, *n;
- list_for_each_entry_safe(iter, n, &ltt_channels, list)
- release_channel_setting(&iter->kref);
+ cds_list_for_each_entry_safe(iter, n, &ltt_channels, list)
+ release_channel_setting(&iter->urcu_ref);
}
/**
}
/**
@@ -110,28 +109,28 @@ int ltt_channels_register(const char *name)
struct ltt_channel_setting *setting;
int ret = 0;
struct ltt_channel_setting *setting;
int ret = 0;
- mutex_lock(&ltt_channel_mutex);
+ pthread_mutex_lock(&ltt_channel_mutex);
setting = lookup_channel(name);
if (setting) {
setting = lookup_channel(name);
if (setting) {
- if (uatomic_read(&setting->kref.refcount) == 0)
- goto init_kref;
+ if (uatomic_read(&setting->urcu_ref.refcount) == 0)
+ goto init_urcu_ref;
else {
else {
- kref_get(&setting->kref);
+ urcu_ref_get(&setting->urcu_ref);
goto end;
}
}
goto end;
}
}
- setting = kzalloc(sizeof(*setting), GFP_KERNEL);
+ setting = zmalloc(sizeof(*setting));
if (!setting) {
ret = -ENOMEM;
goto end;
}
if (!setting) {
ret = -ENOMEM;
goto end;
}
- list_add(&setting->list, &ltt_channels);
+ cds_list_add(&setting->list, &ltt_channels);
strncpy(setting->name, name, PATH_MAX-1);
setting->index = free_index++;
strncpy(setting->name, name, PATH_MAX-1);
setting->index = free_index++;
-init_kref:
- kref_init(&setting->kref);
+init_urcu_ref:
+ urcu_ref_init(&setting->urcu_ref);
end:
end:
- mutex_unlock(&ltt_channel_mutex);
+ pthread_mutex_unlock(&ltt_channel_mutex);
return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_register);
return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_register);
@@ -147,15 +146,15 @@ int ltt_channels_unregister(const char *name)
struct ltt_channel_setting *setting;
int ret = 0;
struct ltt_channel_setting *setting;
int ret = 0;
- mutex_lock(&ltt_channel_mutex);
+ pthread_mutex_lock(&ltt_channel_mutex);
setting = lookup_channel(name);
setting = lookup_channel(name);
- if (!setting || uatomic_read(&setting->kref.refcount) == 0) {
+ if (!setting || uatomic_read(&setting->urcu_ref.refcount) == 0) {
ret = -ENOENT;
goto end;
}
ret = -ENOENT;
goto end;
}
- kref_put(&setting->kref, release_channel_setting);
+ urcu_ref_put(&setting->urcu_ref, release_channel_setting);
end:
end:
- mutex_unlock(&ltt_channel_mutex);
+ pthread_mutex_unlock(&ltt_channel_mutex);
return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_unregister);
return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_unregister);
@@ -173,16 +172,16 @@ int ltt_channels_set_default(const char *name,
struct ltt_channel_setting *setting;
int ret = 0;
struct ltt_channel_setting *setting;
int ret = 0;
- mutex_lock(&ltt_channel_mutex);
+ pthread_mutex_lock(&ltt_channel_mutex);
setting = lookup_channel(name);
setting = lookup_channel(name);
- if (!setting || uatomic_read(&setting->kref.refcount) == 0) {
+ if (!setting || uatomic_read(&setting->urcu_ref.refcount) == 0) {
ret = -ENOENT;
goto end;
}
setting->subbuf_size = subbuf_size;
setting->subbuf_cnt = subbuf_cnt;
end:
ret = -ENOENT;
goto end;
}
setting->subbuf_size = subbuf_size;
setting->subbuf_cnt = subbuf_cnt;
end:
- mutex_unlock(&ltt_channel_mutex);
+ pthread_mutex_unlock(&ltt_channel_mutex);
return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_set_default);
return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_set_default);
@@ -198,8 +197,8 @@ const char *ltt_channels_get_name_from_index(unsigned int index)
{
struct ltt_channel_setting *iter;
{
struct ltt_channel_setting *iter;
- list_for_each_entry(iter, &ltt_channels, list)
- if (iter->index == index && uatomic_read(&iter->kref.refcount))
+ cds_list_for_each_entry(iter, &ltt_channels, list)
+ if (iter->index == index && uatomic_read(&iter->urcu_ref.refcount))
return iter->name;
return NULL;
}
return iter->name;
return NULL;
}
@@ -210,9 +209,9 @@ ltt_channels_get_setting_from_name(const char *name)
{
struct ltt_channel_setting *iter;
{
struct ltt_channel_setting *iter;
- list_for_each_entry(iter, &ltt_channels, list)
+ cds_list_for_each_entry(iter, &ltt_channels, list)
if (!strcmp(iter->name, name)
if (!strcmp(iter->name, name)
- && uatomic_read(&iter->kref.refcount))
+ && uatomic_read(&iter->urcu_ref.refcount))
return iter;
return NULL;
}
return iter;
return NULL;
}
@@ -249,38 +248,39 @@ int ltt_channels_get_index_from_name(const char *name)
*/
struct ust_channel *ltt_channels_trace_alloc(unsigned int *nr_channels,
int overwrite,
*/
struct ust_channel *ltt_channels_trace_alloc(unsigned int *nr_channels,
int overwrite,
+ int request_collection,
int active)
{
struct ust_channel *channel = NULL;
struct ltt_channel_setting *iter;
int active)
{
struct ust_channel *channel = NULL;
struct ltt_channel_setting *iter;
- mutex_lock(&ltt_channel_mutex);
+ pthread_mutex_lock(&ltt_channel_mutex);
if (!free_index) {
WARN("ltt_channels_trace_alloc: no free_index; are there any probes connected?");
goto end;
}
if (!free_index) {
WARN("ltt_channels_trace_alloc: no free_index; are there any probes connected?");
goto end;
}
- if (!uatomic_read(&index_kref.refcount))
- kref_init(&index_kref);
+ if (!uatomic_read(&index_urcu_ref.refcount))
+ urcu_ref_init(&index_urcu_ref);
else
else
- kref_get(&index_kref);
+ urcu_ref_get(&index_urcu_ref);
*nr_channels = free_index;
*nr_channels = free_index;
- channel = kzalloc(sizeof(struct ust_channel) * free_index,
- GFP_KERNEL);
+ channel = zmalloc(sizeof(struct ust_channel) * free_index);
if (!channel) {
WARN("ltt_channel_struct: channel null after alloc");
goto end;
}
if (!channel) {
WARN("ltt_channel_struct: channel null after alloc");
goto end;
}
- list_for_each_entry(iter, &ltt_channels, list) {
- if (!uatomic_read(&iter->kref.refcount))
+ cds_list_for_each_entry(iter, &ltt_channels, list) {
+ if (!uatomic_read(&iter->urcu_ref.refcount))
continue;
channel[iter->index].subbuf_size = iter->subbuf_size;
channel[iter->index].subbuf_cnt = iter->subbuf_cnt;
channel[iter->index].overwrite = overwrite;
continue;
channel[iter->index].subbuf_size = iter->subbuf_size;
channel[iter->index].subbuf_cnt = iter->subbuf_cnt;
channel[iter->index].overwrite = overwrite;
+ channel[iter->index].request_collection = request_collection;
channel[iter->index].active = active;
channel[iter->index].channel_name = iter->name;
}
end:
channel[iter->index].active = active;
channel[iter->index].channel_name = iter->name;
}
end:
- mutex_unlock(&ltt_channel_mutex);
+ pthread_mutex_unlock(&ltt_channel_mutex);
return channel;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_alloc);
return channel;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_alloc);
@@ -295,10 +295,10 @@ end:
void ltt_channels_trace_free(struct ust_channel *channels)
{
lock_markers();
void ltt_channels_trace_free(struct ust_channel *channels)
{
lock_markers();
- mutex_lock(&ltt_channel_mutex);
- kfree(channels);
- kref_put(&index_kref, release_trace_channel);
- mutex_unlock(&ltt_channel_mutex);
+ pthread_mutex_lock(&ltt_channel_mutex);
+ free(channels);
+ urcu_ref_put(&index_urcu_ref, release_trace_channel);
+ pthread_mutex_unlock(&ltt_channel_mutex);
unlock_markers();
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_free);
unlock_markers();
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_free);
@@ -352,9 +352,9 @@ int ltt_channels_get_event_id(const char *channel, const char *name)
{
int ret;
{
int ret;
- mutex_lock(&ltt_channel_mutex);
+ pthread_mutex_lock(&ltt_channel_mutex);
ret = _ltt_channels_get_event_id(channel, name);
ret = _ltt_channels_get_event_id(channel, name);
- mutex_unlock(&ltt_channel_mutex);
+ pthread_mutex_unlock(&ltt_channel_mutex);
return ret;
}
return ret;
}
This page took
0.027898 seconds
and
4
git commands to generate.