convert to uatomic ops
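
Replace the kernel-style atomic_*() accessors (pulled in via kernelcompat.h) with
liburcu's uatomic_*() primitives, which are meant for userspace. Below is a minimal
sketch of the uatomic API this patch moves to, assuming a recent liburcu providing
<urcu/uatomic.h>; the 'refcount' variable is a hypothetical stand-in for the kref
counters read in channels.c, not code from this patch:

	#include <stdio.h>
	#include <urcu/uatomic.h>

	int main(void)
	{
		long refcount;			/* hypothetical counter, as in a kref */

		uatomic_set(&refcount, 1);	/* initialize, like kref_init() */
		uatomic_inc(&refcount);		/* take a reference, like kref_get() */

		/* uatomic_read() is the userspace counterpart of atomic_read() */
		if (uatomic_read(&refcount) == 0)
			printf("no users left\n");
		else
			printf("refcount = %ld\n", uatomic_read(&refcount));

		return 0;
	}
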
diff --git a/libust/channels.c b/libust/channels.c
index 76b951773893b000585d897497cddb1f6372677c..b7f35439a86e189413e7fa8ef81374c30a945d12 100644
--- a/libust/channels.c
+++ b/libust/channels.c
 //ust// #include <linux/mutex.h>
 //ust// #include <linux/vmalloc.h>
 
-#include "kernelcompat.h"
+#include <ust/kernelcompat.h>
 #include "channels.h"
 #include "usterr.h"
+#include <ust/marker.h>
 
 /*
  * ltt_channel_mutex may be nested inside the LTT trace mutex.
@@ -71,8 +72,8 @@ static void release_channel_setting(struct kref *kref)
                struct ltt_channel_setting, kref);
        struct ltt_channel_setting *iter;
 
-       if (atomic_read(&index_kref.refcount) == 0
-           && atomic_read(&setting->kref.refcount) == 0) {
+       if (uatomic_read(&index_kref.refcount) == 0
+           && uatomic_read(&setting->kref.refcount) == 0) {
                list_del(&setting->list);
                kfree(setting);
 
@@ -112,7 +113,7 @@ int ltt_channels_register(const char *name)
        mutex_lock(&ltt_channel_mutex);
        setting = lookup_channel(name);
        if (setting) {
-               if (atomic_read(&setting->kref.refcount) == 0)
+               if (uatomic_read(&setting->kref.refcount) == 0)
                        goto init_kref;
                else {
                        kref_get(&setting->kref);
@@ -148,7 +149,7 @@ int ltt_channels_unregister(const char *name)
 
        mutex_lock(&ltt_channel_mutex);
        setting = lookup_channel(name);
-       if (!setting || atomic_read(&setting->kref.refcount) == 0) {
+       if (!setting || uatomic_read(&setting->kref.refcount) == 0) {
                ret = -ENOENT;
                goto end;
        }
@@ -174,7 +175,7 @@ int ltt_channels_set_default(const char *name,
 
        mutex_lock(&ltt_channel_mutex);
        setting = lookup_channel(name);
-       if (!setting || atomic_read(&setting->kref.refcount) == 0) {
+       if (!setting || uatomic_read(&setting->kref.refcount) == 0) {
                ret = -ENOENT;
                goto end;
        }
@@ -198,7 +199,7 @@ const char *ltt_channels_get_name_from_index(unsigned int index)
        struct ltt_channel_setting *iter;
 
        list_for_each_entry(iter, &ltt_channels, list)
-               if (iter->index == index && atomic_read(&iter->kref.refcount))
+               if (iter->index == index && uatomic_read(&iter->kref.refcount))
                        return iter->name;
        return NULL;
 }
@@ -211,7 +212,7 @@ ltt_channels_get_setting_from_name(const char *name)
 
        list_for_each_entry(iter, &ltt_channels, list)
                if (!strcmp(iter->name, name)
-                   && atomic_read(&iter->kref.refcount))
+                   && uatomic_read(&iter->kref.refcount))
                        return iter;
        return NULL;
 }
@@ -246,11 +247,11 @@ int ltt_channels_get_index_from_name(const char *name)
  * Called with trace lock held. Does not perform the trace buffer allocation,
  * because we must let the user overwrite specific channel sizes.
  */
-struct ltt_channel_struct *ltt_channels_trace_alloc(unsigned int *nr_channels,
+struct ust_channel *ltt_channels_trace_alloc(unsigned int *nr_channels,
                                                    int overwrite,
                                                    int active)
 {
-       struct ltt_channel_struct *channel = NULL;
+       struct ust_channel *channel = NULL;
        struct ltt_channel_setting *iter;
 
        mutex_lock(&ltt_channel_mutex);
@@ -258,19 +259,19 @@ struct ltt_channel_struct *ltt_channels_trace_alloc(unsigned int *nr_channels,
                WARN("ltt_channels_trace_alloc: no free_index; are there any probes connected?");
                goto end;
        }
-       if (!atomic_read(&index_kref.refcount))
+       if (!uatomic_read(&index_kref.refcount))
                kref_init(&index_kref);
        else
                kref_get(&index_kref);
        *nr_channels = free_index;
-       channel = kzalloc(sizeof(struct ltt_channel_struct) * free_index,
+       channel = kzalloc(sizeof(struct ust_channel) * free_index,
                          GFP_KERNEL);
        if (!channel) {
                WARN("ltt_channel_struct: channel null after alloc");
                goto end;
        }
        list_for_each_entry(iter, &ltt_channels, list) {
-               if (!atomic_read(&iter->kref.refcount))
+               if (!uatomic_read(&iter->kref.refcount))
                        continue;
                channel[iter->index].subbuf_size = iter->subbuf_size;
                channel[iter->index].subbuf_cnt = iter->subbuf_cnt;
@@ -291,7 +292,7 @@ end:
  * Called with trace lock held. The actual channel buffers must be freed before
  * this function is called.
  */
-void ltt_channels_trace_free(struct ltt_channel_struct *channels)
+void ltt_channels_trace_free(struct ust_channel *channels)
 {
        lock_markers();
        mutex_lock(&ltt_channel_mutex);