[lttng-modules.git] / lttng-tracker-id.c
/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * lttng-tracker-id.c
 *
 * LTTng Process ID tracking.
 *
 * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/stringify.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <wrapper/tracepoint.h>
#include <wrapper/rcu.h>
#include <wrapper/list.h>
#include <lttng-events.h>

/*
 * Hash table is allocated and freed when there are no possible
 * concurrent lookups (ensured by the alloc/free caller). However,
 * there can be concurrent RCU lookups vs add/del operations.
 *
 * Concurrent updates of the ID hash table are forbidden: the caller
 * must ensure mutual exclusion. This is currently done by holding the
 * sessions_mutex across calls to the create, destroy, add, and del
 * functions of this API. (An illustrative caller sketch follows this
 * listing.)
 */
int lttng_id_tracker_get_node_id(const struct lttng_id_hash_node *node)
{
        return node->id;
}

/*
 * Lookup performed from RCU read-side critical section (RCU sched),
 * protected by preemption off at the tracepoint call site.
 * Return true if found, false if not found.
 */
bool lttng_id_tracker_lookup(struct lttng_id_tracker_rcu *p, int id)
{
        struct hlist_head *head;
        struct lttng_id_hash_node *e;
        uint32_t hash = hash_32(id, 32);

        head = &p->id_hash[hash & (LTTNG_ID_TABLE_SIZE - 1)];
        lttng_hlist_for_each_entry_rcu(e, head, hlist) {
                if (id == e->id)
                        return true;    /* Found */
        }
        return false;
}
EXPORT_SYMBOL_GPL(lttng_id_tracker_lookup);

static struct lttng_id_tracker_rcu *lttng_id_tracker_rcu_create(void)
{
        struct lttng_id_tracker_rcu *tracker;

        tracker = kzalloc(sizeof(struct lttng_id_tracker_rcu), GFP_KERNEL);
        if (!tracker)
                return NULL;
        return tracker;
}

/*
 * Tracker add and del operations support concurrent RCU lookups.
 */
int lttng_id_tracker_add(struct lttng_id_tracker *lf, int id)
{
        struct hlist_head *head;
        struct lttng_id_hash_node *e;
        struct lttng_id_tracker_rcu *p = lf->p;
        uint32_t hash = hash_32(id, 32);
        bool allocated = false;
        int ret;

        if (!p) {
                p = lttng_id_tracker_rcu_create();
                if (!p)
                        return -ENOMEM;
                allocated = true;
        }
        head = &p->id_hash[hash & (LTTNG_ID_TABLE_SIZE - 1)];
        lttng_hlist_for_each_entry(e, head, hlist) {
                if (id == e->id) {
                        ret = -EEXIST;
                        goto error;
                }
        }
        e = kmalloc(sizeof(struct lttng_id_hash_node), GFP_KERNEL);
        if (!e) {
                ret = -ENOMEM;
                goto error;
        }
        e->id = id;
        hlist_add_head_rcu(&e->hlist, head);
        if (allocated) {
                rcu_assign_pointer(lf->p, p);
        }
        return 0;

error:
        if (allocated) {
                kfree(p);
        }
        return ret;
}

static
void id_tracker_del_node_rcu(struct lttng_id_hash_node *e)
{
        hlist_del_rcu(&e->hlist);
        /*
         * We choose to use a heavyweight synchronize on removal here,
         * since removal of an ID from the tracker mask is a rare
         * operation, and we don't want to use more cache lines than we
         * really need for the ID lookups, so we don't want to afford
         * adding an rcu_head field to those hash nodes. (A sketch of
         * the rcu_head alternative follows this listing.)
         */
        synchronize_trace();
        kfree(e);
}

/*
 * This removal is only used on destroy, so it does not need to support
 * concurrent RCU lookups.
 */
static
void id_tracker_del_node(struct lttng_id_hash_node *e)
{
        hlist_del(&e->hlist);
        kfree(e);
}

int lttng_id_tracker_del(struct lttng_id_tracker *lf, int id)
{
        struct hlist_head *head;
        struct lttng_id_hash_node *e;
        struct lttng_id_tracker_rcu *p = lf->p;
        uint32_t hash = hash_32(id, 32);

        if (!p)
                return -ENOENT;
        head = &p->id_hash[hash & (LTTNG_ID_TABLE_SIZE - 1)];
        /*
         * No need for _safe iteration, because we stop traversal as soon
         * as we remove the entry.
         */
        lttng_hlist_for_each_entry(e, head, hlist) {
                if (id == e->id) {
                        id_tracker_del_node_rcu(e);
                        return 0;
                }
        }
        return -ENOENT; /* Not found */
}

static void lttng_id_tracker_rcu_destroy(struct lttng_id_tracker_rcu *p)
{
        int i;

        if (!p)
                return;
        for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
                struct hlist_head *head = &p->id_hash[i];
                struct lttng_id_hash_node *e;
                struct hlist_node *tmp;

                lttng_hlist_for_each_entry_safe(e, tmp, head, hlist)
                        id_tracker_del_node(e);
        }
        kfree(p);
}

int lttng_id_tracker_empty_set(struct lttng_id_tracker *lf)
{
        struct lttng_id_tracker_rcu *p, *oldp;

        p = lttng_id_tracker_rcu_create();
        if (!p)
                return -ENOMEM;
        oldp = lf->p;
        rcu_assign_pointer(lf->p, p);
        synchronize_trace();
        lttng_id_tracker_rcu_destroy(oldp);
        return 0;
}

void lttng_id_tracker_destroy(struct lttng_id_tracker *lf, bool rcu)
{
        struct lttng_id_tracker_rcu *p = lf->p;

        if (!lf->p)
                return;
        rcu_assign_pointer(lf->p, NULL);
        if (rcu)
                synchronize_trace();
        lttng_id_tracker_rcu_destroy(p);
}
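
Illustrative usage sketch (not part of the lttng-modules source, referenced from the comments above): a minimal caller following the locking contract of this API. It assumes the declarations from lttng-events.h and the lttng_rcu_dereference() helper from wrapper/rcu.h, both already included by this file; the example_* names and example_tracker_mutex are placeholders standing in for sessions_mutex and the real callers in lttng-events.c.

/* Hypothetical caller sketch; example_* names are not part of lttng-modules. */
#include <linux/mutex.h>
#include <linux/preempt.h>

#include <wrapper/rcu.h>
#include <lttng-events.h>

static DEFINE_MUTEX(example_tracker_mutex);     /* stands in for sessions_mutex */

/* Updates (create/destroy/add/del) must be serialized by the caller. */
static int example_track_id(struct lttng_id_tracker *lf, int id)
{
        int ret;

        mutex_lock(&example_tracker_mutex);
        ret = lttng_id_tracker_add(lf, id);
        mutex_unlock(&example_tracker_mutex);
        return ret;
}

/* Lookups run on the RCU-sched read side: preemption off is sufficient. */
static bool example_id_is_tracked(struct lttng_id_tracker *lf, int id)
{
        struct lttng_id_tracker_rcu *p;
        bool tracked = false;

        preempt_disable();
        p = lttng_rcu_dereference(lf->p);
        if (p)
                tracked = lttng_id_tracker_lookup(p, id);
        /* NULL p means no tracker is set; that case's policy is the caller's. */
        preempt_enable();
        return tracked;
}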
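
For contrast with the trade-off documented inside id_tracker_del_node_rcu(), here is a hedged sketch of the alternative this file deliberately avoids: embedding a struct rcu_head in each node so removal can use call_rcu() instead of a blocking synchronize. The example_* names are hypothetical, and the sketch assumes a kernel where regular RCU also waits for preempt-disabled readers (post-4.20 RCU flavor unification); older kernels would need call_rcu_sched() to match the RCU-sched read side used by the lookups.

/* Hypothetical alternative node layout; NOT the design used by this file. */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_id_hash_node {
        struct hlist_node hlist;
        int id;
        struct rcu_head rcu;    /* the extra per-node field this file avoids */
};

static void example_node_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct example_id_hash_node, rcu));
}

static void example_del_node_rcu(struct example_id_hash_node *e)
{
        hlist_del_rcu(&e->hlist);
        /*
         * Non-blocking removal: the node is freed only after a grace
         * period, at the cost of carrying an rcu_head in every node
         * and thus a larger cache footprint on the lookup fast path.
         */
        call_rcu(&e->rcu, example_node_free_rcu);
}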