/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * lttng-tracker-id.c
 *
 * LTTng ID tracking.
 *
 * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/stringify.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <wrapper/tracepoint.h>
#include <wrapper/rcu.h>
#include <wrapper/list.h>
#include <lttng-events.h>

/*
 * Hash table is allocated and freed when there are no possible
 * concurrent lookups (ensured by the alloc/free caller). However,
 * there can be concurrent RCU lookups vs add/del operations.
 *
 * Concurrent updates of the PID hash table are forbidden: the caller
 * must ensure mutual exclusion. This is currently done by holding the
 * sessions_mutex across calls to create, destroy, add, and del
 * functions of this API.
 */
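
/*
 * Illustrative call pattern (a sketch only: the "session" object and
 * its "pid_tracker" field below are assumptions for the example, not
 * definitions from this file):
 *
 *	mutex_lock(&sessions_mutex);
 *	ret = lttng_id_tracker_add(&session->pid_tracker, pid);
 *	mutex_unlock(&sessions_mutex);
 *
 *	rcu_read_lock_sched_notrace();
 *	p = lttng_rcu_dereference(session->pid_tracker.p);
 *	if (p)
 *		tracked = lttng_id_tracker_lookup(p, pid);
 *	rcu_read_unlock_sched_notrace();
 */

/*
 * Accessor returning the ID stored in a hash table node.
 */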
int lttng_id_tracker_get_node_id(const struct lttng_id_hash_node *node)
{
	return node->id;
}

/*
 * Lookup performed from RCU read-side critical section (RCU sched),
 * protected by preemption off at the tracepoint call site.
 * Return true if found, false if not found.
 */
bool lttng_id_tracker_lookup(struct lttng_id_tracker_rcu *p, int id)
{
	struct hlist_head *head;
	struct lttng_id_hash_node *e;
	uint32_t hash = hash_32(id, 32);

	head = &p->id_hash[hash & (LTTNG_ID_TABLE_SIZE - 1)];
	lttng_hlist_for_each_entry_rcu(e, head, hlist) {
		if (id == e->id)
			return true;	/* Found */
	}
	return false;
}
EXPORT_SYMBOL_GPL(lttng_id_tracker_lookup);

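/*
 * Allocate an empty tracker hash table. kzalloc() zero-initializes the
 * table, which leaves every hlist bucket head NULL, i.e. empty.
 */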
static struct lttng_id_tracker_rcu *lttng_id_tracker_rcu_create(void)
{
	struct lttng_id_tracker_rcu *tracker;

	tracker = kzalloc(sizeof(struct lttng_id_tracker_rcu), GFP_KERNEL);
	if (!tracker)
		return NULL;
	return tracker;
}

/*
 * Tracker add and del operations support concurrent RCU lookups.
 */
int lttng_id_tracker_add(struct lttng_id_tracker *lf, int id)
{
	struct hlist_head *head;
	struct lttng_id_hash_node *e;
	struct lttng_id_tracker_rcu *p = lf->p;
	uint32_t hash = hash_32(id, 32);
	bool allocated = false;

	if (!p) {
		p = lttng_id_tracker_rcu_create();
		if (!p)
			return -ENOMEM;
		allocated = true;
	}
	head = &p->id_hash[hash & (LTTNG_ID_TABLE_SIZE - 1)];
	lttng_hlist_for_each_entry(e, head, hlist) {
		if (id == e->id)
			return -EEXIST;
	}
	e = kmalloc(sizeof(struct lttng_id_hash_node), GFP_KERNEL);
	if (!e)
		return -ENOMEM;
	e->id = id;
	hlist_add_head_rcu(&e->hlist, head);
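	/*
	 * Publish a newly allocated table only after its first node has
	 * been inserted: rcu_assign_pointer() orders the initialization
	 * of the table before the pointer becomes visible to concurrent
	 * RCU readers.
	 */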
	if (allocated) {
		rcu_assign_pointer(lf->p, p);
	}
	return 0;
}

static
void id_tracker_del_node_rcu(struct lttng_id_hash_node *e)
{
	hlist_del_rcu(&e->hlist);
	/*
	 * We choose to use a heavyweight synchronize on removal here,
	 * since removal of an ID from the tracker mask is a rare
	 * operation, and we don't want to use more cache lines than
	 * what we really need when doing the ID lookups, so we don't
	 * want to afford adding an rcu_head field to those ID hash
	 * nodes.
	 */
	synchronize_trace();
	kfree(e);
}

/*
 * This removal is only used on destroy, so it does not need to support
 * concurrent RCU lookups.
 */
static
void id_tracker_del_node(struct lttng_id_hash_node *e)
{
	hlist_del(&e->hlist);
	kfree(e);
}

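/*
 * Remove an ID from the tracker. Returns -ENOENT if the tracker is
 * unset or the ID is not present. Waits for a grace period (through
 * id_tracker_del_node_rcu()) before the node is freed.
 */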
int lttng_id_tracker_del(struct lttng_id_tracker *lf, int id)
{
	struct hlist_head *head;
	struct lttng_id_hash_node *e;
	struct lttng_id_tracker_rcu *p = lf->p;
	uint32_t hash = hash_32(id, 32);

	if (!p)
		return -ENOENT;
	head = &p->id_hash[hash & (LTTNG_ID_TABLE_SIZE - 1)];
	/*
	 * No need for _safe iteration, because we stop traversal as soon
	 * as we remove the entry.
	 */
	lttng_hlist_for_each_entry(e, head, hlist) {
		if (id == e->id) {
			id_tracker_del_node_rcu(e);
			return 0;
		}
	}
	return -ENOENT;	/* Not found */
}

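/*
 * Free every node of the hash table, then the table itself. Only
 * called when no concurrent RCU lookup can still access the table.
 */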
static void lttng_id_tracker_rcu_destroy(struct lttng_id_tracker_rcu *p)
{
	int i;

	if (!p)
		return;
	for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
		struct hlist_head *head = &p->id_hash[i];
		struct lttng_id_hash_node *e;
		struct hlist_node *tmp;

		lttng_hlist_for_each_entry_safe(e, tmp, head, hlist)
			id_tracker_del_node(e);
	}
	kfree(p);
}

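/*
 * Replace the current tracker content with a freshly allocated, empty
 * hash table. The new table is published with rcu_assign_pointer(), a
 * grace period elapses, and only then is the old table freed, so
 * concurrent RCU lookups remain safe.
 */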
int lttng_id_tracker_empty_set(struct lttng_id_tracker *lf)
{
	struct lttng_id_tracker_rcu *p, *oldp;

	p = lttng_id_tracker_rcu_create();
	if (!p)
		return -ENOMEM;
	oldp = lf->p;
	rcu_assign_pointer(lf->p, p);
	synchronize_trace();
	lttng_id_tracker_rcu_destroy(oldp);
	return 0;
}

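/*
 * Tear down the tracker: unpublish the table, optionally wait for a
 * grace period (when @rcu is true), then free it.
 */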
void lttng_id_tracker_destroy(struct lttng_id_tracker *lf, bool rcu)
{
	struct lttng_id_tracker_rcu *p = lf->p;

	if (!lf->p)
		return;
	rcu_assign_pointer(lf->p, NULL);
	if (rcu)
		synchronize_trace();
	lttng_id_tracker_rcu_destroy(p);
}