Tests: Fix: 99% fill ratio for high buffer usage is too high for larger events
[lttng-tools.git] / src / bin / lttng-sessiond / lttng-syscall.c
/*
 * Copyright (C) 2014 David Goulet <dgoulet@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 */

#define _LGPL_SOURCE
#include <stdbool.h>

#include <common/common.h>
#include <common/kernel-ctl/kernel-ctl.h>

#include "lttng-sessiond.h"
#include "kernel.h"
#include "lttng-syscall.h"
#include "utils.h"

/* Global syscall table. */
struct syscall *syscall_table;

/* Number of entries in the syscall table. */
static size_t syscall_table_nb_entry;

/*
 * Populate the system call table using the kernel tracer.
 *
 * Return 0 on success, in which case the syscall table is allocated. On error,
 * a negative value is returned.
 */
int syscall_init_table(int tracer_fd)
{
	int ret, fd, err;
	size_t nbmem;
	FILE *fp;
	/* Syscall data from the kernel. */
	size_t index = 0;
	bool at_least_one_syscall = false;
	uint32_t bitness;
	char name[SYSCALL_NAME_LEN];

	DBG3("Syscall init system call table");

	fd = kernctl_syscall_list(tracer_fd);
	if (fd < 0) {
		ret = fd;
		PERROR("kernelctl syscall list");
		goto error_ioctl;
	}

	fp = fdopen(fd, "r");
	if (!fp) {
		ret = -errno;
		PERROR("syscall list fdopen");
		goto error_fp;
	}

	nbmem = SYSCALL_TABLE_INIT_SIZE;
	syscall_table = zmalloc(sizeof(struct syscall) * nbmem);
	if (!syscall_table) {
		ret = -errno;
		PERROR("syscall list zmalloc");
		goto error;
	}

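	/*
	 * Each entry emitted by the kernel tracer's syscall listing is expected
	 * to match the format parsed below, for example (illustrative values
	 * only):
	 *
	 *   syscall { index = 0; name = read; bitness = 64; };
	 *
	 * The name conversion is width-limited using XSTR(SYSCALL_NAME_LEN).
	 */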
	while (fscanf(fp,
			"syscall { index = %zu; \
			name = %" XSTR(SYSCALL_NAME_LEN) "[^;]; \
			bitness = %u; };\n",
			&index, name, &bitness) == 3) {
		at_least_one_syscall = true;
		if (index >= nbmem) {
			struct syscall *new_list;
			size_t new_nbmem;

			/* Grow to at least index + 1, at least doubling the size. */
			new_nbmem = max(index + 1, nbmem << 1);
			if (new_nbmem > (SIZE_MAX / sizeof(*new_list))) {
				/* Overflow, stop everything, something went really wrong. */
				ERR("Syscall listing memory size overflow. Stopping");
				free(syscall_table);
				syscall_table = NULL;
				ret = -EINVAL;
				goto error;
			}

			DBG("Reallocating syscall table from %zu to %zu entries", nbmem,
					new_nbmem);
			new_list = realloc(syscall_table, new_nbmem * sizeof(*new_list));
			if (!new_list) {
				ret = -errno;
				PERROR("syscall list realloc");
				goto error;
			}

			/* Zero out the new memory. */
			memset(new_list + nbmem, 0,
					(new_nbmem - nbmem) * sizeof(*new_list));
			nbmem = new_nbmem;
			syscall_table = new_list;
		}
		syscall_table[index].index = index;
		syscall_table[index].bitness = bitness;
		if (lttng_strncpy(syscall_table[index].name, name,
				sizeof(syscall_table[index].name))) {
			ret = -EINVAL;
			free(syscall_table);
			syscall_table = NULL;
			goto error;
		}
		/*
		DBG("Syscall name '%s' at index %" PRIu32 " of bitness %u",
				syscall_table[index].name,
				syscall_table[index].index,
				syscall_table[index].bitness);
		*/
	}

	/* Index starts at 0, so the entry count is the highest index + 1. */
	if (at_least_one_syscall) {
		syscall_table_nb_entry = index + 1;
	}

	ret = 0;

error:
	err = fclose(fp);
	if (err) {
		PERROR("syscall list fclose");
	}
	return ret;

error_fp:
	err = close(fd);
	if (err) {
		PERROR("syscall list close");
	}

error_ioctl:
	return ret;
}

/*
 * Helper function for the list syscalls command that empties the temporary
 * syscall hash table used to track duplicates between the 32 and 64 bit
 * architectures.
 *
 * This empties the hash table and then destroys it. After this, the pointer is
 * unusable. The RCU read side lock MUST be acquired before calling this.
 */
static void destroy_syscall_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct syscall *ksyscall;

	DBG3("Destroying syscall hash table.");

	if (!ht) {
		return;
	}

	cds_lfht_for_each_entry(ht->ht, &iter.iter, ksyscall, node.node) {
		int ret;

		ret = lttng_ht_del(ht, &iter);
		assert(!ret);
		free(ksyscall);
	}
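	/*
	 * Note: ht_cleanup_push() is assumed here to defer the actual hash
	 * table destruction to the session daemon's ht-cleanup mechanism
	 * rather than freeing it in place.
	 */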
	ht_cleanup_push(ht);
}

/*
 * Allocate a hash table and store it at the given pointer location.
 *
 * Return 0 on success else a negative LTTNG error value.
 */
static int init_syscall_ht(struct lttng_ht **ht)
{
	int ret;

	*ht = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	if (!*ht) {
		ret = -LTTNG_ERR_NOMEM;
	} else {
		ret = 0;
	}

	return ret;
}

/*
 * Look up a syscall in the given hash table by name.
 *
 * Return the syscall object if found, else NULL.
 */
static struct syscall *lookup_syscall(struct lttng_ht *ht, const char *name)
{
	struct lttng_ht_node_str *node;
	struct lttng_ht_iter iter;
	struct syscall *ksyscall = NULL;

	assert(ht);
	assert(name);

	lttng_ht_lookup(ht, (void *) name, &iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node) {
		ksyscall = caa_container_of(node, struct syscall, node);
	}

	return ksyscall;
}

/*
 * Update the flags of the event at syscall_index in the events array with the
 * bitness of the syscall at index in the syscall table.
 */
static void update_event_syscall_bitness(struct lttng_event *events,
		unsigned int index, unsigned int syscall_index)
{
	assert(events);

	if (syscall_table[index].bitness == 32) {
		events[syscall_index].flags |= LTTNG_EVENT_FLAG_SYSCALL_32;
	} else {
		events[syscall_index].flags |= LTTNG_EVENT_FLAG_SYSCALL_64;
	}
}

/*
 * Allocate and initialize a syscall object and add it to the given hash table.
 *
 * Return 0 on success else -LTTNG_ERR_NOMEM.
 */
static int add_syscall_to_ht(struct lttng_ht *ht, unsigned int index,
		unsigned int syscall_index)
{
	int ret;
	struct syscall *ksyscall;

	assert(ht);

	ksyscall = zmalloc(sizeof(*ksyscall));
	if (!ksyscall) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}

	strncpy(ksyscall->name, syscall_table[index].name,
			sizeof(ksyscall->name));
	ksyscall->bitness = syscall_table[index].bitness;
	ksyscall->index = syscall_index;
	lttng_ht_node_init_str(&ksyscall->node, ksyscall->name);
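	/*
	 * The key is the syscall name. The caller is expected to have checked
	 * for an existing entry (see lookup_syscall() in syscall_table_list()),
	 * so the unique add below should not encounter a duplicate.
	 */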
	lttng_ht_add_unique_str(ht, &ksyscall->node);
	ret = 0;

error:
	return ret;
}

/*
 * List the syscalls present in the kernel syscall global array, allocate and
 * populate the events structure with them. Syscalls with an empty name are
 * skipped.
 *
 * Return the number of entries in the array else a negative value.
 */
ssize_t syscall_table_list(struct lttng_event **_events)
{
	int i, index = 0;
	ssize_t ret;
	struct lttng_event *events;
	/* Hash table used to filter out duplicates. */
	struct lttng_ht *syscalls_ht = NULL;

	assert(_events);

	DBG("Syscall table listing.");

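	/*
	 * Hold the RCU read side lock across the hash table operations below;
	 * destroy_syscall_ht() requires it to be held by the caller.
	 */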
	rcu_read_lock();

	/*
	 * Allocate at least the total number of syscalls we have, even if some
	 * of them might not be valid. The count below makes sure to return the
	 * right size of the events array.
	 */
	events = zmalloc(syscall_table_nb_entry * sizeof(*events));
	if (!events) {
		PERROR("syscall table list zmalloc");
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}

	ret = init_syscall_ht(&syscalls_ht);
	if (ret < 0) {
		goto error;
	}

	for (i = 0; i < syscall_table_nb_entry; i++) {
		struct syscall *ksyscall;

		/* Skip empty syscalls. */
		if (*syscall_table[i].name == '\0') {
			continue;
		}

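		/*
		 * A syscall already present in the hash table was listed for
		 * the other bitness: only update the flags of its existing
		 * event instead of creating a duplicate entry.
		 */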
		ksyscall = lookup_syscall(syscalls_ht, syscall_table[i].name);
		if (ksyscall) {
			update_event_syscall_bitness(events, i, ksyscall->index);
			continue;
		}

		ret = add_syscall_to_ht(syscalls_ht, i, index);
		if (ret < 0) {
			goto error;
		}

		/* Copy the event information into the events array. */
		strncpy(events[index].name, syscall_table[i].name,
				sizeof(events[index].name));
		update_event_syscall_bitness(events, i, index);
		events[index].type = LTTNG_EVENT_SYSCALL;
		/* This makes the command line not print the enabled/disabled field. */
		events[index].enabled = -1;
		index++;
	}

	destroy_syscall_ht(syscalls_ht);
	*_events = events;
	rcu_read_unlock();
	return index;

error:
	destroy_syscall_ht(syscalls_ht);
	free(events);
	rcu_read_unlock();
	return ret;
}