docs: Correct GitHub URLs in lttng-ust.3
[lttng-ust.git] / src / common / counter / shm.c
1 /*
2 * SPDX-License-Identifier: LGPL-2.1-only
3 *
4 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 */
6
7 #define _LGPL_SOURCE
#include "shm.h"

#include <assert.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>		/* For O_* constants */
#include <limits.h>
#include <signal.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>		/* For mode constants */
#include <sys/types.h>
#include <unistd.h>

#ifdef HAVE_LIBNUMA
#include <numa.h>
#include <numaif.h>
#endif

#include <lttng/ust-utils.h>

#include "common/macros.h"
#include "common/ust-fd.h"
#include "common/compat/mmap.h"
33
/*
 * Ensure we have the required amount of space available by writing 0
 * into the entire file. Not doing so can trigger SIGBUS when going
 * beyond the available shm space, since accessing a mapped page with
 * no backing storage raises SIGBUS rather than failing gracefully.
 *
 * Writes len zero bytes to fd, one page at a time, restarting short
 * writes and EINTR. Returns 0 on success, a negative errno value on
 * error.
 */
static
int zero_file(int fd, size_t len)
{
	size_t written = 0;
	char *zeropage;
	long pagelen;
	int ret;

	pagelen = sysconf(_SC_PAGESIZE);
	if (pagelen < 0)
		return -errno;
	/* One zeroed page reused as the write source for the whole file. */
	zeropage = calloc(pagelen, 1);
	if (!zeropage)
		return -ENOMEM;

	while (len > written) {
		size_t count = len - written;
		ssize_t retlen;

		/* Clamp each write to a single page. */
		if (count > (size_t) pagelen)
			count = (size_t) pagelen;
		do {
			retlen = write(fd, zeropage, count);
		} while (retlen < 0 && errno == EINTR);
		if (retlen < 0) {
			ret = -errno;
			goto error;
		}
		/* Partial writes are fine: loop until len bytes are out. */
		written += retlen;
	}
	ret = 0;
error:
	free(zeropage);
	return ret;
}
71
72 struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj, bool populate)
73 {
74 struct lttng_counter_shm_object_table *table;
75
76 table = zmalloc_populate(sizeof(struct lttng_counter_shm_object_table) +
77 max_nb_obj * sizeof(table->objects[0]), populate);
78 if (!table)
79 return NULL;
80 table->size = max_nb_obj;
81 return table;
82 }
83
/*
 * Allocate one shm-backed object in the next free table slot, using the
 * caller-provided file descriptor cpu_fd as backing storage.
 *
 * The fd is zero-filled up to memory_map_size (avoids SIGBUS on access
 * past the backed region), truncated to that size, fsync'ed, then
 * mapped shared read/write. The table does NOT take ownership of
 * cpu_fd (shm_fd_ownership = 0): the caller remains responsible for
 * closing it, including on the error paths below.
 *
 * Returns the initialized object, or NULL on error (invalid fd, table
 * full, or I/O/mmap failure).
 */
static
struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_shm(struct lttng_counter_shm_object_table *table,
			size_t memory_map_size,
			int cpu_fd, bool populate)
{
	struct lttng_counter_shm_object *obj;
	int flags = MAP_SHARED;
	int shmfd, ret;
	char *memory_map;

	if (cpu_fd < 0)
		return NULL;
	if (table->allocated_len >= table->size)
		return NULL;
	/* Slot is only committed (allocated_len++) after full success. */
	obj = &table->objects[table->allocated_len];

	/* create shm */

	shmfd = cpu_fd;
	/* Back every page with real storage before mapping. */
	ret = zero_file(shmfd, memory_map_size);
	if (ret) {
		PERROR("zero_file");
		goto error_zero_file;
	}
	ret = ftruncate(shmfd, memory_map_size);
	if (ret) {
		PERROR("ftruncate");
		goto error_ftruncate;
	}
	/*
	 * Also ensure the file metadata is synced with the storage by using
	 * fsync(2).
	 */
	ret = fsync(shmfd);
	if (ret) {
		PERROR("fsync");
		goto error_fsync;
	}
	obj->shm_fd_ownership = 0;
	obj->shm_fd = shmfd;

	if (populate)
		flags |= LTTNG_MAP_POPULATE;
	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
		flags, shmfd, 0);
	if (memory_map == MAP_FAILED) {
		PERROR("mmap");
		goto error_mmap;
	}
	obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = 0;
	obj->index = table->allocated_len++;

	return obj;

	/* No cleanup needed: the fd belongs to the caller. */
error_mmap:
error_fsync:
error_ftruncate:
error_zero_file:
	return NULL;
}
148
149 static
150 struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_mem(struct lttng_counter_shm_object_table *table,
151 size_t memory_map_size, bool populate)
152 {
153 struct lttng_counter_shm_object *obj;
154 void *memory_map;
155
156 if (table->allocated_len >= table->size)
157 return NULL;
158 obj = &table->objects[table->allocated_len];
159
160 memory_map = zmalloc_populate(memory_map_size, populate);
161 if (!memory_map)
162 goto alloc_error;
163
164 /* no shm_fd */
165 obj->shm_fd = -1;
166 obj->shm_fd_ownership = 0;
167
168 obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM;
169 obj->memory_map = memory_map;
170 obj->memory_map_size = memory_map_size;
171 obj->allocated_len = 0;
172 obj->index = table->allocated_len++;
173
174 return obj;
175
176 alloc_error:
177 return NULL;
178 }
179
/*
 * libnuma prints errors on the console even for numa_available().
 * Work-around this limitation by using get_mempolicy() directly to
 * check whether the kernel supports mempolicy.
 */
#ifdef HAVE_LIBNUMA
static bool lttng_is_numa_available(void)
{
	int ret;

	ret = get_mempolicy(NULL, NULL, 0, NULL, 0);
	if (ret && errno == ENOSYS) {
		return false;
	}
	/*
	 * Per numa(3), numa_available() returns -1 when NUMA is not
	 * available and 0 otherwise. The check must therefore be
	 * "!= -1": comparing with "> 0" would wrongly report NUMA as
	 * unavailable on systems where success is reported as 0.
	 */
	return numa_available() != -1;
}
#endif
197
/*
 * Allocate one object (shm-backed or memory-backed, per 'type') in the
 * next free table slot. When built with libnuma and NUMA is available,
 * the allocation is performed with the memory policy preferring the
 * NUMA node of 'cpu' (or local allocation when cpu/node is unknown);
 * the previous preferred node is restored before returning.
 *
 * cpu_fd is only used for LTTNG_COUNTER_SHM_OBJECT_SHM (see
 * _lttng_counter_shm_object_table_alloc_shm for ownership rules).
 * Returns NULL on error.
 */
#ifdef HAVE_LIBNUMA
struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
			size_t memory_map_size,
			enum lttng_counter_shm_object_type type,
			int cpu_fd,
			int cpu,
			bool populate)
#else
struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
			size_t memory_map_size,
			enum lttng_counter_shm_object_type type,
			int cpu_fd,
			int cpu __attribute__((unused)),
			bool populate)
#endif
{
	struct lttng_counter_shm_object *shm_object;
#ifdef HAVE_LIBNUMA
	int oldnode = 0, node;
	bool numa_avail;

	numa_avail = lttng_is_numa_available();
	if (numa_avail) {
		/* Save current preference so it can be restored below. */
		oldnode = numa_preferred();
		if (cpu >= 0) {
			node = numa_node_of_cpu(cpu);
			if (node >= 0)
				numa_set_preferred(node);
		}
		/*
		 * Note: 'node' is only read when cpu >= 0, thanks to
		 * short-circuit evaluation; it is uninitialized otherwise.
		 */
		if (cpu < 0 || node < 0)
			numa_set_localalloc();
	}
#endif /* HAVE_LIBNUMA */
	switch (type) {
	case LTTNG_COUNTER_SHM_OBJECT_SHM:
		shm_object = _lttng_counter_shm_object_table_alloc_shm(table, memory_map_size,
				cpu_fd, populate);
		break;
	case LTTNG_COUNTER_SHM_OBJECT_MEM:
		shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size,
				populate);
		break;
	default:
		assert(0);
	}
#ifdef HAVE_LIBNUMA
	if (numa_avail)
		numa_set_preferred(oldnode);
#endif /* HAVE_LIBNUMA */
	return shm_object;
}
249
250 struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
251 int shm_fd, size_t memory_map_size, bool populate)
252 {
253 struct lttng_counter_shm_object *obj;
254 int flags = MAP_SHARED;
255 char *memory_map;
256
257 if (table->allocated_len >= table->size)
258 return NULL;
259
260 obj = &table->objects[table->allocated_len];
261
262 obj->shm_fd = shm_fd;
263 obj->shm_fd_ownership = 1;
264
265 if (populate)
266 flags |= LTTNG_MAP_POPULATE;
267 /* memory_map: mmap */
268 memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
269 flags, shm_fd, 0);
270 if (memory_map == MAP_FAILED) {
271 PERROR("mmap");
272 goto error_mmap;
273 }
274 obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM;
275 obj->memory_map = memory_map;
276 obj->memory_map_size = memory_map_size;
277 obj->allocated_len = memory_map_size;
278 obj->index = table->allocated_len++;
279
280 return obj;
281
282 error_mmap:
283 return NULL;
284 }
285
286 /*
287 * Passing ownership of mem to object.
288 */
289 struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_mem(struct lttng_counter_shm_object_table *table,
290 void *mem, size_t memory_map_size)
291 {
292 struct lttng_counter_shm_object *obj;
293
294 if (table->allocated_len >= table->size)
295 return NULL;
296 obj = &table->objects[table->allocated_len];
297
298 obj->shm_fd = -1;
299 obj->shm_fd_ownership = 0;
300
301 obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM;
302 obj->memory_map = mem;
303 obj->memory_map_size = memory_map_size;
304 obj->allocated_len = memory_map_size;
305 obj->index = table->allocated_len++;
306
307 return obj;
308
309 return NULL;
310 }
311
312 static
313 void lttng_counter_shmp_object_destroy(struct lttng_counter_shm_object *obj, int consumer)
314 {
315 switch (obj->type) {
316 case LTTNG_COUNTER_SHM_OBJECT_SHM:
317 {
318 int ret;
319
320 ret = munmap(obj->memory_map, obj->memory_map_size);
321 if (ret) {
322 PERROR("umnmap");
323 assert(0);
324 }
325
326 if (obj->shm_fd_ownership) {
327 /* Delete FDs only if called from app (not consumer). */
328 if (!consumer) {
329 lttng_ust_lock_fd_tracker();
330 ret = close(obj->shm_fd);
331 if (!ret) {
332 lttng_ust_delete_fd_from_tracker(obj->shm_fd);
333 } else {
334 PERROR("close");
335 assert(0);
336 }
337 lttng_ust_unlock_fd_tracker();
338 } else {
339 ret = close(obj->shm_fd);
340 if (ret) {
341 PERROR("close");
342 assert(0);
343 }
344 }
345 }
346 break;
347 }
348 case LTTNG_COUNTER_SHM_OBJECT_MEM:
349 {
350 free(obj->memory_map);
351 break;
352 }
353 default:
354 assert(0);
355 }
356 }
357
358 void lttng_counter_shm_object_table_destroy(struct lttng_counter_shm_object_table *table, int consumer)
359 {
360 int i;
361
362 for (i = 0; i < table->allocated_len; i++)
363 lttng_counter_shmp_object_destroy(&table->objects[i], consumer);
364 free(table);
365 }
366
367 /*
368 * lttng_counter_zalloc_shm - allocate memory within a shm object.
369 *
370 * Shared memory is already zeroed by shmget.
371 * *NOT* multithread-safe (should be protected by mutex).
372 * Returns a -1, -1 tuple on error.
373 */
374 struct lttng_counter_shm_ref lttng_counter_zalloc_shm(struct lttng_counter_shm_object *obj, size_t len)
375 {
376 struct lttng_counter_shm_ref ref;
377 struct lttng_counter_shm_ref shm_ref_error = { -1, -1 };
378
379 if (obj->memory_map_size - obj->allocated_len < len)
380 return shm_ref_error;
381 ref.index = obj->index;
382 ref.offset = obj->allocated_len;
383 obj->allocated_len += len;
384 return ref;
385 }
386
387 void lttng_counter_align_shm(struct lttng_counter_shm_object *obj, size_t align)
388 {
389 size_t offset_len = lttng_ust_offset_align(obj->allocated_len, align);
390 obj->allocated_len += offset_len;
391 }
This page took 0.045448 seconds and 4 git commands to generate.