a1ef3d69f0602544f9335a9f1d59843e14208c55
[lttng-ust.git] / src / common / ringbuffer / shm.c
1 /*
2 * SPDX-License-Identifier: LGPL-2.1-only
3 *
4 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 */
6
7 #define _LGPL_SOURCE
8 #include "shm.h"
9 #include <unistd.h>
10 #include <fcntl.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 #include <sys/stat.h> /* For mode constants */
14 #include <fcntl.h> /* For O_* constants */
15 #include <assert.h>
16 #include <stdio.h>
17 #include <signal.h>
18 #include <dirent.h>
19 #include <limits.h>
20 #include <stdbool.h>
21 #include <stdint.h>
22
23 #ifdef HAVE_LIBNUMA
24 #include <numa.h>
25 #include <numaif.h>
26 #endif
27
28 #include <lttng/ust-utils.h>
29
30 #include "common/macros.h"
31 #include "common/ust-fd.h"
32 #include "common/compat/mmap.h"
33
34 /*
35 * Ensure we have the required amount of space available by writing 0
36 * into the entire buffer. Not doing so can trigger SIGBUS when going
37 * beyond the available shm space.
38 */
39 static
40 int zero_file(int fd, size_t len)
41 {
42 ssize_t retlen;
43 size_t written = 0;
44 char *zeropage;
45 long pagelen;
46 int ret;
47
48 pagelen = sysconf(_SC_PAGESIZE);
49 if (pagelen < 0)
50 return (int) pagelen;
51 zeropage = calloc(pagelen, 1);
52 if (!zeropage)
53 return -ENOMEM;
54
55 while (len > written) {
56 do {
57 retlen = write(fd, zeropage,
58 min_t(size_t, pagelen, len - written));
59 } while (retlen == -1UL && errno == EINTR);
60 if (retlen < 0) {
61 ret = (int) retlen;
62 goto error;
63 }
64 written += retlen;
65 }
66 ret = 0;
67 error:
68 free(zeropage);
69 return ret;
70 }
71
72 struct shm_object_table *shm_object_table_create(size_t max_nb_obj)
73 {
74 struct shm_object_table *table;
75
76 table = zmalloc(sizeof(struct shm_object_table) +
77 max_nb_obj * sizeof(table->objects[0]));
78 if (!table)
79 return NULL;
80 table->size = max_nb_obj;
81 return table;
82 }
83
/*
 * Allocate a SHM-backed object in the next free slot of @table.
 * @stream_fd is the file descriptor of the shm file; the resulting
 * object does *not* own it (shm_fd_ownership = 0).
 * Grows the shm file to @memory_map_size, zero-fills it, maps it
 * read/write, and creates the object's wakeup pipe.
 * Returns the initialized object, or NULL on error (invalid fd, full
 * table, or syscall failure).
 */
static
struct shm_object *_shm_object_table_alloc_shm(struct shm_object_table *table,
					   size_t memory_map_size,
					   int stream_fd)
{
	int shmfd, waitfd[2], ret, i;
	struct shm_object *obj;
	char *memory_map;

	if (stream_fd < 0)
		return NULL;
	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	/* wait_fd: create pipe */
	ret = pipe2(waitfd, O_CLOEXEC);
	if (ret < 0) {
		PERROR("pipe");
		goto error_pipe;
	}
	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(waitfd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}
	memcpy(obj->wait_fd, waitfd, sizeof(waitfd));

	/*
	 * Set POSIX shared memory object size
	 *
	 * First, use ftruncate() to set its size, some implementations won't
	 * allow writes past the size set by ftruncate.
	 * Then, use write() to fill it with zeros, this allows us to fully
	 * allocate it and detect a shortage of shm space without dealing with
	 * a SIGBUS.
	 */

	shmfd = stream_fd;
	ret = ftruncate(shmfd, memory_map_size);
	if (ret) {
		PERROR("ftruncate");
		goto error_ftruncate;
	}
	ret = zero_file(shmfd, memory_map_size);
	if (ret) {
		PERROR("zero_file");
		goto error_zero_file;
	}

	/*
	 * Also ensure the file metadata is synced with the storage by using
	 * fsync(2). Some platforms don't allow fsync on POSIX shm fds, ignore
	 * EINVAL accordingly.
	 */
	ret = fsync(shmfd);
	if (ret && errno != EINVAL) {
		PERROR("fsync");
		goto error_fsync;
	}
	/* Caller keeps ownership of stream_fd; destroy will not close it. */
	obj->shm_fd_ownership = 0;
	obj->shm_fd = shmfd;

	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			  MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0);
	if (memory_map == MAP_FAILED) {
		PERROR("mmap");
		goto error_mmap;
	}
	obj->type = SHM_OBJECT_SHM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = 0;	/* zalloc_shm() allocation cursor starts at 0 */
	obj->index = table->allocated_len++;

	return obj;

error_mmap:
error_fsync:
error_ftruncate:
error_zero_file:
error_fcntl:
	/* Close both ends of the wakeup pipe on any failure past pipe2(). */
	for (i = 0; i < 2; i++) {
		ret = close(waitfd[i]);
		if (ret) {
			PERROR("close");
			assert(0);
		}
	}
error_pipe:
	return NULL;
}
178
179 static
180 struct shm_object *_shm_object_table_alloc_mem(struct shm_object_table *table,
181 size_t memory_map_size)
182 {
183 struct shm_object *obj;
184 void *memory_map;
185 int waitfd[2], i, ret;
186
187 if (table->allocated_len >= table->size)
188 return NULL;
189 obj = &table->objects[table->allocated_len];
190
191 memory_map = zmalloc(memory_map_size);
192 if (!memory_map)
193 goto alloc_error;
194
195 /* wait_fd: create pipe */
196 ret = pipe2(waitfd, O_CLOEXEC);
197 if (ret < 0) {
198 PERROR("pipe");
199 goto error_pipe;
200 }
201 /* The write end of the pipe needs to be non-blocking */
202 ret = fcntl(waitfd[1], F_SETFL, O_NONBLOCK);
203 if (ret < 0) {
204 PERROR("fcntl");
205 goto error_fcntl;
206 }
207 memcpy(obj->wait_fd, waitfd, sizeof(waitfd));
208
209 /* no shm_fd */
210 obj->shm_fd = -1;
211 obj->shm_fd_ownership = 0;
212
213 obj->type = SHM_OBJECT_MEM;
214 obj->memory_map = memory_map;
215 obj->memory_map_size = memory_map_size;
216 obj->allocated_len = 0;
217 obj->index = table->allocated_len++;
218
219 return obj;
220
221 error_fcntl:
222 for (i = 0; i < 2; i++) {
223 ret = close(waitfd[i]);
224 if (ret) {
225 PERROR("close");
226 assert(0);
227 }
228 }
229 error_pipe:
230 free(memory_map);
231 alloc_error:
232 return NULL;
233 }
234
/*
 * libnuma prints errors on the console even for numa_available().
 * Work-around this limitation by using get_mempolicy() directly to
 * check whether the kernel supports mempolicy.
 */
#ifdef HAVE_LIBNUMA
static bool lttng_is_numa_available(void)
{
	int ret;

	ret = get_mempolicy(NULL, NULL, 0, NULL, 0);
	if (ret && errno == ENOSYS) {
		return false;
	}
	/*
	 * numa_available() returns -1 when NUMA is unavailable and a
	 * non-negative value otherwise (numactl's implementation returns
	 * 0 on success), so the check must be >= 0; checking > 0 would
	 * always report NUMA as unavailable.
	 */
	return numa_available() >= 0;
}
#endif
252
253 #ifdef HAVE_LIBNUMA
254 struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
255 size_t memory_map_size,
256 enum shm_object_type type,
257 int stream_fd,
258 int cpu)
259 #else
260 struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
261 size_t memory_map_size,
262 enum shm_object_type type,
263 int stream_fd,
264 int cpu __attribute__((unused)))
265 #endif
266 {
267 struct shm_object *shm_object;
268 #ifdef HAVE_LIBNUMA
269 int oldnode = 0, node;
270 bool numa_avail;
271
272 numa_avail = lttng_is_numa_available();
273 if (numa_avail) {
274 oldnode = numa_preferred();
275 if (cpu >= 0) {
276 node = numa_node_of_cpu(cpu);
277 if (node >= 0)
278 numa_set_preferred(node);
279 }
280 if (cpu < 0 || node < 0)
281 numa_set_localalloc();
282 }
283 #endif /* HAVE_LIBNUMA */
284 switch (type) {
285 case SHM_OBJECT_SHM:
286 shm_object = _shm_object_table_alloc_shm(table, memory_map_size,
287 stream_fd);
288 break;
289 case SHM_OBJECT_MEM:
290 shm_object = _shm_object_table_alloc_mem(table, memory_map_size);
291 break;
292 default:
293 assert(0);
294 }
295 #ifdef HAVE_LIBNUMA
296 if (numa_avail)
297 numa_set_preferred(oldnode);
298 #endif /* HAVE_LIBNUMA */
299 return shm_object;
300 }
301
/*
 * Append, on the receiving side, an object backed by @shm_fd and
 * @wakeup_fd for stream @stream_nr.  The object takes ownership of
 * @shm_fd (shm_fd_ownership = 1); @wakeup_fd becomes the write end of
 * its wait_fd pair.
 * Returns the initialized object, or NULL on error.
 */
struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
		int shm_fd, int wakeup_fd, uint32_t stream_nr,
		size_t memory_map_size)
{
	struct shm_object *obj;
	char *memory_map;
	int ret;

	if (table->allocated_len >= table->size)
		return NULL;
	/* streams _must_ be received in sequential order, else fail. */
	/*
	 * NOTE(review): stream_nr + 1 == allocated_len implies slot 0 holds
	 * a non-stream object (presumably the channel) — verify against the
	 * caller.
	 */
	if (stream_nr + 1 != table->allocated_len)
		return NULL;

	obj = &table->objects[table->allocated_len];

	/* wait_fd: set write end of the pipe. */
	obj->wait_fd[0] = -1; /* read end is unset */
	obj->wait_fd[1] = wakeup_fd;
	obj->shm_fd = shm_fd;
	obj->shm_fd_ownership = 1;

	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(obj->wait_fd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}

	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			  MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
	if (memory_map == MAP_FAILED) {
		PERROR("mmap");
		goto error_mmap;
	}
	obj->type = SHM_OBJECT_SHM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = memory_map_size;	/* fully allocated by the sender */
	obj->index = table->allocated_len++;

	return obj;

error_fcntl:
error_mmap:
	/*
	 * NOTE(review): @shm_fd and @wakeup_fd are not closed on failure
	 * even though ownership was recorded above — presumably the caller
	 * cleans them up when NULL is returned; confirm.
	 */
	return NULL;
}
350
/*
 * Passing ownership of mem to object.
 *
 * Append, on the receiving side, an object backed by the memory area
 * @mem, with @wakeup_fd as the write end of its wait_fd pair.  On
 * success, @mem is later freed by shmp_object_destroy().
 * Returns NULL on error.  NOTE(review): on failure @mem is not freed
 * here — presumably the caller retains ownership then; confirm.
 */
struct shm_object *shm_object_table_append_mem(struct shm_object_table *table,
		void *mem, size_t memory_map_size, int wakeup_fd)
{
	struct shm_object *obj;
	int ret;

	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	obj->wait_fd[0] = -1; /* read end is unset */
	obj->wait_fd[1] = wakeup_fd;
	obj->shm_fd = -1;	/* memory-backed: no shm fd */
	obj->shm_fd_ownership = 0;

	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(obj->wait_fd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}

	obj->type = SHM_OBJECT_MEM;
	obj->memory_map = mem;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = memory_map_size;	/* fully allocated */
	obj->index = table->allocated_len++;

	return obj;

error_fcntl:
	return NULL;
}
387
388 static
389 void shmp_object_destroy(struct shm_object *obj, int consumer)
390 {
391 switch (obj->type) {
392 case SHM_OBJECT_SHM:
393 {
394 int ret, i;
395
396 ret = munmap(obj->memory_map, obj->memory_map_size);
397 if (ret) {
398 PERROR("umnmap");
399 assert(0);
400 }
401
402 if (obj->shm_fd_ownership) {
403 /* Delete FDs only if called from app (not consumer). */
404 if (!consumer) {
405 lttng_ust_lock_fd_tracker();
406 ret = close(obj->shm_fd);
407 if (!ret) {
408 lttng_ust_delete_fd_from_tracker(obj->shm_fd);
409 } else {
410 PERROR("close");
411 assert(0);
412 }
413 lttng_ust_unlock_fd_tracker();
414 } else {
415 ret = close(obj->shm_fd);
416 if (ret) {
417 PERROR("close");
418 assert(0);
419 }
420 }
421 }
422 for (i = 0; i < 2; i++) {
423 if (obj->wait_fd[i] < 0)
424 continue;
425 if (!consumer) {
426 lttng_ust_lock_fd_tracker();
427 ret = close(obj->wait_fd[i]);
428 if (!ret) {
429 lttng_ust_delete_fd_from_tracker(obj->wait_fd[i]);
430 } else {
431 PERROR("close");
432 assert(0);
433 }
434 lttng_ust_unlock_fd_tracker();
435 } else {
436 ret = close(obj->wait_fd[i]);
437 if (ret) {
438 PERROR("close");
439 assert(0);
440 }
441 }
442 }
443 break;
444 }
445 case SHM_OBJECT_MEM:
446 {
447 int ret, i;
448
449 for (i = 0; i < 2; i++) {
450 if (obj->wait_fd[i] < 0)
451 continue;
452 if (!consumer) {
453 lttng_ust_lock_fd_tracker();
454 ret = close(obj->wait_fd[i]);
455 if (!ret) {
456 lttng_ust_delete_fd_from_tracker(obj->wait_fd[i]);
457 } else {
458 PERROR("close");
459 assert(0);
460 }
461 lttng_ust_unlock_fd_tracker();
462 } else {
463 ret = close(obj->wait_fd[i]);
464 if (ret) {
465 PERROR("close");
466 assert(0);
467 }
468 }
469 }
470 free(obj->memory_map);
471 break;
472 }
473 default:
474 assert(0);
475 }
476 }
477
478 void shm_object_table_destroy(struct shm_object_table *table, int consumer)
479 {
480 int i;
481
482 for (i = 0; i < table->allocated_len; i++)
483 shmp_object_destroy(&table->objects[i], consumer);
484 free(table);
485 }
486
487 /*
488 * zalloc_shm - allocate memory within a shm object.
489 *
490 * Shared memory is already zeroed by shmget.
491 * *NOT* multithread-safe (should be protected by mutex).
492 * Returns a -1, -1 tuple on error.
493 */
494 struct shm_ref zalloc_shm(struct shm_object *obj, size_t len)
495 {
496 struct shm_ref ref;
497 struct shm_ref shm_ref_error = { -1, -1 };
498
499 if (obj->memory_map_size - obj->allocated_len < len)
500 return shm_ref_error;
501 ref.index = obj->index;
502 ref.offset = obj->allocated_len;
503 obj->allocated_len += len;
504 return ref;
505 }
506
507 void align_shm(struct shm_object *obj, size_t align)
508 {
509 size_t offset_len = lttng_ust_offset_align(obj->allocated_len, align);
510 obj->allocated_len += offset_len;
511 }
This page took 0.038602 seconds and 3 git commands to generate.