Cleanup: apply `include-what-you-use` guideline for `uint*_t`
[lttng-ust.git] / libringbuffer / shm.c
1 /*
2 * libringbuffer/shm.c
3 *
4 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; only
9 * version 2.1 of the License.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 #define _LGPL_SOURCE
22 #include <config.h>
23 #include "shm.h"
24 #include <unistd.h>
25 #include <fcntl.h>
26 #include <sys/mman.h>
27 #include <sys/types.h>
28 #include <sys/stat.h> /* For mode constants */
29 #include <fcntl.h> /* For O_* constants */
30 #include <assert.h>
31 #include <stdio.h>
32 #include <signal.h>
33 #include <dirent.h>
34 #include <lttng/align.h>
35 #include <limits.h>
36 #include <stdbool.h>
37 #include <stdint.h>
38 #ifdef HAVE_LIBNUMA
39 #include <numa.h>
40 #include <numaif.h>
41 #endif
42 #include <helper.h>
43 #include <ust-fd.h>
44 #include "mmap.h"
45
46 /*
47 * Ensure we have the required amount of space available by writing 0
48 * into the entire buffer. Not doing so can trigger SIGBUS when going
49 * beyond the available shm space.
50 */
51 static
52 int zero_file(int fd, size_t len)
53 {
54 ssize_t retlen;
55 size_t written = 0;
56 char *zeropage;
57 long pagelen;
58 int ret;
59
60 pagelen = sysconf(_SC_PAGESIZE);
61 if (pagelen < 0)
62 return (int) pagelen;
63 zeropage = calloc(pagelen, 1);
64 if (!zeropage)
65 return -ENOMEM;
66
67 while (len > written) {
68 do {
69 retlen = write(fd, zeropage,
70 min_t(size_t, pagelen, len - written));
71 } while (retlen == -1UL && errno == EINTR);
72 if (retlen < 0) {
73 ret = (int) retlen;
74 goto error;
75 }
76 written += retlen;
77 }
78 ret = 0;
79 error:
80 free(zeropage);
81 return ret;
82 }
83
84 struct shm_object_table *shm_object_table_create(size_t max_nb_obj)
85 {
86 struct shm_object_table *table;
87
88 table = zmalloc(sizeof(struct shm_object_table) +
89 max_nb_obj * sizeof(table->objects[0]));
90 if (!table)
91 return NULL;
92 table->size = max_nb_obj;
93 return table;
94 }
95
/*
 * Allocate one shm object backed by the already-open stream_fd and
 * append it to table. The backing file is zero-filled, truncated to
 * memory_map_size, fsync'ed, then mmap'ed shared. A pipe pair is
 * created as the object's wakeup mechanism (both ends close-on-exec,
 * write end non-blocking). Returns a pointer into the table on
 * success, NULL on error. The object does NOT take ownership of
 * stream_fd (shm_fd_ownership = 0): the caller remains responsible
 * for closing it.
 */
static
struct shm_object *_shm_object_table_alloc_shm(struct shm_object_table *table,
			size_t memory_map_size,
			int stream_fd)
{
	int shmfd, waitfd[2], ret, i;
	struct shm_object *obj;
	char *memory_map;

	if (stream_fd < 0)
		return NULL;
	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	/* wait_fd: create pipe */
	ret = pipe(waitfd);
	if (ret < 0) {
		PERROR("pipe");
		goto error_pipe;
	}
	for (i = 0; i < 2; i++) {
		ret = fcntl(waitfd[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl");
			goto error_fcntl;
		}
	}
	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(waitfd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}
	memcpy(obj->wait_fd, waitfd, sizeof(waitfd));

	/* create shm */

	shmfd = stream_fd;
	/*
	 * Pre-fault the entire range by writing zeros: touching pages
	 * beyond the space actually available in the shm backing store
	 * would raise SIGBUS later, so detect the shortage now.
	 */
	ret = zero_file(shmfd, memory_map_size);
	if (ret) {
		PERROR("zero_file");
		goto error_zero_file;
	}
	ret = ftruncate(shmfd, memory_map_size);
	if (ret) {
		PERROR("ftruncate");
		goto error_ftruncate;
	}
	/*
	 * Also ensure the file metadata is synced with the storage by using
	 * fsync(2).
	 */
	ret = fsync(shmfd);
	if (ret) {
		PERROR("fsync");
		goto error_fsync;
	}
	obj->shm_fd_ownership = 0;
	obj->shm_fd = shmfd;

	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0);
	if (memory_map == MAP_FAILED) {
		PERROR("mmap");
		goto error_mmap;
	}
	obj->type = SHM_OBJECT_SHM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = 0;
	obj->index = table->allocated_len++;

	return obj;

error_mmap:
error_fsync:
error_ftruncate:
error_zero_file:
error_fcntl:
	/* Tear down both pipe ends; stream_fd is left for the caller. */
	for (i = 0; i < 2; i++) {
		ret = close(waitfd[i]);
		if (ret) {
			PERROR("close");
			assert(0);
		}
	}
error_pipe:
	return NULL;
}
187
/*
 * Allocate one heap-backed shm object and append it to table. Used
 * when no shm file descriptor is involved (shm_fd is set to -1). A
 * pipe pair is created as the object's wakeup mechanism (both ends
 * close-on-exec, write end non-blocking). Returns a pointer into the
 * table on success, NULL on error.
 */
static
struct shm_object *_shm_object_table_alloc_mem(struct shm_object_table *table,
			size_t memory_map_size)
{
	struct shm_object *obj;
	void *memory_map;
	int waitfd[2], i, ret;

	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	/* Backing memory: zero-initialized heap buffer owned by the object. */
	memory_map = zmalloc(memory_map_size);
	if (!memory_map)
		goto alloc_error;

	/* wait_fd: create pipe */
	ret = pipe(waitfd);
	if (ret < 0) {
		PERROR("pipe");
		goto error_pipe;
	}
	for (i = 0; i < 2; i++) {
		ret = fcntl(waitfd[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl");
			goto error_fcntl;
		}
	}
	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(waitfd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}
	memcpy(obj->wait_fd, waitfd, sizeof(waitfd));

	/* no shm_fd */
	obj->shm_fd = -1;
	obj->shm_fd_ownership = 0;

	obj->type = SHM_OBJECT_MEM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = 0;
	obj->index = table->allocated_len++;

	return obj;

error_fcntl:
	/* Tear down both pipe ends created above. */
	for (i = 0; i < 2; i++) {
		ret = close(waitfd[i]);
		if (ret) {
			PERROR("close");
			assert(0);
		}
	}
error_pipe:
	free(memory_map);
alloc_error:
	return NULL;
}
250
251 /*
252 * libnuma prints errors on the console even for numa_available().
253 * Work-around this limitation by using get_mempolicy() directly to
254 * check whether the kernel supports mempolicy.
255 */
256 #ifdef HAVE_LIBNUMA
257 static bool lttng_is_numa_available(void)
258 {
259 int ret;
260
261 ret = get_mempolicy(NULL, NULL, 0, NULL, 0);
262 if (ret && errno == ENOSYS) {
263 return false;
264 }
265 return numa_available() > 0;
266 }
267 #endif
268
269 struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
270 size_t memory_map_size,
271 enum shm_object_type type,
272 int stream_fd,
273 int cpu)
274 {
275 struct shm_object *shm_object;
276 #ifdef HAVE_LIBNUMA
277 int oldnode = 0, node;
278 bool numa_avail;
279
280 numa_avail = lttng_is_numa_available();
281 if (numa_avail) {
282 oldnode = numa_preferred();
283 if (cpu >= 0) {
284 node = numa_node_of_cpu(cpu);
285 if (node >= 0)
286 numa_set_preferred(node);
287 }
288 if (cpu < 0 || node < 0)
289 numa_set_localalloc();
290 }
291 #endif /* HAVE_LIBNUMA */
292 switch (type) {
293 case SHM_OBJECT_SHM:
294 shm_object = _shm_object_table_alloc_shm(table, memory_map_size,
295 stream_fd);
296 break;
297 case SHM_OBJECT_MEM:
298 shm_object = _shm_object_table_alloc_mem(table, memory_map_size);
299 break;
300 default:
301 assert(0);
302 }
303 #ifdef HAVE_LIBNUMA
304 if (numa_avail)
305 numa_set_preferred(oldnode);
306 #endif /* HAVE_LIBNUMA */
307 return shm_object;
308 }
309
/*
 * Map an shm object received from another process into table. Takes
 * ownership of shm_fd (shm_fd_ownership = 1) and installs wakeup_fd as
 * the write end of the object's wakeup pipe; the read end stays unset
 * (-1). Returns a pointer into the table on success, NULL on error.
 */
struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
		int shm_fd, int wakeup_fd, uint32_t stream_nr,
		size_t memory_map_size)
{
	struct shm_object *obj;
	char *memory_map;
	int ret;

	if (table->allocated_len >= table->size)
		return NULL;
	/*
	 * streams _must_ be received in sequential order, else fail.
	 * NOTE(review): the +1 offset suggests slot 0 holds a non-stream
	 * (channel) object so stream N lands in slot N + 1 — confirm
	 * against the receive path.
	 */
	if (stream_nr + 1 != table->allocated_len)
		return NULL;

	obj = &table->objects[table->allocated_len];

	/* wait_fd: set write end of the pipe. */
	obj->wait_fd[0] = -1;	/* read end is unset */
	obj->wait_fd[1] = wakeup_fd;
	obj->shm_fd = shm_fd;
	obj->shm_fd_ownership = 1;

	ret = fcntl(obj->wait_fd[1], F_SETFD, FD_CLOEXEC);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}
	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(obj->wait_fd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}

	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
	if (memory_map == MAP_FAILED) {
		PERROR("mmap");
		goto error_mmap;
	}
	obj->type = SHM_OBJECT_SHM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	/* Object arrives fully allocated: no further zalloc_shm() on it. */
	obj->allocated_len = memory_map_size;
	obj->index = table->allocated_len++;

	return obj;

error_fcntl:
error_mmap:
	/*
	 * NOTE(review): shm_fd and wakeup_fd are not closed on these error
	 * paths even though ownership was recorded above — presumably the
	 * caller cleans up on NULL return; verify.
	 */
	return NULL;
}
363
364 /*
365 * Passing ownership of mem to object.
366 */
367 struct shm_object *shm_object_table_append_mem(struct shm_object_table *table,
368 void *mem, size_t memory_map_size, int wakeup_fd)
369 {
370 struct shm_object *obj;
371 int ret;
372
373 if (table->allocated_len >= table->size)
374 return NULL;
375 obj = &table->objects[table->allocated_len];
376
377 obj->wait_fd[0] = -1; /* read end is unset */
378 obj->wait_fd[1] = wakeup_fd;
379 obj->shm_fd = -1;
380 obj->shm_fd_ownership = 0;
381
382 ret = fcntl(obj->wait_fd[1], F_SETFD, FD_CLOEXEC);
383 if (ret < 0) {
384 PERROR("fcntl");
385 goto error_fcntl;
386 }
387 /* The write end of the pipe needs to be non-blocking */
388 ret = fcntl(obj->wait_fd[1], F_SETFL, O_NONBLOCK);
389 if (ret < 0) {
390 PERROR("fcntl");
391 goto error_fcntl;
392 }
393
394 obj->type = SHM_OBJECT_MEM;
395 obj->memory_map = mem;
396 obj->memory_map_size = memory_map_size;
397 obj->allocated_len = memory_map_size;
398 obj->index = table->allocated_len++;
399
400 return obj;
401
402 error_fcntl:
403 return NULL;
404 }
405
406 static
407 void shmp_object_destroy(struct shm_object *obj, int consumer)
408 {
409 switch (obj->type) {
410 case SHM_OBJECT_SHM:
411 {
412 int ret, i;
413
414 ret = munmap(obj->memory_map, obj->memory_map_size);
415 if (ret) {
416 PERROR("umnmap");
417 assert(0);
418 }
419
420 if (obj->shm_fd_ownership) {
421 /* Delete FDs only if called from app (not consumer). */
422 if (!consumer) {
423 lttng_ust_lock_fd_tracker();
424 ret = close(obj->shm_fd);
425 if (!ret) {
426 lttng_ust_delete_fd_from_tracker(obj->shm_fd);
427 } else {
428 PERROR("close");
429 assert(0);
430 }
431 lttng_ust_unlock_fd_tracker();
432 } else {
433 ret = close(obj->shm_fd);
434 if (ret) {
435 PERROR("close");
436 assert(0);
437 }
438 }
439 }
440 for (i = 0; i < 2; i++) {
441 if (obj->wait_fd[i] < 0)
442 continue;
443 if (!consumer) {
444 lttng_ust_lock_fd_tracker();
445 ret = close(obj->wait_fd[i]);
446 if (!ret) {
447 lttng_ust_delete_fd_from_tracker(obj->wait_fd[i]);
448 } else {
449 PERROR("close");
450 assert(0);
451 }
452 lttng_ust_unlock_fd_tracker();
453 } else {
454 ret = close(obj->wait_fd[i]);
455 if (ret) {
456 PERROR("close");
457 assert(0);
458 }
459 }
460 }
461 break;
462 }
463 case SHM_OBJECT_MEM:
464 {
465 int ret, i;
466
467 for (i = 0; i < 2; i++) {
468 if (obj->wait_fd[i] < 0)
469 continue;
470 if (!consumer) {
471 lttng_ust_lock_fd_tracker();
472 ret = close(obj->wait_fd[i]);
473 if (!ret) {
474 lttng_ust_delete_fd_from_tracker(obj->wait_fd[i]);
475 } else {
476 PERROR("close");
477 assert(0);
478 }
479 lttng_ust_unlock_fd_tracker();
480 } else {
481 ret = close(obj->wait_fd[i]);
482 if (ret) {
483 PERROR("close");
484 assert(0);
485 }
486 }
487 }
488 free(obj->memory_map);
489 break;
490 }
491 default:
492 assert(0);
493 }
494 }
495
496 void shm_object_table_destroy(struct shm_object_table *table, int consumer)
497 {
498 int i;
499
500 for (i = 0; i < table->allocated_len; i++)
501 shmp_object_destroy(&table->objects[i], consumer);
502 free(table);
503 }
504
505 /*
506 * zalloc_shm - allocate memory within a shm object.
507 *
508 * Shared memory is already zeroed by shmget.
509 * *NOT* multithread-safe (should be protected by mutex).
510 * Returns a -1, -1 tuple on error.
511 */
512 struct shm_ref zalloc_shm(struct shm_object *obj, size_t len)
513 {
514 struct shm_ref ref;
515 struct shm_ref shm_ref_error = { -1, -1 };
516
517 if (obj->memory_map_size - obj->allocated_len < len)
518 return shm_ref_error;
519 ref.index = obj->index;
520 ref.offset = obj->allocated_len;
521 obj->allocated_len += len;
522 return ref;
523 }
524
525 void align_shm(struct shm_object *obj, size_t align)
526 {
527 size_t offset_len = offset_align(obj->allocated_len, align);
528 obj->allocated_len += offset_len;
529 }
This page took 0.038949 seconds and 4 git commands to generate.