Rename "tsc" to "timestamp"
[lttng-ust.git] / src / common / ringbuffer / shm.c
1 /*
2 * SPDX-License-Identifier: LGPL-2.1-only
3 *
4 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 */
6
7 #define _LGPL_SOURCE
8 #include "shm.h"
9 #include <unistd.h>
10 #include <fcntl.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 #include <sys/stat.h> /* For mode constants */
14 #include <fcntl.h> /* For O_* constants */
15 #include <assert.h>
16 #include <stdio.h>
17 #include <signal.h>
18 #include <dirent.h>
19 #include <limits.h>
20 #include <stdbool.h>
21 #include <stdint.h>
22
23 #ifdef HAVE_LIBNUMA
24 #include <numa.h>
25 #include <numaif.h>
26 #endif
27
28 #include <lttng/ust-utils.h>
29
30 #include "common/macros.h"
31 #include "common/ust-fd.h"
32 #include "common/compat/mmap.h"
33
/*
 * Ensure we have the required amount of space available by writing 0
 * into the entire buffer. Not doing so can trigger SIGBUS when going
 * beyond the available shm space.
 *
 * Writes `len` zero bytes to `fd` starting at the current file offset,
 * one page at a time, retrying short/interrupted writes.
 * Returns 0 on success, a negative value on error (-ENOMEM on
 * allocation failure, -1 on sysconf/write failure — check errno).
 */
static
int zero_file(int fd, size_t len)
{
	ssize_t retlen;
	size_t written = 0;
	char *zeropage;
	long pagelen;
	int ret;

	pagelen = sysconf(_SC_PAGESIZE);
	if (pagelen < 0)
		return (int) pagelen;
	/* One zeroed page reused as the write source for every chunk. */
	zeropage = calloc(pagelen, 1);
	if (!zeropage)
		return -ENOMEM;

	while (len > written) {
		/* Write at most one page, clamped to the remaining length. */
		size_t count = len - written;

		if (count > (size_t) pagelen)
			count = (size_t) pagelen;
		do {
			retlen = write(fd, zeropage, count);
			/*
			 * Signed comparison: write() returns -1 (ssize_t) on
			 * error; the old `retlen == -1UL` relied on implicit
			 * signed-to-unsigned conversion.
			 */
		} while (retlen == -1 && errno == EINTR);
		if (retlen < 0) {
			ret = (int) retlen;
			goto error;
		}
		/* Partial writes are valid; account and continue. */
		written += retlen;
	}
	ret = 0;
error:
	free(zeropage);
	return ret;
}
71
72 struct shm_object_table *shm_object_table_create(size_t max_nb_obj, bool populate)
73 {
74 struct shm_object_table *table;
75
76 table = zmalloc_populate(sizeof(struct shm_object_table) +
77 max_nb_obj * sizeof(table->objects[0]), populate);
78 if (!table)
79 return NULL;
80 table->size = max_nb_obj;
81 return table;
82 }
83
/*
 * Allocate the next free slot of @table as a shm-backed object of
 * @memory_map_size bytes on @stream_fd, sizing and pre-zeroing the fd
 * before mapping it, and creating a wakeup pipe for the object.
 * Returns the object, or NULL on error (bad fd, table full, or any
 * pipe/fcntl/ftruncate/zero_file/fsync/mmap failure).
 * The object does NOT take ownership of @stream_fd (shm_fd_ownership
 * is left 0), so the fd is never closed on the error paths below.
 */
static
struct shm_object *_shm_object_table_alloc_shm(struct shm_object_table *table,
			size_t memory_map_size,
			int stream_fd,
			bool populate)
{
	int shmfd, waitfd[2], ret, i;
	int flags = MAP_SHARED;
	struct shm_object *obj;
	char *memory_map;

	if (stream_fd < 0)
		return NULL;
	if (table->allocated_len >= table->size)
		return NULL;
	/* Next free slot; allocated_len is only bumped on full success. */
	obj = &table->objects[table->allocated_len];

	/* wait_fd: create pipe */
	ret = pipe2(waitfd, O_CLOEXEC);
	if (ret < 0) {
		PERROR("pipe");
		goto error_pipe;
	}
	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(waitfd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}
	memcpy(obj->wait_fd, waitfd, sizeof(waitfd));

	/*
	 * Set POSIX shared memory object size
	 *
	 * First, use ftruncate() to set its size, some implementations won't
	 * allow writes past the size set by ftruncate.
	 * Then, use write() to fill it with zeros, this allows us to fully
	 * allocate it and detect a shortage of shm space without dealing with
	 * a SIGBUS.
	 */

	shmfd = stream_fd;
	ret = ftruncate(shmfd, memory_map_size);
	if (ret) {
		PERROR("ftruncate");
		goto error_ftruncate;
	}
	ret = zero_file(shmfd, memory_map_size);
	if (ret) {
		PERROR("zero_file");
		goto error_zero_file;
	}

	/*
	 * Also ensure the file metadata is synced with the storage by using
	 * fsync(2). Some platforms don't allow fsync on POSIX shm fds, ignore
	 * EINVAL accordingly.
	 */
	ret = fsync(shmfd);
	if (ret && errno != EINVAL) {
		PERROR("fsync");
		goto error_fsync;
	}
	/* Caller keeps ownership of stream_fd; destroy won't close it. */
	obj->shm_fd_ownership = 0;
	obj->shm_fd = shmfd;

	if (populate)
		flags |= LTTNG_MAP_POPULATE;	/* Pre-fault pages at map time. */
	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			flags, shmfd, 0);
	if (memory_map == MAP_FAILED) {
		PERROR("mmap");
		goto error_mmap;
	}
	obj->type = SHM_OBJECT_SHM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = 0;	/* Sub-allocations start at offset 0. */
	obj->index = table->allocated_len++;

	return obj;

error_mmap:
error_fsync:
error_ftruncate:
error_zero_file:
error_fcntl:
	/* Tear down both pipe ends; stream_fd stays open for the caller. */
	for (i = 0; i < 2; i++) {
		ret = close(waitfd[i]);
		if (ret) {
			PERROR("close");
			assert(0);
		}
	}
error_pipe:
	return NULL;
}
182
/*
 * Allocate the next free slot of @table as a plain-memory object of
 * @memory_map_size bytes (no shm fd), with a wakeup pipe.
 * Returns the object, or NULL on error (table full, or allocation/
 * pipe/fcntl failure).
 */
static
struct shm_object *_shm_object_table_alloc_mem(struct shm_object_table *table,
			size_t memory_map_size, bool populate)
{
	struct shm_object *obj;
	void *memory_map;
	int waitfd[2], i, ret;

	if (table->allocated_len >= table->size)
		return NULL;
	/* Next free slot; allocated_len is only bumped on full success. */
	obj = &table->objects[table->allocated_len];

	/* Zeroed (optionally pre-faulted) heap backing memory. */
	memory_map = zmalloc_populate(memory_map_size, populate);
	if (!memory_map)
		goto alloc_error;

	/* wait_fd: create pipe */
	ret = pipe2(waitfd, O_CLOEXEC);
	if (ret < 0) {
		PERROR("pipe");
		goto error_pipe;
	}
	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(waitfd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}
	memcpy(obj->wait_fd, waitfd, sizeof(waitfd));

	/* no shm_fd */
	obj->shm_fd = -1;
	obj->shm_fd_ownership = 0;

	obj->type = SHM_OBJECT_MEM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = 0;	/* Sub-allocations start at offset 0. */
	obj->index = table->allocated_len++;

	return obj;

error_fcntl:
	for (i = 0; i < 2; i++) {
		ret = close(waitfd[i]);
		if (ret) {
			PERROR("close");
			assert(0);
		}
	}
error_pipe:
	free(memory_map);
alloc_error:
	return NULL;
}
238
/*
 * libnuma prints errors on the console even for numa_available().
 * Work-around this limitation by using get_mempolicy() directly to
 * check whether the kernel supports mempolicy.
 */
#ifdef HAVE_LIBNUMA
static bool lttng_is_numa_available(void)
{
	int ret;

	/* Quiet kernel-support probe; ENOSYS means no mempolicy syscall. */
	ret = get_mempolicy(NULL, NULL, 0, NULL, 0);
	if (ret && errno == ENOSYS) {
		return false;
	}
	/*
	 * Per numa(3), numa_available() returns -1 when the NUMA API is
	 * unusable and 0 on success. The previous "numa_available() > 0"
	 * check could therefore never be true, which silently disabled
	 * NUMA-aware placement even on NUMA systems.
	 */
	return numa_available() != -1;
}
#endif
256
/*
 * Allocate a new shm object in @table, either shm-backed
 * (SHM_OBJECT_SHM, using @stream_fd) or heap-backed (SHM_OBJECT_MEM).
 * When libnuma is usable, the allocation temporarily prefers the NUMA
 * node of @cpu (or local allocation when @cpu/node is unknown), and
 * the previous preference is restored before returning.
 * Returns NULL on allocation failure.
 */
#ifdef HAVE_LIBNUMA
struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
		size_t memory_map_size,
		enum shm_object_type type,
		int stream_fd,
		int cpu,
		bool populate)
#else
struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
		size_t memory_map_size,
		enum shm_object_type type,
		int stream_fd,
		int cpu __attribute__((unused)),
		bool populate)
#endif
{
	struct shm_object *shm_object;
#ifdef HAVE_LIBNUMA
	int oldnode = 0, node;
	bool numa_avail;

	numa_avail = lttng_is_numa_available();
	if (numa_avail) {
		/* Save current preference so it can be restored below. */
		oldnode = numa_preferred();
		if (cpu >= 0) {
			node = numa_node_of_cpu(cpu);
			if (node >= 0)
				numa_set_preferred(node);
		}
		/*
		 * Short-circuit evaluation: `node` is only read when
		 * cpu >= 0, i.e. after it has been assigned above.
		 */
		if (cpu < 0 || node < 0)
			numa_set_localalloc();
	}
#endif /* HAVE_LIBNUMA */
	switch (type) {
	case SHM_OBJECT_SHM:
		shm_object = _shm_object_table_alloc_shm(table, memory_map_size,
				stream_fd, populate);
		break;
	case SHM_OBJECT_MEM:
		shm_object = _shm_object_table_alloc_mem(table, memory_map_size,
				populate);
		break;
	default:
		/*
		 * NOTE(review): with NDEBUG defined this assert is a no-op
		 * and shm_object would be returned uninitialized.
		 */
		assert(0);
	}
#ifdef HAVE_LIBNUMA
	if (numa_avail)
		numa_set_preferred(oldnode);
#endif /* HAVE_LIBNUMA */
	return shm_object;
}
308
/*
 * Append, on the receiving side, a shm object wrapping @shm_fd and
 * @wakeup_fd (write end of the wakeup pipe) for stream @stream_nr.
 * The object takes ownership of @shm_fd (closed on destroy).
 * Returns the object, or NULL on error (table full, out-of-order
 * stream, fcntl or mmap failure).
 */
struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
		int shm_fd, int wakeup_fd, uint32_t stream_nr,
		size_t memory_map_size, bool populate)
{
	int flags = MAP_SHARED;
	struct shm_object *obj;
	char *memory_map;
	int ret;

	if (table->allocated_len >= table->size)
		return NULL;
	/* streams _must_ be received in sequential order, else fail. */
	/* stream_nr 0 maps to slot 1: slot 0 was filled before streams. */
	if (stream_nr + 1 != table->allocated_len)
		return NULL;

	obj = &table->objects[table->allocated_len];

	/* wait_fd: set write end of the pipe. */
	obj->wait_fd[0] = -1; /* read end is unset */
	obj->wait_fd[1] = wakeup_fd;
	obj->shm_fd = shm_fd;
	obj->shm_fd_ownership = 1;	/* Destroy will close shm_fd. */

	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(obj->wait_fd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}

	if (populate)
		flags |= LTTNG_MAP_POPULATE;	/* Pre-fault pages at map time. */
	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			flags, shm_fd, 0);
	if (memory_map == MAP_FAILED) {
		PERROR("mmap");
		goto error_mmap;
	}
	obj->type = SHM_OBJECT_SHM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	/* Fully allocated already by the producing side. */
	obj->allocated_len = memory_map_size;
	obj->index = table->allocated_len++;

	return obj;

error_fcntl:
error_mmap:
	/*
	 * NOTE(review): shm_fd and wakeup_fd are left open here;
	 * presumably the caller keeps ownership on failure — confirm
	 * against the callers of this function.
	 */
	return NULL;
}
360
361 /*
362 * Passing ownership of mem to object.
363 */
364 struct shm_object *shm_object_table_append_mem(struct shm_object_table *table,
365 void *mem, size_t memory_map_size, int wakeup_fd)
366 {
367 struct shm_object *obj;
368 int ret;
369
370 if (table->allocated_len >= table->size)
371 return NULL;
372 obj = &table->objects[table->allocated_len];
373
374 obj->wait_fd[0] = -1; /* read end is unset */
375 obj->wait_fd[1] = wakeup_fd;
376 obj->shm_fd = -1;
377 obj->shm_fd_ownership = 0;
378
379 /* The write end of the pipe needs to be non-blocking */
380 ret = fcntl(obj->wait_fd[1], F_SETFL, O_NONBLOCK);
381 if (ret < 0) {
382 PERROR("fcntl");
383 goto error_fcntl;
384 }
385
386 obj->type = SHM_OBJECT_MEM;
387 obj->memory_map = mem;
388 obj->memory_map_size = memory_map_size;
389 obj->allocated_len = memory_map_size;
390 obj->index = table->allocated_len++;
391
392 return obj;
393
394 error_fcntl:
395 return NULL;
396 }
397
398 static
399 void shmp_object_destroy(struct shm_object *obj, int consumer)
400 {
401 switch (obj->type) {
402 case SHM_OBJECT_SHM:
403 {
404 int ret, i;
405
406 ret = munmap(obj->memory_map, obj->memory_map_size);
407 if (ret) {
408 PERROR("umnmap");
409 assert(0);
410 }
411
412 if (obj->shm_fd_ownership) {
413 /* Delete FDs only if called from app (not consumer). */
414 if (!consumer) {
415 lttng_ust_lock_fd_tracker();
416 ret = close(obj->shm_fd);
417 if (!ret) {
418 lttng_ust_delete_fd_from_tracker(obj->shm_fd);
419 } else {
420 PERROR("close");
421 assert(0);
422 }
423 lttng_ust_unlock_fd_tracker();
424 } else {
425 ret = close(obj->shm_fd);
426 if (ret) {
427 PERROR("close");
428 assert(0);
429 }
430 }
431 }
432 for (i = 0; i < 2; i++) {
433 if (obj->wait_fd[i] < 0)
434 continue;
435 if (!consumer) {
436 lttng_ust_lock_fd_tracker();
437 ret = close(obj->wait_fd[i]);
438 if (!ret) {
439 lttng_ust_delete_fd_from_tracker(obj->wait_fd[i]);
440 } else {
441 PERROR("close");
442 assert(0);
443 }
444 lttng_ust_unlock_fd_tracker();
445 } else {
446 ret = close(obj->wait_fd[i]);
447 if (ret) {
448 PERROR("close");
449 assert(0);
450 }
451 }
452 }
453 break;
454 }
455 case SHM_OBJECT_MEM:
456 {
457 int ret, i;
458
459 for (i = 0; i < 2; i++) {
460 if (obj->wait_fd[i] < 0)
461 continue;
462 if (!consumer) {
463 lttng_ust_lock_fd_tracker();
464 ret = close(obj->wait_fd[i]);
465 if (!ret) {
466 lttng_ust_delete_fd_from_tracker(obj->wait_fd[i]);
467 } else {
468 PERROR("close");
469 assert(0);
470 }
471 lttng_ust_unlock_fd_tracker();
472 } else {
473 ret = close(obj->wait_fd[i]);
474 if (ret) {
475 PERROR("close");
476 assert(0);
477 }
478 }
479 }
480 free(obj->memory_map);
481 break;
482 }
483 default:
484 assert(0);
485 }
486 }
487
488 void shm_object_table_destroy(struct shm_object_table *table, int consumer)
489 {
490 int i;
491
492 for (i = 0; i < table->allocated_len; i++)
493 shmp_object_destroy(&table->objects[i], consumer);
494 free(table);
495 }
496
497 /*
498 * zalloc_shm - allocate memory within a shm object.
499 *
500 * Shared memory is already zeroed by shmget.
501 * *NOT* multithread-safe (should be protected by mutex).
502 * Returns a -1, -1 tuple on error.
503 */
504 struct shm_ref zalloc_shm(struct shm_object *obj, size_t len)
505 {
506 struct shm_ref ref;
507 struct shm_ref shm_ref_error = { -1, -1 };
508
509 if (obj->memory_map_size - obj->allocated_len < len)
510 return shm_ref_error;
511 ref.index = obj->index;
512 ref.offset = obj->allocated_len;
513 obj->allocated_len += len;
514 return ref;
515 }
516
517 void align_shm(struct shm_object *obj, size_t align)
518 {
519 size_t offset_len = lttng_ust_offset_align(obj->allocated_len, align);
520 obj->allocated_len += offset_len;
521 }
This page took 0.044086 seconds and 4 git commands to generate.