Fix: set FD_CLOEXEC on incoming FDs.
[lttng-ust.git] / libringbuffer / shm.c
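
The change follows the usual POSIX idiom, also visible below for the pipe fds
and for the received wakeup_fd in shm_object_table_append_mem(): mark a file
descriptor close-on-exec with fcntl(2) as soon as it is created or received,
so it cannot leak into processes exec'd by the traced application. A minimal
standalone sketch of that idiom (the helper name is hypothetical, not part of
this file):

	#include <fcntl.h>
	#include <stdio.h>

	/* Mark an fd close-on-exec; returns 0 on success, -1 on error. */
	static int set_fd_cloexec(int fd)
	{
		if (fcntl(fd, F_SETFD, FD_CLOEXEC) < 0) {
			perror("fcntl F_SETFD");
			return -1;
		}
		return 0;
	}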
/*
 * libringbuffer/shm.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include "shm.h"
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>	/* For mode constants */
#include <fcntl.h>	/* For O_* constants */
#include <assert.h>
#include <stdio.h>
#include <signal.h>
#include <dirent.h>
#include <lttng/align.h>
#include <limits.h>
#include <helper.h>
#include <ust-fd.h>

/*
 * Ensure we have the required amount of space available by writing 0
 * into the entire buffer. Not doing so can trigger SIGBUS when going
 * beyond the available shm space.
 */
static
int zero_file(int fd, size_t len)
{
	ssize_t retlen;
	size_t written = 0;
	char *zeropage;
	long pagelen;
	int ret;

	pagelen = sysconf(_SC_PAGESIZE);
	if (pagelen < 0)
		return (int) pagelen;
	zeropage = calloc(pagelen, 1);
	if (!zeropage)
		return -ENOMEM;

	while (len > written) {
		do {
			retlen = write(fd, zeropage,
				min_t(size_t, pagelen, len - written));
		} while (retlen == -1UL && errno == EINTR);
		if (retlen < 0) {
			ret = (int) retlen;
			goto error;
		}
		written += retlen;
	}
	ret = 0;
error:
	free(zeropage);
	return ret;
}
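
/*
 * Usage sketch (hypothetical caller): prepare an fd so it can be safely
 * mmap()ed for "len" bytes, mirroring the ordering used by
 * _shm_object_table_alloc_shm() below:
 *
 *	if (zero_file(fd, len) || ftruncate(fd, len))
 *		return -1;
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */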

struct shm_object_table *shm_object_table_create(size_t max_nb_obj)
{
	struct shm_object_table *table;

	table = zmalloc(sizeof(struct shm_object_table) +
			max_nb_obj * sizeof(table->objects[0]));
	if (!table)
		return NULL;
	table->size = max_nb_obj;
	return table;
}

static
struct shm_object *_shm_object_table_alloc_shm(struct shm_object_table *table,
			size_t memory_map_size,
			int stream_fd)
{
	int shmfd, waitfd[2], ret, i;
	struct shm_object *obj;
	char *memory_map;

	if (stream_fd < 0)
		return NULL;
	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	/* wait_fd: create pipe */
	ret = pipe(waitfd);
	if (ret < 0) {
		PERROR("pipe");
		goto error_pipe;
	}
	for (i = 0; i < 2; i++) {
		ret = fcntl(waitfd[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl");
			goto error_fcntl;
		}
	}
	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(waitfd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}
	memcpy(obj->wait_fd, waitfd, sizeof(waitfd));

	/* create shm */

	shmfd = stream_fd;
	ret = zero_file(shmfd, memory_map_size);
	if (ret) {
		PERROR("zero_file");
		goto error_zero_file;
	}
	ret = ftruncate(shmfd, memory_map_size);
	if (ret) {
		PERROR("ftruncate");
		goto error_ftruncate;
	}
	/*
	 * Also ensure the file metadata is synced with the storage by using
	 * fsync(2).
	 */
	ret = fsync(shmfd);
	if (ret) {
		PERROR("fsync");
		goto error_fsync;
	}
	obj->shm_fd_ownership = 0;
	obj->shm_fd = shmfd;

	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			MAP_SHARED, shmfd, 0);
	if (memory_map == MAP_FAILED) {
		PERROR("mmap");
		goto error_mmap;
	}
	obj->type = SHM_OBJECT_SHM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = 0;
	obj->index = table->allocated_len++;

	return obj;

error_mmap:
error_fsync:
error_ftruncate:
error_zero_file:
error_fcntl:
	for (i = 0; i < 2; i++) {
		ret = close(waitfd[i]);
		if (ret) {
			PERROR("close");
			assert(0);
		}
	}
error_pipe:
	return NULL;
}

static
struct shm_object *_shm_object_table_alloc_mem(struct shm_object_table *table,
			size_t memory_map_size)
{
	struct shm_object *obj;
	void *memory_map;
	int waitfd[2], i, ret;

	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	memory_map = zmalloc(memory_map_size);
	if (!memory_map)
		goto alloc_error;

	/* wait_fd: create pipe */
	ret = pipe(waitfd);
	if (ret < 0) {
		PERROR("pipe");
		goto error_pipe;
	}
	for (i = 0; i < 2; i++) {
		ret = fcntl(waitfd[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl");
			goto error_fcntl;
		}
	}
	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(waitfd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}
	memcpy(obj->wait_fd, waitfd, sizeof(waitfd));

	/* no shm_fd */
	obj->shm_fd = -1;
	obj->shm_fd_ownership = 0;

	obj->type = SHM_OBJECT_MEM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = 0;
	obj->index = table->allocated_len++;

	return obj;

error_fcntl:
	for (i = 0; i < 2; i++) {
		ret = close(waitfd[i]);
		if (ret) {
			PERROR("close");
			assert(0);
		}
	}
error_pipe:
	free(memory_map);
alloc_error:
	return NULL;
}

struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
			size_t memory_map_size,
			enum shm_object_type type,
			int stream_fd)
{
	switch (type) {
	case SHM_OBJECT_SHM:
		return _shm_object_table_alloc_shm(table, memory_map_size,
				stream_fd);
	case SHM_OBJECT_MEM:
		return _shm_object_table_alloc_mem(table, memory_map_size);
	default:
		assert(0);
	}
	return NULL;
}

struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
			int shm_fd, int wakeup_fd, uint32_t stream_nr,
			size_t memory_map_size)
{
	struct shm_object *obj;
	char *memory_map;
	int ret;

	if (table->allocated_len >= table->size)
		return NULL;
	/* streams _must_ be received in sequential order, else fail. */
	if (stream_nr + 1 != table->allocated_len)
		return NULL;

	obj = &table->objects[table->allocated_len];

	/* wait_fd: set write end of the pipe. */
	obj->wait_fd[0] = -1;	/* read end is unset */
	obj->wait_fd[1] = wakeup_fd;
	obj->shm_fd = shm_fd;
	obj->shm_fd_ownership = 1;

	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(obj->wait_fd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}

	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			MAP_SHARED, shm_fd, 0);
	if (memory_map == MAP_FAILED) {
		PERROR("mmap");
		goto error_mmap;
	}
	obj->type = SHM_OBJECT_SHM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = memory_map_size;
	obj->index = table->allocated_len++;

	return obj;

error_fcntl:
error_mmap:
	return NULL;
}

/*
 * Passing ownership of mem to object.
 */
struct shm_object *shm_object_table_append_mem(struct shm_object_table *table,
			void *mem, size_t memory_map_size, int wakeup_fd)
{
	struct shm_object *obj;
	int ret;

	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	obj->wait_fd[0] = -1;	/* read end is unset */
	obj->wait_fd[1] = wakeup_fd;
	obj->shm_fd = -1;
	obj->shm_fd_ownership = 0;

	ret = fcntl(obj->wait_fd[1], F_SETFD, FD_CLOEXEC);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}
	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(obj->wait_fd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}

	obj->type = SHM_OBJECT_MEM;
	obj->memory_map = mem;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = memory_map_size;
	obj->index = table->allocated_len++;

	return obj;

error_fcntl:
	return NULL;
}
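
/*
 * On success, the memory passed as "mem" belongs to the shm_object: it is
 * released by shmp_object_destroy() through free(obj->memory_map) when the
 * table is destroyed, so the caller must not free it afterwards.
 */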

static
void shmp_object_destroy(struct shm_object *obj, int consumer)
{
	switch (obj->type) {
	case SHM_OBJECT_SHM:
	{
		int ret, i;

		ret = munmap(obj->memory_map, obj->memory_map_size);
		if (ret) {
			PERROR("munmap");
			assert(0);
		}

		if (obj->shm_fd_ownership) {
			/* Delete FDs only if called from app (not consumer). */
			if (!consumer) {
				lttng_ust_lock_fd_tracker();
				ret = close(obj->shm_fd);
				if (!ret) {
					lttng_ust_delete_fd_from_tracker(obj->shm_fd);
				} else {
					PERROR("close");
					assert(0);
				}
				lttng_ust_unlock_fd_tracker();
			} else {
				ret = close(obj->shm_fd);
				if (ret) {
					PERROR("close");
					assert(0);
				}
			}
		}
		for (i = 0; i < 2; i++) {
			if (obj->wait_fd[i] < 0)
				continue;
			if (!consumer) {
				lttng_ust_lock_fd_tracker();
				ret = close(obj->wait_fd[i]);
				if (!ret) {
					lttng_ust_delete_fd_from_tracker(obj->wait_fd[i]);
				} else {
					PERROR("close");
					assert(0);
				}
				lttng_ust_unlock_fd_tracker();
			} else {
				ret = close(obj->wait_fd[i]);
				if (ret) {
					PERROR("close");
					assert(0);
				}
			}
		}
		break;
	}
	case SHM_OBJECT_MEM:
	{
		int ret, i;

		for (i = 0; i < 2; i++) {
			if (obj->wait_fd[i] < 0)
				continue;
			if (!consumer) {
				lttng_ust_lock_fd_tracker();
				ret = close(obj->wait_fd[i]);
				if (!ret) {
					lttng_ust_delete_fd_from_tracker(obj->wait_fd[i]);
				} else {
					PERROR("close");
					assert(0);
				}
				lttng_ust_unlock_fd_tracker();
			} else {
				ret = close(obj->wait_fd[i]);
				if (ret) {
					PERROR("close");
					assert(0);
				}
			}
		}
		free(obj->memory_map);
		break;
	}
	default:
		assert(0);
	}
}

void shm_object_table_destroy(struct shm_object_table *table, int consumer)
{
	int i;

	for (i = 0; i < table->allocated_len; i++)
		shmp_object_destroy(&table->objects[i], consumer);
	free(table);
}

/*
 * zalloc_shm - allocate memory within a shm object.
 *
 * Shared memory is already zeroed by shmget.
 * *NOT* multithread-safe (should be protected by mutex).
 * Returns a -1, -1 tuple on error.
 */
struct shm_ref zalloc_shm(struct shm_object *obj, size_t len)
{
	struct shm_ref ref;
	struct shm_ref shm_ref_error = { -1, -1 };

	if (obj->memory_map_size - obj->allocated_len < len)
		return shm_ref_error;
	ref.index = obj->index;
	ref.offset = obj->allocated_len;
	obj->allocated_len += len;
	return ref;
}

void align_shm(struct shm_object *obj, size_t align)
{
	size_t offset_len = offset_align(obj->allocated_len, align);
	obj->allocated_len += offset_len;
}
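
/*
 * Usage sketch (hypothetical caller; zalloc_shm() reports a full object with
 * the { -1, -1 } error tuple): reserve an aligned chunk inside a shm_object
 * and keep the (index, offset) reference it returns:
 *
 *	struct shm_ref ref;
 *
 *	align_shm(obj, __alignof__(uint64_t));
 *	ref = zalloc_shm(obj, sizeof(uint64_t));
 *	if (ref.index < 0)
 *		return -ENOMEM;
 */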