Fix: move fsync after ftruncate
[lttng-ust.git] / libringbuffer / shm.c
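
The commit subject above indicates that the fsync(2) call in _shm_object_table_alloc_shm() was moved so that it now runs after ftruncate(2): flushing only after the size has been set means the synced metadata includes the new file length. A minimal sketch of the resulting order (the full code is in _shm_object_table_alloc_shm() below; error handling omitted):

	ret = zero_file(shmfd, memory_map_size);	/* pre-fault backing pages */
	ret = ftruncate(shmfd, memory_map_size);	/* set the file size */
	ret = fsync(shmfd);				/* flush data and metadata, incl. the new size */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			MAP_SHARED, shmfd, 0);
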
/*
 * libringbuffer/shm.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include "shm.h"
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>	/* For mode constants */
#include <fcntl.h>	/* For O_* constants */
#include <errno.h>	/* For errno and ENOMEM used below */
#include <assert.h>
#include <stdio.h>
#include <signal.h>
#include <dirent.h>
#include <lttng/align.h>
#include <limits.h>
#include <helper.h>
#include <ust-fd.h>

/*
 * Ensure we have the required amount of space available by writing 0
 * into the entire buffer. Not doing so can trigger SIGBUS when going
 * beyond the available shm space.
 */
static
int zero_file(int fd, size_t len)
{
	ssize_t retlen;
	size_t written = 0;
	char *zeropage;
	long pagelen;
	int ret;

	pagelen = sysconf(_SC_PAGESIZE);
	if (pagelen < 0)
		return (int) pagelen;
	zeropage = calloc(pagelen, 1);
	if (!zeropage)
		return -ENOMEM;

	while (len > written) {
		do {
			retlen = write(fd, zeropage,
				min_t(size_t, pagelen, len - written));
		} while (retlen == -1UL && errno == EINTR);
		if (retlen < 0) {
			ret = (int) retlen;
			goto error;
		}
		written += retlen;
	}
	ret = 0;
error:
	free(zeropage);
	return ret;
}
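
/*
 * Editorial note (not in the original file): on a size-limited
 * shm/tmpfs mount, ftruncate(2) alone does not reserve backing pages,
 * so a later store through the mmap(2)'d region can still raise
 * SIGBUS once the mount runs out of space.  Writing zeros with
 * zero_file() forces the pages to be allocated up front; see its use
 * in _shm_object_table_alloc_shm() below, right before ftruncate()
 * and mmap().
 */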

struct shm_object_table *shm_object_table_create(size_t max_nb_obj)
{
	struct shm_object_table *table;

	table = zmalloc(sizeof(struct shm_object_table) +
			max_nb_obj * sizeof(table->objects[0]));
	if (!table)
		return NULL;
	table->size = max_nb_obj;
	return table;
}

static
struct shm_object *_shm_object_table_alloc_shm(struct shm_object_table *table,
					   size_t memory_map_size,
					   int stream_fd)
{
	int shmfd, waitfd[2], ret, i;
	struct shm_object *obj;
	char *memory_map;

	if (stream_fd < 0)
		return NULL;
	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	/* wait_fd: create pipe */
	ret = pipe(waitfd);
	if (ret < 0) {
		PERROR("pipe");
		goto error_pipe;
	}
	for (i = 0; i < 2; i++) {
		ret = fcntl(waitfd[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl");
			goto error_fcntl;
		}
	}
	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(waitfd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}
	memcpy(obj->wait_fd, waitfd, sizeof(waitfd));

	/* create shm */

	shmfd = stream_fd;
	ret = zero_file(shmfd, memory_map_size);
	if (ret) {
		PERROR("zero_file");
		goto error_zero_file;
	}
	ret = ftruncate(shmfd, memory_map_size);
	if (ret) {
		PERROR("ftruncate");
		goto error_ftruncate;
	}
	/*
	 * Also ensure the file metadata is synced with the storage by
	 * using fsync(2).  Doing this after ftruncate(2) ensures the
	 * size update above is part of what gets flushed.
	 */
	ret = fsync(shmfd);
	if (ret) {
		PERROR("fsync");
		goto error_fsync;
	}
	obj->shm_fd_ownership = 0;
	obj->shm_fd = shmfd;

	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			  MAP_SHARED, shmfd, 0);
	if (memory_map == MAP_FAILED) {
		PERROR("mmap");
		goto error_mmap;
	}
	obj->type = SHM_OBJECT_SHM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = 0;
	obj->index = table->allocated_len++;

	return obj;

error_mmap:
error_fsync:
error_ftruncate:
error_zero_file:
error_fcntl:
	for (i = 0; i < 2; i++) {
		ret = close(waitfd[i]);
		if (ret) {
			PERROR("close");
			assert(0);
		}
	}
error_pipe:
	return NULL;
}

static
struct shm_object *_shm_object_table_alloc_mem(struct shm_object_table *table,
					   size_t memory_map_size)
{
	struct shm_object *obj;
	void *memory_map;
	int waitfd[2], i, ret;

	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	memory_map = zmalloc(memory_map_size);
	if (!memory_map)
		goto alloc_error;

	/* wait_fd: create pipe */
	ret = pipe(waitfd);
	if (ret < 0) {
		PERROR("pipe");
		goto error_pipe;
	}
	for (i = 0; i < 2; i++) {
		ret = fcntl(waitfd[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl");
			goto error_fcntl;
		}
	}
	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(waitfd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}
	memcpy(obj->wait_fd, waitfd, sizeof(waitfd));

	/* no shm_fd */
	obj->shm_fd = -1;
	obj->shm_fd_ownership = 0;

	obj->type = SHM_OBJECT_MEM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = 0;
	obj->index = table->allocated_len++;

	return obj;

error_fcntl:
	for (i = 0; i < 2; i++) {
		ret = close(waitfd[i]);
		if (ret) {
			PERROR("close");
			assert(0);
		}
	}
error_pipe:
	free(memory_map);
alloc_error:
	return NULL;
}

struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
			size_t memory_map_size,
			enum shm_object_type type,
			int stream_fd)
{
	switch (type) {
	case SHM_OBJECT_SHM:
		return _shm_object_table_alloc_shm(table, memory_map_size,
				stream_fd);
	case SHM_OBJECT_MEM:
		return _shm_object_table_alloc_mem(table, memory_map_size);
	default:
		assert(0);
	}
	return NULL;
}

struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
			int shm_fd, int wakeup_fd, uint32_t stream_nr,
			size_t memory_map_size)
{
	struct shm_object *obj;
	char *memory_map;
	int ret;

	if (table->allocated_len >= table->size)
		return NULL;
	/* streams _must_ be received in sequential order, else fail. */
	if (stream_nr + 1 != table->allocated_len)
		return NULL;

	obj = &table->objects[table->allocated_len];

	/* wait_fd: set write end of the pipe. */
	obj->wait_fd[0] = -1;	/* read end is unset */
	obj->wait_fd[1] = wakeup_fd;
	obj->shm_fd = shm_fd;
	obj->shm_fd_ownership = 1;

	ret = fcntl(obj->wait_fd[1], F_SETFD, FD_CLOEXEC);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}
	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(obj->wait_fd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}

	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			  MAP_SHARED, shm_fd, 0);
	if (memory_map == MAP_FAILED) {
		PERROR("mmap");
		goto error_mmap;
	}
	obj->type = SHM_OBJECT_SHM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = memory_map_size;
	obj->index = table->allocated_len++;

	return obj;

error_fcntl:
error_mmap:
	return NULL;
}

/*
 * Passing ownership of mem to object.
 */
struct shm_object *shm_object_table_append_mem(struct shm_object_table *table,
			void *mem, size_t memory_map_size, int wakeup_fd)
{
	struct shm_object *obj;
	int ret;

	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	obj->wait_fd[0] = -1;	/* read end is unset */
	obj->wait_fd[1] = wakeup_fd;
	obj->shm_fd = -1;
	obj->shm_fd_ownership = 0;

	ret = fcntl(obj->wait_fd[1], F_SETFD, FD_CLOEXEC);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}
	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(obj->wait_fd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}

	obj->type = SHM_OBJECT_MEM;
	obj->memory_map = mem;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = memory_map_size;
	obj->index = table->allocated_len++;

	return obj;

error_fcntl:
	return NULL;
}

static
void shmp_object_destroy(struct shm_object *obj, int consumer)
{
	switch (obj->type) {
	case SHM_OBJECT_SHM:
	{
		int ret, i;

		ret = munmap(obj->memory_map, obj->memory_map_size);
		if (ret) {
			PERROR("munmap");
			assert(0);
		}

		if (obj->shm_fd_ownership) {
			/* Delete FDs only if called from app (not consumer). */
			if (!consumer) {
				lttng_ust_lock_fd_tracker();
				ret = close(obj->shm_fd);
				if (!ret) {
					lttng_ust_delete_fd_from_tracker(obj->shm_fd);
				} else {
					PERROR("close");
					assert(0);
				}
				lttng_ust_unlock_fd_tracker();
			} else {
				ret = close(obj->shm_fd);
				if (ret) {
					PERROR("close");
					assert(0);
				}
			}
		}
		for (i = 0; i < 2; i++) {
			if (obj->wait_fd[i] < 0)
				continue;
			if (!consumer) {
				lttng_ust_lock_fd_tracker();
				ret = close(obj->wait_fd[i]);
				if (!ret) {
					lttng_ust_delete_fd_from_tracker(obj->wait_fd[i]);
				} else {
					PERROR("close");
					assert(0);
				}
				lttng_ust_unlock_fd_tracker();
			} else {
				ret = close(obj->wait_fd[i]);
				if (ret) {
					PERROR("close");
					assert(0);
				}
			}
		}
		break;
	}
	case SHM_OBJECT_MEM:
	{
		int ret, i;

		for (i = 0; i < 2; i++) {
			if (obj->wait_fd[i] < 0)
				continue;
			if (!consumer) {
				lttng_ust_lock_fd_tracker();
				ret = close(obj->wait_fd[i]);
				if (!ret) {
					lttng_ust_delete_fd_from_tracker(obj->wait_fd[i]);
				} else {
					PERROR("close");
					assert(0);
				}
				lttng_ust_unlock_fd_tracker();
			} else {
				ret = close(obj->wait_fd[i]);
				if (ret) {
					PERROR("close");
					assert(0);
				}
			}
		}
		free(obj->memory_map);
		break;
	}
	default:
		assert(0);
	}
}

void shm_object_table_destroy(struct shm_object_table *table, int consumer)
{
	int i;

	for (i = 0; i < table->allocated_len; i++)
		shmp_object_destroy(&table->objects[i], consumer);
	free(table);
}

/*
 * zalloc_shm - allocate memory within a shm object.
 *
 * Shared memory is already zeroed when the object is created, either
 * by zero_file() (SHM_OBJECT_SHM) or by zmalloc() (SHM_OBJECT_MEM).
 * *NOT* multithread-safe (should be protected by mutex).
 * Returns a -1, -1 tuple on error.
 */
struct shm_ref zalloc_shm(struct shm_object *obj, size_t len)
{
	struct shm_ref ref;
	struct shm_ref shm_ref_error = { -1, -1 };

	if (obj->memory_map_size - obj->allocated_len < len)
		return shm_ref_error;
	ref.index = obj->index;
	ref.offset = obj->allocated_len;
	obj->allocated_len += len;
	return ref;
}

void align_shm(struct shm_object *obj, size_t align)
{
	size_t offset_len = offset_align(obj->allocated_len, align);
	obj->allocated_len += offset_len;
}
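
/*
 * Editorial usage sketch (not part of the original file), assuming a
 * struct shm_object *obj obtained from shm_object_table_alloc() and a
 * placeholder payload type 'struct foo':
 *
 *	struct shm_ref ref;
 *
 *	align_shm(obj, __alignof__(struct foo));	// pad up to the alignment
 *	ref = zalloc_shm(obj, sizeof(struct foo));	// carve out the space
 *	if (ref.index == -1)
 *		;	// allocation failed: object is full
 *
 * As noted above, callers must serialize these calls themselves.
 */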