Cygwin: Pass file paths instead of file descriptors over UNIX sockets
[lttng-ust.git] / libringbuffer / shm.c
/*
 * libringbuffer/shm.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
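
/*
 * Shared memory object management. Each object bundles a POSIX shm
 * area (published through a uniquely named symlink) and a named wait
 * pipe, both created at well-known filesystem locations so that they
 * can be handed to other processes by path rather than by passing
 * file descriptors over a UNIX socket.
 */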

#include "shm.h"
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>
#include <sys/stat.h>	/* For mode constants */
#include <fcntl.h>	/* For O_* constants */
#include <assert.h>
#include <stdio.h>
#include <signal.h>
#include <dirent.h>
#include <lttng/align.h>
#include <helper.h>
#include <limits.h>
/* FIXME: Include UUID the proper way, e.g. config.h... */
#include <uuid/uuid.h>

/*
 * Ensure we have the required amount of space available by writing 0
 * into the entire buffer. Not doing so can trigger SIGBUS when going
 * beyond the available shm space.
 */
static
int zero_file(int fd, size_t len)
{
	ssize_t retlen;
	size_t written = 0;
	char *zeropage;
	long pagelen;
	int ret;

	pagelen = sysconf(_SC_PAGESIZE);
	if (pagelen < 0)
		return (int) pagelen;
	zeropage = calloc(pagelen, 1);
	if (!zeropage)
		return -ENOMEM;

	while (len > written) {
		do {
			retlen = write(fd, zeropage,
				min_t(size_t, pagelen, len - written));
		} while (retlen < 0 && errno == EINTR);
		if (retlen < 0) {
			ret = (int) retlen;
			goto error;
		}
		written += retlen;
	}
	ret = 0;
error:
	free(zeropage);
	return ret;
}

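/*
 * Allocate a table with room for max_nb_obj shm objects.
 * Returns NULL on allocation failure.
 */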
struct shm_object_table *shm_object_table_create(size_t max_nb_obj)
{
	struct shm_object_table *table;

	table = zmalloc(sizeof(struct shm_object_table) +
			max_nb_obj * sizeof(table->objects[0]));
	if (!table)
		return NULL;
	table->size = max_nb_obj;
	return table;
}

/*
 * Generate a unique name with the desired prefix.
 * The pattern is: prefix-pid-uuid.
 * The caller is responsible for freeing the resulting string.
 */
static
char *gen_unique_name(const char *prefix)
{
	int written;
	pid_t pid;
	uuid_t uuid;
	char uuid_str[37];
	char tmp_name[NAME_MAX];
	char *name;

	if (!prefix)
		return NULL;

	pid = getpid();

	uuid_generate(uuid);
	uuid_unparse(uuid, uuid_str);

	written = snprintf(tmp_name, NAME_MAX,
			"%s-%d-%s", prefix, pid, uuid_str);
	if (written < 0 || written >= NAME_MAX)
		return NULL;

	name = zmalloc(written + 1);
	if (!name)
		return NULL;

	return strncpy(name, tmp_name, written);
}

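/*
 * Create a new shm object: a named wait pipe and a zero-filled POSIX
 * shm area, both reachable by path, then map the shm area into the
 * process. Returns the new object, or NULL on error or when the table
 * is full.
 */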
struct shm_object *shm_object_table_append(struct shm_object_table *table,
			size_t memory_map_size)
{
	int shmfd, ret, sigblocked = 0;
	struct shm_object *obj;
	char *memory_map;

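	/*
	 * Base locations where the wait pipe and the shm symlink are
	 * published; their unique names let peers open these objects
	 * by path instead of receiving file descriptors over a UNIX
	 * socket.
	 */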
	const char *base_shm = "/dev/shm/";
	const char *base_path = "/tmp/lttng-fds/";
	const char *waitfd_prefix = "ust-wait";
	const char *shm_prefix = "ust-shm";

	char *wait_pipe_path, *wait_pipe_file;
	char *shm_path, *shm_symlink_path, *shm_file;

	char tmp_name[NAME_MAX] = "ust-shm-tmp-XXXXXX";

	sigset_t all_sigs, orig_sigs;

	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	wait_pipe_file = gen_unique_name(waitfd_prefix);
	if (!wait_pipe_file) {
		goto error_gen_unique_wait;
	}

	wait_pipe_path = zmalloc(strlen(base_path)
			+ strlen(wait_pipe_file) + 1);
	if (!wait_pipe_path) {
		free(wait_pipe_file);
		goto error_wait_alloc;
	}

	strncat(wait_pipe_path, base_path, strlen(base_path));
	strncat(wait_pipe_path, wait_pipe_file, strlen(wait_pipe_file));

	free(wait_pipe_file);

	/* wait_fd: create named pipe */
	ret = mkfifo(wait_pipe_path, 0777);
	if (ret < 0) {
		PERROR("mkfifo");
		goto error_mkfifo;
	}

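	/* The pipe is not opened here; only its path is recorded. */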
	obj->wait_fd[0] = -1;
	obj->wait_fd[1] = -1;
	obj->wait_pipe_path = wait_pipe_path;

	/* shm_fd: create shm */

	/*
	 * Theoretically, we could leak a shm if the application crashes
	 * between open and unlink. Disable signals on this thread for
	 * increased safety against this scenario.
	 */
	sigfillset(&all_sigs);
	ret = pthread_sigmask(SIG_BLOCK, &all_sigs, &orig_sigs);
	if (ret == -1) {
		PERROR("pthread_sigmask");
		goto error_pthread_sigmask;
	}
	sigblocked = 1;

	/*
	 * We specifically do _not_ use the / at the beginning of the
	 * pathname so that some OS implementations can keep it local to
	 * the process (POSIX leaves this implementation-defined).
	 */
	do {
		/*
		 * Using mktemp filename with O_CREAT | O_EXCL open
		 * flags.
		 */
		mktemp(tmp_name);
		if (tmp_name[0] == '\0') {
			PERROR("mktemp");
			goto error_shm_open;
		}
		shmfd = shm_open(tmp_name,
				O_CREAT | O_EXCL | O_RDWR, 0700);
	} while (shmfd < 0 && (errno == EEXIST || errno == EACCES));
	if (shmfd < 0) {
		PERROR("shm_open");
		goto error_shm_open;
	}

	sigblocked = 0;
	ret = pthread_sigmask(SIG_SETMASK, &orig_sigs, NULL);
	if (ret == -1) {
		PERROR("pthread_sigmask");
		goto error_sigmask_release;
	}

	/*
	 * Create a uniquely named symlink to the shm under base_path.
	 * Its path is kept in the object so that the shm can later be
	 * opened by path.
	 */
	shm_path = zmalloc(strlen(base_shm) + strlen(tmp_name) + 1);
	if (!shm_path) {
		goto error_shm_alloc;
	}

	strncat(shm_path, base_shm, strlen(base_shm));
	strncat(shm_path, tmp_name, strlen(tmp_name));

	shm_file = gen_unique_name(shm_prefix);
	if (!shm_file) {
		free(shm_path);
		goto error_gen_unique_shm;
	}

	shm_symlink_path = zmalloc(strlen(base_path) + strlen(shm_file) + 1);
	if (!shm_symlink_path) {
		free(shm_path);
		free(shm_file);
		goto error_symlink_alloc;
	}

	strncat(shm_symlink_path, base_path, strlen(base_path));
	strncat(shm_symlink_path, shm_file, strlen(shm_file));

	free(shm_file);

	ret = symlink(shm_path, shm_symlink_path);
	if (ret < 0) {
		PERROR("symlink");
		free(shm_path);
		free(shm_symlink_path);
		goto error_symlink_shm;
	}

	free(shm_path);
	ret = zero_file(shmfd, memory_map_size);
	if (ret) {
		PERROR("zero_file");
		goto error_zero_file;
	}
	ret = ftruncate(shmfd, memory_map_size);
	if (ret) {
		PERROR("ftruncate");
		goto error_ftruncate;
	}
	obj->shm_fd = shmfd;
	obj->shm_path = shm_symlink_path;

	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			MAP_SHARED, shmfd, 0);
	if (memory_map == MAP_FAILED) {
		PERROR("mmap");
		goto error_mmap;
	}
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = 0;
	obj->index = table->allocated_len++;

	return obj;

error_mmap:
error_ftruncate:
error_zero_file:
	free(shm_symlink_path);
error_symlink_shm:
error_symlink_alloc:
error_gen_unique_shm:
error_shm_alloc:
error_sigmask_release:
	ret = close(shmfd);
	if (ret) {
		PERROR("close");
		assert(0);
	}
error_shm_open:
	if (sigblocked) {
		ret = pthread_sigmask(SIG_SETMASK, &orig_sigs, NULL);
		if (ret == -1) {
			PERROR("pthread_sigmask");
		}
	}
error_pthread_sigmask:
error_mkfifo:
	free(wait_pipe_path);
error_wait_alloc:
error_gen_unique_wait:
	return NULL;
}

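/*
 * Append an object backed by an shm_fd and wait_fd that were created
 * elsewhere, mapping the existing shm area instead of allocating a
 * new one.
 */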
struct shm_object *shm_object_table_append_shadow(struct shm_object_table *table,
			int shm_fd, int wait_fd, size_t memory_map_size)
{
	struct shm_object *obj;
	char *memory_map;

	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	/* wait_fd: set read end of the pipe. */
	obj->wait_fd[0] = wait_fd;
	obj->wait_fd[1] = -1;	/* write end is unset. */
	obj->shm_fd = shm_fd;

	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			MAP_SHARED, shm_fd, 0);
	if (memory_map == MAP_FAILED) {
		PERROR("mmap");
		goto error_mmap;
	}
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = memory_map_size;
	obj->index = table->allocated_len++;

	return obj;

error_mmap:
	return NULL;
}

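/*
 * Tear down an shm object: unmap it (unless it is a shadow), close its
 * file descriptors and free its path strings.
 */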
static
void shmp_object_destroy(struct shm_object *obj)
{
	int ret, i;

	if (!obj->is_shadow) {
		ret = munmap(obj->memory_map, obj->memory_map_size);
		if (ret) {
			PERROR("munmap");
			assert(0);
		}
	}
	if (obj->shm_fd >= 0) {
		ret = close(obj->shm_fd);
		if (ret) {
			PERROR("close");
			assert(0);
		}
	}

	if (obj->shm_path) {
		free(obj->shm_path);
	}

	for (i = 0; i < 2; i++) {
		if (obj->wait_fd[i] < 0)
			continue;
		ret = close(obj->wait_fd[i]);
		if (ret) {
			PERROR("close");
			assert(0);
		}
	}

	if (obj->wait_pipe_path) {
		free(obj->wait_pipe_path);
	}
}

void shm_object_table_destroy(struct shm_object_table *table)
{
	int i;

	for (i = 0; i < table->allocated_len; i++)
		shmp_object_destroy(&table->objects[i]);
	free(table);
}

/*
 * zalloc_shm - allocate memory within a shm object.
 *
 * Shared memory is already zeroed by zero_file() at creation time.
 * *NOT* multithread-safe (should be protected by mutex).
 * Returns a -1, -1 tuple on error.
 */
struct shm_ref zalloc_shm(struct shm_object *obj, size_t len)
{
	struct shm_ref ref;
	struct shm_ref shm_ref_error = { -1, -1 };

	if (obj->memory_map_size - obj->allocated_len < len)
		return shm_ref_error;
	ref.index = obj->index;
	ref.offset = obj->allocated_len;
	obj->allocated_len += len;
	return ref;
}

void align_shm(struct shm_object *obj, size_t align)
{
	size_t offset_len = offset_align(obj->allocated_len, align);
	obj->allocated_len += offset_len;
}