Move to kernel style SPDX license identifiers
[lttng-ust.git] / libcounter / shm.c
/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#define _LGPL_SOURCE
#include <config.h>
#include "shm.h"
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>	/* For mode constants */
#include <fcntl.h>	/* For O_* constants */
#include <assert.h>
#include <errno.h>	/* For errno, ENOMEM, ENOSYS */
#include <stdio.h>
#include <signal.h>
#include <dirent.h>
#include <lttng/align.h>
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#ifdef HAVE_LIBNUMA
#include <numa.h>
#include <numaif.h>
#endif
#include <helper.h>
#include <ust-fd.h>
#include "../libringbuffer/mmap.h"

/*
 * Ensure we have the required amount of space available by writing 0
 * into the entire buffer. Not doing so can trigger SIGBUS when going
 * beyond the available shm space.
 */
static
int zero_file(int fd, size_t len)
{
	ssize_t retlen;
	size_t written = 0;
	char *zeropage;
	long pagelen;
	int ret;

	pagelen = sysconf(_SC_PAGESIZE);
	if (pagelen < 0)
		return (int) pagelen;
	zeropage = calloc(pagelen, 1);
	if (!zeropage)
		return -ENOMEM;

	while (len > written) {
		do {
			retlen = write(fd, zeropage,
				min_t(size_t, pagelen, len - written));
		} while (retlen == -1UL && errno == EINTR);
		if (retlen < 0) {
			ret = (int) retlen;
			goto error;
		}
		written += retlen;
	}
	ret = 0;
error:
	free(zeropage);
	return ret;
}

struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj)
{
	struct lttng_counter_shm_object_table *table;

	table = zmalloc(sizeof(struct lttng_counter_shm_object_table) +
			max_nb_obj * sizeof(table->objects[0]));
	if (!table)
		return NULL;
	table->size = max_nb_obj;
	return table;
}

static
struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_shm(struct lttng_counter_shm_object_table *table,
			size_t memory_map_size,
			int cpu_fd)
{
	int shmfd, ret;
	struct lttng_counter_shm_object *obj;
	char *memory_map;

	if (cpu_fd < 0)
		return NULL;
	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	/* create shm */

	shmfd = cpu_fd;
	ret = zero_file(shmfd, memory_map_size);
	if (ret) {
		PERROR("zero_file");
		goto error_zero_file;
	}
	ret = ftruncate(shmfd, memory_map_size);
	if (ret) {
		PERROR("ftruncate");
		goto error_ftruncate;
	}
	/*
	 * Also ensure the file metadata is synced with the storage by using
	 * fsync(2).
	 */
	ret = fsync(shmfd);
	if (ret) {
		PERROR("fsync");
		goto error_fsync;
	}
	obj->shm_fd_ownership = 0;
	obj->shm_fd = shmfd;

	/* memory_map: mmap */
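	/*
	 * Note: LTTNG_MAP_POPULATE comes from ../libringbuffer/mmap.h and is
	 * expected to expand to MAP_POPULATE where the system supports it
	 * (and to 0 otherwise), prefaulting the mapping so counter updates do
	 * not take minor faults on first access.
	 */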
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0);
	if (memory_map == MAP_FAILED) {
		PERROR("mmap");
		goto error_mmap;
	}
	obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = 0;
	obj->index = table->allocated_len++;

	return obj;

error_mmap:
error_fsync:
error_ftruncate:
error_zero_file:
	return NULL;
}

static
struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_mem(struct lttng_counter_shm_object_table *table,
			size_t memory_map_size)
{
	struct lttng_counter_shm_object *obj;
	void *memory_map;

	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	memory_map = zmalloc(memory_map_size);
	if (!memory_map)
		goto alloc_error;

	/* no shm_fd */
	obj->shm_fd = -1;
	obj->shm_fd_ownership = 0;

	obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = 0;
	obj->index = table->allocated_len++;

	return obj;

alloc_error:
	return NULL;
}

/*
 * libnuma prints errors on the console even for numa_available().
 * Work-around this limitation by using get_mempolicy() directly to
 * check whether the kernel supports mempolicy.
 */
#ifdef HAVE_LIBNUMA
static bool lttng_is_numa_available(void)
{
	int ret;

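	/*
	 * Quiet probe: get_mempolicy(2) fails with ENOSYS when the kernel
	 * lacks mempolicy/NUMA support, without printing to the console.
	 */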
	ret = get_mempolicy(NULL, NULL, 0, NULL, 0);
	if (ret && errno == ENOSYS) {
		return false;
	}
	/* numa_available() returns -1 when NUMA is not available (see numa(3)). */
	return numa_available() != -1;
}
#endif

struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
			size_t memory_map_size,
			enum lttng_counter_shm_object_type type,
			int cpu_fd,
			int cpu)
{
	struct lttng_counter_shm_object *shm_object;
#ifdef HAVE_LIBNUMA
	int oldnode = 0, node;
	bool numa_avail;

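	/*
	 * When NUMA is available, temporarily prefer the node backing @cpu
	 * (falling back to local allocation when no node can be determined),
	 * so the pages allocated below end up close to the CPU updating the
	 * counters. The previous preference is restored afterwards.
	 */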
	numa_avail = lttng_is_numa_available();
	if (numa_avail) {
		oldnode = numa_preferred();
		if (cpu >= 0) {
			node = numa_node_of_cpu(cpu);
			if (node >= 0)
				numa_set_preferred(node);
		}
		if (cpu < 0 || node < 0)
			numa_set_localalloc();
	}
#endif /* HAVE_LIBNUMA */
	switch (type) {
	case LTTNG_COUNTER_SHM_OBJECT_SHM:
		shm_object = _lttng_counter_shm_object_table_alloc_shm(table, memory_map_size,
				cpu_fd);
		break;
	case LTTNG_COUNTER_SHM_OBJECT_MEM:
		shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size);
		break;
	default:
		assert(0);
		shm_object = NULL;	/* Not reached; keeps shm_object defined if assertions are disabled. */
	}
#ifdef HAVE_LIBNUMA
	if (numa_avail)
		numa_set_preferred(oldnode);
#endif /* HAVE_LIBNUMA */
	return shm_object;
}

struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
			int shm_fd,
			size_t memory_map_size)
{
	struct lttng_counter_shm_object *obj;
	char *memory_map;

	if (table->allocated_len >= table->size)
		return NULL;

	obj = &table->objects[table->allocated_len];

	obj->shm_fd = shm_fd;
	obj->shm_fd_ownership = 1;

	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
	if (memory_map == MAP_FAILED) {
		PERROR("mmap");
		goto error_mmap;
	}
	obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
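	/*
	 * The layout of an appended object was fully allocated by its owner,
	 * so the whole mapping is accounted for as already used here.
	 */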
	obj->allocated_len = memory_map_size;
	obj->index = table->allocated_len++;

	return obj;

error_mmap:
	return NULL;
}

/*
 * Passing ownership of mem to object.
 */
struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_mem(struct lttng_counter_shm_object_table *table,
			void *mem, size_t memory_map_size)
{
	struct lttng_counter_shm_object *obj;

	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	obj->shm_fd = -1;
	obj->shm_fd_ownership = 0;

	obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM;
	obj->memory_map = mem;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = memory_map_size;
	obj->index = table->allocated_len++;

	return obj;
}

static
void lttng_counter_shmp_object_destroy(struct lttng_counter_shm_object *obj, int consumer)
{
	switch (obj->type) {
	case LTTNG_COUNTER_SHM_OBJECT_SHM:
	{
		int ret;

		ret = munmap(obj->memory_map, obj->memory_map_size);
		if (ret) {
			PERROR("munmap");
			assert(0);
		}

		if (obj->shm_fd_ownership) {
			/* Delete FDs only if called from app (not consumer). */
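			/*
			 * Closing and untracking are done under the fd tracker
			 * lock so the tracker's view of tracer-owned FDs stays
			 * consistent with the actual close(2).
			 */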
			if (!consumer) {
				lttng_ust_lock_fd_tracker();
				ret = close(obj->shm_fd);
				if (!ret) {
					lttng_ust_delete_fd_from_tracker(obj->shm_fd);
				} else {
					PERROR("close");
					assert(0);
				}
				lttng_ust_unlock_fd_tracker();
			} else {
				ret = close(obj->shm_fd);
				if (ret) {
					PERROR("close");
					assert(0);
				}
			}
		}
		break;
	}
	case LTTNG_COUNTER_SHM_OBJECT_MEM:
	{
		free(obj->memory_map);
		break;
	}
	default:
		assert(0);
	}
}

void lttng_counter_shm_object_table_destroy(struct lttng_counter_shm_object_table *table, int consumer)
{
	int i;

	for (i = 0; i < table->allocated_len; i++)
		lttng_counter_shmp_object_destroy(&table->objects[i], consumer);
	free(table);
}

/*
 * lttng_counter_zalloc_shm - allocate memory within a shm object.
 *
 * The underlying memory is already zeroed: shm-backed objects are zeroed
 * at creation (zero_file/ftruncate), and memory-backed objects come from
 * zmalloc.
 * *NOT* multithread-safe (should be protected by mutex).
 * Returns a -1, -1 tuple on error.
 */
struct lttng_counter_shm_ref lttng_counter_zalloc_shm(struct lttng_counter_shm_object *obj, size_t len)
{
	struct lttng_counter_shm_ref ref;
	struct lttng_counter_shm_ref shm_ref_error = { -1, -1 };

	if (obj->memory_map_size - obj->allocated_len < len)
		return shm_ref_error;
	ref.index = obj->index;
	ref.offset = obj->allocated_len;
	obj->allocated_len += len;
	return ref;
}

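/*
 * Advance the allocation offset so that the next lttng_counter_zalloc_shm()
 * call returns memory aligned on @align bytes.
 */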
void lttng_counter_align_shm(struct lttng_counter_shm_object *obj, size_t align)
{
	size_t offset_len = offset_align(obj->allocated_len, align);
	obj->allocated_len += offset_len;
}