Namespace remaining symbols in lttng/ringbuffer-context.h
[lttng-ust.git] / libringbuffer / shm.h
1 /*
2 * SPDX-License-Identifier: LGPL-2.1-only
3 *
4 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 */
6
7 #ifndef _LIBRINGBUFFER_SHM_H
8 #define _LIBRINGBUFFER_SHM_H
9
10 #include <stddef.h>
11 #include <stdint.h>
12 #include <unistd.h>
13 #include <usterr-signal-safe.h>
14 #include <urcu/compiler.h>
15 #include "shm_types.h"
16
/*
 * channel_handle_create - for UST.
 *
 * Create a shm handle from an already-mapped channel memory area of
 * memory_map_size bytes; wakeup_fd is the fd used to wake up the
 * consumer. Returns NULL on error -- TODO confirm against shm.c.
 */
__attribute__((visibility("hidden")))
extern
struct lttng_ust_shm_handle *channel_handle_create(void *data,
		uint64_t memory_map_size, int wakeup_fd);

/*
 * channel_handle_add_stream - for UST.
 *
 * Attach stream number stream_nr, backed by shm_fd/wakeup_fd and
 * memory_map_size bytes of shared memory, to an existing handle.
 */
__attribute__((visibility("hidden")))
extern
int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
		int shm_fd, int wakeup_fd, uint32_t stream_nr,
		uint64_t memory_map_size);

/* Number of streams currently attached to the handle. */
__attribute__((visibility("hidden")))
unsigned int channel_handle_get_nr_streams(struct lttng_ust_shm_handle *handle);

/*
 * Tear down a channel and its shm handle. `consumer` is nonzero when
 * invoked from the consumer side -- NOTE(review): confirm semantics in
 * the channel_destroy() implementation.
 */
__attribute__((visibility("hidden")))
extern
void channel_destroy(struct lttng_ust_lib_ring_buffer_channel *chan,
		struct lttng_ust_shm_handle *handle,
		int consumer);
38
39 /*
40 * Pointer dereferencing. We don't trust the shm_ref, so we validate
41 * both the index and offset with known boundaries.
42 *
43 * "shmp" and "shmp_index" guarantee that it's safe to use the pointer
44 * target type, even in the occurrence of shm_ref modification by an
45 * untrusted process having write access to the shm_ref. We return a
46 * NULL pointer if the ranges are invalid.
47 */
48 static inline
49 char *_shmp_offset(struct shm_object_table *table, struct shm_ref *ref,
50 size_t idx, size_t elem_size)
51 {
52 struct shm_object *obj;
53 size_t objindex, ref_offset;
54
55 objindex = (size_t) ref->index;
56 if (caa_unlikely(objindex >= table->allocated_len))
57 return NULL;
58 obj = &table->objects[objindex];
59 ref_offset = (size_t) ref->offset;
60 ref_offset += idx * elem_size;
61 /* Check if part of the element returned would exceed the limits. */
62 if (caa_unlikely(ref_offset + elem_size > obj->memory_map_size))
63 return NULL;
64 return &obj->memory_map[ref_offset];
65 }
66
/*
 * shmp_index - translate a typed shm reference into a usable pointer.
 *
 * Resolves element `index` of `ref` through _shmp_offset(), which
 * bounds-checks against the object's memory map, then casts the result
 * back to the reference's declared element type (`_type`). Evaluates to
 * NULL when the index/offset ranges are invalid.
 */
#define shmp_index(handle, ref, index)					\
	({								\
		__typeof__((ref)._type) ____ptr_ret;			\
		____ptr_ret = (__typeof__(____ptr_ret)) _shmp_offset((handle)->table, &(ref)._ref, index, sizeof(*____ptr_ret)); \
		____ptr_ret;						\
	})

/* shmp - resolve element 0 of a typed shm reference. */
#define shmp(handle, ref) shmp_index(handle, ref, 0)
75
/* Store shm reference `src` into the location pointed to by `ref`. */
static inline
void _set_shmp(struct shm_ref *ref, struct shm_ref src)
{
	*ref = src;
}

/* set_shmp - assign a raw struct shm_ref into a typed shmp field. */
#define set_shmp(ref, src) _set_shmp(&(ref)._ref, src)
83
/* Allocate a table able to track up to `max_nb_obj` shm objects. */
__attribute__((visibility("hidden")))
struct shm_object_table *shm_object_table_create(size_t max_nb_obj);

/*
 * Allocate a new shm object of memory_map_size bytes within the table.
 * `type` selects the backing storage (enum shm_object_type, see
 * shm_types.h); stream_fd/cpu qualify the object -- semantics depend on
 * the type, confirm in shm.c.
 */
__attribute__((visibility("hidden")))
struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
		size_t memory_map_size,
		enum shm_object_type type,
		const int stream_fd,
		int cpu);

/*
 * Map an existing shm area (shm_fd) as stream `stream_nr` and append it
 * to the table. NOTE(review): presumably takes ownership of the fds on
 * success -- verify against shm.c.
 */
__attribute__((visibility("hidden")))
struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
		int shm_fd, int wakeup_fd, uint32_t stream_nr,
		size_t memory_map_size);

/* mem ownership is passed to shm_object_table_append_mem(). */
__attribute__((visibility("hidden")))
struct shm_object *shm_object_table_append_mem(struct shm_object_table *table,
		void *mem, size_t memory_map_size, int wakeup_fd);

/*
 * Destroy the table and its objects. `consumer` is nonzero on the
 * consumer side -- NOTE(review): confirm semantics in shm.c.
 */
__attribute__((visibility("hidden")))
void shm_object_table_destroy(struct shm_object_table *table, int consumer);

/*
 * zalloc_shm - allocate memory within a shm object.
 *
 * Shared memory is already zeroed by shmget.
 * *NOT* multithread-safe (should be protected by mutex).
 * Returns a -1, -1 tuple on error.
 */
__attribute__((visibility("hidden")))
struct shm_ref zalloc_shm(struct shm_object *obj, size_t len);

/*
 * Align the object's next allocation to an `align`-byte boundary --
 * presumably advances the internal allocation cursor; confirm in shm.c.
 */
__attribute__((visibility("hidden")))
void align_shm(struct shm_object *obj, size_t align);
119
120 static inline
121 int shm_get_wait_fd(struct lttng_ust_shm_handle *handle, struct shm_ref *ref)
122 {
123 struct shm_object_table *table = handle->table;
124 struct shm_object *obj;
125 size_t index;
126
127 index = (size_t) ref->index;
128 if (caa_unlikely(index >= table->allocated_len))
129 return -EPERM;
130 obj = &table->objects[index];
131 return obj->wait_fd[0];
132 }
133
134 static inline
135 int shm_get_wakeup_fd(struct lttng_ust_shm_handle *handle, struct shm_ref *ref)
136 {
137 struct shm_object_table *table = handle->table;
138 struct shm_object *obj;
139 size_t index;
140
141 index = (size_t) ref->index;
142 if (caa_unlikely(index >= table->allocated_len))
143 return -EPERM;
144 obj = &table->objects[index];
145 return obj->wait_fd[1];
146 }
147
148 static inline
149 int shm_close_wait_fd(struct lttng_ust_shm_handle *handle,
150 struct shm_ref *ref)
151 {
152 struct shm_object_table *table = handle->table;
153 struct shm_object *obj;
154 int wait_fd;
155 size_t index;
156 int ret;
157
158 index = (size_t) ref->index;
159 if (caa_unlikely(index >= table->allocated_len))
160 return -EPERM;
161 obj = &table->objects[index];
162 wait_fd = obj->wait_fd[0];
163 if (wait_fd < 0)
164 return -ENOENT;
165 obj->wait_fd[0] = -1;
166 ret = close(wait_fd);
167 if (ret) {
168 ret = -errno;
169 return ret;
170 }
171 return 0;
172 }
173
174 static inline
175 int shm_close_wakeup_fd(struct lttng_ust_shm_handle *handle,
176 struct shm_ref *ref)
177 {
178 struct shm_object_table *table = handle->table;
179 struct shm_object *obj;
180 int wakeup_fd;
181 size_t index;
182 int ret;
183
184 index = (size_t) ref->index;
185 if (caa_unlikely(index >= table->allocated_len))
186 return -EPERM;
187 obj = &table->objects[index];
188 wakeup_fd = obj->wait_fd[1];
189 if (wakeup_fd < 0)
190 return -ENOENT;
191 obj->wait_fd[1] = -1;
192 ret = close(wakeup_fd);
193 if (ret) {
194 ret = -errno;
195 return ret;
196 }
197 return 0;
198 }
199
200 static inline
201 int shm_get_shm_fd(struct lttng_ust_shm_handle *handle, struct shm_ref *ref)
202 {
203 struct shm_object_table *table = handle->table;
204 struct shm_object *obj;
205 size_t index;
206
207 index = (size_t) ref->index;
208 if (caa_unlikely(index >= table->allocated_len))
209 return -EPERM;
210 obj = &table->objects[index];
211 return obj->shm_fd;
212 }
213
214
215 static inline
216 int shm_get_shm_size(struct lttng_ust_shm_handle *handle, struct shm_ref *ref,
217 uint64_t *size)
218 {
219 struct shm_object_table *table = handle->table;
220 struct shm_object *obj;
221 size_t index;
222
223 index = (size_t) ref->index;
224 if (caa_unlikely(index >= table->allocated_len))
225 return -EPERM;
226 obj = &table->objects[index];
227 *size = obj->memory_map_size;
228 return 0;
229 }
230
231 #endif /* _LIBRINGBUFFER_SHM_H */
This page took 0.040524 seconds and 4 git commands to generate.