#ifndef _LIBRINGBUFFER_SHM_H
#define _LIBRINGBUFFER_SHM_H

/*
 * libringbuffer/shm.h
 *
 * Copyright 2011 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <stdint.h>
#include <errno.h>		/* for EPERM returned below */
#include <urcu/compiler.h>	/* for caa_unlikely() */
#include <ust/usterr-signal-safe.h>
#include "ust/core.h"
#include "shm_types.h"

/*
 * Pointer dereferencing. We don't trust the shm_ref, so we validate
 * both the index and the offset against known boundaries.
 *
 * "shmp" and "shmp_index" guarantee that it is safe to use the pointer
 * target type, even if the shm_ref is concurrently modified by an
 * untrusted process having write access to it. We return a NULL
 * pointer when the ranges are invalid.
 */
static inline
char *_shmp_offset(struct shm_object_table *table, struct shm_ref *ref,
		   size_t idx, size_t elem_size)
{
	struct shm_object *obj;
	size_t objindex, ref_offset;

	objindex = (size_t) ref->index;
	if (caa_unlikely(objindex >= table->allocated_len))
		return NULL;
	obj = &table->objects[objindex];
	ref_offset = (size_t) ref->offset;
	ref_offset += idx * elem_size;
	/* Check if any part of the returned element would exceed the limits. */
	if (caa_unlikely(ref_offset + elem_size > obj->memory_map_size))
		return NULL;
	return &obj->memory_map[ref_offset];
}

#define shmp_index(handle, ref, index)					\
	({								\
		__typeof__((ref)._type) ____ptr_ret;			\
		____ptr_ret = (__typeof__(____ptr_ret)) _shmp_offset((handle)->table, &(ref)._ref, index, sizeof(*____ptr_ret)); \
		____ptr_ret;						\
	})

#define shmp(handle, ref)	shmp_index(handle, ref, 0)

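/*
 * Usage sketch (illustrative, not part of the original header). It
 * assumes a structure embedding a typed shm reference, e.g. one
 * declared with a DECLARE_SHMP()-style helper that pairs a struct
 * shm_ref (_ref) with a typed pointer (_type); the helper name and
 * the struct names below are assumptions:
 *
 *	struct ring {
 *		DECLARE_SHMP(struct subbuf, buf);
 *	};
 *
 *	struct subbuf *sb;
 *
 *	sb = shmp_index(handle, ring->buf, i);	// validated i-th element
 *	if (!sb)
 *		return;				// out-of-range ref: bail out
 *	sb = shmp(handle, ring->buf);		// equivalent to index 0
 */
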
static inline
void _set_shmp(struct shm_ref *ref, struct shm_ref src)
{
	*ref = src;
}

#define set_shmp(ref, src)	_set_shmp(&(ref)._ref, src)

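/*
 * For example, publishing a freshly allocated reference through the
 * typed wrapper (sketch; "ring->buf", "nr_subbuf" and "struct subbuf"
 * are hypothetical, and zalloc_shm() is declared further below):
 *
 *	set_shmp(ring->buf, zalloc_shm(obj, nr_subbuf * sizeof(struct subbuf)));
 */
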
struct shm_object_table *shm_object_table_create(size_t max_nb_obj);
struct shm_object *shm_object_table_append_shadow(struct shm_object_table *table,
			int shm_fd, int wait_fd, size_t memory_map_size);
void shm_object_table_destroy(struct shm_object_table *table);
struct shm_object *shm_object_table_append(struct shm_object_table *table,
			size_t memory_map_size);

/*
 * zalloc_shm - allocate memory within a shm object.
 *
 * Shared memory is already zeroed by shmget.
 * *NOT* multithread-safe (should be protected by a mutex).
 * Returns a -1, -1 tuple on error.
 */
struct shm_ref zalloc_shm(struct shm_object *obj, size_t len);
void align_shm(struct shm_object *obj, size_t align);

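/*
 * Allocation sketch (illustrative; the table capacity, mapping size
 * and error labels are assumptions, and testing ref.index against -1
 * assumes the index/offset fields are signed, per the -1, -1 error
 * tuple documented above):
 *
 *	struct shm_object_table *table;
 *	struct shm_object *obj;
 *	struct shm_ref ref;
 *
 *	table = shm_object_table_create(1);
 *	if (!table)
 *		return -ENOMEM;
 *	obj = shm_object_table_append(table, 4096);	// mapping size (assumed)
 *	if (!obj)
 *		goto error;
 *	align_shm(obj, __alignof__(uint64_t));		// pad allocator offset
 *	ref = zalloc_shm(obj, sizeof(uint64_t));	// zeroed allocation
 *	if (ref.index == -1)				// -1, -1 tuple on error
 *		goto error;
 */
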
static inline
int shm_get_wakeup_fd(struct lttng_ust_shm_handle *handle, struct shm_ref *ref)
{
	struct shm_object_table *table = handle->table;
	struct shm_object *obj;
	size_t index;

	index = (size_t) ref->index;
	if (caa_unlikely(index >= table->allocated_len))
		return -EPERM;
	obj = &table->objects[index];
	return obj->wait_fd[1];
}

static inline
int shm_get_wait_fd(struct lttng_ust_shm_handle *handle, struct shm_ref *ref)
{
	struct shm_object_table *table = handle->table;
	struct shm_object *obj;
	size_t index;

	index = (size_t) ref->index;
	if (caa_unlikely(index >= table->allocated_len))
		return -EPERM;
	obj = &table->objects[index];
	return obj->wait_fd[0];
}

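/*
 * Wakeup sketch (illustrative): the wait_fd pair looks like a pipe,
 * with shm_get_wait_fd() returning the read end for the waiter and
 * shm_get_wakeup_fd() the write end for the producer; this is an
 * assumption inferred from the [0]/[1] indexing above. A minimal
 * blocking wait could look like:
 *
 *	char dummy;
 *	int fd = shm_get_wait_fd(handle, ref);
 *
 *	if (fd < 0)
 *		return fd;			// -EPERM on a bad reference
 *	while (read(fd, &dummy, 1) != 1) {	// block until a wakeup byte
 *		if (errno != EINTR)
 *			return -errno;
 *	}
 */
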
static inline
int shm_get_object_data(struct lttng_ust_shm_handle *handle, struct shm_ref *ref,
		int *shm_fd, int *wait_fd, uint64_t *memory_map_size)
{
	struct shm_object_table *table = handle->table;
	struct shm_object *obj;
	size_t index;

	index = (size_t) ref->index;
	if (caa_unlikely(index >= table->allocated_len))
		return -EPERM;
	obj = &table->objects[index];
	*shm_fd = obj->shm_fd;
	*wait_fd = obj->wait_fd[0];
	*memory_map_size = obj->allocated_len;
	return 0;
}

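/*
 * Sketch (illustrative): gather what a shadow mapping in another
 * process needs, then feed it to shm_object_table_append_shadow() on
 * the receiving side. Transporting the file descriptors between
 * processes (e.g. over a UNIX socket with SCM_RIGHTS) is assumed and
 * omitted here.
 *
 *	int shm_fd, wait_fd, ret;
 *	uint64_t memory_map_size;
 *
 *	ret = shm_get_object_data(handle, ref, &shm_fd, &wait_fd,
 *			&memory_map_size);
 *	if (ret)
 *		return ret;
 *	// receiver side, with its own table:
 *	obj = shm_object_table_append_shadow(table, shm_fd, wait_fd,
 *			(size_t) memory_map_size);
 */
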
#endif /* _LIBRINGBUFFER_SHM_H */