732a4687b82cd335284402cd4b5108be9d4e346b
[lttng-tools.git] / src / common / consumer / consumer-metadata-cache.c
1 /*
2 * Copyright (C) 2013 Julien Desfossez <jdesfossez@efficios.com>
3 * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
4 *
5 * SPDX-License-Identifier: GPL-2.0-only
6 *
7 */
8
9 #define _LGPL_SOURCE
10 #include <assert.h>
11 #include <pthread.h>
12 #include <stdlib.h>
13 #include <string.h>
14 #include <sys/types.h>
15 #include <unistd.h>
16 #include <inttypes.h>
17
18 #include <common/common.h>
19 #include <common/utils.h>
20 #include <common/sessiond-comm/sessiond-comm.h>
21 #include <common/ust-consumer/ust-consumer.h>
22 #include <common/consumer/consumer.h>
23
24 #include "consumer-metadata-cache.h"
25
26 enum metadata_cache_update_version_status {
27 METADATA_CACHE_UPDATE_STATUS_VERSION_UPDATED,
28 METADATA_CACHE_UPDATE_STATUS_VERSION_NOT_UPDATED,
29 };
30
31 extern struct lttng_consumer_global_data consumer_data;
32
33 /*
34 * Extend the allocated size of the metadata cache. Called only from
35 * lttng_ustconsumer_write_metadata_cache.
36 *
37 * Return 0 on success, a negative value on error.
38 */
39 static int extend_metadata_cache(struct consumer_metadata_cache *cache,
40 unsigned int size)
41 {
42 int ret = 0;
43 char *tmp_data_ptr;
44 unsigned int new_size, old_size;
45
46 assert(cache);
47
48 old_size = cache->cache_alloc_size;
49 new_size = max_t(unsigned int, old_size + size, old_size << 1);
50 DBG("Extending metadata cache: old size = %u, new size = %u", old_size,
51 new_size);
52
53 tmp_data_ptr = realloc(cache->data, new_size);
54 if (!tmp_data_ptr) {
55 ERR("Failed to re-allocate metadata cache");
56 free(cache->data);
57 ret = -1;
58 goto end;
59 }
60
61 /* Zero newly allocated memory. */
62 memset(tmp_data_ptr + old_size, 0, new_size - old_size);
63 cache->data = tmp_data_ptr;
64 cache->cache_alloc_size = new_size;
65
66 end:
67 return ret;
68 }
69
70 /*
71 * Reset the metadata cache.
72 */
73 static
74 void metadata_cache_reset(struct consumer_metadata_cache *cache)
75 {
76 memset(cache->data, 0, cache->cache_alloc_size);
77 cache->max_offset = 0;
78 }
79
80 /*
81 * Check if the metadata cache version changed.
82 * If it did, reset the metadata cache.
83 * The metadata cache lock MUST be held.
84 */
85 static enum metadata_cache_update_version_status metadata_cache_update_version(
86 struct consumer_metadata_cache *cache, uint64_t version)
87 {
88 enum metadata_cache_update_version_status status;
89
90 if (cache->version == version) {
91 status = METADATA_CACHE_UPDATE_STATUS_VERSION_NOT_UPDATED;
92 goto end;
93 }
94
95 DBG("Metadata cache version update to %" PRIu64, version);
96 cache->version = version;
97 status = METADATA_CACHE_UPDATE_STATUS_VERSION_UPDATED;
98
99 end:
100 return status;
101 }
102
103 /*
104 * Write metadata to the cache, extend the cache if necessary. We support
105 * overlapping updates, but they need to be contiguous. Send the
106 * contiguous metadata in cache to the ring buffer. The metadata cache
107 * lock MUST be acquired to write in the cache.
108 *
109 * See `enum consumer_metadata_cache_write_status` for the meaning of the
110 * various returned status codes.
111 */
112 enum consumer_metadata_cache_write_status
113 consumer_metadata_cache_write(struct consumer_metadata_cache *cache,
114 unsigned int offset, unsigned int len, uint64_t version,
115 const char *data)
116 {
117 int ret = 0;
118 enum consumer_metadata_cache_write_status status;
119 bool cache_is_invalidated = false;
120 uint64_t original_max_offset;
121
122 assert(cache);
123 ASSERT_LOCKED(cache->lock);
124 original_max_offset = cache->max_offset;
125
126 if (metadata_cache_update_version(cache, version) ==
127 METADATA_CACHE_UPDATE_STATUS_VERSION_UPDATED) {
128 metadata_cache_reset(cache);
129 cache_is_invalidated = true;
130 }
131
132 DBG("Writing %u bytes from offset %u in metadata cache", len, offset);
133
134 if (offset + len > cache->cache_alloc_size) {
135 ret = extend_metadata_cache(cache,
136 len - cache->cache_alloc_size + offset);
137 if (ret < 0) {
138 ERR("Extending metadata cache");
139 status = CONSUMER_METADATA_CACHE_WRITE_STATUS_ERROR;
140 goto end;
141 }
142 }
143
144 memcpy(cache->data + offset, data, len);
145 cache->max_offset = max(cache->max_offset, offset + len);
146
147 if (cache_is_invalidated) {
148 status = CONSUMER_METADATA_CACHE_WRITE_STATUS_INVALIDATED;
149 } else if (cache->max_offset > original_max_offset) {
150 status = CONSUMER_METADATA_CACHE_WRITE_STATUS_APPENDED_CONTENT;
151 } else {
152 status = CONSUMER_METADATA_CACHE_WRITE_STATUS_NO_CHANGE;
153 assert(cache->max_offset == original_max_offset);
154 }
155
156 end:
157 return status;
158 }
159
160 /*
161 * Create the metadata cache, original allocated size: max_sb_size
162 *
163 * Return 0 on success, a negative value on error.
164 */
165 int consumer_metadata_cache_allocate(struct lttng_consumer_channel *channel)
166 {
167 int ret;
168
169 assert(channel);
170
171 channel->metadata_cache = zmalloc(
172 sizeof(struct consumer_metadata_cache));
173 if (!channel->metadata_cache) {
174 PERROR("zmalloc metadata cache struct");
175 ret = -1;
176 goto end;
177 }
178 ret = pthread_mutex_init(&channel->metadata_cache->lock, NULL);
179 if (ret != 0) {
180 PERROR("mutex init");
181 goto end_free_cache;
182 }
183
184 channel->metadata_cache->cache_alloc_size = DEFAULT_METADATA_CACHE_SIZE;
185 channel->metadata_cache->data = zmalloc(
186 channel->metadata_cache->cache_alloc_size * sizeof(char));
187 if (!channel->metadata_cache->data) {
188 PERROR("zmalloc metadata cache data");
189 ret = -1;
190 goto end_free_mutex;
191 }
192 DBG("Allocated metadata cache of %" PRIu64 " bytes",
193 channel->metadata_cache->cache_alloc_size);
194
195 ret = 0;
196 goto end;
197
198 end_free_mutex:
199 pthread_mutex_destroy(&channel->metadata_cache->lock);
200 end_free_cache:
201 free(channel->metadata_cache);
202 end:
203 return ret;
204 }
205
206 /*
207 * Destroy and free the metadata cache
208 */
209 void consumer_metadata_cache_destroy(struct lttng_consumer_channel *channel)
210 {
211 if (!channel || !channel->metadata_cache) {
212 return;
213 }
214
215 DBG("Destroying metadata cache");
216
217 pthread_mutex_destroy(&channel->metadata_cache->lock);
218 free(channel->metadata_cache->data);
219 free(channel->metadata_cache);
220 }
221
/*
 * Check if the cache is flushed up to the offset passed in parameter.
 *
 * `timer` is non-zero when invoked from the metadata timer handler,
 * which changes which locks may safely be taken (see below).
 *
 * Return 0 if everything has been flushed, 1 if there is data not flushed.
 */
int consumer_metadata_cache_flushed(struct lttng_consumer_channel *channel,
		uint64_t offset, int timer)
{
	int ret = 0;
	struct lttng_consumer_stream *metadata_stream;

	assert(channel);
	assert(channel->metadata_cache);

	/*
	 * If not called from a timer handler, we have to take the
	 * channel lock to be mutually exclusive with channel teardown.
	 * Timer handler does not need to take this lock because it is
	 * already synchronized by timer stop (and, more importantly,
	 * taking this lock in a timer handler would cause a deadlock).
	 */
	if (!timer) {
		pthread_mutex_lock(&channel->lock);
	}
	pthread_mutex_lock(&channel->timer_lock);
	metadata_stream = channel->metadata_stream;
	if (!metadata_stream) {
		/*
		 * Having no metadata stream means the channel is being destroyed so there
		 * is no cache to flush anymore.
		 */
		ret = 0;
		goto end_unlock_channel;
	}

	/* Lock ordering: stream lock is taken before the cache lock. */
	pthread_mutex_lock(&metadata_stream->lock);
	pthread_mutex_lock(&channel->metadata_cache->lock);

	if (metadata_stream->ust_metadata_pushed >= offset) {
		/* Everything up to `offset` was already pushed out: flushed. */
		ret = 0;
	} else if (channel->metadata_stream->endpoint_status !=
			CONSUMER_ENDPOINT_ACTIVE) {
		/* An inactive endpoint means we don't have to flush anymore. */
		ret = 0;
	} else {
		/* Still not completely flushed. */
		ret = 1;
	}

	/* Release in reverse acquisition order. */
	pthread_mutex_unlock(&channel->metadata_cache->lock);
	pthread_mutex_unlock(&metadata_stream->lock);
end_unlock_channel:
	pthread_mutex_unlock(&channel->timer_lock);
	if (!timer) {
		pthread_mutex_unlock(&channel->lock);
	}

	return ret;
}
This page took 0.033892 seconds and 3 git commands to generate.