Fix: consumerd: slow metadata push slows down application registration
[lttng-tools.git] src/common/consumer/consumer-metadata-cache.c
/*
 * Copyright (C) 2013 Julien Desfossez <jdesfossez@efficios.com>
 * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 */

#define _LGPL_SOURCE
#include <assert.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#include <inttypes.h>

#include <common/common.h>
#include <common/utils.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/ust-consumer/ust-consumer.h>
#include <common/consumer/consumer.h>

#include "consumer-metadata-cache.h"

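/*
 * Internal result of metadata_cache_update_version(): tells
 * consumer_metadata_cache_write() whether the incoming metadata version
 * differs from the one currently cached.
 */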
enum metadata_cache_update_version_status {
	METADATA_CACHE_UPDATE_STATUS_VERSION_UPDATED,
	METADATA_CACHE_UPDATE_STATUS_VERSION_NOT_UPDATED,
};

extern struct lttng_consumer_global_data the_consumer_data;

/*
 * Reset the metadata cache.
 */
static
void metadata_cache_reset(struct consumer_metadata_cache *cache)
{
	const int ret = lttng_dynamic_buffer_set_size(&cache->contents, 0);

	assert(ret == 0);
}

/*
 * Check if the metadata cache version changed.
 * If it did, reset the metadata cache.
 * The metadata cache lock MUST be held.
 */
static enum metadata_cache_update_version_status metadata_cache_update_version(
		struct consumer_metadata_cache *cache, uint64_t version)
{
	enum metadata_cache_update_version_status status;

	if (cache->version == version) {
		status = METADATA_CACHE_UPDATE_STATUS_VERSION_NOT_UPDATED;
		goto end;
	}

	DBG("Metadata cache version update to %" PRIu64, version);
	cache->version = version;
	status = METADATA_CACHE_UPDATE_STATUS_VERSION_UPDATED;

end:
	return status;
}

/*
 * Write metadata to the cache, extend the cache if necessary. We support
 * overlapping updates, but they need to be contiguous. Send the
 * contiguous metadata in cache to the ring buffer. The metadata cache
 * lock MUST be acquired to write in the cache.
 *
 * See `enum consumer_metadata_cache_write_status` for the meaning of the
 * various returned status codes.
 */
enum consumer_metadata_cache_write_status
consumer_metadata_cache_write(struct consumer_metadata_cache *cache,
		unsigned int offset, unsigned int len, uint64_t version,
		const char *data)
{
	int ret = 0;
	enum consumer_metadata_cache_write_status status;
	bool cache_is_invalidated = false;
	uint64_t original_size;

	assert(cache);
	ASSERT_LOCKED(cache->lock);
	original_size = cache->contents.size;
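	/*
	 * A version bump means the producer has replaced its metadata
	 * (for example after a metadata regeneration): the bytes already
	 * cached are stale, so drop them and flag the cache as
	 * invalidated for the caller.
	 */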
	if (metadata_cache_update_version(cache, version) ==
			METADATA_CACHE_UPDATE_STATUS_VERSION_UPDATED) {
		metadata_cache_reset(cache);
		cache_is_invalidated = true;
	}

	DBG("Writing %u bytes from offset %u in metadata cache", len, offset);
	if (offset + len > cache->contents.size) {
		ret = lttng_dynamic_buffer_set_size(
				&cache->contents, offset + len);
		if (ret) {
			ERR("Extending metadata cache");
			status = CONSUMER_METADATA_CACHE_WRITE_STATUS_ERROR;
			goto end;
		}
	}

	memcpy(cache->contents.data + offset, data, len);
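	/*
	 * Report the most significant effect of this write: invalidation
	 * takes precedence over appended content, and a write that did not
	 * grow the cache is reported as a pure overwrite (no change in
	 * size).
	 */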
	if (cache_is_invalidated) {
		status = CONSUMER_METADATA_CACHE_WRITE_STATUS_INVALIDATED;
	} else if (cache->contents.size > original_size) {
		status = CONSUMER_METADATA_CACHE_WRITE_STATUS_APPENDED_CONTENT;
	} else {
		status = CONSUMER_METADATA_CACHE_WRITE_STATUS_NO_CHANGE;
		assert(cache->contents.size == original_size);
	}

end:
	return status;
}

/*
 * Create the metadata cache, pre-allocating DEFAULT_METADATA_CACHE_SIZE
 * bytes of capacity.
 *
 * Return 0 on success, a negative value on error.
 */
int consumer_metadata_cache_allocate(struct lttng_consumer_channel *channel)
{
	int ret;

	assert(channel);

	channel->metadata_cache = zmalloc(
			sizeof(struct consumer_metadata_cache));
	if (!channel->metadata_cache) {
		PERROR("zmalloc metadata cache struct");
		ret = -1;
		goto end;
	}
	ret = pthread_mutex_init(&channel->metadata_cache->lock, NULL);
	if (ret != 0) {
		PERROR("mutex init");
		goto end_free_cache;
	}
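	/*
	 * Pre-allocate the cache's backing storage so typical metadata
	 * updates do not immediately force a reallocation; the buffer's
	 * logical size remains 0 until metadata is actually written to it.
	 */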
	lttng_dynamic_buffer_init(&channel->metadata_cache->contents);
	ret = lttng_dynamic_buffer_set_capacity(
			&channel->metadata_cache->contents,
			DEFAULT_METADATA_CACHE_SIZE);
	if (ret) {
		PERROR("Failed to pre-allocate metadata cache storage of %d bytes on creation",
				DEFAULT_METADATA_CACHE_SIZE);
		ret = -1;
		goto end_free_mutex;
	}

	DBG("Allocated metadata cache: current capacity = %zu",
			lttng_dynamic_buffer_get_capacity_left(
					&channel->metadata_cache->contents));

	ret = 0;
	goto end;

end_free_mutex:
	pthread_mutex_destroy(&channel->metadata_cache->lock);
end_free_cache:
	free(channel->metadata_cache);
end:
	return ret;
}

/*
 * Destroy and free the metadata cache
 */
void consumer_metadata_cache_destroy(struct lttng_consumer_channel *channel)
{
	if (!channel || !channel->metadata_cache) {
		return;
	}

	DBG("Destroying metadata cache");

	pthread_mutex_destroy(&channel->metadata_cache->lock);
	lttng_dynamic_buffer_reset(&channel->metadata_cache->contents);
	free(channel->metadata_cache);
}

/*
 * Check if the cache is flushed up to the offset passed as a parameter.
 *
 * Return true if everything has been flushed, false if some data has not
 * been flushed yet.
 */
static
bool consumer_metadata_cache_is_flushed(struct lttng_consumer_channel *channel,
		uint64_t offset, int timer)
{
	bool done_flushing = false;
	struct lttng_consumer_stream *metadata_stream;

	/*
	 * If not called from a timer handler, we have to take the
	 * channel lock to be mutually exclusive with channel teardown.
	 * Timer handler does not need to take this lock because it is
	 * already synchronized by timer stop (and, more importantly,
	 * taking this lock in a timer handler would cause a deadlock).
	 */
	if (!timer) {
		pthread_mutex_lock(&channel->lock);
	}
	pthread_mutex_lock(&channel->timer_lock);
	metadata_stream = channel->metadata_stream;
	if (!metadata_stream) {
		/*
		 * Having no metadata stream means the channel is being
		 * destroyed, so there is no cache to flush anymore.
		 */
		done_flushing = true;
		goto end_unlock_channel;
	}

	pthread_mutex_lock(&metadata_stream->lock);
	pthread_mutex_lock(&channel->metadata_cache->lock);

	if (metadata_stream->ust_metadata_pushed >= offset) {
		done_flushing = true;
	} else if (channel->metadata_stream->endpoint_status !=
			CONSUMER_ENDPOINT_ACTIVE) {
		/* An inactive endpoint means we don't have to flush anymore. */
		done_flushing = true;
	} else {
		/* Still not completely flushed. */
		done_flushing = false;
	}

	pthread_mutex_unlock(&channel->metadata_cache->lock);
	pthread_mutex_unlock(&metadata_stream->lock);

end_unlock_channel:
	pthread_mutex_unlock(&channel->timer_lock);
	if (!timer) {
		pthread_mutex_unlock(&channel->lock);
	}

	return done_flushing;
}

/*
 * Wait until the cache is flushed up to the offset passed as a parameter
 * or the metadata stream has been destroyed.
 */
void consumer_wait_metadata_cache_flushed(struct lttng_consumer_channel *channel,
		uint64_t offset, bool invoked_by_timer)
{
	assert(channel);
	assert(channel->metadata_cache);

	if (consumer_metadata_cache_is_flushed(channel, offset, invoked_by_timer)) {
		return;
	}

	/* Metadata cache is not currently flushed, wait on wait queue. */
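	/*
	 * The flush condition is re-checked after adding ourselves to the
	 * wait queue so that a flush completing between the initial check
	 * and the registration cannot be missed (which would leave this
	 * thread blocked forever).
	 */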
	for (;;) {
		struct lttng_waiter waiter;

		lttng_waiter_init(&waiter);
		lttng_wait_queue_add(&channel->metadata_pushed_wait_queue, &waiter);
		if (consumer_metadata_cache_is_flushed(channel, offset, invoked_by_timer)) {
			/* Wake up all waiters, ourselves included. */
			lttng_wait_queue_wake_all(&channel->metadata_pushed_wait_queue);
			/* Ensure proper teardown of waiter. */
			lttng_waiter_wait(&waiter);
			break;
		}

		lttng_waiter_wait(&waiter);
	}
}