Fix: libc wrapper: use initial-exec for malloc_nesting TLS
[lttng-ust.git] src/lib/lttng-ust-libc-wrapper/lttng-ust-malloc.c
/*
 * SPDX-License-Identifier: LGPL-2.1-or-later
 *
 * Copyright (C) 2009 Pierre-Marc Fournier
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

/*
 * Do _not_ define _LGPL_SOURCE because we don't want to create a
 * circular dependency loop between this malloc wrapper, liburcu and
 * libc.
 */

/* Has to be included first to override dlfcn.h */
#include <common/compat/dlfcn.h>

#include <sys/types.h>
#include <stdio.h>
#include <assert.h>
#include <malloc.h>
#include <string.h>	/* memcpy() */
#include <pthread.h>	/* pthread_mutex_t */

#include <urcu/system.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>

#include <lttng/ust-libc-wrapper.h>

#include "common/macros.h"
#include "common/align.h"

#define LTTNG_UST_TRACEPOINT_HIDDEN_DEFINITION
#define LTTNG_UST_TRACEPOINT_PROVIDER_HIDDEN_DEFINITION

#define LTTNG_UST_TRACEPOINT_DEFINE
#define LTTNG_UST_TRACEPOINT_CREATE_PROBES
#define LTTNG_UST_TP_IP_PARAM ip
#include "ust_libc.h"
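
/*
 * This wrapper is typically activated by preloading it into the traced
 * application so that its symbols shadow the libc allocator, e.g.:
 *
 *   LD_PRELOAD=liblttng-ust-libc-wrapper.so ./my-app
 *
 * Each wrapper below forwards to the real libc function resolved with
 * dlsym(RTLD_NEXT, ...) and emits a tracepoint around the call.
 */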

#define STATIC_CALLOC_LEN 4096
static char static_calloc_buf[STATIC_CALLOC_LEN];
static unsigned long static_calloc_buf_offset;

struct alloc_functions {
	void *(*calloc)(size_t nmemb, size_t size);
	void *(*malloc)(size_t size);
	void (*free)(void *ptr);
	void *(*realloc)(void *ptr, size_t size);
	void *(*memalign)(size_t alignment, size_t size);
	int (*posix_memalign)(void **memptr, size_t alignment, size_t size);
};

static
struct alloc_functions cur_alloc;
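
/*
 * cur_alloc is the dispatch table through which every wrapper below
 * reaches the "real" allocator: it first points at the static
 * bootstrap allocator defined next, then is overwritten with the libc
 * functions once the dlsym() lookups in lookup_all_symbols() complete.
 */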

/*
 * Make sure our own use of the TLS compat layer will not cause infinite
 * recursion by calling calloc.
 */

static
void *static_calloc(size_t nmemb, size_t size);

/*
 * pthread mutex replacement for URCU tls compat layer.
 */
static int ust_malloc_lock;

static
void ust_malloc_spin_lock(pthread_mutex_t *lock)
	__attribute__((unused));
static
void ust_malloc_spin_lock(pthread_mutex_t *lock __attribute__((unused)))
{
	/*
	 * The memory barrier within cmpxchg takes care of ordering
	 * memory accesses with respect to the start of the critical
	 * section.
	 */
	while (uatomic_cmpxchg(&ust_malloc_lock, 0, 1) != 0)
		caa_cpu_relax();
}

static
void ust_malloc_spin_unlock(pthread_mutex_t *lock)
	__attribute__((unused));
static
void ust_malloc_spin_unlock(pthread_mutex_t *lock __attribute__((unused)))
{
	/*
	 * Ensure memory accesses within the critical section do not
	 * leak outside.
	 */
	cmm_smp_mb();
	uatomic_set(&ust_malloc_lock, 0);
}

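/*
 * These two functions mirror the pthread_mutex_lock()/unlock()
 * signatures so they can stand in for the mutex operations of the URCU
 * tls compat layer without depending on pthread or risking an
 * allocation from inside the allocator wrapper. They are marked unused
 * since, with malloc_nesting now using plain initial-exec TLS, nothing
 * in this file references them.
 */
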
/*
 * Use the initial-exec TLS model for the malloc_nesting nesting guard
 * variable to ensure that the glibc implementation of the TLS access
 * does not trigger infinite recursion by calling the memory allocator
 * wrapper functions, which could happen with global-dynamic.
 */
static __thread __attribute__((tls_model("initial-exec"))) int malloc_nesting;

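/*
 * Background (sketch): with the default global-dynamic model, a TLS
 * access from a shared object may go through __tls_get_addr(), which
 * can allocate memory on the first access from a given thread. Inside
 * this wrapper, that allocation would re-enter malloc() before the
 * nesting guard is readable. The initial-exec model instead resolves
 * the variable to a constant offset from the thread pointer at load
 * time, so accessing malloc_nesting never allocates.
 */
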
/*
 * Static allocator to use when initially executing dlsym(). It keeps a
 * size_t value of each object's size just before the object itself.
 */
static
void *static_calloc_aligned(size_t nmemb, size_t size, size_t alignment)
{
	size_t prev_offset, new_offset, res_offset, aligned_offset;

	if (nmemb * size == 0) {
		return NULL;
	}

	/*
	 * Protect static_calloc_buf_offset from concurrent updates
	 * using a cmpxchg loop rather than a mutex to remove a
	 * dependency on pthread. This will minimize the risk of bad
	 * interaction between mutex and malloc instrumentation.
	 */
	res_offset = CMM_LOAD_SHARED(static_calloc_buf_offset);
	do {
		prev_offset = res_offset;
		aligned_offset = LTTNG_UST_ALIGN(prev_offset + sizeof(size_t), alignment);
		new_offset = aligned_offset + nmemb * size;
		if (new_offset > sizeof(static_calloc_buf)) {
			abort();
		}
	} while ((res_offset = uatomic_cmpxchg(&static_calloc_buf_offset,
			prev_offset, new_offset)) != prev_offset);
	*(size_t *) &static_calloc_buf[aligned_offset - sizeof(size_t)] = size;
	return &static_calloc_buf[aligned_offset];
}
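
/*
 * Resulting buffer layout (sketch):
 *
 *   static_calloc_buf: ... [pad][size][object][pad][size][object] ...
 *                                     ^ returned pointer (aligned)
 *
 * The recorded size lets static_realloc() know how much to copy, and
 * the address-range checks in free() and realloc() recognize these
 * objects so they are never handed to the libc allocator.
 */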

static
void *static_calloc(size_t nmemb, size_t size)
{
	void *retval;

	retval = static_calloc_aligned(nmemb, size, 1);
	return retval;
}
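
/*
 * Zeroing guarantee: static_calloc_buf has static storage duration and
 * is therefore zero-initialized, and static_free() below never
 * recycles memory, so the bytes returned by static_calloc() have never
 * been written to and are already zero, as calloc() requires.
 */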

static
void *static_malloc(size_t size)
{
	void *retval;

	retval = static_calloc_aligned(1, size, 1);
	return retval;
}

static
void static_free(void *ptr __attribute__((unused)))
{
	/* no-op. */
}

static
void *static_realloc(void *ptr, size_t size)
{
	size_t *old_size = NULL;
	void *retval;

	if (size == 0) {
		retval = NULL;
		goto end;
	}

	if (ptr) {
		old_size = (size_t *) ptr - 1;
		if (size <= *old_size) {
			/* We can re-use the old entry. */
			*old_size = size;
			retval = ptr;
			goto end;
		}
	}
	/* We need to expand. Don't free previous memory location. */
	retval = static_calloc_aligned(1, size, 1);
	assert(retval);
	if (ptr)
		memcpy(retval, ptr, *old_size);
end:
	return retval;
}

static
void *static_memalign(size_t alignment, size_t size)
{
	void *retval;

	retval = static_calloc_aligned(1, size, alignment);
	return retval;
}

static
int static_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	void *ptr;

	/* Check for a power of 2, at least as large as sizeof(void *). */
	if (alignment & (alignment - 1)
			|| alignment < sizeof(void *)
			|| alignment == 0) {
		goto end;
	}
	ptr = static_calloc_aligned(1, size, alignment);
	*memptr = ptr;
end:
	return 0;
}
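
/*
 * Note: unlike POSIX posix_memalign(), which returns EINVAL for an
 * invalid alignment, this bootstrap variant returns 0 and leaves
 * *memptr untouched in that case; it only has to satisfy the handful
 * of allocations performed while dlsym() resolves the real symbols.
 */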

static
void setup_static_allocator(void)
{
	assert(cur_alloc.calloc == NULL);
	cur_alloc.calloc = static_calloc;
	assert(cur_alloc.malloc == NULL);
	cur_alloc.malloc = static_malloc;
	assert(cur_alloc.free == NULL);
	cur_alloc.free = static_free;
	assert(cur_alloc.realloc == NULL);
	cur_alloc.realloc = static_realloc;
	assert(cur_alloc.memalign == NULL);
	cur_alloc.memalign = static_memalign;
	assert(cur_alloc.posix_memalign == NULL);
	cur_alloc.posix_memalign = static_posix_memalign;
}

static
void lookup_all_symbols(void)
{
	struct alloc_functions af;

	/*
	 * Temporarily redirect allocation functions to
	 * static_calloc_aligned, and free function to static_free
	 * (no-op), until the dlsym lookup has completed.
	 */
	setup_static_allocator();

	/* Perform the actual lookups */
	af.calloc = dlsym(RTLD_NEXT, "calloc");
	af.malloc = dlsym(RTLD_NEXT, "malloc");
	af.free = dlsym(RTLD_NEXT, "free");
	af.realloc = dlsym(RTLD_NEXT, "realloc");
	af.memalign = dlsym(RTLD_NEXT, "memalign");
	af.posix_memalign = dlsym(RTLD_NEXT, "posix_memalign");

	/* Populate the new allocator functions */
	memcpy(&cur_alloc, &af, sizeof(cur_alloc));
}

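/*
 * dlsym() itself can allocate (e.g. through the dynamic loader's
 * error-handling paths), which is why the static allocator is
 * installed first: any recursive calloc()/malloc() call issued while
 * the lookups run is served from static_calloc_buf.
 */
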
void *malloc(size_t size)
{
	void *retval;

	malloc_nesting++;
	if (cur_alloc.malloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.malloc == NULL) {
			fprintf(stderr, "mallocwrap: unable to find malloc\n");
			abort();
		}
	}
	retval = cur_alloc.malloc(size);
	if (malloc_nesting == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, malloc,
			size, retval, LTTNG_UST_CALLER_IP());
	}
	malloc_nesting--;
	return retval;
}

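/*
 * The malloc_nesting counter ensures that only the outermost
 * allocation call emits a tracepoint: if the tracing path itself
 * allocates, the nested call runs with malloc_nesting > 1 and is
 * performed without being traced.
 */
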
void free(void *ptr)
{
	malloc_nesting++;
	/*
	 * Check whether the memory was allocated with
	 * static_calloc_aligned(), in which case there is nothing to
	 * free.
	 */
	if (caa_unlikely((char *)ptr >= static_calloc_buf &&
			(char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
		goto end;
	}

	if (malloc_nesting == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, free,
			ptr, LTTNG_UST_CALLER_IP());
	}

	if (cur_alloc.free == NULL) {
		lookup_all_symbols();
		if (cur_alloc.free == NULL) {
			fprintf(stderr, "mallocwrap: unable to find free\n");
			abort();
		}
	}
	cur_alloc.free(ptr);
end:
	malloc_nesting--;
}

void *calloc(size_t nmemb, size_t size)
{
	void *retval;

	malloc_nesting++;
	if (cur_alloc.calloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.calloc == NULL) {
			fprintf(stderr, "callocwrap: unable to find calloc\n");
			abort();
		}
	}
	retval = cur_alloc.calloc(nmemb, size);
	if (malloc_nesting == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, calloc,
			nmemb, size, retval, LTTNG_UST_CALLER_IP());
	}
	malloc_nesting--;
	return retval;
}

void *realloc(void *ptr, size_t size)
{
	void *retval;

	malloc_nesting++;
	/*
	 * Check whether the memory was allocated with
	 * static_calloc_aligned(), in which case there is nothing to
	 * free, and we need to copy the old data.
	 */
	if (caa_unlikely((char *)ptr >= static_calloc_buf &&
			(char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
		size_t *old_size;

		old_size = (size_t *) ptr - 1;
		if (cur_alloc.calloc == NULL) {
			lookup_all_symbols();
			if (cur_alloc.calloc == NULL) {
				fprintf(stderr, "reallocwrap: unable to find calloc\n");
				abort();
			}
		}
		retval = cur_alloc.calloc(1, size);
		if (retval) {
			/* Copy no more than the new size when shrinking. */
			memcpy(retval, ptr, size < *old_size ? size : *old_size);
		}
		/*
		 * Mimic that a NULL pointer has been received, so
		 * memory allocation analysis based on the trace does
		 * not get confused by the address from the static
		 * allocator.
		 */
		ptr = NULL;
		goto end;
	}

	if (cur_alloc.realloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.realloc == NULL) {
			fprintf(stderr, "reallocwrap: unable to find realloc\n");
			abort();
		}
	}
	retval = cur_alloc.realloc(ptr, size);
end:
	if (malloc_nesting == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, realloc,
			ptr, size, retval, LTTNG_UST_CALLER_IP());
	}
	malloc_nesting--;
	return retval;
}

void *memalign(size_t alignment, size_t size)
{
	void *retval;

	malloc_nesting++;
	if (cur_alloc.memalign == NULL) {
		lookup_all_symbols();
		if (cur_alloc.memalign == NULL) {
			fprintf(stderr, "memalignwrap: unable to find memalign\n");
			abort();
		}
	}
	retval = cur_alloc.memalign(alignment, size);
	if (malloc_nesting == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, memalign,
			alignment, size, retval,
			LTTNG_UST_CALLER_IP());
	}
	malloc_nesting--;
	return retval;
}

int posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int retval;

	malloc_nesting++;
	if (cur_alloc.posix_memalign == NULL) {
		lookup_all_symbols();
		if (cur_alloc.posix_memalign == NULL) {
			fprintf(stderr, "posix_memalignwrap: unable to find posix_memalign\n");
			abort();
		}
	}
	retval = cur_alloc.posix_memalign(memptr, alignment, size);
	if (malloc_nesting == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, posix_memalign,
			*memptr, alignment, size,
			retval, LTTNG_UST_CALLER_IP());
	}
	malloc_nesting--;
	return retval;
}

void lttng_ust_libc_wrapper_malloc_ctor(void)
{
	/* Initialization already done */
	if (cur_alloc.calloc) {
		return;
	}
	/*
	 * Ensure the allocator is in place before the process becomes
	 * multithreaded.
	 */
	lookup_all_symbols();
}
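
/*
 * Running the lookups eagerly from this constructor makes the lazy
 * NULL checks in the wrappers above a fallback: they only trigger if
 * an allocation happens before the constructor runs, e.g. from another
 * library constructor executed earlier.
 */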