/*
 * SPDX-License-Identifier: LGPL-2.1-or-later
 *
 * Copyright (C) 2009 Pierre-Marc Fournier
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

/*
 * Do _not_ define _LGPL_SOURCE because we don't want to create a
 * circular dependency loop between this malloc wrapper, liburcu and
 * libc.
 */

/* Has to be included first to override dlfcn.h */
#include <common/compat/dlfcn.h>

#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>	/* posix_memalign() prototype */
#include <string.h>	/* memcpy() */
#include <pthread.h>	/* pthread_mutex_t */
#include <assert.h>
#include <malloc.h>

#include <urcu/system.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
#include <urcu/tls-compat.h>
#include <urcu/arch.h>

#include <lttng/ust-libc-wrapper.h>

#include "common/macros.h"
#include "common/align.h"

#define LTTNG_UST_TRACEPOINT_DEFINE
#define TRACEPOINT_CREATE_PROBES
#define TP_IP_PARAM ip
#include "ust_libc.h"

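/*
 * Bootstrap buffer handed out by static_calloc_aligned() while the
 * dlsym() lookups below run, since dlsym() may itself allocate memory.
 */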
#define STATIC_CALLOC_LEN 4096
static char static_calloc_buf[STATIC_CALLOC_LEN];
static unsigned long static_calloc_buf_offset;

struct alloc_functions {
	void *(*calloc)(size_t nmemb, size_t size);
	void *(*malloc)(size_t size);
	void (*free)(void *ptr);
	void *(*realloc)(void *ptr, size_t size);
	void *(*memalign)(size_t alignment, size_t size);
	int (*posix_memalign)(void **memptr, size_t alignment, size_t size);
};

static
struct alloc_functions cur_alloc;

/*
 * Make sure our own use of the TLS compat layer will not cause infinite
 * recursion by calling calloc.
 */

static
void *static_calloc(size_t nmemb, size_t size);

/*
 * pthread mutex replacement for URCU tls compat layer.
 */
static int ust_malloc_lock;

static
void ust_malloc_spin_lock(pthread_mutex_t *lock)
	__attribute__((unused));
static
void ust_malloc_spin_lock(pthread_mutex_t *lock __attribute__((unused)))
{
	/*
	 * The memory barrier within cmpxchg takes care of ordering
	 * memory accesses with respect to the start of the critical
	 * section.
	 */
	while (uatomic_cmpxchg(&ust_malloc_lock, 0, 1) != 0)
		caa_cpu_relax();
}

static
void ust_malloc_spin_unlock(pthread_mutex_t *lock)
	__attribute__((unused));
static
void ust_malloc_spin_unlock(pthread_mutex_t *lock __attribute__((unused)))
{
	/*
	 * Ensure memory accesses within the critical section do not
	 * leak outside.
	 */
	cmm_smp_mb();
	uatomic_set(&ust_malloc_lock, 0);
}

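/*
 * DEFINE_URCU_TLS may expand (in its pthread-key compat mode) to code
 * calling calloc and pthread mutex functions. Temporarily redirect
 * those to the static allocator and to the spinlock above, so the TLS
 * compat layer cannot recurse into this wrapped allocator before the
 * dlsym() lookups are done.
 */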
#define calloc static_calloc
#define pthread_mutex_lock ust_malloc_spin_lock
#define pthread_mutex_unlock ust_malloc_spin_unlock
static DEFINE_URCU_TLS(int, malloc_nesting);
#undef pthread_mutex_unlock
#undef pthread_mutex_lock
#undef calloc

/*
 * Static allocator to use when initially executing dlsym(). It keeps a
 * size_t value of each object size prior to the object.
 */
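/*
 * Resulting buffer layout (illustrative sketch, not from the original
 * source):
 *
 *   static_calloc_buf: ... [ pad ][ size_t size ][ object bytes ... ]
 *                                                ^ returned pointer,
 *                                                  aligned on `alignment'
 */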
static
void *static_calloc_aligned(size_t nmemb, size_t size, size_t alignment)
{
	size_t prev_offset, new_offset, res_offset, aligned_offset;

	if (nmemb * size == 0) {
		return NULL;
	}

	/*
	 * Protect static_calloc_buf_offset from concurrent updates
	 * using a cmpxchg loop rather than a mutex to remove a
	 * dependency on pthread. This will minimize the risk of bad
	 * interaction between mutex and malloc instrumentation.
	 */
	res_offset = CMM_LOAD_SHARED(static_calloc_buf_offset);
	do {
		prev_offset = res_offset;
		aligned_offset = LTTNG_UST_ALIGN(prev_offset + sizeof(size_t), alignment);
		new_offset = aligned_offset + nmemb * size;
		if (new_offset > sizeof(static_calloc_buf)) {
			abort();
		}
	} while ((res_offset = uatomic_cmpxchg(&static_calloc_buf_offset,
			prev_offset, new_offset)) != prev_offset);
	*(size_t *) &static_calloc_buf[aligned_offset - sizeof(size_t)] = size;
	return &static_calloc_buf[aligned_offset];
}

static
void *static_calloc(size_t nmemb, size_t size)
{
	void *retval;

	retval = static_calloc_aligned(nmemb, size, 1);
	return retval;
}

static
void *static_malloc(size_t size)
{
	void *retval;

	retval = static_calloc_aligned(1, size, 1);
	return retval;
}

static
void static_free(void *ptr __attribute__((unused)))
{
	/* no-op. */
}

static
void *static_realloc(void *ptr, size_t size)
{
	size_t *old_size = NULL;
	void *retval;

	if (size == 0) {
		retval = NULL;
		goto end;
	}

	if (ptr) {
		old_size = (size_t *) ptr - 1;
		if (size <= *old_size) {
			/* We can re-use the old entry. */
			*old_size = size;
			retval = ptr;
			goto end;
		}
	}
	/* We need to expand. Don't free previous memory location. */
	retval = static_calloc_aligned(1, size, 1);
	assert(retval);
	if (ptr)
		memcpy(retval, ptr, *old_size);
end:
	return retval;
}

static
void *static_memalign(size_t alignment, size_t size)
{
	void *retval;

	retval = static_calloc_aligned(1, size, alignment);
	return retval;
}

static
int static_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	void *ptr;

	/* Check for power of 2, at least as large as sizeof(void *). */
	if (alignment & (alignment - 1)
			|| alignment < sizeof(void *)
			|| alignment == 0) {
		goto end;
	}
	ptr = static_calloc_aligned(1, size, alignment);
	*memptr = ptr;
end:
	return 0;
}

static
void setup_static_allocator(void)
{
	assert(cur_alloc.calloc == NULL);
	cur_alloc.calloc = static_calloc;
	assert(cur_alloc.malloc == NULL);
	cur_alloc.malloc = static_malloc;
	assert(cur_alloc.free == NULL);
	cur_alloc.free = static_free;
	assert(cur_alloc.realloc == NULL);
	cur_alloc.realloc = static_realloc;
	assert(cur_alloc.memalign == NULL);
	cur_alloc.memalign = static_memalign;
	assert(cur_alloc.posix_memalign == NULL);
	cur_alloc.posix_memalign = static_posix_memalign;
}

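/*
 * dlsym(RTLD_NEXT, ...) resolves each symbol to its next occurrence in
 * the library search order after this wrapper, i.e. the real libc
 * implementation that the wrappers below delegate to.
 */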
static
void lookup_all_symbols(void)
{
	struct alloc_functions af;

	/*
	 * Temporarily redirect allocation functions to
	 * static_calloc_aligned, and free function to static_free
	 * (no-op), until the dlsym lookup has completed.
	 */
	setup_static_allocator();

	/* Perform the actual lookups */
	af.calloc = dlsym(RTLD_NEXT, "calloc");
	af.malloc = dlsym(RTLD_NEXT, "malloc");
	af.free = dlsym(RTLD_NEXT, "free");
	af.realloc = dlsym(RTLD_NEXT, "realloc");
	af.memalign = dlsym(RTLD_NEXT, "memalign");
	af.posix_memalign = dlsym(RTLD_NEXT, "posix_memalign");

	/* Populate the new allocator functions */
	memcpy(&cur_alloc, &af, sizeof(cur_alloc));
}

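/*
 * Each wrapper below bumps the malloc_nesting TLS counter so that
 * re-entrant calls (e.g. from a tracepoint probe that itself
 * allocates) do not emit nested events: only the outermost call
 * (nesting == 1) is traced.
 */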
void *malloc(size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.malloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.malloc == NULL) {
			fprintf(stderr, "mallocwrap: unable to find malloc\n");
			abort();
		}
	}
	retval = cur_alloc.malloc(size);
	if (URCU_TLS(malloc_nesting) == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, malloc,
			size, retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

void free(void *ptr)
{
	URCU_TLS(malloc_nesting)++;
	/*
	 * Check whether the memory was allocated with
	 * static_calloc_aligned, in which case there is nothing to free.
	 */
	if (caa_unlikely((char *)ptr >= static_calloc_buf &&
			(char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
		goto end;
	}

	if (URCU_TLS(malloc_nesting) == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, free,
			ptr, LTTNG_UST_CALLER_IP());
	}

	if (cur_alloc.free == NULL) {
		lookup_all_symbols();
		if (cur_alloc.free == NULL) {
			fprintf(stderr, "mallocwrap: unable to find free\n");
			abort();
		}
	}
	cur_alloc.free(ptr);
end:
	URCU_TLS(malloc_nesting)--;
}

void *calloc(size_t nmemb, size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.calloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.calloc == NULL) {
			fprintf(stderr, "callocwrap: unable to find calloc\n");
			abort();
		}
	}
	retval = cur_alloc.calloc(nmemb, size);
	if (URCU_TLS(malloc_nesting) == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, calloc,
			nmemb, size, retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

void *realloc(void *ptr, size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	/*
	 * Check whether the memory was allocated with
	 * static_calloc_aligned, in which case there is nothing
	 * to free, and we need to copy the old data.
	 */
	if (caa_unlikely((char *)ptr >= static_calloc_buf &&
			(char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
		size_t *old_size;

		old_size = (size_t *) ptr - 1;
		if (cur_alloc.calloc == NULL) {
			lookup_all_symbols();
			if (cur_alloc.calloc == NULL) {
				fprintf(stderr, "reallocwrap: unable to find calloc\n");
				abort();
			}
		}
		retval = cur_alloc.calloc(1, size);
		if (retval) {
			memcpy(retval, ptr, *old_size);
		}
		/*
		 * Mimic that a NULL pointer has been received, so
		 * memory allocation analysis based on the trace
		 * doesn't get confused by the address from the static
		 * allocator.
		 */
		ptr = NULL;
		goto end;
	}

	if (cur_alloc.realloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.realloc == NULL) {
			fprintf(stderr, "reallocwrap: unable to find realloc\n");
			abort();
		}
	}
	retval = cur_alloc.realloc(ptr, size);
end:
	if (URCU_TLS(malloc_nesting) == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, realloc,
			ptr, size, retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

void *memalign(size_t alignment, size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.memalign == NULL) {
		lookup_all_symbols();
		if (cur_alloc.memalign == NULL) {
			fprintf(stderr, "memalignwrap: unable to find memalign\n");
			abort();
		}
	}
	retval = cur_alloc.memalign(alignment, size);
	if (URCU_TLS(malloc_nesting) == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, memalign,
			alignment, size, retval,
			LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

int posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.posix_memalign == NULL) {
		lookup_all_symbols();
		if (cur_alloc.posix_memalign == NULL) {
			fprintf(stderr, "posix_memalignwrap: unable to find posix_memalign\n");
			abort();
		}
	}
	retval = cur_alloc.posix_memalign(memptr, alignment, size);
	if (URCU_TLS(malloc_nesting) == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, posix_memalign,
			*memptr, alignment, size,
			retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

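/*
 * Force the TLS memory of malloc_nesting to be materialized before the
 * process becomes multithreaded: the empty asm emits no instructions,
 * but its "m" constraint makes the compiler resolve the TLS address.
 */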
static
void lttng_ust_fixup_malloc_nesting_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(malloc_nesting)));
}

void lttng_ust_libc_wrapper_malloc_ctor(void)
{
	/* Initialization already done */
	if (cur_alloc.calloc) {
		return;
	}
	lttng_ust_fixup_malloc_nesting_tls();
	/*
	 * Ensure the allocator is in place before the process becomes
	 * multithreaded.
	 */
	lookup_all_symbols();
}
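
/*
 * Usage sketch (library name as shipped by LTTng-UST; adjust the path
 * to your install):
 *
 *   $ LD_PRELOAD=liblttng-ust-libc-wrapper.so ./my-app
 *
 * Preloading this wrapper routes my-app's malloc, free, calloc,
 * realloc, memalign and posix_memalign calls through the
 * lttng_ust_libc:* tracepoints defined in ust_libc.h.
 */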