/*
 * SPDX-License-Identifier: LGPL-2.1-or-later
 *
 * Copyright (C) 2009 Pierre-Marc Fournier
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

/*
 * Do _not_ define _LGPL_SOURCE because we don't want to create a
 * circular dependency loop between this malloc wrapper, liburcu and
 * libc.
 */

/* Has to be included first to override dlfcn.h */
#include <common/compat/dlfcn.h>

#include <sys/types.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <malloc.h>
#include <pthread.h>

#include <urcu/system.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
#include <urcu/tls-compat.h>
#include <urcu/arch.h>

#include <lttng/ust-libc-wrapper.h>

#include "common/macros.h"
#include "common/align.h"

#define LTTNG_UST_TRACEPOINT_HIDDEN_DEFINITION
#define LTTNG_UST_TRACEPOINT_PROVIDER_HIDDEN_DEFINITION

#define LTTNG_UST_TRACEPOINT_DEFINE
#define LTTNG_UST_TRACEPOINT_CREATE_PROBES
#define LTTNG_UST_TP_IP_PARAM ip
#include "ust_libc.h"

#define STATIC_CALLOC_LEN 4096
static char static_calloc_buf[STATIC_CALLOC_LEN];
static unsigned long static_calloc_buf_offset;
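
/*
 * Bootstrap arena: dlsym() can itself allocate (glibc may call calloc
 * from its error-handling path), so until the real libc symbols are
 * resolved, allocations are carved out of this fixed buffer by bumping
 * static_calloc_buf_offset. Arena memory is never reclaimed:
 * static_free() below is a no-op.
 */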

struct alloc_functions {
	void *(*calloc)(size_t nmemb, size_t size);
	void *(*malloc)(size_t size);
	void (*free)(void *ptr);
	void *(*realloc)(void *ptr, size_t size);
	void *(*memalign)(size_t alignment, size_t size);
	int (*posix_memalign)(void **memptr, size_t alignment, size_t size);
};

static
struct alloc_functions cur_alloc;

/*
 * Make sure our own use of the TLS compat layer will not cause infinite
 * recursion by calling calloc.
 */

static
void *static_calloc(size_t nmemb, size_t size);

/*
 * pthread mutex replacement for URCU tls compat layer.
 */
static int ust_malloc_lock;

static
void ust_malloc_spin_lock(pthread_mutex_t *lock)
	__attribute__((unused));
static
void ust_malloc_spin_lock(pthread_mutex_t *lock __attribute__((unused)))
{
	/*
	 * The memory barrier within cmpxchg takes care of ordering
	 * memory accesses with respect to the start of the critical
	 * section.
	 */
	while (uatomic_cmpxchg(&ust_malloc_lock, 0, 1) != 0)
		caa_cpu_relax();
}

static
void ust_malloc_spin_unlock(pthread_mutex_t *lock)
	__attribute__((unused));
static
void ust_malloc_spin_unlock(pthread_mutex_t *lock __attribute__((unused)))
{
	/*
	 * Ensure memory accesses within the critical section do not
	 * leak outside.
	 */
	cmm_smp_mb();
	uatomic_set(&ust_malloc_lock, 0);
}
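
/*
 * The TLS compat layer may call calloc and take a pthread mutex while
 * DEFINE_URCU_TLS below is expanded. Temporarily redirect those symbols
 * to the self-contained replacements above, so that allocating the
 * nesting counter can never recurse into this wrapper.
 */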
#define calloc static_calloc
#define pthread_mutex_lock ust_malloc_spin_lock
#define pthread_mutex_unlock ust_malloc_spin_unlock
static DEFINE_URCU_TLS(int, malloc_nesting);
#undef pthread_mutex_unlock
#undef pthread_mutex_lock
#undef calloc

/*
 * Static allocator to use when initially executing dlsym(). It keeps a
 * size_t value of each object size prior to the object.
 */
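/*
 * Resulting buffer layout (a sketch):
 *
 *   static_calloc_buf: [ pad | size_t size | object bytes ... ]
 *                                           ^-- returned pointer
 *
 * static_realloc() reads this header back at ((size_t *) ptr - 1).
 */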
static
void *static_calloc_aligned(size_t nmemb, size_t size, size_t alignment)
{
	size_t prev_offset, new_offset, res_offset, aligned_offset;

	if (nmemb * size == 0) {
		return NULL;
	}

	/*
	 * Protect static_calloc_buf_offset from concurrent updates
	 * using a cmpxchg loop rather than a mutex to remove a
	 * dependency on pthread. This will minimize the risk of bad
	 * interaction between mutex and malloc instrumentation.
	 */
	res_offset = CMM_LOAD_SHARED(static_calloc_buf_offset);
	do {
		prev_offset = res_offset;
		aligned_offset = LTTNG_UST_ALIGN(prev_offset + sizeof(size_t), alignment);
		new_offset = aligned_offset + nmemb * size;
		if (new_offset > sizeof(static_calloc_buf)) {
			abort();
		}
	} while ((res_offset = uatomic_cmpxchg(&static_calloc_buf_offset,
			prev_offset, new_offset)) != prev_offset);
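	/*
	 * uatomic_cmpxchg() returns the offset that was current in memory:
	 * the loop above retries until this thread's reservation
	 * [prev_offset, new_offset) is published without a concurrent
	 * update racing in between.
	 */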
	*(size_t *) &static_calloc_buf[aligned_offset - sizeof(size_t)] = size;
	return &static_calloc_buf[aligned_offset];
}

static
void *static_calloc(size_t nmemb, size_t size)
{
	void *retval;

	retval = static_calloc_aligned(nmemb, size, 1);
	return retval;
}

static
void *static_malloc(size_t size)
{
	void *retval;

	retval = static_calloc_aligned(1, size, 1);
	return retval;
}

static
void static_free(void *ptr __attribute__((unused)))
{
	/* no-op. */
}

static
void *static_realloc(void *ptr, size_t size)
{
	size_t *old_size = NULL;
	void *retval;

	if (size == 0) {
		retval = NULL;
		goto end;
	}

	if (ptr) {
		old_size = (size_t *) ptr - 1;
		if (size <= *old_size) {
			/* We can re-use the old entry. */
			*old_size = size;
			retval = ptr;
			goto end;
		}
	}

	/* We need to expand. Don't free previous memory location. */
	retval = static_calloc_aligned(1, size, 1);
	assert(retval);
	if (ptr)
		memcpy(retval, ptr, *old_size);
end:
	return retval;
}

static
void *static_memalign(size_t alignment, size_t size)
{
	void *retval;

	retval = static_calloc_aligned(1, size, alignment);
	return retval;
}

static
int static_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	void *ptr;

	/* Check for power of 2, larger than void *. */
	if (alignment & (alignment - 1)
			|| alignment < sizeof(void *)
			|| alignment == 0) {
		goto end;
	}
	ptr = static_calloc_aligned(1, size, alignment);
	*memptr = ptr;
end:
	return 0;
}

static
void setup_static_allocator(void)
{
	assert(cur_alloc.calloc == NULL);
	cur_alloc.calloc = static_calloc;
	assert(cur_alloc.malloc == NULL);
	cur_alloc.malloc = static_malloc;
	assert(cur_alloc.free == NULL);
	cur_alloc.free = static_free;
	assert(cur_alloc.realloc == NULL);
	cur_alloc.realloc = static_realloc;
	assert(cur_alloc.memalign == NULL);
	cur_alloc.memalign = static_memalign;
	assert(cur_alloc.posix_memalign == NULL);
	cur_alloc.posix_memalign = static_posix_memalign;
}

static
void lookup_all_symbols(void)
{
	struct alloc_functions af;

	/*
	 * Temporarily redirect allocation functions to
	 * static_calloc_aligned, and free function to static_free
	 * (no-op), until the dlsym lookup has completed.
	 */
	setup_static_allocator();

	/* Perform the actual lookups */
	af.calloc = dlsym(RTLD_NEXT, "calloc");
	af.malloc = dlsym(RTLD_NEXT, "malloc");
	af.free = dlsym(RTLD_NEXT, "free");
	af.realloc = dlsym(RTLD_NEXT, "realloc");
	af.memalign = dlsym(RTLD_NEXT, "memalign");
	af.posix_memalign = dlsym(RTLD_NEXT, "posix_memalign");

	/* Populate the new allocator functions */
	memcpy(&cur_alloc, &af, sizeof(cur_alloc));
}
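
/*
 * The lookups are staged in a local struct so that cur_alloc keeps
 * pointing at the static allocator until every symbol has been
 * resolved; any allocation triggered by dlsym() itself is served
 * consistently from the static arena.
 */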

void *malloc(size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.malloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.malloc == NULL) {
			fprintf(stderr, "mallocwrap: unable to find malloc\n");
			abort();
		}
	}
	retval = cur_alloc.malloc(size);
	if (URCU_TLS(malloc_nesting) == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, malloc,
			size, retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}
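
/*
 * Note on the nesting counter used by all wrappers: only the outermost
 * call (nesting == 1) emits a tracepoint, so any allocation performed
 * by the tracer while handling the event is serviced without being
 * traced, which prevents unbounded recursion.
 */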

void free(void *ptr)
{
	URCU_TLS(malloc_nesting)++;
	/*
	 * Check whether the memory was allocated with
	 * static_calloc_aligned, in which case there is nothing to free.
	 */
	if (caa_unlikely((char *)ptr >= static_calloc_buf &&
			(char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
		goto end;
	}

	if (URCU_TLS(malloc_nesting) == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, free,
			ptr, LTTNG_UST_CALLER_IP());
	}

	if (cur_alloc.free == NULL) {
		lookup_all_symbols();
		if (cur_alloc.free == NULL) {
			fprintf(stderr, "mallocwrap: unable to find free\n");
			abort();
		}
	}
	cur_alloc.free(ptr);
end:
	URCU_TLS(malloc_nesting)--;
}

void *calloc(size_t nmemb, size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.calloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.calloc == NULL) {
			fprintf(stderr, "callocwrap: unable to find calloc\n");
			abort();
		}
	}
	retval = cur_alloc.calloc(nmemb, size);
	if (URCU_TLS(malloc_nesting) == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, calloc,
			nmemb, size, retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

void *realloc(void *ptr, size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	/*
	 * Check whether the memory was allocated with
	 * static_calloc_aligned, in which case there is nothing
	 * to free, and we need to copy the old data.
	 */
	if (caa_unlikely((char *)ptr >= static_calloc_buf &&
			(char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
		size_t *old_size;

		old_size = (size_t *) ptr - 1;
		if (cur_alloc.calloc == NULL) {
			lookup_all_symbols();
			if (cur_alloc.calloc == NULL) {
				fprintf(stderr, "reallocwrap: unable to find calloc\n");
				abort();
			}
		}
		retval = cur_alloc.calloc(1, size);
		if (retval) {
			memcpy(retval, ptr, *old_size);
		}
		/*
		 * Mimic that a NULL pointer has been received, so
		 * memory allocation analyses based on the trace don't
		 * get confused by the address from the static
		 * allocator.
		 */
		ptr = NULL;
		goto end;
	}

	if (cur_alloc.realloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.realloc == NULL) {
			fprintf(stderr, "reallocwrap: unable to find realloc\n");
			abort();
		}
	}
	retval = cur_alloc.realloc(ptr, size);
end:
	if (URCU_TLS(malloc_nesting) == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, realloc,
			ptr, size, retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

void *memalign(size_t alignment, size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.memalign == NULL) {
		lookup_all_symbols();
		if (cur_alloc.memalign == NULL) {
			fprintf(stderr, "memalignwrap: unable to find memalign\n");
			abort();
		}
	}
	retval = cur_alloc.memalign(alignment, size);
	if (URCU_TLS(malloc_nesting) == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, memalign,
			alignment, size, retval,
			LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

int posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.posix_memalign == NULL) {
		lookup_all_symbols();
		if (cur_alloc.posix_memalign == NULL) {
			fprintf(stderr, "posix_memalignwrap: unable to find posix_memalign\n");
			abort();
		}
	}
	retval = cur_alloc.posix_memalign(memptr, alignment, size);
	if (URCU_TLS(malloc_nesting) == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, posix_memalign,
			*memptr, alignment, size,
			retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

static
void lttng_ust_malloc_nesting_alloc_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(malloc_nesting)));
}
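
/*
 * The empty asm with an "m" input forces an access to the TLS
 * variable, so its storage is allocated up front rather than lazily
 * from within a later malloc/free call.
 */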

void lttng_ust_libc_wrapper_malloc_ctor(void)
{
	/* Initialization already done */
	if (cur_alloc.calloc) {
		return;
	}
	lttng_ust_malloc_nesting_alloc_tls();
	/*
	 * Ensure the allocator is in place before the process becomes
	 * multithreaded.
	 */
	lookup_all_symbols();
}
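
/*
 * Typical usage (a sketch): preload the wrapper so it interposes on the
 * libc allocator, e.g.:
 *
 *   LD_PRELOAD=liblttng-ust-libc-wrapper.so ./my-app
 *
 * Each malloc/free/calloc/realloc/memalign/posix_memalign call is then
 * recorded through the lttng_ust_libc tracepoint provider.
 */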