/*
 * Copyright (C) 2009 Pierre-Marc Fournier
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 */
#define _GNU_SOURCE	/* RTLD_NEXT */
#include <lttng/ust-dlfcn.h>
#include <sys/types.h>
#include <stdio.h>
#include <assert.h>
#include <string.h>	/* memcpy() */
#include <urcu/system.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
#include <urcu/tls-compat.h>
#include <urcu/arch.h>
#include <lttng/align.h>

#define TRACEPOINT_DEFINE
#define TRACEPOINT_CREATE_PROBES
#define TP_IP_PARAM ip
#include "ust_libc.h"
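/*
 * Note: this wrapper is meant to be LD_PRELOADed in front of the traced
 * application, so that the allocator wrappers defined below interpose
 * the libc symbols of the same names. A typical (illustrative)
 * invocation would be:
 *
 *   LD_PRELOAD=liblttng-ust-libc-wrapper.so ./my_app
 *
 * The exact library file name depends on how this file is built and
 * installed.
 */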
#define STATIC_CALLOC_LEN 4096
static char static_calloc_buf[STATIC_CALLOC_LEN];
static unsigned long static_calloc_buf_offset;
struct alloc_functions {
	void *(*calloc)(size_t nmemb, size_t size);
	void *(*malloc)(size_t size);
	void (*free)(void *ptr);
	void *(*realloc)(void *ptr, size_t size);
	void *(*memalign)(size_t alignment, size_t size);
	int (*posix_memalign)(void **memptr, size_t alignment, size_t size);
};

static
struct alloc_functions cur_alloc;
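/*
 * Design note: keeping all allocator entry points in a single function
 * table lets lookup_all_symbols() below fill a complete local copy with
 * dlsym() results, then publish it into cur_alloc with one memcpy(),
 * rather than flipping the function pointers one by one while other
 * threads may already be allocating through them.
 */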
/*
 * Make sure our own use of the TLS compat layer will not cause infinite
 * recursion by calling calloc.
 */

static
void *static_calloc(size_t nmemb, size_t size);
/*
 * pthread mutex replacement for URCU tls compat layer.
 */
static int ust_malloc_lock;
static __attribute__((unused))
void ust_malloc_spin_lock(pthread_mutex_t *lock)
{
	/*
	 * The memory barrier within cmpxchg takes care of ordering
	 * memory accesses with respect to the start of the critical
	 * section.
	 */
	while (uatomic_cmpxchg(&ust_malloc_lock, 0, 1) != 0)
		caa_cpu_relax();
}
static __attribute__((unused))
void ust_malloc_spin_unlock(pthread_mutex_t *lock)
{
	/*
	 * Ensure memory accesses within the critical section do not
	 * leak outside.
	 */
	cmm_smp_mb();
	uatomic_set(&ust_malloc_lock, 0);
}
#define calloc static_calloc
#define pthread_mutex_lock ust_malloc_spin_lock
#define pthread_mutex_unlock ust_malloc_spin_unlock
static DEFINE_URCU_TLS(int, malloc_nesting);
#undef pthread_mutex_unlock
#undef pthread_mutex_lock
#undef calloc
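/*
 * Note on the #define/#undef dance above: on platforms without compiler
 * TLS, DEFINE_URCU_TLS() expands to code that calls calloc() and the
 * pthread mutex primitives. Redefining those names for the duration of
 * the expansion makes the malloc_nesting TLS variable itself use the
 * recursion-safe static allocator and spinlock, so accessing the
 * nesting counter can never re-enter the wrappers it protects. The
 * counter tracks per-thread wrapper re-entrancy: each public wrapper
 * increments it on entry, emits its tracepoint only at nesting level 1,
 * and decrements it on exit, so allocations performed internally (e.g.
 * by the tracepoint probe itself) are not traced.
 */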
/*
 * Static allocator to use when initially executing dlsym(). It keeps a
 * size_t value of each object size prior to the object.
 */
static
void *static_calloc_aligned(size_t nmemb, size_t size, size_t alignment)
{
	size_t prev_offset, new_offset, res_offset, aligned_offset;

	if (nmemb * size == 0) {
		return NULL;
	}

	/*
	 * Protect static_calloc_buf_offset from concurrent updates
	 * using a cmpxchg loop rather than a mutex to remove a
	 * dependency on pthread. This will minimize the risk of bad
	 * interaction between mutex and malloc instrumentation.
	 */
	res_offset = CMM_LOAD_SHARED(static_calloc_buf_offset);
	do {
		prev_offset = res_offset;
		aligned_offset = ALIGN(prev_offset + sizeof(size_t), alignment);
		new_offset = aligned_offset + nmemb * size;
		if (new_offset > sizeof(static_calloc_buf)) {
			abort();
		}
	} while ((res_offset = uatomic_cmpxchg(&static_calloc_buf_offset,
			prev_offset, new_offset)) != prev_offset);
	*(size_t *) &static_calloc_buf[aligned_offset - sizeof(size_t)] = size;
	return &static_calloc_buf[aligned_offset];
}
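/*
 * Resulting buffer layout for one allocation (sketch):
 *
 *   static_calloc_buf
 *   | ...earlier objects... | pad | size_t size | object (nmemb * size) |
 *                                 ^             ^
 *                                 |             aligned_offset (returned)
 *                                 aligned_offset - sizeof(size_t)
 *
 * This bump allocator never frees: static_free() is a no-op and
 * static_realloc() allocates a fresh slot when growing. Since the
 * buffer has static storage duration and is never recycled, it is
 * already zeroed, so calloc's zeroing semantics come for free.
 */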
static
void *static_calloc(size_t nmemb, size_t size)
{
	void *retval;

	retval = static_calloc_aligned(nmemb, size, 1);
	return retval;
}
static
void *static_malloc(size_t size)
{
	void *retval;

	retval = static_calloc_aligned(1, size, 1);
	return retval;
}
static
void static_free(void *ptr)
{
	/* no-op. */
}
static
void *static_realloc(void *ptr, size_t size)
{
	size_t *old_size = NULL;
	void *retval;

	if (size == 0) {
		retval = NULL;
		goto end;
	}

	if (ptr) {
		old_size = (size_t *) ptr - 1;
		if (size <= *old_size) {
			/* We can re-use the old entry. */
			*old_size = size;
			retval = ptr;
			goto end;
		}
	}
	/* We need to expand. Don't free previous memory location. */
	retval = static_calloc_aligned(1, size, 1);
	assert(retval);
	if (ptr)
		memcpy(retval, ptr, *old_size);
end:
	return retval;
}
static
void *static_memalign(size_t alignment, size_t size)
{
	void *retval;

	retval = static_calloc_aligned(1, size, alignment);
	return retval;
}
static
int static_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	void *ptr;

	/* Check for power of 2, larger than void *. */
	if (alignment & (alignment - 1)
			|| alignment < sizeof(void *)
			|| alignment == 0) {
		goto end;
	}
	ptr = static_calloc_aligned(1, size, alignment);
	*memptr = ptr;
end:
	return 0;
}
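/*
 * Note: unlike the POSIX specification, this bootstrap version returns
 * 0 even when the alignment check fails (leaving *memptr untouched).
 * That is tolerable only because it services the short dlsym()
 * bootstrap window; the real posix_memalign() takes over as soon as
 * lookup_all_symbols() completes.
 */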
static
void setup_static_allocator(void)
{
	assert(cur_alloc.calloc == NULL);
	cur_alloc.calloc = static_calloc;
	assert(cur_alloc.malloc == NULL);
	cur_alloc.malloc = static_malloc;
	assert(cur_alloc.free == NULL);
	cur_alloc.free = static_free;
	assert(cur_alloc.realloc == NULL);
	cur_alloc.realloc = static_realloc;
	assert(cur_alloc.memalign == NULL);
	cur_alloc.memalign = static_memalign;
	assert(cur_alloc.posix_memalign == NULL);
	cur_alloc.posix_memalign = static_posix_memalign;
}
static
void lookup_all_symbols(void)
{
	struct alloc_functions af;

	/*
	 * Temporarily redirect allocation functions to
	 * static_calloc_aligned, and free function to static_free
	 * (no-op), until the dlsym lookup has completed.
	 */
	setup_static_allocator();

	/* Perform the actual lookups */
	af.calloc = dlsym(RTLD_NEXT, "calloc");
	af.malloc = dlsym(RTLD_NEXT, "malloc");
	af.free = dlsym(RTLD_NEXT, "free");
	af.realloc = dlsym(RTLD_NEXT, "realloc");
	af.memalign = dlsym(RTLD_NEXT, "memalign");
	af.posix_memalign = dlsym(RTLD_NEXT, "posix_memalign");

	/* Populate the new allocator functions */
	memcpy(&cur_alloc, &af, sizeof(cur_alloc));
}
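/*
 * RTLD_NEXT asks the dynamic linker for the next occurrence of each
 * symbol in the lookup order after this library, which resolves to the
 * real libc implementations when this wrapper is LD_PRELOADed. While
 * those dlsym() calls run, dlsym() itself may allocate; such requests
 * are served by the static_* bootstrap functions installed just above,
 * which is what breaks the chicken-and-egg cycle.
 */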
void *malloc(size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.malloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.malloc == NULL) {
			fprintf(stderr, "mallocwrap: unable to find malloc\n");
			abort();
		}
	}
	retval = cur_alloc.malloc(size);
	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, malloc,
			size, retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}
void free(void *ptr)
{
	URCU_TLS(malloc_nesting)++;
	/*
	 * Check whether the memory was allocated with
	 * static_calloc_aligned, in which case there is nothing to free.
	 */
	if (caa_unlikely((char *)ptr >= static_calloc_buf &&
			(char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
		goto end;
	}

	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, free,
			ptr, LTTNG_UST_CALLER_IP());
	}

	if (cur_alloc.free == NULL) {
		lookup_all_symbols();
		if (cur_alloc.free == NULL) {
			fprintf(stderr, "mallocwrap: unable to find free\n");
			abort();
		}
	}
	cur_alloc.free(ptr);
end:
	URCU_TLS(malloc_nesting)--;
}
void *calloc(size_t nmemb, size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.calloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.calloc == NULL) {
			fprintf(stderr, "callocwrap: unable to find calloc\n");
			abort();
		}
	}
	retval = cur_alloc.calloc(nmemb, size);
	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, calloc,
			nmemb, size, retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}
void *realloc(void *ptr, size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	/*
	 * Check whether the memory was allocated with
	 * static_calloc_aligned, in which case there is nothing
	 * to free, and we need to copy the old data.
	 */
	if (caa_unlikely((char *)ptr >= static_calloc_buf &&
			(char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
		size_t *old_size;

		old_size = (size_t *) ptr - 1;
		if (cur_alloc.calloc == NULL) {
			lookup_all_symbols();
			if (cur_alloc.calloc == NULL) {
				fprintf(stderr, "reallocwrap: unable to find calloc\n");
				abort();
			}
		}
		retval = cur_alloc.calloc(1, size);
		if (retval) {
			memcpy(retval, ptr, *old_size);
		}
		/*
		 * Mimic that a NULL pointer has been received, so
		 * memory allocation analysis based on the trace doesn't
		 * get confused by the address from the static
		 * allocator.
		 */
		ptr = NULL;
		goto end;
	}

	if (cur_alloc.realloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.realloc == NULL) {
			fprintf(stderr, "reallocwrap: unable to find realloc\n");
			abort();
		}
	}
	retval = cur_alloc.realloc(ptr, size);
end:
	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, realloc,
			ptr, size, retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}
void *memalign(size_t alignment, size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.memalign == NULL) {
		lookup_all_symbols();
		if (cur_alloc.memalign == NULL) {
			fprintf(stderr, "memalignwrap: unable to find memalign\n");
			abort();
		}
	}
	retval = cur_alloc.memalign(alignment, size);
	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, memalign,
			alignment, size, retval,
			LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}
int posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.posix_memalign == NULL) {
		lookup_all_symbols();
		if (cur_alloc.posix_memalign == NULL) {
			fprintf(stderr, "posix_memalignwrap: unable to find posix_memalign\n");
			abort();
		}
	}
	retval = cur_alloc.posix_memalign(memptr, alignment, size);
	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, posix_memalign,
			*memptr, alignment, size,
			retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}
__attribute__((constructor))
void lttng_ust_malloc_wrapper_init(void)
{
	/* Initialization already done */
	if (cur_alloc.calloc) {
		return;
	}
	/*
	 * Ensure the allocator is in place before the process becomes
	 * multithreaded.
	 */
	lookup_all_symbols();
}
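/*
 * Usage sketch (assuming default event names from the ust_libc
 * tracepoint provider; adjust library and session names to your
 * installation):
 *
 *   lttng create malloc-session
 *   lttng enable-event -u 'lttng_ust_libc:*'
 *   lttng start
 *   LD_PRELOAD=liblttng-ust-libc-wrapper.so ./my_app
 *   lttng stop && lttng view
 *
 * Each outermost malloc/free/calloc/realloc/memalign/posix_memalign
 * call in my_app then appears as a lttng_ust_libc:* event carrying its
 * arguments, return value and caller instruction pointer.
 */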