/*
 * SPDX-License-Identifier: LGPL-2.1-or-later
 *
 * Copyright (C) 2009 Pierre-Marc Fournier
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

/*
 * Do _not_ define _LGPL_SOURCE because we don't want to create a
 * circular dependency loop between this malloc wrapper, liburcu and
 * libc.
 */
#include <lttng/ust-dlfcn.h>
#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>	/* abort() */
#include <string.h>	/* memcpy() */
#include <pthread.h>	/* pthread_mutex_t */
#include <assert.h>
#include <urcu/system.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
#include <urcu/tls-compat.h>
#include <urcu/arch.h>
#include <lttng/align.h>
#include <helper.h>

#define TRACEPOINT_DEFINE
#define TRACEPOINT_CREATE_PROBES
#define TP_IP_PARAM ip
#include "ust_libc.h"

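/*
 * Usage note: this wrapper is typically activated by preloading it into
 * the traced application, e.g.:
 *
 *   LD_PRELOAD=liblttng-ust-libc-wrapper.so ./app
 *
 * so that the malloc/free/calloc/... definitions below interpose on the
 * libc symbols, while the real implementations are resolved lazily with
 * dlsym(RTLD_NEXT, ...).
 */
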
#define STATIC_CALLOC_LEN 4096
static char static_calloc_buf[STATIC_CALLOC_LEN];
static unsigned long static_calloc_buf_offset;

struct alloc_functions {
	void *(*calloc)(size_t nmemb, size_t size);
	void *(*malloc)(size_t size);
	void (*free)(void *ptr);
	void *(*realloc)(void *ptr, size_t size);
	void *(*memalign)(size_t alignment, size_t size);
	int (*posix_memalign)(void **memptr, size_t alignment, size_t size);
};

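/*
 * cur_alloc has static storage duration, so its function pointers start
 * out NULL: each wrapper below treats a NULL pointer as "not yet
 * resolved" and triggers the dlsym lookup.
 */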
static
struct alloc_functions cur_alloc;

/*
 * Make sure our own use of the TLS compat layer will not cause infinite
 * recursion by calling calloc.
 */

static
void *static_calloc(size_t nmemb, size_t size);

/*
 * pthread mutex replacement for the URCU TLS compat layer.
 */
static int ust_malloc_lock;

static __attribute__((unused))
void ust_malloc_spin_lock(pthread_mutex_t *lock)
{
	/*
	 * The memory barrier within cmpxchg takes care of ordering
	 * memory accesses with respect to the start of the critical
	 * section.
	 */
	while (uatomic_cmpxchg(&ust_malloc_lock, 0, 1) != 0)
		caa_cpu_relax();
}

static __attribute__((unused))
void ust_malloc_spin_unlock(pthread_mutex_t *lock)
{
	/*
	 * Ensure memory accesses within the critical section do not
	 * leak outside.
	 */
	cmm_smp_mb();
	uatomic_set(&ust_malloc_lock, 0);
}

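/*
 * The defines below only affect the DEFINE_URCU_TLS expansion that
 * immediately follows: the TLS compat layer's internal calloc calls are
 * redirected to the static allocator, and its pthread mutex calls to
 * the spinlock above, so allocating the TLS nesting counter can never
 * recurse into the wrapped allocator.
 */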
#define calloc static_calloc
#define pthread_mutex_lock ust_malloc_spin_lock
#define pthread_mutex_unlock ust_malloc_spin_unlock
static DEFINE_URCU_TLS(int, malloc_nesting);
#undef pthread_mutex_unlock
#undef pthread_mutex_lock
#undef calloc

/*
 * Static allocator to use when initially executing dlsym(). It stores
 * the size of each allocation in a size_t word placed just before the
 * object.
 */
static
void *static_calloc_aligned(size_t nmemb, size_t size, size_t alignment)
{
	size_t prev_offset, new_offset, res_offset, aligned_offset;

	if (nmemb * size == 0) {
		return NULL;
	}

	/*
	 * Protect static_calloc_buf_offset from concurrent updates
	 * using a cmpxchg loop rather than a mutex to remove a
	 * dependency on pthread. This will minimize the risk of bad
	 * interaction between mutex and malloc instrumentation.
	 */
	res_offset = CMM_LOAD_SHARED(static_calloc_buf_offset);
	do {
		prev_offset = res_offset;
		aligned_offset = LTTNG_UST_ALIGN(prev_offset + sizeof(size_t), alignment);
		new_offset = aligned_offset + nmemb * size;
		if (new_offset > sizeof(static_calloc_buf)) {
			abort();
		}
	} while ((res_offset = uatomic_cmpxchg(&static_calloc_buf_offset,
			prev_offset, new_offset)) != prev_offset);
	/* Record the full allocation size so static_realloc() can copy it. */
	*(size_t *) &static_calloc_buf[aligned_offset - sizeof(size_t)] = nmemb * size;
	return &static_calloc_buf[aligned_offset];
}
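
/*
 * Layout produced in static_calloc_buf by static_calloc_aligned(), with
 * the allocation size recorded in the size_t word just before the
 * object:
 *
 *   ... | size_t size | object (aligned) | ...
 *                       ^ returned pointer
 */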

static
void *static_calloc(size_t nmemb, size_t size)
{
	void *retval;

	retval = static_calloc_aligned(nmemb, size, 1);
	return retval;
}

static
void *static_malloc(size_t size)
{
	void *retval;

	retval = static_calloc_aligned(1, size, 1);
	return retval;
}

static
void static_free(void *ptr)
{
	/* no-op. */
}

static
void *static_realloc(void *ptr, size_t size)
{
	size_t *old_size = NULL;
	void *retval;

	if (size == 0) {
		retval = NULL;
		goto end;
	}

	if (ptr) {
		old_size = (size_t *) ptr - 1;
		if (size <= *old_size) {
			/* We can re-use the old entry. */
			*old_size = size;
			retval = ptr;
			goto end;
		}
	}
	/* We need to expand. Don't free previous memory location. */
	retval = static_calloc_aligned(1, size, 1);
	assert(retval);
	if (ptr)
		memcpy(retval, ptr, *old_size);
end:
	return retval;
}

static
void *static_memalign(size_t alignment, size_t size)
{
	void *retval;

	retval = static_calloc_aligned(1, size, alignment);
	return retval;
}

static
int static_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	void *ptr;

	/* Check that alignment is a power of 2 at least sizeof(void *). */
	if (alignment & (alignment - 1)
			|| alignment < sizeof(void *)
			|| alignment == 0) {
		goto end;
	}
	ptr = static_calloc_aligned(1, size, alignment);
	*memptr = ptr;
end:
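	/*
	 * Note: posix_memalign(3) would return EINVAL for an invalid
	 * alignment; this bootstrap fallback instead reports success
	 * unconditionally and leaves *memptr untouched in that case.
	 */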
	return 0;
}

static
void setup_static_allocator(void)
{
	assert(cur_alloc.calloc == NULL);
	cur_alloc.calloc = static_calloc;
	assert(cur_alloc.malloc == NULL);
	cur_alloc.malloc = static_malloc;
	assert(cur_alloc.free == NULL);
	cur_alloc.free = static_free;
	assert(cur_alloc.realloc == NULL);
	cur_alloc.realloc = static_realloc;
	assert(cur_alloc.memalign == NULL);
	cur_alloc.memalign = static_memalign;
	assert(cur_alloc.posix_memalign == NULL);
	cur_alloc.posix_memalign = static_posix_memalign;
}

static
void lookup_all_symbols(void)
{
	struct alloc_functions af;

	/*
	 * Temporarily redirect allocation functions to
	 * static_calloc_aligned, and free function to static_free
	 * (no-op), until the dlsym lookup has completed.
	 */
	setup_static_allocator();
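
	/*
	 * Note: dlsym() itself may allocate (glibc can call calloc()
	 * internally), which is why the static bootstrap allocator must
	 * already be installed at this point.
	 */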

	/* Perform the actual lookups. */
	af.calloc = dlsym(RTLD_NEXT, "calloc");
	af.malloc = dlsym(RTLD_NEXT, "malloc");
	af.free = dlsym(RTLD_NEXT, "free");
	af.realloc = dlsym(RTLD_NEXT, "realloc");
	af.memalign = dlsym(RTLD_NEXT, "memalign");
	af.posix_memalign = dlsym(RTLD_NEXT, "posix_memalign");

	/* Populate the new allocator functions. */
	memcpy(&cur_alloc, &af, sizeof(cur_alloc));
}
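
/*
 * The malloc_nesting TLS counter guards against recursion: tracepoint
 * probes may themselves call the allocator, and only the outermost call
 * (nesting level 1) emits an event.
 */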
void *malloc(size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.malloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.malloc == NULL) {
			fprintf(stderr, "mallocwrap: unable to find malloc\n");
			abort();
		}
	}
	retval = cur_alloc.malloc(size);
	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, malloc,
			size, retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

void free(void *ptr)
{
	URCU_TLS(malloc_nesting)++;
	/*
	 * Check whether the memory was allocated with
	 * static_calloc_aligned, in which case there is nothing to free.
	 */
	if (caa_unlikely((char *)ptr >= static_calloc_buf &&
			(char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
		goto end;
	}

	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, free,
			ptr, LTTNG_UST_CALLER_IP());
	}

	if (cur_alloc.free == NULL) {
		lookup_all_symbols();
		if (cur_alloc.free == NULL) {
			fprintf(stderr, "mallocwrap: unable to find free\n");
			abort();
		}
	}
	cur_alloc.free(ptr);
end:
	URCU_TLS(malloc_nesting)--;
}

void *calloc(size_t nmemb, size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.calloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.calloc == NULL) {
			fprintf(stderr, "callocwrap: unable to find calloc\n");
			abort();
		}
	}
	retval = cur_alloc.calloc(nmemb, size);
	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, calloc,
			nmemb, size, retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

void *realloc(void *ptr, size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	/*
	 * Check whether the memory was allocated with
	 * static_calloc_aligned, in which case there is nothing
	 * to free, and we need to copy the old data.
	 */
	if (caa_unlikely((char *)ptr >= static_calloc_buf &&
			(char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
		size_t *old_size;

		old_size = (size_t *) ptr - 1;
		if (cur_alloc.calloc == NULL) {
			lookup_all_symbols();
			if (cur_alloc.calloc == NULL) {
				fprintf(stderr, "reallocwrap: unable to find calloc\n");
				abort();
			}
		}
		retval = cur_alloc.calloc(1, size);
		if (retval) {
			/* Copy at most the new size to avoid overflowing the new block. */
			memcpy(retval, ptr, size < *old_size ? size : *old_size);
		}
		/*
		 * Mimic that a NULL pointer has been received, so
		 * memory allocation analysis based on the trace doesn't
		 * get confused by the address from the static
		 * allocator.
		 */
		ptr = NULL;
		goto end;
	}

	if (cur_alloc.realloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.realloc == NULL) {
			fprintf(stderr, "reallocwrap: unable to find realloc\n");
			abort();
		}
	}
	retval = cur_alloc.realloc(ptr, size);
end:
	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, realloc,
			ptr, size, retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

void *memalign(size_t alignment, size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.memalign == NULL) {
		lookup_all_symbols();
		if (cur_alloc.memalign == NULL) {
			fprintf(stderr, "memalignwrap: unable to find memalign\n");
			abort();
		}
	}
	retval = cur_alloc.memalign(alignment, size);
	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, memalign,
			alignment, size, retval,
			LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

int posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.posix_memalign == NULL) {
		lookup_all_symbols();
		if (cur_alloc.posix_memalign == NULL) {
			fprintf(stderr, "posix_memalignwrap: unable to find posix_memalign\n");
			abort();
		}
	}
	retval = cur_alloc.posix_memalign(memptr, alignment, size);
	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, posix_memalign,
			*memptr, alignment, size,
			retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}
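
/*
 * Touching the TLS variable through the asm "m" constraint forces the
 * compiler and runtime to allocate and initialize malloc_nesting up
 * front, so lazy TLS allocation does not itself call into the allocator
 * at an unsafe time.
 */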
static
void lttng_ust_fixup_malloc_nesting_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(malloc_nesting)));
}

__attribute__((constructor))
void lttng_ust_malloc_wrapper_init(void)
{
	/* Initialization already done. */
	if (cur_alloc.calloc) {
		return;
	}
	lttng_ust_fixup_malloc_nesting_tls();
	/*
	 * Ensure the allocator is in place before the process becomes
	 * multithreaded.
	 */
	lookup_all_symbols();
}