/*
 * SPDX-License-Identifier: LGPL-2.1-or-later
 *
 * Copyright (C) 2009 Pierre-Marc Fournier
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

/*
 * Do _not_ define _LGPL_SOURCE because we don't want to create a
 * circular dependency loop between this malloc wrapper, liburcu and
 * libc.
 */
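
/*
 * Usage sketch: preload this wrapper in front of libc so that the
 * definitions of malloc/free/calloc/realloc/memalign/posix_memalign
 * below shadow the libc ones and emit a tracepoint before delegating
 * to the real allocator, e.g. (library name assuming a standard
 * lttng-ust install):
 *
 *   LD_PRELOAD=liblttng-ust-libc-wrapper.so ./app
 */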

/* Has to be included first to override dlfcn.h */
#include <common/compat/dlfcn.h>

#include <sys/types.h>
#include <stdio.h>
#include <assert.h>
#include <string.h>	/* memcpy() */
#include <malloc.h>
#include <errno.h>	/* EINVAL */

#include <urcu/system.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
#include <urcu/tls-compat.h>
#include <urcu/arch.h>

#include <lttng/ust-libc-wrapper.h>

#include "common/macros.h"
#include "common/align.h"

#define LTTNG_UST_TRACEPOINT_HIDDEN_DEFINITION
#define LTTNG_UST_TRACEPOINT_PROVIDER_HIDDEN_DEFINITION

#define LTTNG_UST_TRACEPOINT_DEFINE
#define LTTNG_UST_TRACEPOINT_CREATE_PROBES
#define LTTNG_UST_TP_IP_PARAM ip
#include "ust_libc.h"

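/*
 * Bootstrap buffer served by static_calloc_aligned() while dlsym() is
 * resolving the real allocator symbols: dlsym() itself may call
 * calloc(), which must not recurse into the wrapper being resolved.
 * Static storage is zero-initialized, which preserves calloc()
 * semantics; bootstrap memory is never reclaimed.
 */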
#define STATIC_CALLOC_LEN 4096
static char static_calloc_buf[STATIC_CALLOC_LEN];
static unsigned long static_calloc_buf_offset;

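/*
 * Currently active allocator functions: initially the static_*
 * bootstrap implementations below, then the real libc symbols once
 * lookup_all_symbols() has resolved them with dlsym(RTLD_NEXT, ...).
 */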
struct alloc_functions {
	void *(*calloc)(size_t nmemb, size_t size);
	void *(*malloc)(size_t size);
	void (*free)(void *ptr);
	void *(*realloc)(void *ptr, size_t size);
	void *(*memalign)(size_t alignment, size_t size);
	int (*posix_memalign)(void **memptr, size_t alignment, size_t size);
};

static
struct alloc_functions cur_alloc;

/*
 * Make sure our own use of the TLS compat layer will not cause infinite
 * recursion by calling calloc.
 */

static
void *static_calloc(size_t nmemb, size_t size);

/*
 * pthread mutex replacement for the URCU TLS compat layer.
 */
static int ust_malloc_lock;

static
void ust_malloc_spin_lock(pthread_mutex_t *lock)
	__attribute__((unused));
static
void ust_malloc_spin_lock(pthread_mutex_t *lock __attribute__((unused)))
{
	/*
	 * The memory barrier within cmpxchg takes care of ordering
	 * memory accesses with respect to the start of the critical
	 * section.
	 */
	while (uatomic_cmpxchg(&ust_malloc_lock, 0, 1) != 0)
		caa_cpu_relax();
}

static
void ust_malloc_spin_unlock(pthread_mutex_t *lock)
	__attribute__((unused));
static
void ust_malloc_spin_unlock(pthread_mutex_t *lock __attribute__((unused)))
{
	/*
	 * Ensure memory accesses within the critical section do not
	 * leak outside.
	 */
	cmm_smp_mb();
	uatomic_set(&ust_malloc_lock, 0);
}

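/*
 * malloc_nesting is a per-thread reentrancy counter: only the
 * outermost allocator call emits a tracepoint, so allocations made
 * internally by the tracing machinery are not themselves traced.
 * While defining it through DEFINE_URCU_TLS, remap calloc to the
 * static bootstrap allocator and the pthread mutex calls to the
 * spinlock above, so the URCU TLS compat layer can neither recurse
 * into the instrumented calloc nor pull in a pthread dependency.
 */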
#define calloc static_calloc
#define pthread_mutex_lock ust_malloc_spin_lock
#define pthread_mutex_unlock ust_malloc_spin_unlock
static DEFINE_URCU_TLS(int, malloc_nesting);
#undef pthread_mutex_unlock
#undef pthread_mutex_lock
#undef calloc

/*
 * Static allocator to use when initially executing dlsym(). It keeps a
 * size_t value of each object size prior to the object.
 */
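/*
 * Resulting buffer layout for each request (sketch):
 *
 *   ... | pad | size_t size | object ('alignment'-aligned) | ...
 *                           ^--- pointer returned to the caller
 */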
static
void *static_calloc_aligned(size_t nmemb, size_t size, size_t alignment)
{
	size_t prev_offset, new_offset, res_offset, aligned_offset;

	if (nmemb * size == 0) {
		return NULL;
	}

	/*
	 * Protect static_calloc_buf_offset from concurrent updates
	 * using a cmpxchg loop rather than a mutex to remove a
	 * dependency on pthread. This will minimize the risk of bad
	 * interaction between mutex and malloc instrumentation.
	 */
	res_offset = CMM_LOAD_SHARED(static_calloc_buf_offset);
	do {
		prev_offset = res_offset;
		aligned_offset = LTTNG_UST_ALIGN(prev_offset + sizeof(size_t), alignment);
		new_offset = aligned_offset + nmemb * size;
		if (new_offset > sizeof(static_calloc_buf)) {
			abort();
		}
	} while ((res_offset = uatomic_cmpxchg(&static_calloc_buf_offset,
			prev_offset, new_offset)) != prev_offset);
	*(size_t *) &static_calloc_buf[aligned_offset - sizeof(size_t)] = size;
	return &static_calloc_buf[aligned_offset];
}

static
void *static_calloc(size_t nmemb, size_t size)
{
	void *retval;

	retval = static_calloc_aligned(nmemb, size, 1);
	return retval;
}

static
void *static_malloc(size_t size)
{
	void *retval;

	retval = static_calloc_aligned(1, size, 1);
	return retval;
}

static
void static_free(void *ptr __attribute__((unused)))
{
	/* no-op. */
}

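/*
 * Grow-only realloc over the bootstrap buffer: the size_t header that
 * static_calloc_aligned() stores in front of each object tells how
 * many bytes to copy when the entry cannot be reused in place. The
 * previous location is intentionally leaked (static_free is a no-op).
 */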
static
void *static_realloc(void *ptr, size_t size)
{
	size_t *old_size = NULL;
	void *retval;

	if (size == 0) {
		retval = NULL;
		goto end;
	}

	if (ptr) {
		old_size = (size_t *) ptr - 1;
		if (size <= *old_size) {
			/* We can re-use the old entry. */
			*old_size = size;
			retval = ptr;
			goto end;
		}
	}
	/* We need to expand. Don't free previous memory location. */
	retval = static_calloc_aligned(1, size, 1);
	assert(retval);
	if (ptr)
		memcpy(retval, ptr, *old_size);
end:
	return retval;
}

static
void *static_memalign(size_t alignment, size_t size)
{
	void *retval;

	retval = static_calloc_aligned(1, size, alignment);
	return retval;
}

static
int static_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	void *ptr;

	/* Alignment must be a power of 2, at least as large as a void *. */
	if (alignment & (alignment - 1)
			|| alignment < sizeof(void *)
			|| alignment == 0) {
		return EINVAL;
	}
	ptr = static_calloc_aligned(1, size, alignment);
	*memptr = ptr;
	return 0;
}

static
void setup_static_allocator(void)
{
	assert(cur_alloc.calloc == NULL);
	cur_alloc.calloc = static_calloc;
	assert(cur_alloc.malloc == NULL);
	cur_alloc.malloc = static_malloc;
	assert(cur_alloc.free == NULL);
	cur_alloc.free = static_free;
	assert(cur_alloc.realloc == NULL);
	cur_alloc.realloc = static_realloc;
	assert(cur_alloc.memalign == NULL);
	cur_alloc.memalign = static_memalign;
	assert(cur_alloc.posix_memalign == NULL);
	cur_alloc.posix_memalign = static_posix_memalign;
}

static
void lookup_all_symbols(void)
{
	struct alloc_functions af;

	/*
	 * Temporarily redirect allocation functions to
	 * static_calloc_aligned, and free function to static_free
	 * (no-op), until the dlsym lookup has completed.
	 */
	setup_static_allocator();

	/* Perform the actual lookups. */
	af.calloc = dlsym(RTLD_NEXT, "calloc");
	af.malloc = dlsym(RTLD_NEXT, "malloc");
	af.free = dlsym(RTLD_NEXT, "free");
	af.realloc = dlsym(RTLD_NEXT, "realloc");
	af.memalign = dlsym(RTLD_NEXT, "memalign");
	af.posix_memalign = dlsym(RTLD_NEXT, "posix_memalign");

	/* Populate the new allocator functions. */
	memcpy(&cur_alloc, &af, sizeof(cur_alloc));
}

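/*
 * Pattern shared by all public wrappers below:
 *  1) increment the per-thread nesting count;
 *  2) lazily resolve the real symbol on first use (abort if the
 *     lookup fails);
 *  3) call the real function;
 *  4) emit the tracepoint only at nesting level 1, with the caller's
 *     instruction pointer;
 *  5) decrement the nesting count.
 */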
void *malloc(size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.malloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.malloc == NULL) {
			fprintf(stderr, "mallocwrap: unable to find malloc\n");
			abort();
		}
	}
	retval = cur_alloc.malloc(size);
	if (URCU_TLS(malloc_nesting) == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, malloc,
			size, retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

void free(void *ptr)
{
	URCU_TLS(malloc_nesting)++;
	/*
	 * Check whether the memory was allocated with
	 * static_calloc_aligned, in which case there is nothing to free.
	 */
	if (caa_unlikely((char *)ptr >= static_calloc_buf &&
			(char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
		goto end;
	}

	if (URCU_TLS(malloc_nesting) == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, free,
			ptr, LTTNG_UST_CALLER_IP());
	}

	if (cur_alloc.free == NULL) {
		lookup_all_symbols();
		if (cur_alloc.free == NULL) {
			fprintf(stderr, "mallocwrap: unable to find free\n");
			abort();
		}
	}
	cur_alloc.free(ptr);
end:
	URCU_TLS(malloc_nesting)--;
}

void *calloc(size_t nmemb, size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.calloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.calloc == NULL) {
			fprintf(stderr, "callocwrap: unable to find calloc\n");
			abort();
		}
	}
	retval = cur_alloc.calloc(nmemb, size);
	if (URCU_TLS(malloc_nesting) == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, calloc,
			nmemb, size, retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

void *realloc(void *ptr, size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	/*
	 * Check whether the memory was allocated with
	 * static_calloc_aligned, in which case there is nothing
	 * to free, and we need to copy the old data.
	 */
	if (caa_unlikely((char *)ptr >= static_calloc_buf &&
			(char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
		size_t *old_size;

		old_size = (size_t *) ptr - 1;
		if (cur_alloc.calloc == NULL) {
			lookup_all_symbols();
			if (cur_alloc.calloc == NULL) {
				fprintf(stderr, "reallocwrap: unable to find calloc\n");
				abort();
			}
		}
		retval = cur_alloc.calloc(1, size);
		if (retval) {
			memcpy(retval, ptr, *old_size);
		}
		/*
		 * Pretend that a NULL pointer has been received, so
		 * that memory allocation analysis based on the trace
		 * doesn't get confused by the address from the static
		 * allocator.
		 */
		ptr = NULL;
		goto end;
	}

	if (cur_alloc.realloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.realloc == NULL) {
			fprintf(stderr, "reallocwrap: unable to find realloc\n");
			abort();
		}
	}
	retval = cur_alloc.realloc(ptr, size);
end:
	if (URCU_TLS(malloc_nesting) == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, realloc,
			ptr, size, retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

void *memalign(size_t alignment, size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.memalign == NULL) {
		lookup_all_symbols();
		if (cur_alloc.memalign == NULL) {
			fprintf(stderr, "memalignwrap: unable to find memalign\n");
			abort();
		}
	}
	retval = cur_alloc.memalign(alignment, size);
	if (URCU_TLS(malloc_nesting) == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, memalign,
			alignment, size, retval,
			LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

int posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.posix_memalign == NULL) {
		lookup_all_symbols();
		if (cur_alloc.posix_memalign == NULL) {
			fprintf(stderr, "posix_memalignwrap: unable to find posix_memalign\n");
			abort();
		}
	}
	retval = cur_alloc.posix_memalign(memptr, alignment, size);
	if (URCU_TLS(malloc_nesting) == 1) {
		lttng_ust_tracepoint(lttng_ust_libc, posix_memalign,
			*memptr, alignment, size,
			retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

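/*
 * Touching the TLS variable from an asm statement the compiler cannot
 * optimize away forces it (and the URCU TLS compat layer backing it)
 * to be allocated now, at constructor time, rather than on first use
 * inside a wrapper.
 */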
static
void lttng_ust_malloc_nesting_alloc_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(malloc_nesting)));
}

void lttng_ust_libc_wrapper_malloc_ctor(void)
{
	/* Initialization already done. */
	if (cur_alloc.calloc) {
		return;
	}
	lttng_ust_malloc_nesting_alloc_tls();
	/*
	 * Ensure the allocator is in place before the process becomes
	 * multithreaded.
	 */
	lookup_all_symbols();
}
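
/*
 * Example tracing session (a sketch; exact lttng-tools invocations may
 * vary by version, and 'app' stands in for the traced program):
 *
 *   lttng create
 *   lttng enable-event --userspace 'lttng_ust_libc:*'
 *   lttng start
 *   LD_PRELOAD=liblttng-ust-libc-wrapper.so ./app
 *   lttng stop
 *   lttng view
 */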