/*
 * Copyright (C) 2009 Pierre-Marc Fournier
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * Do _not_ define _LGPL_SOURCE because we don't want to create a
 * circular dependency loop between this malloc wrapper, liburcu and
 * libc.
 */
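
/*
 * Typical usage (a sketch; see the lttng-ust documentation for the
 * authoritative instructions) is to preload this wrapper so it
 * interposes on the libc allocator entry points:
 *
 *   LD_PRELOAD=liblttng-ust-libc-wrapper.so ./my-app
 *
 * Each outermost allocator call is then recorded under the
 * lttng_ust_libc:* tracepoints defined in ust_libc.h
 * (e.g. lttng_ust_libc:malloc).
 */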
#include <lttng/ust-dlfcn.h>
#include <sys/types.h>
#include <stdio.h>
#include <string.h>	/* memcpy() is used below */
#include <assert.h>
#include <urcu/system.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
#include <urcu/tls-compat.h>
#include <urcu/arch.h>
#include <lttng/align.h>
#include <helper.h>

#define TRACEPOINT_DEFINE
#define TRACEPOINT_CREATE_PROBES
#define TP_IP_PARAM ip
#include "ust_libc.h"

#define STATIC_CALLOC_LEN 4096
static char static_calloc_buf[STATIC_CALLOC_LEN];
static unsigned long static_calloc_buf_offset;

struct alloc_functions {
	void *(*calloc)(size_t nmemb, size_t size);
	void *(*malloc)(size_t size);
	void (*free)(void *ptr);
	void *(*realloc)(void *ptr, size_t size);
	void *(*memalign)(size_t alignment, size_t size);
	int (*posix_memalign)(void **memptr, size_t alignment, size_t size);
};

static
struct alloc_functions cur_alloc;

/*
 * Make sure our own use of the TLS compat layer will not cause infinite
 * recursion by calling calloc.
 */

static
void *static_calloc(size_t nmemb, size_t size);

/*
 * pthread mutex replacement for URCU tls compat layer.
 */
static int ust_malloc_lock;

static __attribute__((unused))
void ust_malloc_spin_lock(pthread_mutex_t *lock)
{
	/*
	 * The memory barrier within cmpxchg takes care of ordering
	 * memory accesses with respect to the start of the critical
	 * section.
	 */
	while (uatomic_cmpxchg(&ust_malloc_lock, 0, 1) != 0)
		caa_cpu_relax();
}

static __attribute__((unused))
void ust_malloc_spin_unlock(pthread_mutex_t *lock)
{
	/*
	 * Ensure memory accesses within the critical section do not
	 * leak outside.
	 */
	cmm_smp_mb();
	uatomic_set(&ust_malloc_lock, 0);
}

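/*
 * While expanding DEFINE_URCU_TLS() below, redirect calloc to the
 * bootstrap static_calloc and the pthread mutex entry points to the
 * spinlock above: on platforms where the URCU TLS compat layer falls
 * back to pthread keys, its setup path may allocate memory and take a
 * mutex, which must not recurse into this wrapper.
 */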
#define calloc static_calloc
#define pthread_mutex_lock ust_malloc_spin_lock
#define pthread_mutex_unlock ust_malloc_spin_unlock
static DEFINE_URCU_TLS(int, malloc_nesting);
#undef pthread_mutex_unlock
#undef pthread_mutex_lock
#undef calloc

/*
 * Static allocator to use when initially executing dlsym(). It keeps a
 * size_t value of each object size prior to the object.
 */
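/*
 * Resulting buffer layout for each allocation (as implemented below):
 *
 *   ... [ padding ][ size_t size ][ object data ... ]
 *                                 ^-- returned pointer, aligned on "alignment"
 */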
static
void *static_calloc_aligned(size_t nmemb, size_t size, size_t alignment)
{
	size_t prev_offset, new_offset, res_offset, aligned_offset;

	if (nmemb * size == 0) {
		return NULL;
	}

	/*
	 * Protect static_calloc_buf_offset from concurrent updates
	 * using a cmpxchg loop rather than a mutex to remove a
	 * dependency on pthread. This will minimize the risk of bad
	 * interaction between mutex and malloc instrumentation.
	 */
	res_offset = CMM_LOAD_SHARED(static_calloc_buf_offset);
	do {
		prev_offset = res_offset;
		aligned_offset = LTTNG_UST_ALIGN(prev_offset + sizeof(size_t), alignment);
		new_offset = aligned_offset + nmemb * size;
		if (new_offset > sizeof(static_calloc_buf)) {
			abort();
		}
	} while ((res_offset = uatomic_cmpxchg(&static_calloc_buf_offset,
			prev_offset, new_offset)) != prev_offset);
	*(size_t *) &static_calloc_buf[aligned_offset - sizeof(size_t)] = size;
	return &static_calloc_buf[aligned_offset];
}

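/*
 * static_calloc does not need to zero memory explicitly:
 * static_calloc_buf has static storage duration, so it starts zeroed,
 * and each byte is handed out at most once (static_free is a no-op and
 * the buffer offset only grows).
 */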
static
void *static_calloc(size_t nmemb, size_t size)
{
	void *retval;

	retval = static_calloc_aligned(nmemb, size, 1);
	return retval;
}

static
void *static_malloc(size_t size)
{
	void *retval;

	retval = static_calloc_aligned(1, size, 1);
	return retval;
}

static
void static_free(void *ptr)
{
	/* no-op. */
}

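/*
 * Bootstrap realloc: shrink requests reuse the existing entry (its
 * recorded size is simply updated); grow requests allocate a new chunk
 * and copy the old contents. The previous chunk is not freed, since
 * static_free is a no-op on the bootstrap buffer anyway.
 */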
static
void *static_realloc(void *ptr, size_t size)
{
	size_t *old_size = NULL;
	void *retval;

	if (size == 0) {
		retval = NULL;
		goto end;
	}

	if (ptr) {
		old_size = (size_t *) ptr - 1;
		if (size <= *old_size) {
			/* We can re-use the old entry. */
			*old_size = size;
			retval = ptr;
			goto end;
		}
	}
	/* We need to expand. Don't free previous memory location. */
	retval = static_calloc_aligned(1, size, 1);
	assert(retval);
	if (ptr)
		memcpy(retval, ptr, *old_size);
end:
	return retval;
}

static
void *static_memalign(size_t alignment, size_t size)
{
	void *retval;

	retval = static_calloc_aligned(1, size, alignment);
	return retval;
}

static
int static_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	void *ptr;

	/* Check for power of 2, larger than void *. */
	if (alignment & (alignment - 1)
			|| alignment < sizeof(void *)
			|| alignment == 0) {
		goto end;
	}
	ptr = static_calloc_aligned(1, size, alignment);
	*memptr = ptr;
end:
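	/*
	 * Note: reports success (0) even when the alignment check above
	 * fails, leaving *memptr untouched; bootstrap-phase callers are
	 * presumably not expected to handle EINVAL here.
	 */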
	return 0;
}

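/*
 * Install the bootstrap allocator as the current allocator. The asserts
 * check that no allocator has been installed yet.
 */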
static
void setup_static_allocator(void)
{
	assert(cur_alloc.calloc == NULL);
	cur_alloc.calloc = static_calloc;
	assert(cur_alloc.malloc == NULL);
	cur_alloc.malloc = static_malloc;
	assert(cur_alloc.free == NULL);
	cur_alloc.free = static_free;
	assert(cur_alloc.realloc == NULL);
	cur_alloc.realloc = static_realloc;
	assert(cur_alloc.memalign == NULL);
	cur_alloc.memalign = static_memalign;
	assert(cur_alloc.posix_memalign == NULL);
	cur_alloc.posix_memalign = static_posix_memalign;
}

static
void lookup_all_symbols(void)
{
	struct alloc_functions af;

	/*
	 * Temporarily redirect allocation functions to
	 * static_calloc_aligned, and free function to static_free
	 * (no-op), until the dlsym lookup has completed.
	 */
	setup_static_allocator();

	/*
	 * Perform the actual lookups: RTLD_NEXT resolves the next
	 * occurrence of each symbol in the library search order, i.e.
	 * the real libc implementations behind this wrapper.
	 */
	af.calloc = dlsym(RTLD_NEXT, "calloc");
	af.malloc = dlsym(RTLD_NEXT, "malloc");
	af.free = dlsym(RTLD_NEXT, "free");
	af.realloc = dlsym(RTLD_NEXT, "realloc");
	af.memalign = dlsym(RTLD_NEXT, "memalign");
	af.posix_memalign = dlsym(RTLD_NEXT, "posix_memalign");

	/* Populate the new allocator functions */
	memcpy(&cur_alloc, &af, sizeof(cur_alloc));
}

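/*
 * Public allocator wrappers. URCU_TLS(malloc_nesting) counts wrapper
 * re-entrancy on the current thread: only the outermost call (nesting
 * level 1) emits a tracepoint, so allocations performed internally by
 * the tracer or the allocator itself are not traced.
 */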
void *malloc(size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.malloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.malloc == NULL) {
			fprintf(stderr, "mallocwrap: unable to find malloc\n");
			abort();
		}
	}
	retval = cur_alloc.malloc(size);
	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, malloc,
			size, retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

void free(void *ptr)
{
	URCU_TLS(malloc_nesting)++;
	/*
	 * Check whether the memory was allocated with
	 * static_calloc_aligned, in which case there is nothing to free.
	 */
	if (caa_unlikely((char *)ptr >= static_calloc_buf &&
			(char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
		goto end;
	}

	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, free,
			ptr, LTTNG_UST_CALLER_IP());
	}

	if (cur_alloc.free == NULL) {
		lookup_all_symbols();
		if (cur_alloc.free == NULL) {
			fprintf(stderr, "mallocwrap: unable to find free\n");
			abort();
		}
	}
	cur_alloc.free(ptr);
end:
	URCU_TLS(malloc_nesting)--;
}
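/*
 * During lookup_all_symbols(), dlsym() may itself call calloc(); such
 * recursive calls land here and are served by the bootstrap
 * static_calloc installed by setup_static_allocator().
 */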
void *calloc(size_t nmemb, size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.calloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.calloc == NULL) {
			fprintf(stderr, "callocwrap: unable to find calloc\n");
			abort();
		}
	}
	retval = cur_alloc.calloc(nmemb, size);
	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, calloc,
			nmemb, size, retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

void *realloc(void *ptr, size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	/*
	 * Check whether the memory was allocated with
	 * static_calloc_aligned, in which case there is nothing
	 * to free, and we need to copy the old data.
	 */
	if (caa_unlikely((char *)ptr >= static_calloc_buf &&
			(char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
		size_t *old_size;

		old_size = (size_t *) ptr - 1;
		if (cur_alloc.calloc == NULL) {
			lookup_all_symbols();
			if (cur_alloc.calloc == NULL) {
				fprintf(stderr, "reallocwrap: unable to find calloc\n");
				abort();
			}
		}
		retval = cur_alloc.calloc(1, size);
		if (retval) {
			/*
			 * Cap the copy at the new size so a shrinking
			 * realloc cannot overflow the new buffer.
			 */
			memcpy(retval, ptr,
				(size < *old_size) ? size : *old_size);
		}
		/*
		 * Mimic that a NULL pointer has been received, so
		 * memory allocation analyses based on the trace don't
		 * get confused by the address from the static
		 * allocator.
		 */
		ptr = NULL;
		goto end;
	}

	if (cur_alloc.realloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.realloc == NULL) {
			fprintf(stderr, "reallocwrap: unable to find realloc\n");
			abort();
		}
	}
	retval = cur_alloc.realloc(ptr, size);
end:
	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, realloc,
			ptr, size, retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

void *memalign(size_t alignment, size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.memalign == NULL) {
		lookup_all_symbols();
		if (cur_alloc.memalign == NULL) {
			fprintf(stderr, "memalignwrap: unable to find memalign\n");
			abort();
		}
	}
	retval = cur_alloc.memalign(alignment, size);
	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, memalign,
			alignment, size, retval,
			LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

int posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.posix_memalign == NULL) {
		lookup_all_symbols();
		if (cur_alloc.posix_memalign == NULL) {
			fprintf(stderr, "posix_memalignwrap: unable to find posix_memalign\n");
			abort();
		}
	}
	retval = cur_alloc.posix_memalign(memptr, alignment, size);
	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, posix_memalign,
			*memptr, alignment, size,
			retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}
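/*
 * Touching the TLS variable from the constructor forces its allocation
 * before the process becomes multithreaded and before malloc is first
 * traced, so the TLS access in the fast path cannot itself trigger a
 * recursive allocation. The asm volatile keeps the compiler from
 * optimizing the access away.
 */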
static
void lttng_ust_fixup_malloc_nesting_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(malloc_nesting)));
}

__attribute__((constructor))
void lttng_ust_malloc_wrapper_init(void)
{
	/* Initialization already done */
	if (cur_alloc.calloc) {
		return;
	}
	lttng_ust_fixup_malloc_nesting_tls();
	/*
	 * Ensure the allocator is in place before the process becomes
	 * multithreaded.
	 */
	lookup_all_symbols();
}