Use the initial-exec TLS model for the malloc_nesting nesting guard
variable to ensure that the glibc implementation of the TLS access
doesn't trigger infinite recursion by calling the memory allocator
wrapper functions, which can happen with global-dynamic.
Considering that the libc wrapper is meant to be loaded with LD_PRELOAD
anyway (never with dlopen(3)), we always expect the libc to have enough
space to hold the malloc_nesting variable.
In addition to changing the malloc_nesting variable from global-dynamic
to initial-exec, this removes the URCU TLS compatibility layer from the
libc wrapper, which is a good thing: this compatibility layer relies
on pthread keys and calloc internally, which makes it a bad fit for TLS
accesses guarding access to malloc wrappers, due to possible infinite
recursion.
Link: https://lists.lttng.org/pipermail/lttng-dev/2024-January/030697.html
Reported-by: Florian Weimer <fweimer@redhat.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Change-Id: I72c42bc09c1a06e2922b184b85abeb9c94200ee2
#include <urcu/system.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
#include <urcu/system.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
-#include <urcu/tls-compat.h>
#include <urcu/arch.h>
#include <lttng/ust-libc-wrapper.h>
#include <urcu/arch.h>
#include <lttng/ust-libc-wrapper.h>
uatomic_set(&ust_malloc_lock, 0);
}
uatomic_set(&ust_malloc_lock, 0);
}
-#define calloc static_calloc
-#define pthread_mutex_lock ust_malloc_spin_lock
-#define pthread_mutex_unlock ust_malloc_spin_unlock
-static DEFINE_URCU_TLS(int, malloc_nesting);
-#undef pthread_mutex_unlock
-#undef pthread_mutex_lock
-#undef calloc
+/*
+ * Use the initial-exec TLS model for the malloc_nesting nesting guard
+ * variable to ensure that the glibc implementation of the TLS access
+ * doesn't trigger infinite recursion by calling the memory allocator
+ * wrapper functions, which could happen with global-dynamic.
+ */
+static __thread __attribute__((tls_model("initial-exec"))) int malloc_nesting;
/*
* Static allocator to use when initially executing dlsym(). It keeps a
/*
* Static allocator to use when initially executing dlsym(). It keeps a
- URCU_TLS(malloc_nesting)++;
if (cur_alloc.malloc == NULL) {
lookup_all_symbols();
if (cur_alloc.malloc == NULL) {
if (cur_alloc.malloc == NULL) {
lookup_all_symbols();
if (cur_alloc.malloc == NULL) {
}
}
retval = cur_alloc.malloc(size);
}
}
retval = cur_alloc.malloc(size);
- if (URCU_TLS(malloc_nesting) == 1) {
+ if (malloc_nesting == 1) {
lttng_ust_tracepoint(lttng_ust_libc, malloc,
size, retval, LTTNG_UST_CALLER_IP());
}
lttng_ust_tracepoint(lttng_ust_libc, malloc,
size, retval, LTTNG_UST_CALLER_IP());
}
- URCU_TLS(malloc_nesting)--;
return retval;
}
void free(void *ptr)
{
return retval;
}
void free(void *ptr)
{
- URCU_TLS(malloc_nesting)++;
/*
* Check whether the memory was allocated with
* static_calloc_align, in which case there is nothing to free.
/*
* Check whether the memory was allocated with
* static_calloc_align, in which case there is nothing to free.
- if (URCU_TLS(malloc_nesting) == 1) {
+ if (malloc_nesting == 1) {
lttng_ust_tracepoint(lttng_ust_libc, free,
ptr, LTTNG_UST_CALLER_IP());
}
lttng_ust_tracepoint(lttng_ust_libc, free,
ptr, LTTNG_UST_CALLER_IP());
}
}
cur_alloc.free(ptr);
end:
}
cur_alloc.free(ptr);
end:
- URCU_TLS(malloc_nesting)--;
}
void *calloc(size_t nmemb, size_t size)
{
void *retval;
}
void *calloc(size_t nmemb, size_t size)
{
void *retval;
- URCU_TLS(malloc_nesting)++;
if (cur_alloc.calloc == NULL) {
lookup_all_symbols();
if (cur_alloc.calloc == NULL) {
if (cur_alloc.calloc == NULL) {
lookup_all_symbols();
if (cur_alloc.calloc == NULL) {
}
}
retval = cur_alloc.calloc(nmemb, size);
}
}
retval = cur_alloc.calloc(nmemb, size);
- if (URCU_TLS(malloc_nesting) == 1) {
+ if (malloc_nesting == 1) {
lttng_ust_tracepoint(lttng_ust_libc, calloc,
nmemb, size, retval, LTTNG_UST_CALLER_IP());
}
lttng_ust_tracepoint(lttng_ust_libc, calloc,
nmemb, size, retval, LTTNG_UST_CALLER_IP());
}
- URCU_TLS(malloc_nesting)--;
- URCU_TLS(malloc_nesting)++;
/*
* Check whether the memory was allocated with
* static_calloc_align, in which case there is nothing
/*
* Check whether the memory was allocated with
* static_calloc_align, in which case there is nothing
}
retval = cur_alloc.realloc(ptr, size);
end:
}
retval = cur_alloc.realloc(ptr, size);
end:
- if (URCU_TLS(malloc_nesting) == 1) {
+ if (malloc_nesting == 1) {
lttng_ust_tracepoint(lttng_ust_libc, realloc,
ptr, size, retval, LTTNG_UST_CALLER_IP());
}
lttng_ust_tracepoint(lttng_ust_libc, realloc,
ptr, size, retval, LTTNG_UST_CALLER_IP());
}
- URCU_TLS(malloc_nesting)--;
- URCU_TLS(malloc_nesting)++;
if (cur_alloc.memalign == NULL) {
lookup_all_symbols();
if (cur_alloc.memalign == NULL) {
if (cur_alloc.memalign == NULL) {
lookup_all_symbols();
if (cur_alloc.memalign == NULL) {
}
}
retval = cur_alloc.memalign(alignment, size);
}
}
retval = cur_alloc.memalign(alignment, size);
- if (URCU_TLS(malloc_nesting) == 1) {
+ if (malloc_nesting == 1) {
lttng_ust_tracepoint(lttng_ust_libc, memalign,
alignment, size, retval,
LTTNG_UST_CALLER_IP());
}
lttng_ust_tracepoint(lttng_ust_libc, memalign,
alignment, size, retval,
LTTNG_UST_CALLER_IP());
}
- URCU_TLS(malloc_nesting)--;
- URCU_TLS(malloc_nesting)++;
if (cur_alloc.posix_memalign == NULL) {
lookup_all_symbols();
if (cur_alloc.posix_memalign == NULL) {
if (cur_alloc.posix_memalign == NULL) {
lookup_all_symbols();
if (cur_alloc.posix_memalign == NULL) {
}
}
retval = cur_alloc.posix_memalign(memptr, alignment, size);
}
}
retval = cur_alloc.posix_memalign(memptr, alignment, size);
- if (URCU_TLS(malloc_nesting) == 1) {
+ if (malloc_nesting == 1) {
lttng_ust_tracepoint(lttng_ust_libc, posix_memalign,
*memptr, alignment, size,
retval, LTTNG_UST_CALLER_IP());
}
lttng_ust_tracepoint(lttng_ust_libc, posix_memalign,
*memptr, alignment, size,
retval, LTTNG_UST_CALLER_IP());
}
- URCU_TLS(malloc_nesting)--;
-static
-void lttng_ust_malloc_nesting_alloc_tls(void)
-{
- __asm__ __volatile__ ("" : : "m" (URCU_TLS(malloc_nesting)));
-}
-
void lttng_ust_libc_wrapper_malloc_ctor(void)
{
/* Initialization already done */
if (cur_alloc.calloc) {
return;
}
void lttng_ust_libc_wrapper_malloc_ctor(void)
{
/* Initialization already done */
if (cur_alloc.calloc) {
return;
}
- lttng_ust_malloc_nesting_alloc_tls();
/*
* Ensure the allocator is in place before the process becomes
* multithreaded.
/*
* Ensure the allocator is in place before the process becomes
* multithreaded.