Fix: set FD_CLOEXEC on incoming FDs.
[lttng-ust.git] / libringbuffer / shm.c
index 0153578c93a2cfddf3bdd69ae57324b516086651..7a41d4de51d4e4744172dd9e235cb29640bd50de 100644 (file)
 #include <dirent.h>
 #include <lttng/align.h>
 #include <limits.h>
+#include <stdbool.h>
 #ifdef HAVE_LIBNUMA
 #include <numa.h>
+#include <numaif.h>
 #endif
 #include <helper.h>
 #include <ust-fd.h>
+#include "mmap.h"
 
 /*
  * Ensure we have the required amount of space available by writing 0
@@ -152,7 +155,7 @@ struct shm_object *_shm_object_table_alloc_shm(struct shm_object_table *table,
 
        /* memory_map: mmap */
        memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
-                         MAP_SHARED, shmfd, 0);
+                         MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0);
        if (memory_map == MAP_FAILED) {
                PERROR("mmap");
                goto error_mmap;
@@ -244,6 +247,24 @@ alloc_error:
        return NULL;
 }
 
+/*
+ * libnuma prints errors on the console even for numa_available().
+ * Work-around this limitation by using get_mempolicy() directly to
+ * check whether the kernel supports mempolicy.
+ */
+#ifdef HAVE_LIBNUMA
+static bool lttng_is_numa_available(void)
+{
+       int ret;
+
+       /*
+        * get_mempolicy() fails with ENOSYS when the kernel was built
+        * without CONFIG_NUMA: report NUMA as unavailable without ever
+        * calling into libnuma (which would print to the console).
+        */
+       ret = get_mempolicy(NULL, NULL, 0, NULL, 0);
+       if (ret && errno == ENOSYS) {
+               return false;
+       }
+       /*
+        * numa_available() returns -1 when NUMA is unavailable and 0
+        * otherwise (see numa(3)); it never returns a positive value,
+        * so comparing with "> 0" would always report unavailability.
+        */
+       return numa_available() != -1;
+}
+#endif
+
 struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
                        size_t memory_map_size,
                        enum shm_object_type type,
@@ -252,16 +273,20 @@ struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
 {
        struct shm_object *shm_object;
 #ifdef HAVE_LIBNUMA
-       int oldnode, node;
-
-       oldnode = numa_preferred();
-       if (cpu >= 0) {
-               node = numa_node_of_cpu(cpu);
-               if (node >= 0)
-                       numa_set_preferred(node);
+       int oldnode = 0, node;
+       bool numa_avail;
+
+       numa_avail = lttng_is_numa_available();
+       if (numa_avail) {
+               oldnode = numa_preferred();
+               if (cpu >= 0) {
+                       node = numa_node_of_cpu(cpu);
+                       if (node >= 0)
+                               numa_set_preferred(node);
+               }
+               if (cpu < 0 || node < 0)
+                       numa_set_localalloc();
        }
-       if (cpu < 0 || node < 0)
-               numa_set_localalloc();
 #endif /* HAVE_LIBNUMA */
        switch (type) {
        case SHM_OBJECT_SHM:
@@ -275,7 +300,8 @@ struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
                assert(0);
        }
 #ifdef HAVE_LIBNUMA
-       numa_set_preferred(oldnode);
+       if (numa_avail)
+               numa_set_preferred(oldnode);
 #endif /* HAVE_LIBNUMA */
        return shm_object;
 }
@@ -302,11 +328,6 @@ struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
        obj->shm_fd = shm_fd;
        obj->shm_fd_ownership = 1;
 
-       ret = fcntl(obj->wait_fd[1], F_SETFD, FD_CLOEXEC);
-       if (ret < 0) {
-               PERROR("fcntl");
-               goto error_fcntl;
-       }
        /* The write end of the pipe needs to be non-blocking */
        ret = fcntl(obj->wait_fd[1], F_SETFL, O_NONBLOCK);
        if (ret < 0) {
@@ -316,7 +337,7 @@ struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
 
        /* memory_map: mmap */
        memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
-                         MAP_SHARED, shm_fd, 0);
+                         MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
        if (memory_map == MAP_FAILED) {
                PERROR("mmap");
                goto error_mmap;
This page took 0.023951 seconds and 4 git commands to generate.