// SPDX-FileCopyrightText: 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

/*
 * mmap/reservation based memory management for Lock-Free RCU Hash Table
 */

#include <unistd.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <urcu/assert.h>
#include "rculfhash-internal.h"

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

/*
 * The allocation scheme used by the mmap based RCU hash table is to make a
 * large inaccessible mapping to reserve memory without allocating it.
 * Then smaller chunks are allocated by overlapping read/write mappings which
 * do allocate memory. Deallocation is done by an overlapping inaccessible
 * mapping.
 *
 * This scheme was tested on Linux, macOS and Solaris. However, on Cygwin the
 * mmap wrapper is based on the Windows NtMapViewOfSection API which doesn't
 * support overlapping mappings.
 *
 * An alternative to the overlapping mappings is to use mprotect to change the
 * protection on chunks of the large mapping: read/write to allocate and none
 * to deallocate. This works perfectly on Cygwin and Solaris, but on Linux a
 * call to madvise is also required to deallocate, and it just doesn't work on
 * macOS.
 *
 * For this reason, we keep the original scheme on all platforms except Cygwin.
 */

/* Reserve inaccessible memory space without allocating it */
static
void *memory_map(size_t length)
{
	void *ret;

	ret = mmap(NULL, length, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ret == MAP_FAILED) {
		perror("mmap");
		abort();
	}
	return ret;
}

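/* Unmap a reserved range, returning it (and any populated chunks) to the system */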
static
void memory_unmap(void *ptr, size_t length)
{
	if (munmap(ptr, length)) {
		perror("munmap");
		abort();
	}
}

#ifdef __CYGWIN__
/* Set protection to read/write to allocate a memory chunk */
static
void memory_populate(void *ptr, size_t length)
{
	if (mprotect(ptr, length, PROT_READ | PROT_WRITE)) {
		perror("mprotect");
		abort();
	}
}

/* Set protection to none to deallocate a memory chunk */
static
void memory_discard(void *ptr, size_t length)
{
	if (mprotect(ptr, length, PROT_NONE)) {
		perror("mprotect");
		abort();
	}
}

#else /* __CYGWIN__ */

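/* Allocate a memory chunk by overlapping the reservation with a read/write mapping */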
static
void memory_populate(void *ptr, size_t length)
{
	if (mmap(ptr, length, PROT_READ | PROT_WRITE,
			MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
			-1, 0) != ptr) {
		perror("mmap");
		abort();
	}
}

/*
 * Discard garbage memory so the system does not try to save it when
 * swapping it out. Keep the range reserved, but inaccessible.
 */
static
void memory_discard(void *ptr, size_t length)
{
	if (mmap(ptr, length, PROT_NONE,
			MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
			-1, 0) != ptr) {
		perror("mmap");
		abort();
	}
}
#endif /* __CYGWIN__ */

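/*
 * Allocate the bucket table for a given order. Order 0 creates the initial
 * table: small tables are allocated outright, large tables reserve the full
 * range and populate only the first min_nr_alloc_buckets. Each order above
 * min_alloc_buckets_order doubles the table by populating its upper half
 * within the existing reservation.
 */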
static
void cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
{
	if (order == 0) {
		if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
			/* small table */
			ht->tbl_mmap = ht->alloc->calloc(ht->alloc->state,
				ht->max_nr_buckets, sizeof(*ht->tbl_mmap));
			urcu_posix_assert(ht->tbl_mmap);
			return;
		}
		/* large table */
		ht->tbl_mmap = memory_map(ht->max_nr_buckets
			* sizeof(*ht->tbl_mmap));
		memory_populate(ht->tbl_mmap,
			ht->min_nr_alloc_buckets * sizeof(*ht->tbl_mmap));
	} else if (order > ht->min_alloc_buckets_order) {
		/* large table */
		unsigned long len = 1UL << (order - 1);

		urcu_posix_assert(ht->min_nr_alloc_buckets < ht->max_nr_buckets);
		memory_populate(ht->tbl_mmap + len,
			len * sizeof(*ht->tbl_mmap));
	}
	/* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
}

/*
 * cds_lfht_free_bucket_table() should be called with decreasing order.
 * When cds_lfht_free_bucket_table(0) is called, it means the whole
 * lfht is destroyed.
 */
static
void cds_lfht_free_bucket_table(struct cds_lfht *ht, unsigned long order)
{
	if (order == 0) {
		if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
			/* small table */
			poison_free(ht->alloc, ht->tbl_mmap);
			return;
		}
		/* large table */
		memory_unmap(ht->tbl_mmap,
			ht->max_nr_buckets * sizeof(*ht->tbl_mmap));
	} else if (order > ht->min_alloc_buckets_order) {
		/* large table */
		unsigned long len = 1UL << (order - 1);

		urcu_posix_assert(ht->min_nr_alloc_buckets < ht->max_nr_buckets);
		memory_discard(ht->tbl_mmap + len, len * sizeof(*ht->tbl_mmap));
	}
	/* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
}

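/* Buckets are addressed by direct index into the flat table */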
static
struct cds_lfht_node *bucket_at(struct cds_lfht *ht, unsigned long index)
{
	return &ht->tbl_mmap[index];
}

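/*
 * Tables that fit within a single page are not worth a reservation and are
 * allocated outright; larger tables use the reservation scheme with at least
 * one page worth of buckets allocated up front.
 */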
static
struct cds_lfht *alloc_cds_lfht(unsigned long min_nr_alloc_buckets,
		unsigned long max_nr_buckets, const struct cds_lfht_alloc *alloc)
{
	unsigned long page_bucket_size;

	page_bucket_size = getpagesize() / sizeof(struct cds_lfht_node);
	if (max_nr_buckets <= page_bucket_size) {
		/* small table */
		min_nr_alloc_buckets = max_nr_buckets;
	} else {
		/* large table */
		min_nr_alloc_buckets = max(min_nr_alloc_buckets,
			page_bucket_size);
	}

	return __default_alloc_cds_lfht(
			&cds_lfht_mm_mmap, alloc, sizeof(struct cds_lfht),
			min_nr_alloc_buckets, max_nr_buckets);
}

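/* Operations table for the mmap/reservation memory management backend */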
const struct cds_lfht_mm_type cds_lfht_mm_mmap = {
	.alloc_cds_lfht = alloc_cds_lfht,
	.alloc_bucket_table = cds_lfht_alloc_bucket_table,
	.free_bucket_table = cds_lfht_free_bucket_table,
	.bucket_at = bucket_at,
};