Commit 0140f31

[EASY] Extract hpa_central component from hpa source file
1 parent dbc02e0 · commit 0140f31

5 files changed: +173 additions, -141 deletions

Makefile.in

Lines changed: 1 addition & 0 deletions
@@ -123,6 +123,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \
 	$(srcroot)src/san_bump.c \
 	$(srcroot)src/hook.c \
 	$(srcroot)src/hpa.c \
+	$(srcroot)src/hpa_central.c \
 	$(srcroot)src/hpa_hooks.c \
 	$(srcroot)src/hpa_utils.c \
 	$(srcroot)src/hpdata.c \

include/jemalloc/internal/hpa.h

Lines changed: 1 addition & 26 deletions
@@ -6,36 +6,13 @@
 #include "jemalloc/internal/edata_cache.h"
 #include "jemalloc/internal/emap.h"
 #include "jemalloc/internal/exp_grow.h"
+#include "jemalloc/internal/hpa_central.h"
 #include "jemalloc/internal/hpa_hooks.h"
 #include "jemalloc/internal/hpa_opts.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/pai.h"
 #include "jemalloc/internal/psset.h"
 
-typedef struct hpa_central_s hpa_central_t;
-struct hpa_central_s {
-	/*
-	 * Guards expansion of eden. We separate this from the regular mutex so
-	 * that cheaper operations can still continue while we're doing the OS
-	 * call.
-	 */
-	malloc_mutex_t grow_mtx;
-	/*
-	 * Either NULL (if empty), or some integer multiple of a
-	 * hugepage-aligned number of hugepages. We carve them off one at a
-	 * time to satisfy new pageslab requests.
-	 *
-	 * Guarded by grow_mtx.
-	 */
-	void *eden;
-	size_t eden_len;
-	/* Source for metadata. */
-	base_t *base;
-
-	/* The HPA hooks. */
-	hpa_hooks_t hooks;
-};
-
 typedef struct hpa_shard_nonderived_stats_s hpa_shard_nonderived_stats_t;
 struct hpa_shard_nonderived_stats_s {
 	/*
@@ -165,8 +142,6 @@ bool hpa_hugepage_size_exceeds_limit(void);
  * just that it can function properly given the system it's running on.
  */
 bool hpa_supported(void);
-bool hpa_central_init(
-    hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks);
 bool hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap,
     base_t *base, edata_cache_t *edata_cache, unsigned ind,
     const hpa_shard_opts_t *opts);

include/jemalloc/internal/hpa_central.h

Lines changed: 41 additions & 0 deletions
@@ -0,0 +1,41 @@
+#ifndef JEMALLOC_INTERNAL_HPA_CENTRAL_H
+#define JEMALLOC_INTERNAL_HPA_CENTRAL_H
+
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/hpa_hooks.h"
+#include "jemalloc/internal/hpdata.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/tsd_types.h"
+
+typedef struct hpa_central_s hpa_central_t;
+struct hpa_central_s {
+	/*
+	 * Guards expansion of eden. We separate this from the regular mutex so
+	 * that cheaper operations can still continue while we're doing the OS
+	 * call.
+	 */
+	malloc_mutex_t grow_mtx;
+	/*
+	 * Either NULL (if empty), or some integer multiple of a
+	 * hugepage-aligned number of hugepages. We carve them off one at a
+	 * time to satisfy new pageslab requests.
+	 *
+	 * Guarded by grow_mtx.
+	 */
+	void *eden;
+	size_t eden_len;
+	/* Source for metadata. */
+	base_t *base;
+
+	/* The HPA hooks. */
+	hpa_hooks_t hooks;
+};
+
+bool hpa_central_init(
+    hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks);
+
+hpdata_t *hpa_central_extract(tsdn_t *tsdn, hpa_central_t *central, size_t size,
+    uint64_t age, bool hugify_eager, bool *oom);
+
+#endif /* JEMALLOC_INTERNAL_HPA_CENTRAL_H */
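
For context, a minimal sketch of how a shard-side caller might use the extracted API after this split. This is a hypothetical illustration, not part of the commit; grow_from_central_sketch is an invented name, and the real call sites remain in src/hpa.c.

#include "jemalloc/internal/hpa_central.h"

/*
 * Hypothetical sketch only: carve one hugepage-backed pageslab out of the
 * central allocator. The caller is expected to hold its shard's grow mutex,
 * per the witness assertion inside hpa_central_extract.
 */
static hpdata_t *
grow_from_central_sketch(tsdn_t *tsdn, hpa_central_t *central) {
	bool oom;
	hpdata_t *ps = hpa_central_extract(tsdn, central, HUGEPAGE,
	    /* age */ 0, /* hugify_eager */ false, &oom);
	/* NULL means extraction failed; oom reports whether it was an OOM. */
	return ps;
}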

src/hpa.c

Lines changed: 0 additions & 115 deletions
@@ -8,8 +8,6 @@
 #include "jemalloc/internal/witness.h"
 #include "jemalloc/internal/jemalloc_probe.h"
 
-#define HPA_EDEN_SIZE (128 * HUGEPAGE)
-
 static edata_t *hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
     size_t alignment, bool zero, bool guarded, bool frequent_reuse,
     bool *deferred_work_generated);
@@ -74,119 +72,6 @@ hpa_do_consistency_checks(hpa_shard_t *shard) {
 	assert(shard->base != NULL);
 }
 
-bool
-hpa_central_init(
-    hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks) {
-	/* malloc_conf processing should have filtered out these cases. */
-	assert(hpa_supported());
-	bool err;
-	err = malloc_mutex_init(&central->grow_mtx, "hpa_central_grow",
-	    WITNESS_RANK_HPA_CENTRAL_GROW, malloc_mutex_rank_exclusive);
-	if (err) {
-		return true;
-	}
-
-	central->base = base;
-	central->eden = NULL;
-	central->eden_len = 0;
-	central->hooks = *hooks;
-	return false;
-}
-
-static hpdata_t *
-hpa_alloc_ps(tsdn_t *tsdn, hpa_central_t *central) {
-	return (hpdata_t *)base_alloc(
-	    tsdn, central->base, sizeof(hpdata_t), CACHELINE);
-}
-
-static hpdata_t *
-hpa_central_extract(tsdn_t *tsdn, hpa_central_t *central, size_t size,
-    uint64_t age, bool hugify_eager, bool *oom) {
-	/* Don't yet support big allocations; these should get filtered out. */
-	assert(size <= HUGEPAGE);
-	/*
-	 * Should only try to extract from the central allocator if the local
-	 * shard is exhausted. We should hold the grow_mtx on that shard.
-	 */
-	witness_assert_positive_depth_to_rank(
-	    tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_HPA_SHARD_GROW);
-
-	malloc_mutex_lock(tsdn, &central->grow_mtx);
-	*oom = false;
-
-	hpdata_t *ps = NULL;
-	bool start_as_huge = hugify_eager
-	    || (init_system_thp_mode == system_thp_mode_always
-	    && opt_experimental_hpa_start_huge_if_thp_always);
-
-	/* Is eden a perfect fit? */
-	if (central->eden != NULL && central->eden_len == HUGEPAGE) {
-		ps = hpa_alloc_ps(tsdn, central);
-		if (ps == NULL) {
-			*oom = true;
-			malloc_mutex_unlock(tsdn, &central->grow_mtx);
-			return NULL;
-		}
-		hpdata_init(ps, central->eden, age, start_as_huge);
-		central->eden = NULL;
-		central->eden_len = 0;
-		malloc_mutex_unlock(tsdn, &central->grow_mtx);
-		return ps;
-	}
-
-	/*
-	 * We're about to try to allocate from eden by splitting. If eden is
-	 * NULL, we have to allocate it too. Otherwise, we just have to
-	 * allocate an edata_t for the new psset.
-	 */
-	if (central->eden == NULL) {
-		/* Allocate address space, bailing if we fail. */
-		void *new_eden = central->hooks.map(HPA_EDEN_SIZE);
-		if (new_eden == NULL) {
-			*oom = true;
-			malloc_mutex_unlock(tsdn, &central->grow_mtx);
-			return NULL;
-		}
-		if (hugify_eager) {
-			central->hooks.hugify(
-			    new_eden, HPA_EDEN_SIZE, /* sync */ false);
-		}
-		ps = hpa_alloc_ps(tsdn, central);
-		if (ps == NULL) {
-			central->hooks.unmap(new_eden, HPA_EDEN_SIZE);
-			*oom = true;
-			malloc_mutex_unlock(tsdn, &central->grow_mtx);
-			return NULL;
-		}
-		central->eden = new_eden;
-		central->eden_len = HPA_EDEN_SIZE;
-	} else {
-		/* Eden is already nonempty; only need an edata for ps. */
-		ps = hpa_alloc_ps(tsdn, central);
-		if (ps == NULL) {
-			*oom = true;
-			malloc_mutex_unlock(tsdn, &central->grow_mtx);
-			return NULL;
-		}
-	}
-	assert(ps != NULL);
-	assert(central->eden != NULL);
-	assert(central->eden_len > HUGEPAGE);
-	assert(central->eden_len % HUGEPAGE == 0);
-	assert(HUGEPAGE_ADDR2BASE(central->eden) == central->eden);
-
-	hpdata_init(ps, central->eden, age, start_as_huge);
-
-	char *eden_char = (char *)central->eden;
-	eden_char += HUGEPAGE;
-	central->eden = (void *)eden_char;
-	central->eden_len -= HUGEPAGE;
-
-	malloc_mutex_unlock(tsdn, &central->grow_mtx);
-
-	return ps;
-}
-
 bool
 hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap,
     base_t *base, edata_cache_t *edata_cache, unsigned ind,

src/hpa_central.c

Lines changed: 130 additions & 0 deletions
@@ -0,0 +1,130 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/hpa_central.h"
+#include "jemalloc/internal/tsd.h"
+#include "jemalloc/internal/witness.h"
+
+#define HPA_EDEN_SIZE (128 * HUGEPAGE)
+
+bool
+hpa_central_init(
+    hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks) {
+	/* malloc_conf processing should have filtered out these cases. */
+	assert(hpa_supported());
+	bool err;
+	err = malloc_mutex_init(&central->grow_mtx, "hpa_central_grow",
+	    WITNESS_RANK_HPA_CENTRAL_GROW, malloc_mutex_rank_exclusive);
+	if (err) {
+		return true;
+	}
+
+	central->base = base;
+	central->eden = NULL;
+	central->eden_len = 0;
+	central->hooks = *hooks;
+	return false;
+}
+
+static hpdata_t *
+hpa_alloc_ps(tsdn_t *tsdn, hpa_central_t *central) {
+	return (hpdata_t *)base_alloc(
+	    tsdn, central->base, sizeof(hpdata_t), CACHELINE);
+}
+
+hpdata_t *
+hpa_central_extract(tsdn_t *tsdn, hpa_central_t *central, size_t size,
+    uint64_t age, bool hugify_eager, bool *oom) {
+	/* Don't yet support big allocations; these should get filtered out. */
+	assert(size <= HUGEPAGE);
+	/*
+	 * Should only try to extract from the central allocator if the local
+	 * shard is exhausted. We should hold the grow_mtx on that shard.
+	 */
+	witness_assert_positive_depth_to_rank(
+	    tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_HPA_SHARD_GROW);
+
+	malloc_mutex_lock(tsdn, &central->grow_mtx);
+	*oom = false;
+
+	hpdata_t *ps = NULL;
+	bool start_as_huge = hugify_eager
+	    || (init_system_thp_mode == system_thp_mode_always
+	    && opt_experimental_hpa_start_huge_if_thp_always);
+
+	/* Is eden a perfect fit? */
+	if (central->eden != NULL && central->eden_len == HUGEPAGE) {
+		ps = hpa_alloc_ps(tsdn, central);
+		if (ps == NULL) {
+			*oom = true;
+			malloc_mutex_unlock(tsdn, &central->grow_mtx);
+			return NULL;
+		}
+		hpdata_init(ps, central->eden, age, start_as_huge);
+		central->eden = NULL;
+		central->eden_len = 0;
+		malloc_mutex_unlock(tsdn, &central->grow_mtx);
+		return ps;
+	}
+
+	/*
+	 * We're about to try to allocate from eden by splitting. If eden is
+	 * NULL, we have to allocate it too. Otherwise, we just have to
+	 * allocate an edata_t for the new psset.
+	 */
+	if (central->eden == NULL) {
+		/* Allocate address space, bailing if we fail. */
+		void *new_eden = central->hooks.map(HPA_EDEN_SIZE);
+		if (new_eden == NULL) {
+			*oom = true;
+			malloc_mutex_unlock(tsdn, &central->grow_mtx);
+			return NULL;
+		}
+		if (hugify_eager) {
+			central->hooks.hugify(
+			    new_eden, HPA_EDEN_SIZE, /* sync */ false);
+		}
+		ps = hpa_alloc_ps(tsdn, central);
+		if (ps == NULL) {
+			central->hooks.unmap(new_eden, HPA_EDEN_SIZE);
+			*oom = true;
+			malloc_mutex_unlock(tsdn, &central->grow_mtx);
+			return NULL;
+		}
+		central->eden = new_eden;
+		central->eden_len = HPA_EDEN_SIZE;
+	} else {
+		/* Eden is already nonempty; only need an edata for ps. */
+		ps = hpa_alloc_ps(tsdn, central);
+		if (ps == NULL) {
+			*oom = true;
+			malloc_mutex_unlock(tsdn, &central->grow_mtx);
+			return NULL;
+		}
+	}
+	assert(ps != NULL);
+	assert(central->eden != NULL);
+	assert(central->eden_len > HUGEPAGE);
+	assert(central->eden_len % HUGEPAGE == 0);
+	assert(HUGEPAGE_ADDR2BASE(central->eden) == central->eden);
+
+	hpdata_init(ps, central->eden, age, start_as_huge);
+
+	char *eden_char = (char *)central->eden;
+	eden_char += HUGEPAGE;
+	central->eden = (void *)eden_char;
+	central->eden_len -= HUGEPAGE;
+
+	malloc_mutex_unlock(tsdn, &central->grow_mtx);
+
+	return ps;
+}
+
+/*
+ * Please note that we do not have preforkN and postfork parent and child
+ * functions here. This is so because the only way to access central is via
+ * hpa_shard_t and shard's mutex must be held. Thus, ensuring that shard's
+ * mutex is held is sufficient to guarantee that parent and child will be in
+ * a proper state. If this assumption ever changes, we would need to implement
+ * prefork/postfork mechanism that would be handled separately.
+ */
