
Commit 592afcb

fingolfin authored and KristofferC committed
Don't expose guard pages to malloc_stack API consumers (#54591)

Whether or not a guard page is in effect is an implementation detail, and consumers of the `malloc_stack` API should not have to worry about it. In particular, if a stack of a certain size is requested, a stack of that size should be delivered, not one reduced on some systems because a guard page is parked in that range. This also helps consumers of the gcext API that implement stack scanning (i.e., GAP.jl), as they no longer have to worry about running into those guard pages.

(cherry picked from commit 5dfd57d)
1 parent 95a3792 commit 592afcb
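For readers skimming the change, the allocation pattern the patch adopts looks roughly like the sketch below. It mirrors the POSIX path of src/gc-stacks.c in a simplified, self-contained form; GUARD_SIZE, malloc_stack_sketch and free_stack_sketch are illustrative stand-ins for the real jl_guard_size / LLT_ALIGN(jl_guard_size, jl_page_size) arithmetic, not the actual implementation.

    #include <stddef.h>
    #include <sys/mman.h>

    /* Sketch of the oversize-and-offset pattern (POSIX path). GUARD_SIZE stands
     * in for the page-aligned jl_guard_size used in src/gc-stacks.c. */
    #define GUARD_SIZE ((size_t)(4096 * 8))

    static void *malloc_stack_sketch(size_t bufsz)
    {
        size_t total = bufsz + GUARD_SIZE;   /* enlarge the mapping by the guard */
        char *base = mmap(0, total, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (base == MAP_FAILED)
            return NULL;
        /* park an inaccessible guard page at the low end to catch overflow */
        if (mprotect(base, GUARD_SIZE, PROT_NONE) == -1) {
            munmap(base, total);
            return NULL;
        }
        return base + GUARD_SIZE;            /* caller gets the full bufsz bytes */
    }

    static void free_stack_sketch(void *stkbuf, size_t bufsz)
    {
        /* undo the offset so the whole mapping, guard page included, is released */
        munmap((char *)stkbuf - GUARD_SIZE, bufsz + GUARD_SIZE);
    }

The key point is the return statement: the guard page sits below the pointer the caller receives, so the requested size is delivered in full, and the matching free must subtract the same offset before unmapping.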

File tree

1 file changed (+32 −2 lines)


src/gc-stacks.c

Lines changed: 32 additions & 2 deletions
@@ -22,13 +22,22 @@
 // number of stacks to always keep available per pool
 #define MIN_STACK_MAPPINGS_PER_POOL 5
 
+#if defined(_OS_WINDOWS_) || (!defined(_OS_OPENBSD_) && !defined(JL_HAVE_UCONTEXT) && !defined(JL_HAVE_SIGALTSTACK))
+#define JL_USE_GUARD_PAGE 1
 const size_t jl_guard_size = (4096 * 8);
+#else
+const size_t jl_guard_size = 0;
+#endif
+
 static _Atomic(uint32_t) num_stack_mappings = 0;
 
 #ifdef _OS_WINDOWS_
 #define MAP_FAILED NULL
 static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 {
+    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
+    bufsz += guard_size;
+
     void *stk = VirtualAlloc(NULL, bufsz, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
     if (stk == NULL)
         return MAP_FAILED;
@@ -37,13 +46,21 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
         VirtualFree(stk, 0, MEM_RELEASE);
         return MAP_FAILED;
     }
+    stk = (char *)stk + guard_size;
+
     jl_atomic_fetch_add(&num_stack_mappings, 1);
     return stk;
 }
 
 
 static void free_stack(void *stkbuf, size_t bufsz)
 {
+#ifdef JL_USE_GUARD_PAGE
+    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
+    bufsz += guard_size;
+    stkbuf = (char *)stkbuf - guard_size;
+#endif
+
     VirtualFree(stkbuf, 0, MEM_RELEASE);
     jl_atomic_fetch_add(&num_stack_mappings, -1);
 }
@@ -52,22 +69,35 @@ static void free_stack(void *stkbuf, size_t bufsz)
 
 static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 {
+#ifdef JL_USE_GUARD_PAGE
+    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
+    bufsz += guard_size;
+#endif
+
     void* stk = mmap(0, bufsz, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
     if (stk == MAP_FAILED)
         return MAP_FAILED;
-#if !defined(JL_HAVE_UCONTEXT) && !defined(JL_HAVE_SIGALTSTACK)
-    // setup a guard page to detect stack overflow
+
+#ifdef JL_USE_GUARD_PAGE
+    // set up a guard page to detect stack overflow
     if (mprotect(stk, jl_guard_size, PROT_NONE) == -1) {
         munmap(stk, bufsz);
         return MAP_FAILED;
     }
+    stk = (char *)stk + guard_size;
 #endif
     jl_atomic_fetch_add(&num_stack_mappings, 1);
     return stk;
 }
 
 static void free_stack(void *stkbuf, size_t bufsz)
 {
+#ifdef JL_USE_GUARD_PAGE
+    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
+    bufsz += guard_size;
+    stkbuf = (char *)stkbuf - guard_size;
+#endif
+
     munmap(stkbuf, bufsz);
     jl_atomic_fetch_add(&num_stack_mappings, -1);
 }
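From the consumer side, the visible effect is that the entire returned range is mapped readable. The hypothetical scanner below (mark_if_pointer and scan_stack_words are illustrative names, not part of the gcext API) shows the kind of conservative stack walk that a gcext consumer such as GAP.jl performs, which after this change cannot run into a hidden PROT_NONE page inside the buffer it was handed.

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative marking hook; a real gcext consumer would call into its GC here. */
    static void mark_if_pointer(uintptr_t word) { (void)word; }

    /* With guard pages kept outside the caller-visible buffer,
     * [stkbuf, stkbuf + bufsz) is fully mapped and this scan cannot fault. */
    static void scan_stack_words(void *stkbuf, size_t bufsz)
    {
        uintptr_t *p = (uintptr_t *)stkbuf;
        uintptr_t *end = (uintptr_t *)((char *)stkbuf + bufsz);
        for (; p < end; p++)
            mark_if_pointer(*p);
    }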
