// number of stacks to always keep available per pool
#define MIN_STACK_MAPPINGS_PER_POOL 5

-#if defined(_OS_WINDOWS_) || (!defined(_OS_OPENBSD_) && !defined(JL_HAVE_UCONTEXT) && !defined(JL_HAVE_SIGALTSTACK))
-#define JL_USE_GUARD_PAGE 1
const size_t jl_guard_size = (4096 * 8);
-#else
-const size_t jl_guard_size = 0;
-#endif
-
static _Atomic(uint32_t) num_stack_mappings = 0;

#ifdef _OS_WINDOWS_
#define MAP_FAILED NULL
static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
{
-    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
-    bufsz += guard_size;
-
    void *stk = VirtualAlloc(NULL, bufsz, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
    if (stk == NULL)
        return MAP_FAILED;
@@ -49,7 +40,6 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
        VirtualFree(stk, 0, MEM_RELEASE);
        return MAP_FAILED;
    }
-    stk = (char *)stk + guard_size;

    jl_atomic_fetch_add_relaxed(&num_stack_mappings, 1);
    return stk;
@@ -58,68 +48,41 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT

static void free_stack(void *stkbuf, size_t bufsz) JL_NOTSAFEPOINT
{
-#ifdef JL_USE_GUARD_PAGE
-    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
-    bufsz += guard_size;
-    stkbuf = (char *)stkbuf - guard_size;
-#endif
-
    VirtualFree(stkbuf, 0, MEM_RELEASE);
    jl_atomic_fetch_add_relaxed(&num_stack_mappings, -1);
}
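
Aside: the removed Windows path over-allocated by a page-aligned guard region (`bufsz += guard_size`) and returned a pointer just past it, so callers never saw the guard; the protection call itself falls in the lines elided by the hunk above. A minimal sketch of that general pattern, not Julia's code, assuming `VirtualProtect` with `PAGE_NOACCESS` for the elided step:

// Sketch only: reconstructs the removed guard-page layout from the
// deleted lines; the protection call is elided in the diff and assumed here.
#include <windows.h>

static void *malloc_stack_guarded(size_t bufsz, size_t guard_size)
{
    bufsz += guard_size;  // over-allocate so the guard doesn't shrink the stack
    char *stk = VirtualAlloc(NULL, bufsz, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
    if (stk == NULL)
        return NULL;
    DWORD old;
    // Assumed step (elided in the diff): make the low guard_size bytes
    // inaccessible so an overflowing stack faults instead of silently
    // corrupting adjacent memory.
    if (!VirtualProtect(stk, guard_size, PAGE_NOACCESS, &old)) {
        VirtualFree(stk, 0, MEM_RELEASE);
        return NULL;
    }
    return stk + guard_size;  // hand out only the usable region
}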

#else

-# ifdef _OS_OPENBSD_
static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
{
-    void *stk = mmap(0, bufsz, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
-    if (stk == MAP_FAILED)
-        return MAP_FAILED;
-
+# ifdef _OS_OPENBSD_
    // we don't set up a guard page to detect stack overflow: on OpenBSD, any
    // mmap-ed region has a guard page managed by the kernel, so there is no
    // need for one. Additionally, a memory region used as a stack (memory
    // allocated with the MAP_STACK option) has strict permissions, and you
    // can't "create" a guard page on such memory by using `mprotect` on it
-
-    jl_atomic_fetch_add_relaxed(&num_stack_mappings, 1);
-    return stk;
-}
+    void *stk = mmap(0, bufsz, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
+    if (stk == MAP_FAILED)
+        return MAP_FAILED;
# else
-static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
-{
-#ifdef JL_USE_GUARD_PAGE
-    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
-    bufsz += guard_size;
-#endif
-
    void *stk = mmap(0, bufsz, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (stk == MAP_FAILED)
        return MAP_FAILED;

-#ifdef JL_USE_GUARD_PAGE
    // set up a guard page to detect stack overflow
    if (mprotect(stk, jl_guard_size, PROT_NONE) == -1) {
        munmap(stk, bufsz);
        return MAP_FAILED;
    }
-    stk = (char *)stk + guard_size;
-#endif
+# endif

    jl_atomic_fetch_add_relaxed(&num_stack_mappings, 1);
    return stk;
}
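
Note the net effect on the non-OpenBSD POSIX path: the `mprotect` guard page survives, but the `bufsz += guard_size` over-allocation and the `stk + guard_size` offset do not, so the guard is now carved out of the requested mapping and the usable stack shrinks by `jl_guard_size`. A standalone sketch of the resulting layout (assumptions: Linux/POSIX, `GUARD` standing in for `jl_guard_size`, which is already page-aligned here):

// Demonstrates the post-change layout: the guard comes out of the request.
#include <stdio.h>
#include <sys/mman.h>

#define GUARD (4096 * 8)  // mirrors jl_guard_size above

int main(void)
{
    size_t bufsz = 512 * 1024;
    char *stk = mmap(0, bufsz, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (stk == MAP_FAILED)
        return 1;
    // Protect the low pages in place: the mapping stays bufsz bytes long,
    // so the stack usable by a task is bufsz - GUARD bytes, starting at
    // stk + GUARD and growing down toward the PROT_NONE region.
    if (mprotect(stk, GUARD, PROT_NONE) == -1)
        return 1;
    printf("usable stack: %zu bytes at %p\n", bufsz - GUARD, (void *)(stk + GUARD));
    munmap(stk, bufsz);
    return 0;
}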
-# endif

static void free_stack(void *stkbuf, size_t bufsz) JL_NOTSAFEPOINT
{
-#ifdef JL_USE_GUARD_PAGE
-    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
-    bufsz += guard_size;
-    stkbuf = (char *)stkbuf - guard_size;
-#endif
-
    munmap(stkbuf, bufsz);
    jl_atomic_fetch_add_relaxed(&num_stack_mappings, -1);
}
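
For context, both platform variants expose the same two-function surface, with `num_stack_mappings` tracking live mappings. A hypothetical caller (the pool logic that keeps `MIN_STACK_MAPPINGS_PER_POOL` stacks available is outside this diff) would pair them like this:

// Hypothetical usage sketch; assumes the definitions above are in scope.
#include <stdio.h>

static void demo(void)
{
    size_t bufsz = 4 * 1024 * 1024;  // e.g. a 4 MiB task stack
    void *stk = malloc_stack(bufsz);
    if (stk == MAP_FAILED) {
        fprintf(stderr, "stack allocation failed\n");
        return;
    }
    // ... run a task on the stack, staying above the guarded low pages ...
    free_stack(stk, bufsz);  // unmaps and decrements num_stack_mappings
}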