 // number of stacks to always keep available per pool
 #define MIN_STACK_MAPPINGS_PER_POOL 5
 
-#if defined(_OS_WINDOWS_) || (!defined(_OS_OPENBSD_) && !defined(JL_HAVE_UCONTEXT) && !defined(JL_HAVE_SIGALTSTACK))
-#define JL_USE_GUARD_PAGE 1
 const size_t jl_guard_size = (4096 * 8);
-#else
-const size_t jl_guard_size = 0;
-#endif
-
 static _Atomic(uint32_t) num_stack_mappings = 0;
 
 #ifdef _OS_WINDOWS_
 #define MAP_FAILED NULL
 static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 {
-    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
-    bufsz += guard_size;
-
     void *stk = VirtualAlloc(NULL, bufsz, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
     if (stk == NULL)
         return MAP_FAILED;
@@ -49,7 +40,6 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
         VirtualFree(stk, 0, MEM_RELEASE);
         return MAP_FAILED;
     }
-    stk = (char *)stk + guard_size;
 
     jl_atomic_fetch_add_relaxed(&num_stack_mappings, 1);
     return stk;
@@ -58,12 +48,6 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 
 static void free_stack(void *stkbuf, size_t bufsz) JL_NOTSAFEPOINT
 {
-#ifdef JL_USE_GUARD_PAGE
-    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
-    bufsz += guard_size;
-    stkbuf = (char *)stkbuf - guard_size;
-#endif
-
     VirtualFree(stkbuf, 0, MEM_RELEASE);
     jl_atomic_fetch_add_relaxed(&num_stack_mappings, -1);
 }
@@ -89,22 +73,16 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 # else
 static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 {
-#ifdef JL_USE_GUARD_PAGE
-    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
-    bufsz += guard_size;
-#endif
-
     void *stk = mmap(0, bufsz, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
     if (stk == MAP_FAILED)
         return MAP_FAILED;
 
-#ifdef JL_USE_GUARD_PAGE
+#if !defined(JL_HAVE_UCONTEXT) && !defined(JL_HAVE_SIGALTSTACK)
     // set up a guard page to detect stack overflow
     if (mprotect(stk, jl_guard_size, PROT_NONE) == -1) {
         munmap(stk, bufsz);
         return MAP_FAILED;
     }
-    stk = (char *)stk + guard_size;
 #endif
 
     jl_atomic_fetch_add_relaxed(&num_stack_mappings, 1);
@@ -114,12 +92,6 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 
 static void free_stack(void *stkbuf, size_t bufsz) JL_NOTSAFEPOINT
 {
-#ifdef JL_USE_GUARD_PAGE
-    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
-    bufsz += guard_size;
-    stkbuf = (char *)stkbuf - guard_size;
-#endif
-
     munmap(stkbuf, bufsz);
     jl_atomic_fetch_add_relaxed(&num_stack_mappings, -1);
 }
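
For context on the guard-page pattern the mmap path keeps: the buffer is mapped read/write in full, and the lowest jl_guard_size bytes are then re-protected to PROT_NONE, so a downward-growing stack that overruns its usable region faults in the guard instead of silently corrupting adjacent memory. The standalone sketch below illustrates the same idea with plain POSIX calls; it is not taken from the Julia sources, and the buffer and guard sizes are made-up values.

// Standalone sketch of the guard-page technique (illustrative values, not
// Julia's allocator): map the whole stack buffer read/write, then revoke
// access to the lowest pages so a downward-growing stack that overruns its
// usable region faults immediately instead of corrupting adjacent memory.
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t page = (size_t)sysconf(_SC_PAGESIZE);
    size_t guard = 8 * page;      // plays the role of jl_guard_size
    size_t bufsz = 512 * 1024;    // total mapping size, guard included

    void *stk = mmap(NULL, bufsz, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (stk == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    // Turn the low end of the mapping into an inaccessible guard region.
    if (mprotect(stk, guard, PROT_NONE) == -1) {
        perror("mprotect");
        munmap(stk, bufsz);
        return 1;
    }

    printf("mapping at %p, usable stack memory starts at %p\n",
           stk, (void *)((char *)stk + guard));

    // The guard is still part of the same mapping, so one munmap of the
    // original size releases everything.
    munmap(stk, bufsz);
    return 0;
}

The removed JL_USE_GUARD_PAGE bookkeeping went a step further: it reserved the guard on top of the requested size and returned a pointer just past it. This change drops that extra accounting, so the guard once again comes out of the requested buffer, as the retained mprotect call shows.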