From e921dc8fb8af13c65b9f5d14fa48b39073741c1a Mon Sep 17 00:00:00 2001
From: Jameson Nash
Date: Thu, 29 Aug 2024 14:12:40 -0400
Subject: [PATCH] Revert "Don't expose guard pages to malloc_stack API consumers" (#55555)

Reverts JuliaLang/julia#54591

This caused the runtime to misbehave and crash, since all of the
consumers of this information in the runtime assumed that the guard
pages are accounted for correctly as part of the reserved allocation.
Nothing in the runtime ever promised that it is valid to access the
pages beyond the current redzone (indeed, ASAN would forbid it as
well).
---
 src/gc-stacks.c | 47 +++++------------------------------------------
 1 file changed, 5 insertions(+), 42 deletions(-)

diff --git a/src/gc-stacks.c b/src/gc-stacks.c
index 08425019a4daf..783129ea97693 100644
--- a/src/gc-stacks.c
+++ b/src/gc-stacks.c
@@ -23,22 +23,13 @@
 // number of stacks to always keep available per pool
 #define MIN_STACK_MAPPINGS_PER_POOL 5
 
-#if defined(_OS_WINDOWS_) || (!defined(_OS_OPENBSD_) && !defined(JL_HAVE_UCONTEXT) && !defined(JL_HAVE_SIGALTSTACK))
-#define JL_USE_GUARD_PAGE 1
 const size_t jl_guard_size = (4096 * 8);
-#else
-const size_t jl_guard_size = 0;
-#endif
-
 static _Atomic(uint32_t) num_stack_mappings = 0;
 
 #ifdef _OS_WINDOWS_
 #define MAP_FAILED NULL
 static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 {
-    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
-    bufsz += guard_size;
-
     void *stk = VirtualAlloc(NULL, bufsz, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
     if (stk == NULL)
         return MAP_FAILED;
@@ -49,7 +40,6 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
         VirtualFree(stk, 0, MEM_RELEASE);
         return MAP_FAILED;
     }
-    stk = (char *)stk + guard_size;
 
     jl_atomic_fetch_add_relaxed(&num_stack_mappings, 1);
     return stk;
@@ -58,68 +48,41 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 static void free_stack(void *stkbuf, size_t bufsz) JL_NOTSAFEPOINT
 {
-#ifdef JL_USE_GUARD_PAGE
-    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
-    bufsz += guard_size;
-    stkbuf = (char *)stkbuf - guard_size;
-#endif
-
     VirtualFree(stkbuf, 0, MEM_RELEASE);
     jl_atomic_fetch_add_relaxed(&num_stack_mappings, -1);
 }
 
 #else
 
-# ifdef _OS_OPENBSD_
 static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
 {
-    void* stk = mmap(0, bufsz, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
-    if (stk == MAP_FAILED)
-        return MAP_FAILED;
-
+# ifdef _OS_OPENBSD_
     // we don't set up a guard page to detect stack overflow: on OpenBSD, any
     // mmap-ed region has guard page managed by the kernel, so there is no
     // need for it.
     // Additionally, a memory region used as stack (memory
     // allocated with MAP_STACK option) has strict permission, and you can't
     // "create" a guard page on such memory by using `mprotect` on it
-
-    jl_atomic_fetch_add_relaxed(&num_stack_mappings, 1);
-    return stk;
-}
+    void* stk = mmap(0, bufsz, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
+    if (stk == MAP_FAILED)
+        return MAP_FAILED;
 # else
-static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT
-{
-#ifdef JL_USE_GUARD_PAGE
-    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
-    bufsz += guard_size;
-#endif
-
     void* stk = mmap(0, bufsz, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
     if (stk == MAP_FAILED)
         return MAP_FAILED;
 
-#ifdef JL_USE_GUARD_PAGE
     // set up a guard page to detect stack overflow
     if (mprotect(stk, jl_guard_size, PROT_NONE) == -1) {
         munmap(stk, bufsz);
         return MAP_FAILED;
     }
-    stk = (char *)stk + guard_size;
-#endif
+# endif
 
     jl_atomic_fetch_add_relaxed(&num_stack_mappings, 1);
     return stk;
 }
-# endif
 
 static void free_stack(void *stkbuf, size_t bufsz) JL_NOTSAFEPOINT
 {
-#ifdef JL_USE_GUARD_PAGE
-    size_t guard_size = LLT_ALIGN(jl_guard_size, jl_page_size);
-    bufsz += guard_size;
-    stkbuf = (char *)stkbuf - guard_size;
-#endif
-
     munmap(stkbuf, bufsz);
     jl_atomic_fetch_add_relaxed(&num_stack_mappings, -1);
 }
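
For context, a minimal standalone C sketch (not taken from the patch) of the guard-page pattern the restored non-Windows path relies on: the guard pages are carved out of the same mmap reservation that is reported to callers, so freeing the stack only needs the one base pointer and size. The helper names alloc_stack_with_guard and free_stack_with_guard and the 512 KiB buffer size are invented for this example; only the 4096 * 8 guard size mirrors jl_guard_size.

    /* Sketch only: mirrors the shape of the restored non-Windows path,
     * not the actual Julia runtime code. */
    #define _DEFAULT_SOURCE
    #include <stddef.h>
    #include <stdio.h>
    #include <sys/mman.h>

    static const size_t guard_size = 4096 * 8;   /* same size as jl_guard_size */

    /* Reserve bufsz bytes and make the lowest guard_size bytes inaccessible,
     * so a downward-growing stack faults instead of corrupting other memory. */
    static void *alloc_stack_with_guard(size_t bufsz)
    {
        void *stk = mmap(0, bufsz, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (stk == MAP_FAILED)
            return MAP_FAILED;
        if (mprotect(stk, guard_size, PROT_NONE) == -1) {
            munmap(stk, bufsz);
            return MAP_FAILED;
        }
        return stk;   /* base of the whole reservation, guard included */
    }

    static void free_stack_with_guard(void *stk, size_t bufsz)
    {
        munmap(stk, bufsz);   /* bufsz still covers the guard pages */
    }

    int main(void)
    {
        size_t bufsz = 512 * 1024;   /* arbitrary demo size */
        void *stk = alloc_stack_with_guard(bufsz);
        if (stk == MAP_FAILED) {
            perror("alloc_stack_with_guard");
            return 1;
        }
        /* Usable stack memory is [stk + guard_size, stk + bufsz). */
        printf("reservation %p..%p, usable from %p\n",
               stk, (void *)((char *)stk + bufsz), (void *)((char *)stk + guard_size));
        free_stack_with_guard(stk, bufsz);
        return 0;
    }

Keeping the guard inside [stk, stk + bufsz) is what lets free_stack unmap with the original base pointer and bufsz, which is the accounting the commit message says the runtime's consumers assume.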