/*
 * Copyright 2017 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #include "GuardPageAllocator.h"
#include <folly/Singleton.h>
#include <folly/SpinLock.h>
#include <folly/Synchronized.h>
#include <folly/portability/SysMman.h>
#include <folly/portability/Unistd.h>

#include <glog/logging.h>
namespace folly {
namespace fibers {

/**
 * Each stack with a guard page creates two memory mappings.
 * Since this is a limited resource, we don't want to create too many of these.
 *
 * The upper bound on the total number of mappings created
 * is kNumGuarded * kMaxInUse.
 */
/**
 * Number of guarded stacks per allocator instance
 */
constexpr size_t kNumGuarded = 100;
/**
 * Maximum number of allocator instances with guarded stacks enabled
 */
constexpr size_t kMaxInUse = 100;
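
/*
 * Worked example (an illustration, not a tuning guide): with the defaults
 * above, at most 100 * 100 = 10,000 guarded stacks exist at once. Linux
 * bounds per-process mappings by vm.max_map_count (typically 65530 by
 * default), which is why these constants are kept small.
 */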
/**
 * A cache for kNumGuarded stacks of a given size
 *
 * Thread safe.
 */
class StackCache {
 public:
  explicit StackCache(size_t stackSize) : allocSize_(allocSize(stackSize)) {
    auto p = ::mmap(
        nullptr,
        allocSize_ * kNumGuarded,
        PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS,
        /* fd */ -1,
        /* offset */ 0);
    PCHECK(p != (void*)(-1));
    storage_ = reinterpret_cast<unsigned char*>(p);

    /* The bottommost page of each stack allocation becomes the guard page;
       it is mprotect()ed lazily on first borrow(), so every entry starts
       out unprotected */
    for (size_t i = 0; i < kNumGuarded; ++i) {
      auto allocBegin = storage_ + allocSize_ * i;
      freeList_.emplace_back(allocBegin, /* protected= */ false);
    }
  }
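
  /* Sizing example for the constructor above (assuming 4 KiB pages, a
     hypothetical configuration): a cache for 16 KiB stacks reserves
     100 * 20480 = 2,048,000 bytes (~2 MiB) in a single anonymous mapping. */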
  unsigned char* borrow(size_t size) {
    std::lock_guard<folly::SpinLock> lg(lock_);

    assert(storage_);

    auto as = allocSize(size);
    if (as != allocSize_ || freeList_.empty()) {
      /* This cache is for a different stack size, or is exhausted */
      return nullptr;
    }

    auto p = freeList_.back().first;
    if (!freeList_.back().second) {
      /* First borrow of this slot: arm the guard page and record it so the
         SIGSEGV handler can recognize overflows into it */
      PCHECK(0 == ::mprotect(p, pagesize(), PROT_NONE));
      SYNCHRONIZED(pages, protectedPages()) {
        pages.insert(reinterpret_cast<intptr_t>(p));
      }
    }
    freeList_.pop_back();
    /* We allocate the minimum number of pages required, plus a guard page.
       Since we use this for stack storage, the requested allocation is aligned
       at the top of the allocated pages, while the guard page is at the bottom.

            -- increasing addresses -->
          Guard page     Normal pages
        |xxxxxxxxxx|..........|..........|
        <- allocSize_ ------------------->
     p -^                <- size -------->
                  limit -^
    */
    auto limit = p + allocSize_ - size;
    assert(limit >= p + pagesize());
    return limit;
  }
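
  /* Example for the arithmetic above (assuming 4 KiB pages, a hypothetical
     configuration): with size = 16384 and allocSize_ = 20480, borrow()
     returns limit = p + 4096, leaving [p, p + 4096) as the PROT_NONE guard
     page that a downward-growing stack hits on overflow. */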
  bool giveBack(unsigned char* limit, size_t size) {
    std::lock_guard<folly::SpinLock> lg(lock_);

    assert(storage_);

    auto as = allocSize(size);
    auto p = limit + size - as;
    if (p < storage_ || p >= storage_ + allocSize_ * kNumGuarded) {
      /* not from this cache */
      return false;
    }

    assert(as == allocSize_);
    assert((p - storage_) % allocSize_ == 0);
    /* The guard page stays armed while the stack sits on the free list,
       so the next borrow() can skip the mprotect() call */
    freeList_.emplace_back(p, /* protected= */ true);
    return true;
  }
  ~StackCache() {
    assert(storage_);
    SYNCHRONIZED(pages, protectedPages()) {
      for (const auto& item : freeList_) {
        pages.erase(reinterpret_cast<intptr_t>(item.first));
      }
    }
    PCHECK(0 == ::munmap(storage_, allocSize_ * kNumGuarded));
  }
  static bool isProtected(intptr_t addr) {
    // Use a read lock for reading.
    SYNCHRONIZED_CONST(pages, protectedPages()) {
      for (const auto& page : pages) {
        intptr_t pageEnd = intptr_t(page + pagesize());
        if (page <= addr && addr < pageEnd) {
          return true;
        }
      }
    }
    return false;
  }
 private:
  folly::SpinLock lock_;
  unsigned char* storage_{nullptr};
  size_t allocSize_{0};
  /**
   * LIFO free list. Each pair contains the stack pointer and a flag
   * indicating whether its guard page is currently protected.
   */
  std::vector<std::pair<unsigned char*, bool>> freeList_;
  static size_t pagesize() {
    static const size_t pagesize = size_t(sysconf(_SC_PAGESIZE));
    return pagesize;
  }
  /* Returns a multiple of pagesize() big enough to store size bytes plus
     one guard page */
  static size_t allocSize(size_t size) {
    return pagesize() * ((size + pagesize() - 1) / pagesize() + 1);
  }
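
  /* Example (assuming 4 KiB pages, a hypothetical configuration):
     allocSize(16384) = 4096 * (4 + 1) = 20480 (four stack pages plus one
     guard page), and allocSize(13000) rounds up to the same 20480. */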
  static folly::Synchronized<std::unordered_set<intptr_t>>& protectedPages() {
    /* Intentionally leaked: the SIGSEGV handler may consult this set at any
       point, including during shutdown after static destructors have run */
    static auto instance =
        new folly::Synchronized<std::unordered_set<intptr_t>>();
    return *instance;
  }
};
namespace {

struct sigaction oldSigsegvAction;

void sigsegvSignalHandler(int signum, siginfo_t* info, void*) {
  if (signum != SIGSEGV) {
    std::cerr << "GuardPageAllocator signal handler called for signal: "
              << signum << std::endl;
    return;
  }

  if (info &&
      StackCache::isProtected(reinterpret_cast<intptr_t>(info->si_addr))) {
    std::cerr << "folly::fibers Fiber stack overflow detected." << std::endl;
  }

  // Restore the old signal handler and let it handle the signal.
  sigaction(signum, &oldSigsegvAction, nullptr);
  raise(signum);
}

bool isInJVM() {
  auto getCreated = dlsym(RTLD_DEFAULT, "JNI_GetCreatedJavaVMs");
  return getCreated != nullptr;
}
void installSignalHandler() {
  static std::once_flag onceFlag;
  std::call_once(onceFlag, []() {
    if (isInJVM()) {
      // Don't install the signal handler, since the JVM's internal signal
      // handler doesn't work with SA_ONSTACK
      return;
    }

    struct sigaction sa;
    memset(&sa, 0, sizeof(sa));
    sigemptyset(&sa.sa_mask);
    // By default signal handlers are run on the signaled thread's stack.
    // In case of stack overflow running the SIGSEGV signal handler on
    // the same stack leads to another SIGSEGV and crashes the program.
    // Use SA_ONSTACK, so an alternate stack is used (only if configured
    // via sigaltstack).
    sa.sa_flags |= SA_SIGINFO | SA_ONSTACK;
    sa.sa_sigaction = &sigsegvSignalHandler;
    sigaction(SIGSEGV, &sa, &oldSigsegvAction);
  });
}

} // anonymous namespace
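
/* SA_ONSTACK only takes effect on threads that have installed an alternate
   signal stack; nothing in this file sets one up. A minimal sketch of what
   a thread could do, using the standard sigaltstack() call:

     static thread_local std::vector<char> altStack(SIGSTKSZ);
     stack_t ss = {};
     ss.ss_sp = altStack.data();
     ss.ss_size = altStack.size();
     sigaltstack(&ss, nullptr);
*/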
class StackCacheEntry;

class CacheManager {
 public:
  static CacheManager& instance() {
    static auto inst = new CacheManager();
    return *inst;
  }

  std::unique_ptr<StackCacheEntry> getStackCache(size_t stackSize) {
    std::lock_guard<folly::SpinLock> lg(lock_);
    if (inUse_ < kMaxInUse) {
      ++inUse_;
      return std::make_unique<StackCacheEntry>(stackSize);
    }

    return nullptr;
  }
 private:
  folly::SpinLock lock_;
  size_t inUse_{0};

  friend class StackCacheEntry;

  void giveBack(std::unique_ptr<StackCache> /* stackCache_ */) {
    assert(inUse_ > 0);
    --inUse_;
    /* Note: we can add a free list for each size bucket
       if stack re-use is important.
       In this case this needs to be a folly::Singleton
       to make sure the free list is cleaned up on fork.

       TODO(t7351705): fix Singleton destruction order
    */
  }
};
/**
 * RAII wrapper around a StackCache that calls
 * CacheManager::giveBack() on destruction.
 */
class StackCacheEntry {
 public:
  explicit StackCacheEntry(size_t stackSize)
      : stackCache_(std::make_unique<StackCache>(stackSize)) {}

  StackCache& cache() const noexcept {
    return *stackCache_;
  }

  ~StackCacheEntry() {
    CacheManager::instance().giveBack(std::move(stackCache_));
  }

 private:
  std::unique_ptr<StackCache> stackCache_;
};
GuardPageAllocator::GuardPageAllocator(bool useGuardPages)
    : useGuardPages_(useGuardPages) {
  installSignalHandler();
}

GuardPageAllocator::~GuardPageAllocator() = default;
unsigned char* GuardPageAllocator::allocate(size_t size) {
  if (useGuardPages_ && !stackCache_) {
    stackCache_ = CacheManager::instance().getStackCache(size);
  }

  if (stackCache_) {
    auto p = stackCache_->cache().borrow(size);
    if (p != nullptr) {
      return p;
    }
  }

  /* Fall back to the unguarded allocator if guard pages are disabled,
     the cache limit was hit, or the cache is exhausted */
  return fallbackAllocator_.allocate(size);
}
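
/* Usage sketch (a hypothetical caller, not part of this file): allocate()
   returns the lowest address of `size` usable bytes, with the guard page
   immediately below, so a downward-growing fiber stack that overruns its
   base faults instead of corrupting a neighboring stack:

     GuardPageAllocator allocator(true);
     unsigned char* base = allocator.allocate(16 * 1024);
     // ... run a fiber whose stack occupies [base, base + 16 * 1024) ...
     allocator.deallocate(base, 16 * 1024);
*/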
void GuardPageAllocator::deallocate(unsigned char* limit, size_t size) {
  if (!(stackCache_ && stackCache_->cache().giveBack(limit, size))) {
    fallbackAllocator_.deallocate(limit, size);
  }
}

} // namespace fibers
} // namespace folly