/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "GuardPageAllocator.h"

#include <signal.h>

#include <cassert>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <mutex>
#include <unordered_set>
#include <utility>
#include <vector>

#include <folly/Memory.h>
#include <folly/Singleton.h>
#include <folly/SpinLock.h>
#include <folly/Synchronized.h>
#include <folly/portability/SysMman.h>
#include <folly/portability/Unistd.h>

#include <glog/logging.h>

namespace folly {
namespace fibers {
/**
 * Each stack with a guard page creates two memory mappings.
 * Since this is a limited resource, we don't want to create too many of these.
 *
 * The upper bound on the total number of mappings created
 * is kNumGuarded * kMaxInUse.
 */
/**
 * Number of guarded stacks per allocator instance
 */
constexpr size_t kNumGuarded = 100;
/**
 * Maximum number of allocator instances with guarded stacks enabled
 */
constexpr size_t kMaxInUse = 100;
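
// Worked example of the bound above (illustrative arithmetic only): with the
// defaults of kNumGuarded = 100 and kMaxInUse = 100, up to 100 * 100 = 10,000
// guarded stacks may exist at once, i.e. on the order of 20,000 mappings.
// That is well below the common Linux default vm.max_map_count of 65530, but
// worth keeping in mind when raising these constants.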
/**
 * A cache for kNumGuarded stacks of a given size
 *
 * Thread safe.
 */
class StackCache {
 public:
  explicit StackCache(size_t stackSize) : allocSize_(allocSize(stackSize)) {
    auto p = ::mmap(
        nullptr,
        allocSize_ * kNumGuarded,
        PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS,
        -1,
        0);
    PCHECK(p != (void*)(-1));
    storage_ = reinterpret_cast<unsigned char*>(p);
    /* The bottommost page of each stack allocation is protected lazily in
       borrow(); start every allocation on the free list unprotected. */
    for (size_t i = 0; i < kNumGuarded; ++i) {
      auto allocBegin = storage_ + allocSize_ * i;
      freeList_.emplace_back(allocBegin, /* protected= */ false);
    }
  }
  unsigned char* borrow(size_t size) {
    std::lock_guard<folly::SpinLock> lg(lock_);

    auto as = allocSize(size);
    if (as != allocSize_ || freeList_.empty()) {
      return nullptr;
    }

    auto p = freeList_.back().first;
    if (!freeList_.back().second) {
      /* Protect the bottommost page the first time this stack is handed out */
      PCHECK(0 == ::mprotect(p, pagesize(), PROT_NONE));
      SYNCHRONIZED(pages, protectedPages()) {
        pages.insert(reinterpret_cast<intptr_t>(p));
      }
    }
    freeList_.pop_back();
    /* We allocate the minimum number of pages required, plus a guard page.
       Since we use this for stack storage, the requested allocation is aligned
       at the top of the allocated pages, while the guard page is at the bottom.

           -- increasing addresses -->
         Guard page   Normal pages
        |xxxxxxxxxx|..........|..........|
        <- allocSize_ ------------------->
     p -^                <- size -------->
                         limit -^
    */
    auto limit = p + allocSize_ - size;
    assert(limit >= p + pagesize());
    return limit;
  }
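
  // Worked example of the layout above, with illustrative numbers assuming
  // 4 KiB pages: for size = 8192, allocSize_ = 4096 * (8192 / 4096 + 1)
  // = 12288, so borrow() returns limit = p + 12288 - 8192 = p + 4096. The
  // stack occupies the top two pages; overflowing below limit runs into the
  // PROT_NONE guard page at p and raises SIGSEGV.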
  bool giveBack(unsigned char* limit, size_t size) {
    std::lock_guard<folly::SpinLock> lg(lock_);

    auto as = allocSize(size);
    auto p = limit + size - as;
    if (p < storage_ || p >= storage_ + allocSize_ * kNumGuarded) {
      /* not our allocation */
      return false;
    }

    assert(as == allocSize_);
    assert((p - storage_) % allocSize_ == 0);
    freeList_.emplace_back(p, /* protected= */ true);
    return true;
  }
  ~StackCache() {
    SYNCHRONIZED(pages, protectedPages()) {
      for (const auto& item : freeList_) {
        pages.erase(reinterpret_cast<intptr_t>(item.first));
      }
    }
    PCHECK(0 == ::munmap(storage_, allocSize_ * kNumGuarded));
  }
  static bool isProtected(intptr_t addr) {
    // Use a read lock for reading.
    SYNCHRONIZED_CONST(pages, protectedPages()) {
      for (const auto& page : pages) {
        intptr_t pageEnd = page + pagesize();
        if (page <= addr && addr < pageEnd) {
          return true;
        }
      }
    }
    return false;
  }
 private:
  folly::SpinLock lock_;
  unsigned char* storage_{nullptr};
  size_t allocSize_{0};
  /**
   * LIFO free list. Each pair contains a stack pointer and a protected flag.
   */
  std::vector<std::pair<unsigned char*, bool>> freeList_;
  static size_t pagesize() {
    static const size_t pagesize = sysconf(_SC_PAGESIZE);
    return pagesize;
  }
  /* Returns a multiple of pagesize() large enough to store size
     plus one guard page */
  static size_t allocSize(size_t size) {
    return pagesize() * ((size + pagesize() - 1) / pagesize() + 1);
  }
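
  // Worked example, assuming pagesize() == 4096 (illustrative only):
  //   allocSize(1)    == 4096 * (1 + 1) == 8192   (one stack page + guard)
  //   allocSize(4096) == 4096 * (1 + 1) == 8192   (exact fit + guard)
  //   allocSize(4097) == 4096 * (2 + 1) == 12288  (rounded up + guard)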
  static folly::Synchronized<std::unordered_set<intptr_t>>& protectedPages() {
    static auto instance =
        new folly::Synchronized<std::unordered_set<intptr_t>>();
    return *instance;
  }
};
namespace {

struct sigaction oldSigsegvAction;
void sigsegvSignalHandler(int signum, siginfo_t* info, void*) {
  if (signum != SIGSEGV) {
    std::cerr << "GuardPageAllocator signal handler called for signal: "
              << signum << std::endl;
    return;
  }
  if (info &&
      StackCache::isProtected(reinterpret_cast<intptr_t>(info->si_addr))) {
    std::cerr << "folly::fibers Fiber stack overflow detected." << std::endl;
  }
  // Restore the old signal handler and let it handle the signal.
  sigaction(signum, &oldSigsegvAction, nullptr);
  raise(signum);
}
void installSignalHandler() {
  static std::once_flag onceFlag;
  std::call_once(onceFlag, []() {
    struct sigaction sa;
    memset(&sa, 0, sizeof(sa));
    sigemptyset(&sa.sa_mask);
    // By default signal handlers are run on the signaled thread's stack.
    // In case of stack overflow, running the SIGSEGV signal handler on
    // the same stack leads to another SIGSEGV and crashes the program.
    // Use SA_ONSTACK, so an alternate stack is used (only if configured
    // via sigaltstack).
    sa.sa_flags |= SA_SIGINFO | SA_ONSTACK;
    sa.sa_sigaction = &sigsegvSignalHandler;
    sigaction(SIGSEGV, &sa, &oldSigsegvAction);
  });
}

} // anonymous namespace
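
// SA_ONSTACK only takes effect on threads that have configured an alternate
// signal stack. A minimal sketch of that per-thread setup is shown below; it
// is illustrative only and not part of this file -- the buffer name is an
// assumption:
//
//   static thread_local std::vector<unsigned char> altStack(SIGSTKSZ);
//   stack_t ss{};
//   ss.ss_sp = altStack.data();     // hypothetical per-thread buffer
//   ss.ss_size = altStack.size();
//   ss.ss_flags = 0;
//   PCHECK(0 == sigaltstack(&ss, nullptr));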
class StackCacheEntry;

class CacheManager {
 public:
  static CacheManager& instance() {
    static auto inst = new CacheManager();
    return *inst;
  }
  std::unique_ptr<StackCacheEntry> getStackCache(size_t stackSize) {
    std::lock_guard<folly::SpinLock> lg(lock_);
    if (inUse_ < kMaxInUse) {
      ++inUse_;
      return folly::make_unique<StackCacheEntry>(stackSize);
    }
    return nullptr;
  }
 private:
  folly::SpinLock lock_;
  size_t inUse_{0};

  friend class StackCacheEntry;
  void giveBack(std::unique_ptr<StackCache> /* stackCache_ */) {
    assert(inUse_ > 0);
    --inUse_;
    /* Note: we can add a free list for each size bucket
       if stack re-use is important.
       In this case this needs to be a folly::Singleton
       to make sure the free list is cleaned up on fork.

       TODO(t7351705): fix Singleton destruction order
    */
  }
};
/*
 * RAII Wrapper around a StackCache that calls
 * CacheManager::giveBack() on destruction.
 */
class StackCacheEntry {
 public:
  explicit StackCacheEntry(size_t stackSize)
      : stackCache_(folly::make_unique<StackCache>(stackSize)) {}

  StackCache& cache() const noexcept {
    return *stackCache_;
  }
  ~StackCacheEntry() {
    CacheManager::instance().giveBack(std::move(stackCache_));
  }
 private:
  std::unique_ptr<StackCache> stackCache_;
};
GuardPageAllocator::GuardPageAllocator(bool useGuardPages)
    : useGuardPages_(useGuardPages) {
  installSignalHandler();
}
GuardPageAllocator::~GuardPageAllocator() = default;
unsigned char* GuardPageAllocator::allocate(size_t size) {
  if (useGuardPages_ && !stackCache_) {
    stackCache_ = CacheManager::instance().getStackCache(size);
  }
  if (stackCache_) {
    auto p = stackCache_->cache().borrow(size);
    if (p != nullptr) {
      return p;
    }
  }
  /* Fall back to the unguarded allocator if no guarded stack is available */
  return fallbackAllocator_.allocate(size);
}
void GuardPageAllocator::deallocate(unsigned char* limit, size_t size) {
  if (!(stackCache_ && stackCache_->cache().giveBack(limit, size))) {
    fallbackAllocator_.deallocate(limit, size);
  }
}

} // namespace fibers
} // namespace folly
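
// Usage sketch, purely illustrative (inside folly, FiberManager is the real
// consumer of this allocator; the names below are made up):
//
//   folly::fibers::GuardPageAllocator alloc(/* useGuardPages = */ true);
//   constexpr size_t kStackSize = 16 * 1024;
//   unsigned char* limit = alloc.allocate(kStackSize);
//   // [limit, limit + kStackSize) is usable stack storage; a stack that
//   // grows below limit eventually hits the guard page and SIGSEGVs.
//   alloc.deallocate(limit, kStackSize);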