/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "GuardPageAllocator.h"

#ifndef _WIN32
#include <unistd.h>
#endif

#include <cassert>
#include <memory>
#include <mutex>
#include <utility>
#include <vector>

#include <folly/Memory.h>
#include <folly/Singleton.h>
#include <folly/SpinLock.h>
#include <folly/portability/SysMman.h>

#include <glog/logging.h>

namespace folly {
namespace fibers {
/**
 * Each stack with a guard page creates two memory mappings.
 * Since this is a limited resource, we don't want to create too many of these.
 *
 * The upper bound on the total number of mappings created
 * is kNumGuarded * kMaxInUse.
 */
/**
 * Number of guarded stacks per allocator instance
 */
constexpr size_t kNumGuarded = 100;
/**
 * Maximum number of allocator instances with guarded stacks enabled
 */
constexpr size_t kMaxInUse = 100;
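/* Illustration of the bound stated above: kNumGuarded * kMaxInUse
   = 100 * 100 = 10,000, so at most ten thousand guarded stacks (and
   the mappings their guard pages split off) can exist at once. Keeping
   both constants small keeps that budget small. */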
/**
 * A cache for kNumGuarded stacks of a given size
 */
class StackCache {
 public:
  explicit StackCache(size_t stackSize) : allocSize_(allocSize(stackSize)) {
    auto p = ::mmap(
        nullptr,
        allocSize_ * kNumGuarded,
        PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS,
        -1,
        0);
    PCHECK(p != MAP_FAILED);
    storage_ = reinterpret_cast<unsigned char*>(p);

    /* The bottommost page of every stack allocation serves as its guard
       page. Slots start out unprotected; the guard page is mprotect()ed
       lazily on first borrow() to avoid kNumGuarded upfront syscalls. */
    for (size_t i = 0; i < kNumGuarded; ++i) {
      auto allocBegin = storage_ + allocSize_ * i;
      freeList_.emplace_back(allocBegin, /* protected= */ false);
    }
  }
  unsigned char* borrow(size_t size) {
    std::lock_guard<folly::SpinLock> lg(lock_);

    assert(storage_);

    auto as = allocSize(size);
    if (as != allocSize_ || freeList_.empty()) {
      /* Wrong size class for this cache, or no free slots left */
      return nullptr;
    }

    auto p = freeList_.back().first;
    if (!freeList_.back().second) {
      /* First use of this slot: protect its bottom page */
      PCHECK(0 == ::mprotect(p, pagesize(), PROT_NONE));
    }
    freeList_.pop_back();
    /* We allocate the minimum number of pages required, plus a guard page.
       Since we use this for stack storage, the requested allocation is
       aligned at the top of the allocated pages, while the guard page is
       at the bottom.

              -- increasing addresses -->
            Guard page     Normal pages
           |xxxxxxxxxx|..........|..........|
           <- allocSize_ ------------------->
        p -^                <- size -------->
                     limit -^
    */
    auto limit = p + allocSize_ - size;
    assert(limit >= p + pagesize());
    return limit;
  }
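  /* Worked example of the math above (illustrative, assuming 4 KiB pages):
     for size = 16384, allocSize_ = 4096 * (16384 / 4096 + 1) = 20480, so
     borrow() returns limit = p + 20480 - 16384 = p + 4096: the usable
     16 KiB occupy the four pages directly above the guard page at p. */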
  bool giveBack(unsigned char* limit, size_t size) {
    std::lock_guard<folly::SpinLock> lg(lock_);

    assert(storage_);

    /* Recover the slot base: the usable region ends at limit + size,
       which is also the top of the slot, so the base lies allocSize(size)
       bytes below that. */
    auto as = allocSize(size);
    auto p = limit + size - as;
    if (p < storage_ || p >= storage_ + allocSize_ * kNumGuarded) {
      /* Not one of ours; the fallback allocator owns it */
      return false;
    }

    assert(as == allocSize_);
    assert((p - storage_) % allocSize_ == 0);
    freeList_.emplace_back(p, /* protected= */ true);
    return true;
  }
  ~StackCache() {
    assert(storage_);
    PCHECK(0 == ::munmap(storage_, allocSize_ * kNumGuarded));
  }
 private:
  folly::SpinLock lock_;
  unsigned char* storage_{nullptr};
  size_t allocSize_{0};

  /**
   * LIFO free list. Each pair holds the slot's base pointer and a flag
   * recording whether its guard page has already been mprotect()ed.
   */
  std::vector<std::pair<unsigned char*, bool>> freeList_;
  static size_t pagesize() {
    static const size_t pagesize = sysconf(_SC_PAGESIZE);
    return pagesize;
  }
  /* Returns a multiple of pagesize() big enough to store size bytes
     plus one guard page */
  static size_t allocSize(size_t size) {
    return pagesize() * ((size + pagesize() - 1) / pagesize() + 1);
  }
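  /* Rounding check (illustrative, assuming 4 KiB pages):
       allocSize(1)    = 4096 * ((1 + 4095) / 4096 + 1)    = 8192
       allocSize(4096) = 4096 * ((4096 + 4095) / 4096 + 1) = 8192
     A page-aligned request grows by exactly one page: the guard. */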
};

class StackCacheEntry;

/**
 * Hands out StackCacheEntry instances, bounding the number in use
 * by kMaxInUse.
 */
class CacheManager {
 public:
  static CacheManager& instance() {
    /* Intentionally leaked to sidestep destruction-order problems at
       shutdown; see the TODO in giveBack() below. */
    static auto inst = new CacheManager();
    return *inst;
  }

  /* Defined out of line, below: StackCacheEntry must be a complete type
     at the point where make_unique instantiates it. */
  std::unique_ptr<StackCacheEntry> getStackCache(size_t stackSize);

 private:
  folly::SpinLock lock_;
  size_t inUse_{0};

  friend class StackCacheEntry;
  void giveBack(std::unique_ptr<StackCache> /* stackCache */) {
    assert(inUse_ > 0);
    --inUse_;
    /* Note: we can add a free list for each size bucket
       if stack re-use is important.
       In that case this needs to be a folly::Singleton
       to make sure the free list is cleaned up on fork.

       TODO(t7351705): fix Singleton destruction order
    */
  }
};
/*
 * RAII wrapper around a StackCache that calls
 * CacheManager::giveBack() on destruction.
 */
class StackCacheEntry {
 public:
  explicit StackCacheEntry(size_t stackSize)
      : stackCache_(folly::make_unique<StackCache>(stackSize)) {}

  StackCache& cache() const noexcept {
    return *stackCache_;
  }

  ~StackCacheEntry() {
    CacheManager::instance().giveBack(std::move(stackCache_));
  }

 private:
  std::unique_ptr<StackCache> stackCache_;
};

std::unique_ptr<StackCacheEntry> CacheManager::getStackCache(
    size_t stackSize) {
  std::lock_guard<folly::SpinLock> lg(lock_);
  if (inUse_ < kMaxInUse) {
    ++inUse_;
    return folly::make_unique<StackCacheEntry>(stackSize);
  }
  return nullptr;
}
GuardPageAllocator::GuardPageAllocator(bool useGuardPages)
    : useGuardPages_(useGuardPages) {}
GuardPageAllocator::~GuardPageAllocator() = default;
unsigned char* GuardPageAllocator::allocate(size_t size) {
  if (useGuardPages_ && !stackCache_) {
    /* Lazily acquire a cache; may be null once kMaxInUse is reached */
    stackCache_ = CacheManager::instance().getStackCache(size);
  }

  if (stackCache_) {
    auto p = stackCache_->cache().borrow(size);
    if (p != nullptr) {
      return p;
    }
  }
  return fallbackAllocator_.allocate(size);
}
void GuardPageAllocator::deallocate(unsigned char* limit, size_t size) {
  if (!(stackCache_ && stackCache_->cache().giveBack(limit, size))) {
    fallbackAllocator_.deallocate(limit, size);
  }
}

} // namespace fibers
} // namespace folly
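/* Usage sketch (illustrative only; the fibers library is the real consumer,
   and the size below is hypothetical):

     folly::fibers::GuardPageAllocator allocator(true);  // guard pages on
     unsigned char* limit = allocator.allocate(16 * 1024);
     // The stack occupies [limit, limit + 16 * 1024) and grows downward;
     // running past limit faults on the PROT_NONE guard page instead of
     // silently corrupting a neighboring stack.
     allocator.deallocate(limit, 16 * 1024);
*/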