2 * Copyright 2015 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
16 #include "GuardPageAllocator.h"
23 #include <folly/Singleton.h>
24 #include <folly/SpinLock.h>
26 #include <glog/logging.h>
28 namespace folly { namespace fibers {
/**
 * Each stack with a guard page creates two memory mappings.
 * Since this is a limited resource, we don't want to create too many of these.
 *
 * The upper bound on the total number of mappings created is
 * kNumGuarded * kMaxInUse.
 */

/// Number of guarded stacks per allocator instance
constexpr size_t kNumGuarded = 100;

/// Maximum number of allocator instances with guarded stacks enabled
constexpr size_t kMaxInUse = 100;
49 * A cache for kNumGuarded stacks of a given size
// Constructor: maps one contiguous anonymous region big enough for
// kNumGuarded slots of allocSize(stackSize) bytes each, then turns the
// bottommost page of every slot into a PROT_NONE guard page so a stack
// overflow faults instead of silently corrupting the neighboring slot.
// NOTE(review): this excerpt elides some original lines (e.g. the mmap
// fd/offset arguments and closing braces) — consult the full file.
53 explicit StackCache(size_t stackSize)
54 : allocSize_(allocSize(stackSize)) {
55 auto p = ::mmap(nullptr, allocSize_ * kNumGuarded,
56 PROT_READ | PROT_WRITE,
57 MAP_PRIVATE | MAP_ANONYMOUS,
// PCHECK logs errno and aborts on failure; (void*)-1 is MAP_FAILED.
59 PCHECK(p != (void*)(-1));
60 storage_ = reinterpret_cast<unsigned char*>(p);
62 /* Protect the bottommost page of every stack allocation */
63 for (size_t i = 0; i < kNumGuarded; ++i) {
64 auto allocBegin = storage_ + allocSize_ * i;
// Every slot starts out on the free list; its low end is the guard page.
65 freeList_.push_back(allocBegin);
66 PCHECK(0 == ::mprotect(allocBegin, pagesize(), PROT_NONE));
// Hands out one cached stack, or fails so the caller can use a fallback.
// Returns the *upper* end of the usable region — stacks grow downward
// toward the guard page. Thread-safe: freeList_ is guarded by lock_.
// NOTE(review): the early-return body and the final `return limit;` are
// elided in this excerpt.
70 unsigned char* borrow(size_t size) {
71 std::lock_guard<folly::SpinLock> lg(lock_);
// Reject requests whose page-rounded size doesn't match this cache's
// fixed slot size, or when no slot is currently free.
75 auto as = allocSize(size);
76 if (as != allocSize_ || freeList_.empty()) {
80 auto p = freeList_.back();
83 /* We allocate minimum number of pages required, plus a guard page.
84 Since we use this for stack storage, requested allocation is aligned
85 at the top of the allocated pages, while the guard page is at the bottom.
87 -- increasing addresses -->
88 Guard page Normal pages
89 |xxxxxxxxxx|..........|..........|
90 <- allocSize_ ------------------->
91 p -^ <- size -------->
// limit is the lowest address the caller may use; it must sit at least
// one full page above p so the guard page is never handed out.
94 auto limit = p + allocSize_ - size;
95 assert(limit >= p + pagesize());
// Takes back a stack previously handed out by borrow(). Reports false
// when (limit, size) does not fall inside this cache's mapping, so the
// caller can route the pointer to the fallback allocator instead.
// NOTE(review): the `return false;` / `return true;` statements are
// elided in this excerpt.
99 bool giveBack(unsigned char* limit, size_t size) {
100 std::lock_guard<folly::SpinLock> lg(lock_);
// Recover the slot's base address from the user-visible limit pointer
// (inverse of the arithmetic in borrow()).
104 auto as = allocSize(size);
105 auto p = limit + size - as;
106 if (p < storage_ || p >= storage_ + allocSize_ * kNumGuarded) {
// Sanity: size class matches and the pointer is slot-aligned in-range.
111 assert(as == allocSize_);
112 assert((p - storage_) % allocSize_ == 0);
113 freeList_.push_back(p);
// Destructor body (signature elided in this excerpt): unmapping the whole
// region releases all slots and their PROT_NONE guard pages at once.
119 PCHECK(0 == ::munmap(storage_, allocSize_ * kNumGuarded));
// Serializes borrow()/giveBack() access to freeList_.
123 folly::SpinLock lock_;
// Base of the single mmap'd region holding all kNumGuarded slots.
124 unsigned char* storage_{nullptr};
// Fixed per-slot size: requested stack rounded to pages + one guard page.
125 size_t allocSize_{0};
// Base addresses of slots currently available for borrow().
130 std::vector<unsigned char*> freeList_;
// System page size, queried from sysconf once and cached in a
// function-local static.
// NOTE(review): the `return pagesize;` line is elided in this excerpt.
132 static size_t pagesize() {
133 static const size_t pagesize = sysconf(_SC_PAGESIZE);
137 /* Returns a multiple of pagesize() enough to store size + one guard page */
138 static size_t allocSize(size_t size) {
// Round size up to whole pages, then add exactly one page for the guard.
139 return pagesize() * ((size + pagesize() - 1)/pagesize() + 1);
// Intentionally leaked Meyers-style singleton (never deleted) — see the
// TODO about Singleton destruction order later in this class.
// NOTE(review): the `return *inst;` line is elided in this excerpt.
145 static CacheManager& instance() {
146 static auto inst = new CacheManager();
// Hands out a guarded-stack cache while fewer than kMaxInUse are live;
// presumably inUse_ is incremented on a line elided here, and the
// over-limit branch (returning an unguarded/null entry) is also elided
// — confirm against the full file.
150 std::unique_ptr<StackCacheEntry> getStackCache(size_t stackSize) {
151 std::lock_guard<folly::SpinLock> lg(lock_);
152 if (inUse_ < kMaxInUse) {
154 return folly::make_unique<StackCacheEntry>(stackSize);
// Guards the inUse_ budget checked in getStackCache().
161 folly::SpinLock lock_;
// StackCacheEntry's destructor calls the private giveBack() below.
164 friend class StackCacheEntry;
// Called when a StackCacheEntry dies; the cache itself is currently
// dropped rather than pooled — see the note below.
166 void giveBack(std::unique_ptr<StackCache> stackCache_) {
169 /* Note: we can add a free list for each size bucket
170 if stack re-use is important.
171 In this case this needs to be a folly::Singleton
172 to make sure the free list is cleaned up on fork.
174 TODO(t7351705): fix Singleton destruction order
// RAII wrapper that owns one StackCache and hands it back to the
// CacheManager singleton on destruction, releasing the kMaxInUse budget.
// NOTE(review): access specifiers and the destructor signature are
// elided in this excerpt.
179 class StackCacheEntry {
181 explicit StackCacheEntry(size_t stackSize)
182 : stackCache_(folly::make_unique<StackCache>(stackSize)) {
// Non-owning accessor; valid only while this entry is alive.
185 StackCache& cache() const noexcept {
// Destructor body: return the cache to the singleton manager.
190 CacheManager::instance().giveBack(std::move(stackCache_));
194 std::unique_ptr<StackCache> stackCache_;
// useGuardPages selects between guard-page-protected stacks and the
// plain fallbackAllocator_ (see allocate()/deallocate() below).
197 GuardPageAllocator::GuardPageAllocator(bool useGuardPages)
198 : useGuardPages_(useGuardPages) {
// Defaulted out-of-line — presumably because the header only
// forward-declares StackCacheEntry, which the unique_ptr member needs
// complete here; confirm against the header.
201 GuardPageAllocator::~GuardPageAllocator() = default;
// Returns a stack top pointer. Prefers a guarded cached stack; falls
// back to the heap allocator when guard pages are disabled, the
// kMaxInUse budget is exhausted, or the cache has no matching free slot.
// NOTE(review): the guard `if (stackCache_)` and the `if (p) return p;`
// success path are elided in this excerpt.
203 unsigned char* GuardPageAllocator::allocate(size_t size) {
// Lazily acquire a cache on first use, sized by this first request.
204 if (useGuardPages_ && !stackCache_) {
205 stackCache_ = CacheManager::instance().getStackCache(size);
209 auto p = stackCache_->cache().borrow(size);
// No guarded slot available: plain heap-backed stack, no guard page.
214 return fallbackAllocator_.allocate(size);
// Routes the stack back to whichever allocator produced it:
// StackCache::giveBack() returns false for pointers outside the guarded
// mapping, in which case the fallback allocator frees it.
217 void GuardPageAllocator::deallocate(unsigned char* limit, size_t size) {
218 if (!(stackCache_ && stackCache_->cache().giveBack(limit, size))) {
219 fallbackAllocator_.deallocate(limit, size);