/*
 * Copyright 2017 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <algorithm>
#include <array>
#include <atomic>
#include <cassert>
#include <cstdint>
#include <functional>
#include <limits>
#include <mutex>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <vector>

#include <folly/Indestructible.h>
#include <folly/Likely.h>
#include <folly/Memory.h>
#include <folly/Portability.h>
#include <folly/hash/Hash.h>
#include <folly/lang/Align.h>
#include <folly/portability/BitsFunctexcept.h>
#include <folly/portability/Memory.h>
#include <folly/system/ThreadId.h>

namespace folly {
// This file contains several classes that might be useful if you are
// trying to dynamically optimize cache locality: CacheLocality reads
// cache sharing information from sysfs to determine how CPUs should be
// grouped to minimize contention, Getcpu provides fast access to the
// current CPU via __vdso_getcpu, and AccessSpreader uses these two to
// optimally spread accesses among a predetermined number of stripes.
//
// AccessSpreader<>::current(n) microbenchmarks at 22 nanos, which is
// substantially less than the cost of a cache miss.  This means that we
// can effectively use it to reduce cache line ping-pong on striped data
// structures such as IndexedMemPool or statistics counters.
//
// Because CacheLocality looks at all of the cache levels, it can be
// used for different levels of optimization.  AccessSpreader(2) does
// per-chip spreading on a dual socket system.  AccessSpreader(numCpus)
// does perfect per-cpu spreading.  AccessSpreader(numCpus / 2) does
// perfect L1 spreading in a system with hyperthreading enabled.
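//
// As an illustrative sketch (the names `kStripes`, `PaddedCounter`, and
// `counters` are hypothetical, not part of this header), a striped
// statistics counter could use AccessSpreader to pick a stripe per access:
//
//   constexpr size_t kStripes = 16;
//   struct alignas(128) PaddedCounter {
//     std::atomic<uint64_t> value{0};
//   };
//   std::array<PaddedCounter, kStripes> counters;
//
//   void bump() {
//     auto stripe = AccessSpreader<>::current(kStripes);
//     counters[stripe].value.fetch_add(1, std::memory_order_relaxed);
//   }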
struct CacheLocality {
  /// 1 more than the maximum value that can be returned from sched_getcpu
  /// or getcpu.  This is the number of hardware thread contexts provided
  /// by the processors.
  size_t numCpus;

  /// Holds the number of caches present at each cache level (0 is
  /// the closest to the cpu).  This is the number of AccessSpreader
  /// stripes needed to avoid cross-cache communication at the specified
  /// layer.  numCachesByLevel.front() is the number of L1 caches and
  /// numCachesByLevel.back() is the number of last-level caches.
  std::vector<size_t> numCachesByLevel;
  /// A map from cpu (from sched_getcpu or getcpu) to an index in the
  /// range 0..numCpus-1, where neighboring locality indices are more
  /// likely to share caches than indices far away.  All of the members
  /// of a particular cache level will be contiguous in their locality
  /// index.  For example, if numCpus is 32 and numCachesByLevel.back()
  /// is 2, then cpus with a locality index < 16 will share one
  /// last-level cache and cpus with a locality index >= 16 will share
  /// the other.
  std::vector<size_t> localityIndexByCpu;
  /// Returns the best CacheLocality information available for the current
  /// system, cached for fast access.  This will be loaded from sysfs if
  /// possible, otherwise it will be correct in the number of CPUs but
  /// not in their sharing structure.
  ///
  /// If you are into yo dawgs, this is a shared cache of the local
  /// locality of the shared caches.
  ///
  /// The template parameter here is used to allow injection of a
  /// repeatable CacheLocality structure during testing.  Rather than
  /// inject the type of the CacheLocality provider into every data type
  /// that transitively uses it, all components select between the default
  /// sysfs implementation and a deterministic implementation by keying
  /// off the type of the underlying atomic.  See DeterministicScheduler.
  template <template <typename> class Atom = std::atomic>
  static const CacheLocality& system();
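  /// A minimal usage sketch (the variable names are illustrative, not
  /// part of this header):
  ///
  ///   auto const& locality = CacheLocality::system<>();
  ///   size_t lastLevelCaches = locality.numCachesByLevel.back();
  ///   size_t cpus = locality.numCpus;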
  /// Reads CacheLocality information from a tree structured like
  /// the sysfs filesystem.  The provided function will be evaluated
  /// for each sysfs file that needs to be queried.  The function
  /// should return a string containing the first line of the file
  /// (not including the newline), or an empty string if the file does
  /// not exist.  The function will be called with paths of the form
  /// /sys/devices/system/cpu/cpu*/cache/index*/{type,shared_cpu_list} .
  /// Throws an exception if no caches can be parsed at all.
  static CacheLocality readFromSysfsTree(
      const std::function<std::string(std::string)>& mapping);
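  /// For example, a test might supply a fake tree via an in-memory map (a
  /// sketch; `fakeSysfs` is a hypothetical name and the entries shown are
  /// not a complete topology):
  ///
  ///   std::unordered_map<std::string, std::string> fakeSysfs = {
  ///       {"/sys/devices/system/cpu/cpu0/cache/index0/type", "Data"},
  ///       {"/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_list",
  ///        "0"},
  ///   };
  ///   auto locality = CacheLocality::readFromSysfsTree([&](std::string p) {
  ///     auto it = fakeSysfs.find(p);
  ///     return it == fakeSysfs.end() ? std::string() : it->second;
  ///   });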
  /// Reads CacheLocality information from the real sysfs filesystem.
  /// Throws an exception if no cache information can be loaded.
  static CacheLocality readFromSysfs();

  /// Returns a usable (but probably not reflective of reality)
  /// CacheLocality structure with the specified number of cpus and a
  /// single cache level that associates one cpu per cache.
  static CacheLocality uniform(size_t numCpus);
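  /// Illustratively, uniform(4) describes a machine with four cpus, a
  /// single cache level, and one cache per cpu, i.e. it behaves as if:
  ///
  ///   numCpus == 4
  ///   numCachesByLevel == {4}
  ///   localityIndexByCpu == {0, 1, 2, 3}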
};

// TODO replace with alignas(hardware_destructive_interference_size)

/// An attribute that will cause a variable or field to be aligned so that
/// it doesn't have false sharing with anything at a smaller memory address.
#define FOLLY_ALIGN_TO_AVOID_FALSE_SHARING FOLLY_ALIGNED(128)
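/// A minimal sketch of the intended use (the struct and field names are
/// illustrative only): annotate fields that are written by different
/// threads so each one lands in its own 128-byte-aligned region.
///
///   struct Stats {
///     FOLLY_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<uint64_t> reads{0};
///     FOLLY_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<uint64_t> writes{0};
///   };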
/// Knows how to derive a function pointer to the VDSO implementation of
/// getcpu(2), if available
struct Getcpu {
  /// Function pointer to a function with the same signature as getcpu(2).
  typedef int (*Func)(unsigned* cpu, unsigned* node, void* unused);

  /// Returns a pointer to the VDSO implementation of getcpu(2), if
  /// available, or nullptr otherwise.  This function may be quite
  /// expensive, be sure to cache the result.
  static Func resolveVdsoFunc();
};
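/// Since resolveVdsoFunc() may be expensive, one reasonable pattern is to
/// resolve once and reuse the cached pointer (an illustrative sketch; a
/// caller would fall back to FallbackGetcpuType::getcpu, defined below,
/// when the VDSO symbol is unavailable):
///
///   static Getcpu::Func func = Getcpu::resolveVdsoFunc();
///   unsigned cpu;
///   if (func != nullptr) {
///     func(&cpu, nullptr, nullptr);
///   }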
#ifdef FOLLY_TLS
template <template <typename> class Atom>
struct SequentialThreadId {
  /// Returns the thread id assigned to the current thread
  static unsigned get() {
    auto rv = currentId;
    if (UNLIKELY(rv == 0)) {
      rv = currentId = ++prevId;
    }
    return rv;
  }

 private:
  static Atom<unsigned> prevId;

  static FOLLY_TLS unsigned currentId;
};

template <template <typename> class Atom>
Atom<unsigned> SequentialThreadId<Atom>::prevId(0);

template <template <typename> class Atom>
FOLLY_TLS unsigned SequentialThreadId<Atom>::currentId(0);

// Suppress this instantiation in other translation units.  It is
// instantiated in CacheLocality.cpp
extern template struct SequentialThreadId<std::atomic>;
#endif
struct HashingThreadId {
  static unsigned get() {
    return hash::twang_32from64(getCurrentThreadID());
  }
};
/// A class that lazily binds a unique (for each implementation of Atom)
/// identifier to a thread.  This is a fallback mechanism for the access
/// spreader if __vdso_getcpu can't be loaded
template <typename ThreadId>
struct FallbackGetcpu {
  /// Fills the thread id into the cpu and node out params (if they
  /// are non-null).  This method is intended to act like getcpu when a
  /// fast-enough form of getcpu isn't available or isn't desired
  static int getcpu(unsigned* cpu, unsigned* node, void* /* unused */) {
    auto id = ThreadId::get();
    if (cpu != nullptr) {
      *cpu = id;
    }
    if (node != nullptr) {
      *node = id;
    }
    return 0;
  }
};

#ifdef FOLLY_TLS
typedef FallbackGetcpu<SequentialThreadId<std::atomic>> FallbackGetcpuType;
#else
typedef FallbackGetcpu<HashingThreadId> FallbackGetcpuType;
#endif
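// FallbackGetcpuType::getcpu has the same shape as a getcpu(2) call, so it
// can stand in for the VDSO function directly (an illustrative sketch):
//
//   unsigned cpu;
//   FallbackGetcpuType::getcpu(&cpu, nullptr, nullptr);
//   // cpu now holds a thread-derived value, not necessarily a real cpu id.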
/// AccessSpreader arranges access to a striped data structure in such a
/// way that concurrently executing threads are likely to be accessing
/// different stripes.  It does NOT guarantee uncontended access.
/// Your underlying algorithm must be thread-safe without spreading; this
/// is merely an optimization.  AccessSpreader::current(n) is typically
/// much faster than a cache miss (12 nanos on my dev box, tested fast
/// in both 2.6 and 3.2 kernels).
///
/// If available (and not using the deterministic testing implementation)
/// AccessSpreader uses the getcpu system call via VDSO and the
/// precise locality information retrieved from sysfs by CacheLocality.
/// This provides optimal anti-sharing at a fraction of the cost of a
/// cache miss.
///
/// When there are not as many stripes as processors, we try to optimally
/// place the cache sharing boundaries.  This means that if you have 2
/// stripes and run on a dual-socket system, your 2 stripes will each get
/// all of the cores from a single socket.  If you have 16 stripes on a
/// 16 core system plus hyperthreading (32 cpus), each core will get its
/// own stripe and there will be no cache sharing at all.
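///
/// As a worked example of the boundary placement (numbers illustrative):
/// with 32 cpus, 2 last-level caches, and 4 stripes, locality indices
/// 0..7 map to stripe 0, 8..15 to stripe 1, 16..23 to stripe 2, and
/// 24..31 to stripe 3, so each stripe stays inside one cache-sharing
/// domain (stripe = (index * numStripes) / numCpus).
///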
/// AccessSpreader has a fallback mechanism for when __vdso_getcpu can't be
/// loaded, or for use during deterministic testing.  Using sched_getcpu
/// or the getcpu syscall would negate the performance advantages of
/// access spreading, so we use a thread-local value and a shared atomic
/// counter to spread access out.  On systems lacking both a fast getcpu()
/// and TLS, we hash the thread id to spread accesses.
///
/// AccessSpreader is templated on the template type that is used
/// to implement atomics, as a way to instantiate the underlying
/// heuristics differently for production use and deterministic unit
/// testing.  See DeterministicScheduler for more.  If you aren't using
/// DeterministicScheduler, you can just use the default template
/// parameter all of the time.
template <template <typename> class Atom = std::atomic>
struct AccessSpreader {
  /// Returns the stripe associated with the current CPU.  The returned
  /// value will be < numStripes.
  static size_t current(size_t numStripes) {
    // widthAndCpuToStripe[0] will actually work okay (all zeros), but
    // something's wrong with the caller
    assert(numStripes > 0);

    unsigned cpu;
    getcpuFunc(&cpu, nullptr, nullptr);
    return widthAndCpuToStripe[std::min(size_t(kMaxCpus), numStripes)]
                              [cpu % kMaxCpus];
  }

 private:
  /// If there are more cpus than this nothing will crash, but there
  /// might be unnecessary sharing
  enum { kMaxCpus = 128 };
  typedef uint8_t CompactStripe;

  static_assert(
      (kMaxCpus & (kMaxCpus - 1)) == 0,
      "kMaxCpus should be a power of two so modulo is fast");
  static_assert(
      kMaxCpus - 1 <= std::numeric_limits<CompactStripe>::max(),
      "stripeByCpu element type isn't wide enough");
  /// Points to the getcpu-like function we are using to obtain the
  /// current cpu.  It should not be assumed that the returned cpu value
  /// is in range.  We use a static for this so that we can prearrange a
  /// valid value in the pre-constructed state and avoid the need for a
  /// conditional on every subsequent invocation (not normally a big win,
  /// but 20% on some inner loops here).
  static Getcpu::Func getcpuFunc;

  /// For each level of splitting up to kMaxCpus, maps the cpu (mod
  /// kMaxCpus) to the stripe.  Rather than performing any inequalities
  /// or modulo on the actual number of cpus, we just fill in the entire
  /// array.
  static CompactStripe widthAndCpuToStripe[kMaxCpus + 1][kMaxCpus];

  static bool initialized;
  /// Returns the best getcpu implementation for Atom
  static Getcpu::Func pickGetcpuFunc() {
    auto best = Getcpu::resolveVdsoFunc();
    return best ? best : &FallbackGetcpuType::getcpu;
  }

  /// Always claims to be on CPU zero, node zero
  static int degenerateGetcpu(unsigned* cpu, unsigned* node, void*) {
    if (cpu != nullptr) {
      *cpu = 0;
    }
    if (node != nullptr) {
      *node = 0;
    }
    return 0;
  }
  // The function to call for fast lookup of getcpu is a singleton, as
  // is the precomputed table of locality information.  AccessSpreader
  // is used in very tight loops, however (we're trying to race an L1
  // cache miss!), so the normal singleton mechanisms are noticeably
  // expensive.  Even a not-taken branch guarding access to getcpuFunc
  // slows AccessSpreader::current from 12 nanos to 14.  As a result, we
  // populate the static members with simple (but valid) values that can
  // be filled in by the linker, and then follow up with a normal static
  // initializer call that puts in the proper version.  This means that
  // when there are initialization order issues we will just observe a
  // zero stripe.  Once a sanitizer gets smart enough to detect this as
  // a race or undefined behavior, we can annotate it.
  static bool initialize() {
    getcpuFunc = pickGetcpuFunc();

    auto& cacheLocality = CacheLocality::system<Atom>();
    auto n = cacheLocality.numCpus;
    for (size_t width = 0; width <= kMaxCpus; ++width) {
      auto numStripes = std::max(size_t{1}, width);
      for (size_t cpu = 0; cpu < kMaxCpus && cpu < n; ++cpu) {
        auto index = cacheLocality.localityIndexByCpu[cpu];
        assert(index < n);
        // as index goes from 0..n, post-transform value goes from
        // 0..numStripes
        widthAndCpuToStripe[width][cpu] =
            CompactStripe((index * numStripes) / n);
        assert(widthAndCpuToStripe[width][cpu] < numStripes);
      }
      for (size_t cpu = n; cpu < kMaxCpus; ++cpu) {
        widthAndCpuToStripe[width][cpu] = widthAndCpuToStripe[width][cpu - n];
      }
    }
    return true;
  }
};
template <template <typename> class Atom>
Getcpu::Func AccessSpreader<Atom>::getcpuFunc =
    AccessSpreader<Atom>::degenerateGetcpu;

template <template <typename> class Atom>
typename AccessSpreader<Atom>::CompactStripe
    AccessSpreader<Atom>::widthAndCpuToStripe[kMaxCpus + 1][kMaxCpus] = {};

template <template <typename> class Atom>
bool AccessSpreader<Atom>::initialized = AccessSpreader<Atom>::initialize();

// Suppress this instantiation in other translation units.  It is
// instantiated in CacheLocality.cpp
extern template struct AccessSpreader<std::atomic>;
/*
 * A simple freelist allocator.  Allocates things of size sz, from
 * slabs of size allocSize.  Takes a lock on each
 * allocation/deallocation.
 */
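// A minimal usage sketch (variable names illustrative): carve 32-byte
// objects out of 4096-byte slabs, then return them to the freelist.
//
//   SimpleAllocator alloc(4096, 32);
//   void* p = alloc.allocate();
//   // ... use the 32-byte block ...
//   alloc.deallocate(p);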
class SimpleAllocator {
  std::mutex m_;
  uint8_t* mem_{nullptr};
  uint8_t* end_{nullptr};
  void* freelist_{nullptr};
  size_t allocSize_;
  size_t sz_;
  std::vector<void*> blocks_;

 public:
  SimpleAllocator(size_t allocSize, size_t sz);
  ~SimpleAllocator();
  void* allocateHard();
  // Inline fast-paths.
  void* allocate() {
    std::lock_guard<std::mutex> g(m_);
    // Freelist allocation.
    if (freelist_) {
      auto mem = freelist_;
      freelist_ = *static_cast<void**>(freelist_);
      return mem;
    }

    // Bump-ptr allocation.
    if (intptr_t(mem_) % 128 == 0) {
      // Avoid allocating pointers that may look like malloc
      // pointers.
      mem_ += std::min(sz_, max_align_v);
    }
    if (mem_ && (mem_ + sz_ <= end_)) {
      auto mem = mem_;
      mem_ += sz_;

      assert(intptr_t(mem) % 128 != 0);
      return mem;
    }

    return allocateHard();
  }
  void deallocate(void* mem) {
    std::lock_guard<std::mutex> g(m_);
    *static_cast<void**>(mem) = freelist_;
    freelist_ = mem;
  }
};
/*
 * An allocator that can be used with CacheLocality to allocate
 * core-local memory.
 *
 * There is actually nothing special about the memory itself (it is
 * not bound to numa nodes or anything), but the allocator guarantees
 * that memory allocated from the same stripe will only come from cache
 * lines also allocated to the same stripe.  This means multiple
 * things using CacheLocality can allocate memory in smaller-than
 * cacheline increments, and be assured that it won't cause more false
 * sharing than it otherwise would.
 *
 * Note that allocation and deallocation take a per-sizeclass lock.
 */
template <size_t Stripes>
class CoreAllocator {
 public:
  class Allocator {
    static constexpr size_t AllocSize{4096};

    uint8_t sizeClass(size_t size) {
      if (size <= 8) {
        return 0;
      } else if (size <= 16) {
        return 1;
      } else if (size <= 32) {
        return 2;
      } else if (size <= 64) {
        return 3;
      } else { // punt to malloc.
        return 4;
      }
    }

    std::array<SimpleAllocator, 4> allocators_{
        {{AllocSize, 8}, {AllocSize, 16}, {AllocSize, 32}, {AllocSize, 64}}};
   public:
    void* allocate(size_t size) {
      auto cl = sizeClass(size);
      if (cl == 4) {
        // Align to a cacheline
        size = size + (hardware_destructive_interference_size - 1);
        size &= ~size_t(hardware_destructive_interference_size - 1);
        void* mem = detail::aligned_malloc(
            size, hardware_destructive_interference_size);
        if (!mem) {
          std::__throw_bad_alloc();
        }
        return mem;
      }
      return allocators_[cl].allocate();
    }
    void deallocate(void* mem) {
      // See if it came from this allocator or malloc.
      if (intptr_t(mem) % 128 != 0) {
        auto addr =
            reinterpret_cast<void*>(intptr_t(mem) & ~intptr_t(AllocSize - 1));
        auto allocator = *static_cast<SimpleAllocator**>(addr);
        allocator->deallocate(mem);
      } else {
        detail::aligned_free(mem);
      }
    }
  };
  Allocator* get(size_t stripe) {
    assert(stripe < Stripes);
    return &allocators_[stripe];
  }

 private:
  Allocator allocators_[Stripes];
};
template <size_t Stripes>
typename CoreAllocator<Stripes>::Allocator* getCoreAllocator(size_t stripe) {
  // We cannot make sure that the allocator will be destroyed after
  // all the objects allocated with it, so we leak it.
  static Indestructible<CoreAllocator<Stripes>> allocator;
  return allocator->get(stripe);
}
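// A sketch of stripe-local allocation combining AccessSpreader with
// getCoreAllocator (`kStripes` is an illustrative name, not part of this
// header):
//
//   constexpr size_t kStripes = 16;
//   size_t stripe = AccessSpreader<>::current(kStripes);
//   auto* alloc = getCoreAllocator<kStripes>(stripe);
//   void* p = alloc->allocate(24);
//   // ... use p, preferably from threads that map to the same stripe ...
//   alloc->deallocate(p);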
template <typename T, size_t Stripes>
StlAllocator<typename CoreAllocator<Stripes>::Allocator, T> getCoreAllocatorStl(
    size_t stripe) {
  auto alloc = getCoreAllocator<Stripes>(stripe);
  return StlAllocator<typename CoreAllocator<Stripes>::Allocator, T>(alloc);
}

} // namespace folly