/*
 * Copyright 2017 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <algorithm>
#include <array>
#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <mutex>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <vector>

#include <folly/Hash.h>
#include <folly/Indestructible.h>
#include <folly/Likely.h>
#include <folly/Memory.h>
#include <folly/Portability.h>
#include <folly/ThreadId.h>
#include <folly/portability/BitsFunctexcept.h>
#include <folly/portability/Memory.h>

namespace folly {
namespace detail {

// This file contains several classes that might be useful if you are
// trying to dynamically optimize cache locality: CacheLocality reads
// cache sharing information from sysfs to determine how CPUs should be
// grouped to minimize contention, Getcpu provides fast access to the
// current CPU via __vdso_getcpu, and AccessSpreader uses these two to
// optimally spread accesses among a predetermined number of stripes.
//
// AccessSpreader<>::current(n) microbenchmarks at 22 nanos, which is
// substantially less than the cost of a cache miss. This means that we
// can effectively use it to reduce cache line ping-pong on striped data
// structures such as IndexedMemPool or statistics counters.
//
// Because CacheLocality looks at all of the cache levels, it can be
// used for different levels of optimization. AccessSpreader(2) does
// per-chip spreading on a dual socket system. AccessSpreader(numCpus)
// does perfect per-cpu spreading. AccessSpreader(numCpus / 2) does
// perfect L1 spreading in a system with hyperthreading enabled.
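//
// A minimal usage sketch (illustrative only; the Counter struct and the
// stripe count below are hypothetical, not part of this header): spread
// increments of a statistics counter across stripes to reduce cache line
// ping-pong, then sum the stripes when the value is read.
//
//   struct Counter {
//     FOLLY_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<uint64_t> value{0};
//   };
//   static std::array<Counter, 8> counters;
//
//   void bump() {
//     auto stripe = folly::detail::AccessSpreader<>::current(counters.size());
//     counters[stripe].value.fetch_add(1, std::memory_order_relaxed);
//   }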

struct CacheLocality {
  /// 1 more than the maximum value that can be returned from sched_getcpu
  /// or getcpu. This is the number of hardware thread contexts provided
  /// by the processors.
  size_t numCpus;

  /// Holds the number of caches present at each cache level (0 is
  /// the closest to the cpu). This is the number of AccessSpreader
  /// stripes needed to avoid cross-cache communication at the specified
  /// layer. numCachesByLevel.front() is the number of L1 caches and
  /// numCachesByLevel.back() is the number of last-level caches.
  std::vector<size_t> numCachesByLevel;

  /// A map from cpu (from sched_getcpu or getcpu) to an index in the
  /// range 0..numCpus-1, where neighboring locality indices are more
  /// likely to share caches than indices far away. All of the members
  /// of a particular cache level will be contiguous in their locality
  /// index. For example, if numCpus is 32 and numCachesByLevel.back()
  /// is 2, then cpus with a locality index < 16 will share one last-level
  /// cache and cpus with a locality index >= 16 will share the other.
  std::vector<size_t> localityIndexByCpu;

  /// Returns the best CacheLocality information available for the current
  /// system, cached for fast access. This will be loaded from sysfs if
  /// possible, otherwise it will be correct in the number of CPUs but
  /// not in their sharing structure.
  ///
  /// If you are into yo dawgs, this is a shared cache of the local
  /// locality of the shared caches.
  ///
  /// The template parameter here is used to allow injection of a
  /// repeatable CacheLocality structure during testing. Rather than
  /// inject the type of the CacheLocality provider into every data type
  /// that transitively uses it, all components select between the default
  /// sysfs implementation and a deterministic implementation by keying
  /// off the type of the underlying atomic. See DeterministicScheduler.
  template <template <typename> class Atom = std::atomic>
  static const CacheLocality& system();

  /// Reads CacheLocality information from a tree structured like
  /// the sysfs filesystem. The provided function will be evaluated
  /// for each sysfs file that needs to be queried. The function
  /// should return a string containing the first line of the file
  /// (not including the newline), or an empty string if the file does
  /// not exist. The function will be called with paths of the form
  /// /sys/devices/system/cpu/cpu*/cache/index*/{type,shared_cpu_list} .
  /// Throws an exception if no caches can be parsed at all.
  static CacheLocality readFromSysfsTree(
      const std::function<std::string(std::string)>& mapping);
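
  // Illustrative sketch (not part of this API): in a test, the mapping can
  // be backed by an in-memory fake tree. The paths and values below are
  // hypothetical.
  //
  //   std::unordered_map<std::string, std::string> fake = {
  //       {"/sys/devices/system/cpu/cpu0/cache/index0/type", "Data"},
  //       {"/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_list", "0"},
  //   };
  //   auto locality = CacheLocality::readFromSysfsTree(
  //       [&](std::string path) {
  //         auto it = fake.find(path);
  //         return it == fake.end() ? std::string() : it->second;
  //       });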

  /// Reads CacheLocality information from the real sysfs filesystem.
  /// Throws an exception if no cache information can be loaded.
  static CacheLocality readFromSysfs();

  /// Returns a usable (but probably not reflective of reality)
  /// CacheLocality structure with the specified number of cpus and a
  /// single cache level that associates one cpu per cache.
  static CacheLocality uniform(size_t numCpus);

  enum {
    /// Memory locations on the same cache line are subject to false
    /// sharing, which is very bad for performance. Microbenchmarks
    /// indicate that pairs of cache lines also see interference under
    /// heavy use of atomic operations (observed for atomic increment on
    /// Sandy Bridge). See FOLLY_ALIGN_TO_AVOID_FALSE_SHARING
    kFalseSharingRange = 128
  };

  static_assert(
      kFalseSharingRange == 128,
      "FOLLY_ALIGN_TO_AVOID_FALSE_SHARING should track kFalseSharingRange");
};

// TODO replace __attribute__ with alignas and 128 with kFalseSharingRange

/// An attribute that will cause a variable or field to be aligned so that
/// it doesn't have false sharing with anything at a smaller memory address.
#define FOLLY_ALIGN_TO_AVOID_FALSE_SHARING FOLLY_ALIGNED(128)
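
// Illustrative use of the macro above (the Stats struct is hypothetical,
// not part of this header): each hot counter gets its own false-sharing
// range so that writers on different cores do not interfere.
//
//   struct Stats {
//     FOLLY_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<uint64_t> hits{0};
//     FOLLY_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<uint64_t> misses{0};
//   };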

/// Knows how to derive a function pointer to the VDSO implementation of
/// getcpu(2), if available
struct Getcpu {
  /// Function pointer to a function with the same signature as getcpu(2).
  typedef int (*Func)(unsigned* cpu, unsigned* node, void* unused);

  /// Returns a pointer to the VDSO implementation of getcpu(2), if
  /// available, or nullptr otherwise. This function may be quite
  /// expensive, be sure to cache the result.
  static Func resolveVdsoFunc();
};
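
// Illustrative caller sketch (hypothetical, not part of this header):
// resolve the VDSO pointer once, cache it, and fall back gracefully when
// it is unavailable.
//
//   static const Getcpu::Func cachedGetcpu = Getcpu::resolveVdsoFunc();
//
//   unsigned currentCpuOrZero() {
//     unsigned cpu = 0;
//     if (cachedGetcpu != nullptr) {
//       cachedGetcpu(&cpu, nullptr, nullptr);
//     }
//     return cpu;
//   }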

#ifdef FOLLY_TLS
template <template <typename> class Atom>
struct SequentialThreadId {
  /// Returns the thread id assigned to the current thread
  static unsigned get() {
    auto rv = currentId;
    if (UNLIKELY(rv == 0)) {
      rv = currentId = ++prevId;
    }
    return rv;
  }

 private:
  static Atom<unsigned> prevId;

  static FOLLY_TLS unsigned currentId;
};

template <template <typename> class Atom>
Atom<unsigned> SequentialThreadId<Atom>::prevId(0);

template <template <typename> class Atom>
FOLLY_TLS unsigned SequentialThreadId<Atom>::currentId(0);

// Suppress this instantiation in other translation units. It is
// instantiated in CacheLocality.cpp
extern template struct SequentialThreadId<std::atomic>;
#endif

struct HashingThreadId {
  static unsigned get() {
    return hash::twang_32from64(getCurrentThreadID());
  }
};

/// A class that lazily binds a unique (for each implementation of Atom)
/// identifier to a thread. This is a fallback mechanism for the access
/// spreader if __vdso_getcpu can't be loaded
template <typename ThreadId>
struct FallbackGetcpu {
  /// Fills the thread id into the cpu and node out params (if they
  /// are non-null). This method is intended to act like getcpu when a
  /// fast-enough form of getcpu isn't available or isn't desired
  static int getcpu(unsigned* cpu, unsigned* node, void* /* unused */) {
    auto id = ThreadId::get();
    if (cpu != nullptr) {
      *cpu = id;
    }
    if (node != nullptr) {
      *node = id;
    }
    return 0;
  }
};

#ifdef FOLLY_TLS
typedef FallbackGetcpu<SequentialThreadId<std::atomic>> FallbackGetcpuType;
#else
typedef FallbackGetcpu<HashingThreadId> FallbackGetcpuType;
#endif

/// AccessSpreader arranges access to a striped data structure in such a
/// way that concurrently executing threads are likely to be accessing
/// different stripes. It does NOT guarantee uncontended access.
/// Your underlying algorithm must be thread-safe without spreading; this
/// is merely an optimization. AccessSpreader::current(n) is typically
/// much faster than a cache miss (12 nanos on my dev box, tested fast
/// in both 2.6 and 3.2 kernels).
///
/// If available (and not using the deterministic testing implementation)
/// AccessSpreader uses the getcpu system call via VDSO and the
/// precise locality information retrieved from sysfs by CacheLocality.
/// This provides optimal anti-sharing at a fraction of the cost of a
/// cache miss.
///
/// When there are not as many stripes as processors, we try to optimally
/// place the cache sharing boundaries. This means that if you have 2
/// stripes and run on a dual-socket system, your 2 stripes will each get
/// all of the cores from a single socket. If you have 16 stripes on a
/// 16 core system plus hyperthreading (32 cpus), each core will get its
/// own stripe and there will be no cache sharing at all.
///
/// AccessSpreader has a fallback mechanism for when __vdso_getcpu can't be
/// loaded, or for use during deterministic testing. Using sched_getcpu
/// or the getcpu syscall would negate the performance advantages of
/// access spreading, so we use a thread-local value and a shared atomic
/// counter to spread access out. On systems lacking both a fast getcpu()
/// and TLS, we hash the thread id to spread accesses.
///
/// AccessSpreader is templated on the template type that is used
/// to implement atomics, as a way to instantiate the underlying
/// heuristics differently for production use and deterministic unit
/// testing. See DeterministicScheduler for more. If you aren't using
/// DeterministicScheduler, you can just use the default template
/// parameter.
template <template <typename> class Atom = std::atomic>
struct AccessSpreader {
  /// Returns the stripe associated with the current CPU. The returned
  /// value will be < numStripes.
  static size_t current(size_t numStripes) {
    // widthAndCpuToStripe[0] will actually work okay (all zeros), but
    // something's wrong with the caller
    assert(numStripes > 0);

    unsigned cpu;
    getcpuFunc(&cpu, nullptr, nullptr);
    return widthAndCpuToStripe[std::min(size_t(kMaxCpus),
                                        numStripes)][cpu % kMaxCpus];
  }

 private:
  /// If there are more cpus than this nothing will crash, but there
  /// might be unnecessary sharing
  enum { kMaxCpus = 128 };

  typedef uint8_t CompactStripe;

  static_assert((kMaxCpus & (kMaxCpus - 1)) == 0,
                "kMaxCpus should be a power of two so modulo is fast");
  static_assert(kMaxCpus - 1 <= std::numeric_limits<CompactStripe>::max(),
                "stripeByCpu element type isn't wide enough");

  /// Points to the getcpu-like function we are using to obtain the
  /// current cpu. It should not be assumed that the returned cpu value
  /// is in range. We use a static for this so that we can prearrange a
  /// valid value in the pre-constructed state and avoid the need for a
  /// conditional on every subsequent invocation (not normally a big win,
  /// but 20% on some inner loops here).
  static Getcpu::Func getcpuFunc;

  /// For each level of splitting up to kMaxCpus, maps the cpu (mod
  /// kMaxCpus) to the stripe. Rather than performing any inequalities
  /// or modulo on the actual number of cpus, we just fill in the entire
  /// array.
  static CompactStripe widthAndCpuToStripe[kMaxCpus + 1][kMaxCpus];

  static bool initialized;

  /// Returns the best getcpu implementation for Atom
  static Getcpu::Func pickGetcpuFunc() {
    auto best = Getcpu::resolveVdsoFunc();
    return best ? best : &FallbackGetcpuType::getcpu;
  }

  /// Always claims to be on CPU zero, node zero
  static int degenerateGetcpu(unsigned* cpu, unsigned* node, void*) {
    if (cpu != nullptr) {
      *cpu = 0;
    }
    if (node != nullptr) {
      *node = 0;
    }
    return 0;
  }

  // The function to call for fast lookup of getcpu is a singleton, as
  // is the precomputed table of locality information. AccessSpreader
  // is used in very tight loops, however (we're trying to race an L1
  // cache miss!), so the normal singleton mechanisms are noticeably
  // expensive. Even a not-taken branch guarding access to getcpuFunc
  // slows AccessSpreader::current from 12 nanos to 14. As a result, we
  // populate the static members with simple (but valid) values that can
  // be filled in by the linker, and then follow up with a normal static
  // initializer call that puts in the proper version. This means that
  // when there are initialization order issues we will just observe a
  // zero stripe. Once a sanitizer gets smart enough to detect this as
  // a race or undefined behavior, we can annotate it.

  static bool initialize() {
    getcpuFunc = pickGetcpuFunc();

    auto& cacheLocality = CacheLocality::system<Atom>();
    auto n = cacheLocality.numCpus;
    for (size_t width = 0; width <= kMaxCpus; ++width) {
      auto numStripes = std::max(size_t{1}, width);
      for (size_t cpu = 0; cpu < kMaxCpus && cpu < n; ++cpu) {
        auto index = cacheLocality.localityIndexByCpu[cpu];
        // as index goes from 0..n, post-transform value goes from
        // 0..numStripes
        widthAndCpuToStripe[width][cpu] =
            CompactStripe((index * numStripes) / n);
        assert(widthAndCpuToStripe[width][cpu] < numStripes);
      }
      for (size_t cpu = n; cpu < kMaxCpus; ++cpu) {
        widthAndCpuToStripe[width][cpu] = widthAndCpuToStripe[width][cpu - n];
      }
    }
    return true;
  }
};

template <template <typename> class Atom>
Getcpu::Func AccessSpreader<Atom>::getcpuFunc =
    AccessSpreader<Atom>::degenerateGetcpu;

template <template <typename> class Atom>
typename AccessSpreader<Atom>::CompactStripe
    AccessSpreader<Atom>::widthAndCpuToStripe[kMaxCpus + 1][kMaxCpus] = {};

template <template <typename> class Atom>
bool AccessSpreader<Atom>::initialized = AccessSpreader<Atom>::initialize();

// Suppress this instantiation in other translation units. It is
// instantiated in CacheLocality.cpp
extern template struct AccessSpreader<std::atomic>;
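
// Worked example of the widthAndCpuToStripe mapping built in initialize()
// above (illustrative numbers): with n = 32 cpus and width = 2 stripes, a
// cpu whose locality index is 10 maps to stripe (10 * 2) / 32 = 0, while a
// cpu whose locality index is 20 maps to (20 * 2) / 32 = 1, so the cpus
// sharing each last-level cache end up on their own stripe.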

/**
 * A simple freelist allocator. Allocates things of size sz, from
 * slabs of size allocSize. Takes a lock on each
 * allocation/deallocation.
 */
class SimpleAllocator {
  std::mutex m_;
  uint8_t* mem_{nullptr};
  uint8_t* end_{nullptr};
  void* freelist_{nullptr};
  size_t allocSize_;
  size_t sz_;
  std::vector<void*> blocks_;

 public:
  SimpleAllocator(size_t allocSize, size_t sz);
  ~SimpleAllocator();

  void* allocateHard();

  // Inline fast-paths.
  void* allocate() {
    std::lock_guard<std::mutex> g(m_);
    // Freelist allocation.
    if (freelist_) {
      auto mem = freelist_;
      freelist_ = *static_cast<void**>(freelist_);
      return mem;
    }

    // Bump-ptr allocation.
    if (intptr_t(mem_) % 128 == 0) {
      // Avoid allocating pointers that may look like malloc pointers.
      mem_ += std::min(sz_, alignof(std::max_align_t));
    }
    if (mem_ && (mem_ + sz_ <= end_)) {
      auto mem = mem_;
      mem_ += sz_;
      assert(intptr_t(mem) % 128 != 0);
      return mem;
    }

    return allocateHard();
  }

  void deallocate(void* mem) {
    std::lock_guard<std::mutex> g(m_);
    *static_cast<void**>(mem) = freelist_;
    freelist_ = mem;
  }
};
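
// Illustrative sketch (hypothetical sizes, not part of this header): a
// SimpleAllocator that carves 32-byte objects out of 4096-byte slabs.
//
//   folly::detail::SimpleAllocator alloc(4096, 32);
//   void* p = alloc.allocate();
//   // ... use the 32-byte block ...
//   alloc.deallocate(p);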

/**
 * An allocator that can be used with CacheLocality to allocate
 * core-local memory.
 *
 * There is actually nothing special about the memory itself (it is
 * not bound to numa nodes or anything), but the allocator guarantees
 * that memory allocated from the same stripe will only come from cache
 * lines also allocated to the same stripe. This means multiple
 * things using CacheLocality can allocate memory in smaller-than
 * cacheline increments, and be assured that it won't cause more false
 * sharing than it otherwise would.
 *
 * Note that allocation and deallocation take a per-sizeclass lock.
 */
template <size_t Stripes>
class CoreAllocator {
 public:
  class Allocator {
    static constexpr size_t AllocSize{4096};

    uint8_t sizeClass(size_t size) {
      if (size <= 8) {
        return 0;
      } else if (size <= 16) {
        return 1;
      } else if (size <= 32) {
        return 2;
      } else if (size <= 64) {
        return 3;
      } else { // punt to malloc.
        return 4;
      }
    }

    std::array<SimpleAllocator, 4> allocators_{
        {{AllocSize, 8}, {AllocSize, 16}, {AllocSize, 32}, {AllocSize, 64}}};

   public:
    void* allocate(size_t size) {
      auto cl = sizeClass(size);
      if (cl == 4) {
        static_assert(
            CacheLocality::kFalseSharingRange == 128,
            "kFalseSharingRange changed");
        // Align to a cacheline
        size = size + (CacheLocality::kFalseSharingRange - 1);
        size &= ~size_t(CacheLocality::kFalseSharingRange - 1);
        void* mem = aligned_malloc(size, CacheLocality::kFalseSharingRange);
        if (!mem) {
          std::__throw_bad_alloc();
        }
        return mem;
      }
      return allocators_[cl].allocate();
    }

    void deallocate(void* mem) {
      // See if it came from this allocator or malloc.
      if (intptr_t(mem) % 128 != 0) {
        auto addr =
            reinterpret_cast<void*>(intptr_t(mem) & ~intptr_t(AllocSize - 1));
        auto allocator = *static_cast<SimpleAllocator**>(addr);
        allocator->deallocate(mem);
      } else {
        aligned_free(mem);
      }
    }
  };

  Allocator* get(size_t stripe) {
    assert(stripe < Stripes);
    return &allocators_[stripe];
  }

 private:
  Allocator allocators_[Stripes];
};

template <size_t Stripes>
typename CoreAllocator<Stripes>::Allocator* getCoreAllocator(size_t stripe) {
  // We cannot make sure that the allocator will be destroyed after
  // all the objects allocated with it, so we leak it.
  static Indestructible<CoreAllocator<Stripes>> allocator;
  return allocator->get(stripe);
}

template <typename T, size_t Stripes>
StlAllocator<typename CoreAllocator<Stripes>::Allocator, T> getCoreAllocatorStl(
    size_t stripe) {
  auto alloc = getCoreAllocator<Stripes>(stripe);
  return StlAllocator<typename CoreAllocator<Stripes>::Allocator, T>(alloc);
}
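
// Illustrative usage sketch (hypothetical stripe count and element type,
// not part of this header): give each stripe a vector whose storage comes
// from that stripe's CoreAllocator, so neighboring stripes do not share
// cache lines through the container's allocations.
//
//   constexpr size_t kStripes = 8;
//   auto stripe = folly::detail::AccessSpreader<>::current(kStripes);
//   auto alloc = folly::detail::getCoreAllocatorStl<int, kStripes>(stripe);
//   std::vector<int, decltype(alloc)> data(alloc);
//   data.push_back(42);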

} // namespace detail
} // namespace folly