/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef FOLLY_DETAIL_CACHELOCALITY_H_
#define FOLLY_DETAIL_CACHELOCALITY_H_

#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstdint>
#include <functional>
#include <limits>
#include <string>
#include <type_traits>
#include <vector>

#include <pthread.h>
#include <string.h>

#include <folly/Hash.h>
#include <folly/Likely.h>
#include <folly/Portability.h>

namespace folly {
namespace detail {
// This file contains several classes that might be useful if you are
// trying to dynamically optimize cache locality: CacheLocality reads
// cache sharing information from sysfs to determine how CPUs should be
// grouped to minimize contention, Getcpu provides fast access to the
// current CPU via __vdso_getcpu, and AccessSpreader uses these two to
// optimally spread accesses among a predetermined number of stripes.
//
// AccessSpreader<>::current(n) microbenchmarks at 22 nanos, which is
// substantially less than the cost of a cache miss.  This means that we
// can effectively use it to reduce cache line ping-pong on striped data
// structures such as IndexedMemPool or statistics counters.
//
// Because CacheLocality looks at all of the cache levels, it can be
// used for different levels of optimization.  AccessSpreader(2) does
// per-chip spreading on a dual socket system.  AccessSpreader(numCpus)
// does perfect per-cpu spreading.  AccessSpreader(numCpus / 2) does
// perfect L1 spreading in a system with hyperthreading enabled.
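//
// As a minimal sketch of how these pieces fit together (the local
// variable names here are illustrative, not part of this header):
//
//   const auto& loc = folly::detail::CacheLocality::system<>();
//   size_t l1Stripes = loc.numCachesByLevel.front();   // avoid all sharing
//   size_t llcStripes = loc.numCachesByLevel.back();   // per-socket spreading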

struct CacheLocality {
  /// 1 more than the maximum value that can be returned from sched_getcpu
  /// or getcpu.  This is the number of hardware thread contexts provided
  /// by this system.
  size_t numCpus;

  /// Holds the number of caches present at each cache level (0 is
  /// the closest to the cpu).  This is the number of AccessSpreader
  /// stripes needed to avoid cross-cache communication at the specified
  /// layer.  numCachesByLevel.front() is the number of L1 caches and
  /// numCachesByLevel.back() is the number of last-level caches.
  std::vector<size_t> numCachesByLevel;

  /// A map from cpu (from sched_getcpu or getcpu) to an index in the
  /// range 0..numCpus-1, where neighboring locality indices are more
  /// likely to share caches than indices far away.  All of the members
  /// of a particular cache level will be contiguous in their locality
  /// index.  For example, if numCpus is 32 and numCachesByLevel.back()
  /// is 2, then cpus with a locality index < 16 will share one last-level
  /// cache and cpus with a locality index >= 16 will share the other.
  std::vector<size_t> localityIndexByCpu;

  /// Returns the best CacheLocality information available for the current
  /// system, cached for fast access.  This will be loaded from sysfs if
  /// possible, otherwise it will be correct in the number of CPUs but
  /// not in their sharing structure.
  ///
  /// If you are into yo dawgs, this is a shared cache of the local
  /// locality of the shared caches.
  ///
  /// The template parameter here is used to allow injection of a
  /// repeatable CacheLocality structure during testing.  Rather than
  /// inject the type of the CacheLocality provider into every data type
  /// that transitively uses it, all components select between the default
  /// sysfs implementation and a deterministic implementation by keying
  /// off the type of the underlying atomic.  See DeterministicScheduler.
  template <template <typename> class Atom = std::atomic>
  static const CacheLocality& system();

  /// Reads CacheLocality information from a tree structured like
  /// the sysfs filesystem.  The provided function will be evaluated
  /// for each sysfs file that needs to be queried.  The function
  /// should return a string containing the first line of the file
  /// (not including the newline), or an empty string if the file does
  /// not exist.  The function will be called with paths of the form
  /// /sys/devices/system/cpu/cpu*/cache/index*/{type,shared_cpu_list} .
  /// Throws an exception if no caches can be parsed at all.
  static CacheLocality readFromSysfsTree(
      const std::function<std::string(std::string)>& mapping);
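
  // For example, a deterministic test can feed readFromSysfsTree a canned
  // topology (a sketch; this single-cpu sysfs content is made up for
  // illustration and needs <map>):
  //
  //   std::map<std::string, std::string> fakeSysfs = {
  //       {"/sys/devices/system/cpu/cpu0/cache/index0/type", "Data"},
  //       {"/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_list", "0"},
  //   };
  //   auto locality = CacheLocality::readFromSysfsTree([&](std::string path) {
  //     auto iter = fakeSysfs.find(path);
  //     return iter == fakeSysfs.end() ? std::string() : iter->second;
  //   });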

  /// Reads CacheLocality information from the real sysfs filesystem.
  /// Throws an exception if no cache information can be loaded.
  static CacheLocality readFromSysfs();

  /// Returns a usable (but probably not reflective of reality)
  /// CacheLocality structure with the specified number of cpus and a
  /// single cache level that associates one cpu per cache.
  static CacheLocality uniform(size_t numCpus);
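
  // A sketch of a fallback path when sysfs isn't usable (assuming
  // std::thread::hardware_concurrency() as the cpu count source, which
  // needs <thread>):
  //
  //   auto locality =
  //       CacheLocality::uniform(std::thread::hardware_concurrency());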

  enum {
    /// Memory locations on the same cache line are subject to false
    /// sharing, which is very bad for performance.  Microbenchmarks
    /// indicate that pairs of cache lines also see interference under
    /// heavy use of atomic operations (observed for atomic increment on
    /// Sandy Bridge).  See FOLLY_ALIGN_TO_AVOID_FALSE_SHARING
    kFalseSharingRange = 128
  };

  static_assert(
      kFalseSharingRange == 128,
      "FOLLY_ALIGN_TO_AVOID_FALSE_SHARING should track kFalseSharingRange");
};

// TODO replace __attribute__ with alignas and 128 with kFalseSharingRange

/// An attribute that will cause a variable or field to be aligned so that
/// it doesn't have false sharing with anything at a smaller memory address.
#define FOLLY_ALIGN_TO_AVOID_FALSE_SHARING FOLLY_ALIGNED(128)
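
// For example, annotating each member keeps two hot atomics out of each
// other's 128-byte range (a sketch; "TwoCounters" is a hypothetical type):
//
//   struct TwoCounters {
//     FOLLY_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<uint64_t> a{0};
//     FOLLY_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<uint64_t> b{0};
//   };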

/// Knows how to derive a function pointer to the VDSO implementation of
/// getcpu(2), if available
struct Getcpu {
  /// Function pointer to a function with the same signature as getcpu(2).
  typedef int (*Func)(unsigned* cpu, unsigned* node, void* unused);

  /// Returns a pointer to the VDSO implementation of getcpu(2), if
  /// available, or nullptr otherwise.  This function may be quite
  /// expensive; be sure to cache the result.
  static Func resolveVdsoFunc();
};
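
// Since resolution is expensive, callers are expected to resolve once and
// reuse the function pointer, along the lines of this sketch:
//
//   static Getcpu::Func vdsoGetcpu = Getcpu::resolveVdsoFunc();
//   unsigned cpu;
//   if (vdsoGetcpu != nullptr && vdsoGetcpu(&cpu, nullptr, nullptr) == 0) {
//     // cpu now holds the current cpu's index
//   }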

#ifdef FOLLY_TLS
template <template <typename> class Atom>
struct SequentialThreadId {
  /// Returns the thread id assigned to the current thread
  static size_t get() {
    auto rv = currentId;
    if (UNLIKELY(rv == 0)) {
      rv = currentId = ++prevId;
    }
    return rv;
  }

 private:
  static Atom<size_t> prevId;

  static FOLLY_TLS size_t currentId;
};
#endif

struct HashingThreadId {
  static size_t get() {
    pthread_t pid = pthread_self();
    uint64_t id = 0;
    memcpy(&id, &pid, std::min(sizeof(pid), sizeof(id)));
    return hash::twang_32from64(id);
  }
};

/// A class that lazily binds a unique (for each implementation of Atom)
/// identifier to a thread.  This is a fallback mechanism for the access
/// spreader if __vdso_getcpu can't be loaded
template <typename ThreadId>
struct FallbackGetcpu {
  /// Fills the thread id into the cpu and node out params (if they
  /// are non-null).  This method is intended to act like getcpu when a
  /// fast-enough form of getcpu isn't available or isn't desired
  static int getcpu(unsigned* cpu, unsigned* node, void* /* unused */) {
    auto id = ThreadId::get();
    if (cpu != nullptr) {
      *cpu = id;
    }
    if (node != nullptr) {
      *node = 0;
    }
    return 0;
  }
};

#ifdef FOLLY_TLS
typedef FallbackGetcpu<SequentialThreadId<std::atomic>> FallbackGetcpuType;
#else
typedef FallbackGetcpu<HashingThreadId> FallbackGetcpuType;
#endif

/// AccessSpreader arranges access to a striped data structure in such a
/// way that concurrently executing threads are likely to be accessing
/// different stripes.  It does NOT guarantee uncontended access.
/// Your underlying algorithm must be thread-safe without spreading; this
/// is merely an optimization.  AccessSpreader::current(n) is typically
/// much faster than a cache miss (12 nanos on my dev box, tested fast
/// in both 2.6 and 3.2 kernels).
///
/// If available (and not using the deterministic testing implementation)
/// AccessSpreader uses the getcpu system call via VDSO and the
/// precise locality information retrieved from sysfs by CacheLocality.
/// This provides optimal anti-sharing at a fraction of the cost of a
/// cache miss.
///
/// When there are not as many stripes as processors, we try to optimally
/// place the cache sharing boundaries.  This means that if you have 2
/// stripes and run on a dual-socket system, your 2 stripes will each get
/// all of the cores from a single socket.  If you have 16 stripes on a
/// 16 core system plus hyperthreading (32 cpus), each core will get its
/// own stripe and there will be no cache sharing at all.
///
/// AccessSpreader has a fallback mechanism for when __vdso_getcpu can't be
/// loaded, or for use during deterministic testing.  Using sched_getcpu
/// or the getcpu syscall would negate the performance advantages of
/// access spreading, so we use a thread-local value and a shared atomic
/// counter to spread access out.  On systems lacking both a fast getcpu()
/// and TLS, we hash the thread id to spread accesses.
///
/// AccessSpreader is templated on the template type that is used
/// to implement atomics, as a way to instantiate the underlying
/// heuristics differently for production use and deterministic unit
/// testing.  See DeterministicScheduler for more.  If you aren't using
/// DeterministicScheduler, you can just use the default template parameter
/// all of the time.
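///
/// A typical use is a striped statistics counter, where each thread
/// increments the stripe for its current cpu (a sketch; PaddedCounter,
/// kStripes, counters, and the functions are illustrative names, not
/// part of folly):
///
///   struct PaddedCounter {
///     FOLLY_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<uint64_t> value{0};
///   };
///
///   constexpr size_t kStripes = 8;
///   PaddedCounter counters[kStripes];
///
///   void increment() {
///     auto s = folly::detail::AccessSpreader<>::current(kStripes);
///     counters[s].value.fetch_add(1, std::memory_order_relaxed);
///   }
///
///   uint64_t read() {  // full sweep; fine for rarely-read statistics
///     uint64_t sum = 0;
///     for (auto& c : counters) {
///       sum += c.value.load(std::memory_order_relaxed);
///     }
///     return sum;
///   }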
template <template <typename> class Atom = std::atomic>
struct AccessSpreader {
  /// Returns the stripe associated with the current CPU.  The returned
  /// value will be < numStripes.
  static size_t current(size_t numStripes) {
    // widthAndCpuToStripe[0] will actually work okay (all zeros), but
    // something's wrong with the caller
    assert(numStripes > 0);

    unsigned cpu;
    getcpuFunc(&cpu, nullptr, nullptr);
    return widthAndCpuToStripe[std::min(size_t(kMaxCpus),
                                        numStripes)][cpu % kMaxCpus];
  }

 private:
  /// If there are more cpus than this nothing will crash, but there
  /// might be unnecessary sharing
  enum { kMaxCpus = 128 };

  typedef uint8_t CompactStripe;

  static_assert((kMaxCpus & (kMaxCpus - 1)) == 0,
                "kMaxCpus should be a power of two so modulo is fast");
  static_assert(kMaxCpus - 1 <= std::numeric_limits<CompactStripe>::max(),
                "stripeByCpu element type isn't wide enough");

  /// Points to the getcpu-like function we are using to obtain the
  /// current cpu.  It should not be assumed that the returned cpu value
  /// is in range.  We use a static for this so that we can prearrange a
  /// valid value in the pre-constructed state and avoid the need for a
  /// conditional on every subsequent invocation (not normally a big win,
  /// but 20% on some inner loops here).
  static Getcpu::Func getcpuFunc;

  /// For each level of splitting up to kMaxCpus, maps the cpu (mod
  /// kMaxCpus) to the stripe.  Rather than performing any inequalities
  /// or modulo on the actual number of cpus, we just fill in the entire
  /// array.
  static CompactStripe widthAndCpuToStripe[kMaxCpus + 1][kMaxCpus];

  static bool initialized;

  /// Returns the best getcpu implementation for Atom
  static Getcpu::Func pickGetcpuFunc();

  /// Always claims to be on CPU zero, node zero
  static int degenerateGetcpu(unsigned* cpu, unsigned* node, void*) {
    if (cpu != nullptr) {
      *cpu = 0;
    }
    if (node != nullptr) {
      *node = 0;
    }
    return 0;
  }

  // The function to call for fast lookup of getcpu is a singleton, as
  // is the precomputed table of locality information.  AccessSpreader
  // is used in very tight loops, however (we're trying to race an L1
  // cache miss!), so the normal singleton mechanisms are noticeably
  // expensive.  Even a not-taken branch guarding access to getcpuFunc
  // slows AccessSpreader::current from 12 nanos to 14.  As a result, we
  // populate the static members with simple (but valid) values that can
  // be filled in by the linker, and then follow up with a normal static
  // initializer call that puts in the proper version.  This means that
  // when there are initialization order issues we will just observe a
  // zero stripe.  Once a sanitizer gets smart enough to detect this as
  // a race or undefined behavior, we can annotate it.
  static bool initialize() {
    getcpuFunc = pickGetcpuFunc();

    auto& cacheLocality = CacheLocality::system<Atom>();
    auto n = cacheLocality.numCpus;
    for (size_t width = 0; width <= kMaxCpus; ++width) {
      auto numStripes = std::max(size_t{1}, width);
      for (size_t cpu = 0; cpu < kMaxCpus && cpu < n; ++cpu) {
        auto index = cacheLocality.localityIndexByCpu[cpu];
        assert(index < n);
        // as index goes from 0..n, post-transform value goes from
        // 0..numStripes (e.g. with n == 32 and numStripes == 2, locality
        // indices 0..15 map to stripe 0 and 16..31 map to stripe 1)
        widthAndCpuToStripe[width][cpu] = (index * numStripes) / n;
        assert(widthAndCpuToStripe[width][cpu] < numStripes);
      }
      for (size_t cpu = n; cpu < kMaxCpus; ++cpu) {
        widthAndCpuToStripe[width][cpu] = widthAndCpuToStripe[width][cpu - n];
      }
    }
    return true;
  }
};

template <>
Getcpu::Func AccessSpreader<std::atomic>::pickGetcpuFunc();

#define DECLARE_ACCESS_SPREADER_TYPE(Atom)                                     \
  namespace folly {                                                            \
  namespace detail {                                                           \
  template <>                                                                  \
  Getcpu::Func AccessSpreader<Atom>::getcpuFunc =                              \
      AccessSpreader<Atom>::degenerateGetcpu;                                  \
  template <>                                                                  \
  typename AccessSpreader<Atom>::CompactStripe                                 \
      AccessSpreader<Atom>::widthAndCpuToStripe[129][128] = {};                \
  template <>                                                                  \
  bool AccessSpreader<Atom>::initialized = AccessSpreader<Atom>::initialize(); \
  }                                                                            \
  }

} // namespace detail
} // namespace folly

#endif /* FOLLY_DETAIL_CACHELOCALITY_H_ */