/*
 * Copyright 2015 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FOLLY_DETAIL_CACHELOCALITY_H_
#define FOLLY_DETAIL_CACHELOCALITY_H_

#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstdint>
#include <functional>
#include <limits>
#include <new>
#include <string>
#include <type_traits>
#include <vector>

#include <folly/Likely.h>
#include <folly/Portability.h>

namespace folly { namespace detail {

// This file contains several classes that might be useful if you are
// trying to dynamically optimize cache locality: CacheLocality reads
// cache sharing information from sysfs to determine how CPUs should be
// grouped to minimize contention, Getcpu provides fast access to the
// current CPU via __vdso_getcpu, and AccessSpreader uses these two to
// optimally spread accesses among a predetermined number of stripes.
//
// AccessSpreader<>::current(n) microbenchmarks at 22 nanos, which is
// substantially less than the cost of a cache miss. This means that we
// can effectively use it to reduce cache line ping-pong on striped data
// structures such as IndexedMemPool or statistics counters.
//
// Because CacheLocality looks at all of the cache levels, it can be
// used for different levels of optimization. AccessSpreader(2) does
// per-chip spreading on a dual socket system. AccessSpreader(numCpus)
// does perfect per-cpu spreading. AccessSpreader(numCpus / 2) does
// perfect L1 spreading in a system with hyperthreading enabled.
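//
// As an illustrative sketch (StripedCounter is a hypothetical user type,
// not part of this header), a statistics counter might use it like this:
//
//   struct StripedCounter {
//     static constexpr size_t kStripes = 8;
//     struct FOLLY_ALIGN_TO_AVOID_FALSE_SHARING Slot {
//       std::atomic<int64_t> value{0};
//     };
//     Slot slots_[kStripes];
//
//     void add(int64_t n) {
//       // threads on different cores usually land on different slots, so
//       // the hot cache lines stay core-local instead of ping-ponging
//       slots_[AccessSpreader<>::current(kStripes)].value.fetch_add(
//           n, std::memory_order_relaxed);
//     }
//
//     int64_t read() const {
//       int64_t sum = 0;
//       for (auto& s : slots_) {
//         sum += s.value.load(std::memory_order_relaxed);
//       }
//       return sum;
//     }
//   };
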
struct CacheLocality {

  /// 1 more than the maximum value that can be returned from sched_getcpu
  /// or getcpu. This is the number of hardware thread contexts provided
  /// by the processors
  size_t numCpus;

  /// Holds the number of caches present at each cache level (0 is
  /// the closest to the cpu). This is the number of AccessSpreader
  /// stripes needed to avoid cross-cache communication at the specified
  /// layer. numCachesByLevel.front() is the number of L1 caches and
  /// numCachesByLevel.back() is the number of last-level caches.
  std::vector<size_t> numCachesByLevel;

  /// A map from cpu (from sched_getcpu or getcpu) to an index in the
  /// range 0..numCpus-1, where neighboring locality indices are more
  /// likely to share caches than indices far away. All of the members
  /// of a particular cache level will be contiguous in their locality
  /// index. For example, if numCpus is 32 and numCachesByLevel.back()
  /// is 2, then cpus with a locality index < 16 will share one last-level
  /// cache and cpus with a locality index >= 16 will share the other.
  std::vector<size_t> localityIndexByCpu;

  /// Returns the best CacheLocality information available for the current
  /// system, cached for fast access. This will be loaded from sysfs if
  /// possible, otherwise it will be correct in the number of CPUs but
  /// not in their sharing structure.
  ///
  /// If you are into yo dawgs, this is a shared cache of the local
  /// locality of the shared caches.
  ///
  /// The template parameter here is used to allow injection of a
  /// repeatable CacheLocality structure during testing. Rather than
  /// inject the type of the CacheLocality provider into every data type
  /// that transitively uses it, all components select between the default
  /// sysfs implementation and a deterministic implementation by keying
  /// off the type of the underlying atomic. See DeterministicScheduler.
  template <template <typename> class Atom = std::atomic>
  static const CacheLocality& system();
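
  // As a sketch of the intended use (test::DeterministicAtomic is the
  // deterministic atomic from folly's DeterministicSchedule; naming it
  // here is an assumption, it is not declared in this header):
  //
  //   auto& real = CacheLocality::system<>();  // sysfs-backed singleton
  //   auto& fake = CacheLocality::system<test::DeterministicAtomic>();
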
  /// Reads CacheLocality information from a tree structured like
  /// the sysfs filesystem. The provided function will be evaluated
  /// for each sysfs file that needs to be queried. The function
  /// should return a string containing the first line of the file
  /// (not including the newline), or an empty string if the file does
  /// not exist. The function will be called with paths of the form
  /// /sys/devices/system/cpu/cpu*/cache/index*/{type,shared_cpu_list} .
  /// Throws an exception if no caches can be parsed at all.
  static CacheLocality readFromSysfsTree(
      const std::function<std::string(std::string)>& mapping);
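
  // Illustrative test sketch (fake file contents for a hypothetical
  // machine whose cpus 0 and 1 share a single data cache; needs <map>):
  //
  //   std::map<std::string, std::string> fake = {
  //       {"/sys/devices/system/cpu/cpu0/cache/index0/type", "Data"},
  //       {"/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_list", "0-1"},
  //       {"/sys/devices/system/cpu/cpu1/cache/index0/type", "Data"},
  //       {"/sys/devices/system/cpu/cpu1/cache/index0/shared_cpu_list", "0-1"},
  //   };
  //   auto locality = CacheLocality::readFromSysfsTree(
  //       [&](std::string path) {
  //         auto iter = fake.find(path);
  //         return iter == fake.end() ? std::string() : iter->second;
  //       });
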
  /// Reads CacheLocality information from the real sysfs filesystem.
  /// Throws an exception if no cache information can be loaded.
  static CacheLocality readFromSysfs();

  /// Returns a usable (but probably not reflective of reality)
  /// CacheLocality structure with the specified number of cpus and a
  /// single cache level that associates one cpu per cache.
  static CacheLocality uniform(size_t numCpus);
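
  // For example (inferred from the contract above): uniform(4) describes
  // 4 cpus with four single-cpu caches at one level, i.e. numCachesByLevel
  // is {4} and localityIndexByCpu is the identity map {0, 1, 2, 3}.
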
  enum {
    /// Memory locations on the same cache line are subject to false
    /// sharing, which is very bad for performance. Microbenchmarks
    /// indicate that pairs of cache lines also see interference under
    /// heavy use of atomic operations (observed for atomic increment on
    /// Sandy Bridge). See FOLLY_ALIGN_TO_AVOID_FALSE_SHARING
    kFalseSharingRange = 128
  };

  static_assert(kFalseSharingRange == 128,
                "FOLLY_ALIGN_TO_AVOID_FALSE_SHARING should track kFalseSharingRange");
};

// TODO replace __attribute__ with alignas and 128 with kFalseSharingRange

/// An attribute that will cause a variable or field to be aligned so that
/// it doesn't have false sharing with anything at a smaller memory address.
#define FOLLY_ALIGN_TO_AVOID_FALSE_SHARING FOLLY_ALIGNED(128)
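
// Illustrative sketch: keeping two hot atomics out of each other's false
// sharing range (Widget and its fields are hypothetical):
//
//   struct Widget {
//     FOLLY_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<uint64_t> hits_;
//     FOLLY_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<uint64_t> misses_;
//   };
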
/// Holds a function pointer to the VDSO implementation of getcpu(2),
/// if available
struct Getcpu {
  /// Function pointer to a function with the same signature as getcpu(2).
  typedef int (*Func)(unsigned* cpu, unsigned* node, void* unused);

  /// Returns a pointer to the VDSO implementation of getcpu(2), if
  /// available, or nullptr otherwise
  static Func vdsoFunc();
};
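
// Illustrative sketch of calling through the returned pointer:
//
//   unsigned cpu;
//   if (auto func = Getcpu::vdsoFunc()) {
//     func(&cpu, nullptr, nullptr);  // cheap: no syscall via the VDSO
//   }
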
/// A class that lazily binds a unique (for each implementation of Atom)
/// identifier to a thread. This is a fallback mechanism for the access
/// spreader if we are in testing (using DeterministicAtomic) or if
/// __vdso_getcpu can't be dynamically loaded
template <template <typename> class Atom>
struct SequentialThreadId {
  /// Returns the thread id assigned to the current thread
  static size_t get() {
    auto rv = currentId;
    if (UNLIKELY(rv == 0)) {
      rv = currentId = ++prevId;
    }
    return rv;
  }

  /// Fills the thread id into the cpu and node out params (if they
  /// are non-null). This method is intended to act like getcpu when a
  /// fast-enough form of getcpu isn't available or isn't desired
  static int getcpu(unsigned* cpu, unsigned* node, void* unused) {
    auto id = get();
    if (cpu) {
      *cpu = id;
    }
    if (node) {
      *node = id;
    }
    return 0;
  }

 private:
  static Atom<size_t> prevId;

  static FOLLY_TLS size_t currentId;
};
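
// Illustrative sketch: SequentialThreadId<Atom>::getcpu matches
// Getcpu::Func, so it can be handed to AccessSpreader (declared below)
// as an explicit getcpu source:
//
//   AccessSpreader<> spreader(4,
//                             CacheLocality::system<>(),
//                             SequentialThreadId<std::atomic>::getcpu);
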
template <template <typename> class Atom, size_t kMaxCpus>
struct AccessSpreaderArray;

/// AccessSpreader arranges access to a striped data structure in such a
/// way that concurrently executing threads are likely to be accessing
/// different stripes. It does NOT guarantee uncontended access.
/// Your underlying algorithm must be thread-safe without spreading; this
/// is merely an optimization. AccessSpreader::current(n) is typically
/// much faster than a cache miss (22 nanos on my dev box, tested fast
/// in both 2.6 and 3.2 kernels).
///
/// You are free to create your own AccessSpreader-s or to cache the
/// results of AccessSpreader<>::shared(n), but you will probably want
/// to use one of the system-wide shared ones. Calling .current() on
/// a particular AccessSpreader instance only saves about 1 nanosecond
/// over calling AccessSpreader<>::shared(n).
///
/// If available (and not using the deterministic testing implementation)
/// AccessSpreader uses the getcpu system call via VDSO and the
/// precise locality information retrieved from sysfs by CacheLocality.
/// This provides optimal anti-sharing at a fraction of the cost of a
/// cache miss.
///
/// When there are not as many stripes as processors, we try to optimally
/// place the cache sharing boundaries. This means that if you have 2
/// stripes and run on a dual-socket system, your 2 stripes will each get
/// all of the cores from a single socket. If you have 16 stripes on a
/// 16 core system plus hyperthreading (32 cpus), each core will get its
/// own stripe and there will be no cache sharing at all.
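///
/// As a worked sketch of that boundary placement (hypothetical numbers):
/// on a 32-cpu dual-socket box where locality indices 0..15 share one
/// last-level cache and 16..31 share the other, 2 stripes map locality
/// index i to stripe (i * 2) / 32, so stripe 0 is exactly the first
/// socket and stripe 1 is exactly the second.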
///
/// AccessSpreader has a fallback mechanism for when __vdso_getcpu can't be
/// loaded, or for use during deterministic testing. Using sched_getcpu or
/// the getcpu syscall would negate the performance advantages of access
/// spreading, so we use a thread-local value and a shared atomic counter
/// to spread access out.
///
/// AccessSpreader is templated on the template type that is used
/// to implement atomics, as a way to instantiate the underlying
/// heuristics differently for production use and deterministic unit
/// testing. See DeterministicScheduler for more. If you aren't using
/// DeterministicScheduler, you can just use the default template parameter
/// all of the time.
template <template <typename> class Atom = std::atomic>
struct AccessSpreader {
  /// Returns a never-destructed shared AccessSpreader instance.
  /// numStripes should be > 0.
  static const AccessSpreader& shared(size_t numStripes) {
    // sharedInstances[0] actually has numStripes == 1
    assert(numStripes > 0);

    // the last shared element handles all large sizes
    return AccessSpreaderArray<Atom, kMaxCpus>::sharedInstance[
        std::min(size_t(kMaxCpus), numStripes)];
  }

  /// Returns the stripe associated with the current CPU, assuming
  /// that there are numStripes (non-zero) stripes. Equivalent to
  /// AccessSpreader::shared(numStripes).current().
  static size_t current(size_t numStripes) {
    return shared(numStripes).current();
  }

  /// stripeByCore uses 1 stripe per L1 cache, according to
  /// CacheLocality::system<>(). Use stripeByCore.numStripes() to see
  /// its width, or stripeByCore.current() to get the current stripe
  static const AccessSpreader stripeByCore;

  /// stripeByChip uses 1 stripe per last-level cache, which is the fewest
  /// number of stripes for which off-chip communication can be avoided
  /// (assuming all caches are on-chip). Use stripeByChip.numStripes()
  /// to see its width, or stripeByChip.current() to get the current stripe
  static const AccessSpreader stripeByChip;
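
  // Illustrative sketch: sizing a striped structure from the topology
  // (Shard is a hypothetical user type):
  //
  //   auto& spreader = AccessSpreader<>::stripeByChip;
  //   std::vector<Shard> shards(spreader.numStripes());
  //   shards[spreader.current()].doWork();
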
  /// Constructs an AccessSpreader that will return values from
  /// 0 to numStripes-1 (inclusive), precomputing the mapping
  /// from CPU to stripe. There is no use in having more than
  /// CacheLocality::system<Atom>().localityIndexByCpu.size() stripes or
  /// kMaxCpus stripes.
  explicit AccessSpreader(size_t spreaderNumStripes,
                          const CacheLocality& cacheLocality =
                              CacheLocality::system<Atom>(),
                          Getcpu::Func getcpuFunc = nullptr)
    : getcpuFunc_(getcpuFunc ? getcpuFunc
                             : pickGetcpuFunc(spreaderNumStripes))
    , numStripes_(spreaderNumStripes)
  {
    auto n = cacheLocality.numCpus;
    for (size_t cpu = 0; cpu < kMaxCpus && cpu < n; ++cpu) {
      auto index = cacheLocality.localityIndexByCpu[cpu];
      assert(index < n);
      // as index goes from 0..n, post-transform value goes from
      // 0..numStripes
      stripeByCpu[cpu] = (index * numStripes_) / n;
      assert(stripeByCpu[cpu] < numStripes_);
    }
    for (size_t cpu = n; cpu < kMaxCpus; ++cpu) {
      stripeByCpu[cpu] = stripeByCpu[cpu - n];
    }
  }
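
  // A quick sketch of the transform above with hypothetical numbers:
  // with n == 8 cpus and numStripes_ == 3, locality indices 0..7 map to
  // stripes 0,0,0,1,1,1,2,2 (floor(index * 3 / 8)), so each stripe covers
  // a contiguous group of cache-sharing cpus.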

  /// Returns 1 more than the maximum value that can be returned from
  /// current()
  size_t numStripes() const {
    return numStripes_;
  }

  /// Returns the stripe associated with the current CPU
  size_t current() const {
    unsigned cpu;
    getcpuFunc_(&cpu, nullptr, nullptr);
    return stripeByCpu[cpu % kMaxCpus];
  }

 private:
  /// If there are more cpus than this nothing will crash, but there
  /// might be unnecessary sharing
  enum { kMaxCpus = 128 };

  typedef uint8_t CompactStripe;

  static_assert((kMaxCpus & (kMaxCpus - 1)) == 0,
                "kMaxCpus should be a power of two so modulo is fast");
  static_assert(kMaxCpus - 1 <= std::numeric_limits<CompactStripe>::max(),
                "stripeByCpu element type isn't wide enough");

  /// Points to the getcpu-like function we are using to obtain the
  /// current cpu. It should not be assumed that the returned cpu value
  /// is in range. We use a member for this instead of a static so that
  /// this fetch preloads a prefix of the stripeByCpu array
  Getcpu::Func getcpuFunc_;

  /// A precomputed map from cpu to stripe. Rather than add a layer of
  /// indirection requiring a dynamic bounds check and another cache miss,
  /// we always precompute the whole array
  CompactStripe stripeByCpu[kMaxCpus];

  size_t numStripes_;

  /// Returns the best getcpu implementation for this type and width
  /// of AccessSpreader
  static Getcpu::Func pickGetcpuFunc(size_t numStripes);
};

template <>
Getcpu::Func AccessSpreader<std::atomic>::pickGetcpuFunc(size_t);

/// An array of kMaxCpus+1 AccessSpreader<Atom> instances constructed
/// with default params, with the zero-th element having 1 stripe
template <template <typename> class Atom, size_t kMaxStripe>
struct AccessSpreaderArray {

  AccessSpreaderArray() {
    for (size_t i = 0; i <= kMaxStripe; ++i) {
      new (raw + i) AccessSpreader<Atom>(std::max(size_t(1), i));
    }
  }

  ~AccessSpreaderArray() {
    for (size_t i = 0; i <= kMaxStripe; ++i) {
      auto p = static_cast<AccessSpreader<Atom>*>(static_cast<void*>(raw + i));
      p->~AccessSpreader();
    }
  }

  AccessSpreader<Atom> const& operator[](size_t index) const {
    return *static_cast<AccessSpreader<Atom> const*>(
        static_cast<void const*>(raw + index));
  }

 private:
  // AccessSpreader uses sharedInstance
  friend AccessSpreader<Atom>;

  static AccessSpreaderArray<Atom, kMaxStripe> sharedInstance;

  /// aligned_storage is uninitialized, we use placement new since there
  /// is no AccessSpreader default constructor
  typename std::aligned_storage<sizeof(AccessSpreader<Atom>),
                                CacheLocality::kFalseSharingRange>::type
      raw[kMaxStripe + 1];
};

}} // namespace folly::detail

#endif /* FOLLY_DETAIL_CACHELOCALITY_H_ */