2 * Copyright 2017 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
19 #include <folly/Portability.h>
20 #include <folly/Preprocessor.h> // for FB_ANONYMOUS_VARIABLE
21 #include <folly/ScopeGuard.h>
22 #include <folly/Traits.h>
23 #include <folly/portability/GFlags.h>
24 #include <folly/portability/Time.h>
28 #include <boost/function_types/function_arity.hpp>
30 #include <glog/logging.h>
32 #include <type_traits>
// gflags flag (--benchmark), defined in the corresponding .cpp; gates
// runBenchmarksOnFlag() below.
34 DECLARE_bool(benchmark);
39 * Runs all benchmarks defined. Usually put in main().
44 * Runs all benchmarks defined if and only if the --benchmark flag has
45 * been passed to the program. Usually put in main().
47 inline bool runBenchmarksOnFlag() {
48 if (FLAGS_benchmark) {
// NOTE(review): the statement that actually runs the benchmarks falls in a
// gap of this excerpt (original lines 49-50) -- confirm against the full file.
// Returns the flag value so main() can tell whether benchmarks were run.
51 return FLAGS_benchmark;
// Result of timing one benchmark invocation:
// (elapsed nanoseconds, number of iterations actually run).
56 typedef std::pair<uint64_t, unsigned int> TimeIterPair;
59 * Adds a benchmark wrapped in a std::function. Only used
60 * internally. Pass by value is intentional.
62 void addBenchmarkImpl(const char* file,
// NOTE(review): the `name` parameter line is missing from this excerpt
// (original line 63); the declaration is incomplete as shown here.
64 std::function<TimeIterPair(unsigned int)>);
67 * Takes the difference between two timespec values. end is assumed to
// (doc comment truncated in this excerpt; `end` is assumed at or after `start`)
70 inline uint64_t timespecDiff(timespec end, timespec start) {
// Same-second case: only the nanosecond fields differ.
71 if (end.tv_sec == start.tv_sec) {
72 assert(end.tv_nsec >= start.tv_nsec);
73 return uint64_t(end.tv_nsec - start.tv_nsec);
75 assert(end.tv_sec > start.tv_sec);
76 auto diff = uint64_t(end.tv_sec - start.tv_sec);
// Guard against uint64_t overflow when scaling whole seconds to nanoseconds.
77 assert(diff < std::numeric_limits<uint64_t>::max() / 1000000000ULL);
// Note: end.tv_nsec may be smaller than start.tv_nsec here; the unsigned
// arithmetic still yields the correct total because diff >= 1 second.
78 return diff * 1000000000ULL + end.tv_nsec - start.tv_nsec;
82 * Takes the difference between two sets of timespec values. The first
83 * two come from a high-resolution clock whereas the other two come
84 * from a low-resolution clock. The crux of the matter is that
85 * high-res values may be bogus as documented in
86 * http://linux.die.net/man/3/clock_gettime. The trouble is when the
87 * running process migrates from one CPU to another, which is more
88 * likely for long-running processes. Therefore we watch for high
89 * differences between the two timings.
91 * This function is subject to further improvements.
93 inline uint64_t timespecDiff(timespec end, timespec start,
94 timespec endCoarse, timespec startCoarse) {
95 auto fine = timespecDiff(end, start);
96 auto coarse = timespecDiff(endCoarse, startCoarse);
// Distrust the high-res reading if it undershoots the coarse one by >= 1ms.
// NOTE(review): `coarse - fine` is unsigned, so this condition is also true
// whenever fine > coarse (wraparound) -- appears intentional; confirm.
97 if (coarse - fine >= 1000000) {
98 // The fine time is in all likelihood bogus
// NOTE(review): the return statements fall in a gap of this excerpt
// (original lines 99-103) -- presumably returning coarse here, fine otherwise.
104 } // namespace detail
107 * Supporting type for BENCHMARK_SUSPEND defined below.
// RAII helper: while alive (and not dismissed) the elapsed time is recorded
// and later subtracted from the benchmark's measured time via nsSpent.
109 struct BenchmarkSuspender {
// Constructor: record the instant suspension begins.
110 BenchmarkSuspender() {
111 CHECK_EQ(0, clock_gettime(CLOCK_REALTIME, &start));
114 BenchmarkSuspender(const BenchmarkSuspender &) = delete;
// Move transfers the pending timing; copy is disallowed (single owner).
115 BenchmarkSuspender(BenchmarkSuspender && rhs) noexcept {
120 BenchmarkSuspender& operator=(const BenchmarkSuspender &) = delete;
121 BenchmarkSuspender& operator=(BenchmarkSuspender && rhs) {
// A nonzero `start` marks an active (not yet tallied) suspension.
122 if (start.tv_nsec > 0 || start.tv_sec > 0) {
130 ~BenchmarkSuspender() {
131 if (start.tv_nsec > 0 || start.tv_sec > 0) {
// NOTE(review): the member functions owning the next two asserts fall in gaps
// of this excerpt -- presumably dismiss() (requires active) and rehire()
// (requires dismissed); confirm against the full file.
137 assert(start.tv_nsec > 0 || start.tv_sec > 0);
143 assert(start.tv_nsec == 0 || start.tv_sec == 0);
144 CHECK_EQ(0, clock_gettime(CLOCK_REALTIME, &start));
// Runs `f` with the suspension dismissed, rehiring on every exit path.
148 auto dismissing(F f) -> typename std::result_of<F()>::type {
149 SCOPE_EXIT { rehire(); };
155 * This is for use inside of if-conditions, used in BENCHMARK macros.
156 * If-conditions bypass the explicit on operator bool.
158 explicit operator bool() const {
163 * Accumulates nanoseconds spent outside benchmark.
165 typedef uint64_t NanosecondsSpent;
// Global tally of suspended time; reset by addBenchmark's measurement lambda.
166 static NanosecondsSpent nsSpent;
// Tally helper: add the elapsed suspension interval to nsSpent.
171 CHECK_EQ(0, clock_gettime(CLOCK_REALTIME, &end));
172 nsSpent += detail::timespecDiff(end, start);
180 * Adds a benchmark. Usually not called directly but instead through
181 * the macro BENCHMARK defined below. The lambda function involved
182 * must take exactly one parameter of type unsigned, and the benchmark
183 * uses it with counter semantics (iteration occurs inside the
// SFINAE on the lambda's operator() arity selects this overload for lambdas
// that take the iteration count themselves (the enable_if condition line is
// missing from this excerpt -- original lines 189-190).
186 template <typename Lambda>
187 typename std::enable_if<
188 boost::function_types::function_arity<decltype(&Lambda::operator())>::value
191 addBenchmark(const char* file, const char* name, Lambda&& lambda) {
// `execute` performs one timed run of `times` iterations; capture by copy is
// deliberate so the registered callable owns the lambda.
192 auto execute = [=](unsigned int times) {
// Reset the suspended-time tally so it reflects only this run.
193 BenchmarkSuspender::nsSpent = 0;
197 // CORE MEASUREMENT STARTS
198 auto const r1 = clock_gettime(CLOCK_REALTIME, &start);
199 niter = lambda(times);
200 auto const r2 = clock_gettime(CLOCK_REALTIME, &end);
201 // CORE MEASUREMENT ENDS
// Elapsed time minus time spent under BENCHMARK_SUSPEND.
206 return detail::TimeIterPair(
207 detail::timespecDiff(end, start) - BenchmarkSuspender::nsSpent,
211 detail::addBenchmarkImpl(file, name,
212 std::function<detail::TimeIterPair(unsigned int)>(execute));
216 * Adds a benchmark. Usually not called directly but instead through
217 * the macro BENCHMARK defined below. The lambda function involved
218 * must take zero parameters, and the benchmark calls it repeatedly
219 * (iteration occurs outside the function).
// Counterpart overload for zero-argument lambdas (the enable_if condition
// line is missing from this excerpt -- original lines 224-225).
221 template <typename Lambda>
222 typename std::enable_if<
223 boost::function_types::function_arity<decltype(&Lambda::operator())>::value
226 addBenchmark(const char* file, const char* name, Lambda&& lambda) {
// Adapt to the counting form above: loop `times` iterations around the
// user's zero-argument lambda.
227 addBenchmark(file, name, [=](unsigned int times) {
228 unsigned int niter = 0;
229 while (times-- > 0) {
237 * Call doNotOptimizeAway(var) to ensure that var will be computed even
238 * post-optimization. Use it for variables that are computed during
239 * benchmarking but otherwise are useless. The compiler tends to do a
240 * good job at eliminating unused variables, and this function fools it
241 * into thinking var is in fact needed.
243 * Call makeUnpredictable(var) when you don't want the optimizer to use
244 * its knowledge of var to shape the following code. This is useful
245 * when constant propagation or power reduction is possible during your
246 * benchmark but not in real use cases.
// NOTE(review): the #pragma optimize lines are MSVC-specific; the surrounding
// compiler #ifdef falls in a gap of this excerpt -- confirm these overloads
// are guarded for MSVC in the full file.
251 #pragma optimize("", off)
// Opaque sink: with optimization disabled around it, the compiler must keep
// any value whose address is passed in.
253 inline void doNotOptimizeDependencySink(const void*) {}
255 #pragma optimize("", on)
// Fallback doNotOptimizeAway: forces `datum` to be materialized by handing
// its address to the un-optimizable sink above.
258 void doNotOptimizeAway(const T& datum) {
259 doNotOptimizeDependencySink(&datum);
// Fallback makeUnpredictable: same sink trick; takes a mutable reference so
// the optimizer cannot assume the value afterwards.
262 template <typename T>
263 void makeUnpredictable(T& datum) {
264 doNotOptimizeDependencySink(&datum);
// Trait selecting between the register ("r") and memory ("m") inline-asm
// variants of doNotOptimizeAway/makeUnpredictable defined below.
270 template <typename T>
271 struct DoNotOptimizeAwayNeedsIndirect {
272 using Decayed = typename std::decay<T>::type;
274 // First two constraints ensure it can be an "r" operand.
275 // std::is_pointer check is because callers seem to expect that
276 // doNotOptimizeAway(&x) is equivalent to doNotOptimizeAway(x).
277 constexpr static bool value = !folly::IsTriviallyCopyable<Decayed>::value ||
278 sizeof(Decayed) > sizeof(long) || std::is_pointer<Decayed>::value;
280 } // detail namespace
// Direct variant: used when the value fits in a register (see the trait
// above); the asm input forces the compiler to have computed `datum`.
282 template <typename T>
283 auto doNotOptimizeAway(const T& datum) -> typename std::enable_if<
284 !detail::DoNotOptimizeAwayNeedsIndirect<T>::value>::type {
285 // The "r" constraint forces the compiler to make datum available
286 // in a register to the asm block, which means that it must have
287 // computed/loaded it. We use this path for things that are <=
288 // sizeof(long) (they have to fit), trivial (otherwise the compiler
289 // doesn't want to put them in a register), and not a pointer (because
290 // doNotOptimizeAway(&foo) would otherwise be a foot gun that didn't
291 // necessarily compute foo).
293 // An earlier version of this method had a more permissive input operand
294 // constraint, but that caused unnecessary variation between clang and
// (comment truncated in this excerpt -- original line 295 is missing)
296 asm volatile("" ::"r"(datum));
// Indirect variant: for values that cannot be an "r" operand (non-trivial,
// larger than a long, or pointers -- see DoNotOptimizeAwayNeedsIndirect).
299 template <typename T>
300 auto doNotOptimizeAway(const T& datum) -> typename std::enable_if<
301 detail::DoNotOptimizeAwayNeedsIndirect<T>::value>::type {
302 // This version of doNotOptimizeAway tells the compiler that the asm
303 // block will read datum from memory, and that in addition it might read
304 // or write from any memory location. If the memory clobber could be
305 // separated into input and output that would be preferable.
306 asm volatile("" ::"m"(datum) : "memory");
// Direct variant of makeUnpredictable: "+r" marks `datum` as read AND
// written, so the optimizer must forget everything it knew about the value.
309 template <typename T>
310 auto makeUnpredictable(T& datum) -> typename std::enable_if<
311 !detail::DoNotOptimizeAwayNeedsIndirect<T>::value>::type {
312 asm volatile("" : "+r"(datum));
// Indirect variant of makeUnpredictable: memory operand plus "memory"
// clobber discards the optimizer's knowledge of `datum` (and all memory).
315 template <typename T>
316 auto makeUnpredictable(T& datum) -> typename std::enable_if<
317 detail::DoNotOptimizeAwayNeedsIndirect<T>::value>::type {
318 asm volatile("" ::"m"(datum) : "memory");
326 * Introduces a benchmark function. Used internally, see BENCHMARK and
// Registers the benchmark through a file-scope bool initializer at static-init
// time, then opens the definition of funName so the user's body follows the
// macro invocation. NOTE(review): several continuation lines of these
// #defines fall in gaps of this excerpt -- do not edit without the full file.
329 #define BENCHMARK_IMPL(funName, stringName, rv, paramType, paramName) \
330 static void funName(paramType); \
331 static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = ( \
332 ::folly::addBenchmark(__FILE__, stringName, \
333 [](paramType paramName) -> unsigned { funName(paramName); \
336 static void funName(paramType paramName)
339 * Introduces a benchmark function with support for returning the actual
340 * number of iterations. Used internally, see BENCHMARK_MULTI and friends
// Same registration trick, but funName itself returns the iteration count.
343 #define BENCHMARK_MULTI_IMPL(funName, stringName, paramType, paramName) \
344 static unsigned funName(paramType); \
345 static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = ( \
346 ::folly::addBenchmark(__FILE__, stringName, \
347 [](paramType paramName) { return funName(paramName); }), \
349 static unsigned funName(paramType paramName)
352 * Introduces a benchmark function. Use with either one or two arguments.
353 * The first is the name of the benchmark. Use something descriptive, such
354 * as insertVectorBegin. The second argument may be missing, or could be a
355 * symbolic counter. The counter dictates how many internal iteration the
356 * benchmark does. Example:
358 * BENCHMARK(vectorPushBack) {
363 * BENCHMARK(insertVectorBegin, n) {
365 * FOR_EACH_RANGE (i, 0, n) {
366 * v.insert(v.begin(), 42);
// FB_ARG_2_OR_1 / FB_ONE_OR_NONE make the counter argument optional: with no
// second argument the benchmark body takes no iteration parameter.
370 #define BENCHMARK(name, ...) \
373 FB_STRINGIZE(name), \
374 FB_ARG_2_OR_1(1, ## __VA_ARGS__), \
375 FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__), \
379 * Like BENCHMARK above, but allows the user to return the actual
380 * number of iterations executed in the function body. This can be
381 * useful if the benchmark function doesn't know upfront how many
382 * iterations it's going to run or if it runs through a certain
383 * number of test cases, e.g.:
385 * BENCHMARK_MULTI(benchmarkSomething) {
386 * std::vector<int> testCases { 0, 1, 1, 2, 3, 5 };
387 * for (int c : testCases) {
390 * return testCases.size();
393 #define BENCHMARK_MULTI(name, ...) \
394 BENCHMARK_MULTI_IMPL( \
396 FB_STRINGIZE(name), \
397 FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__), \
401 * Defines a benchmark that passes a parameter to another one. This is
402 * common for benchmarks that need a "problem size" in addition to
403 * "number of iterations". Consider:
405 * void pushBack(uint n, size_t initialSize) {
407 * BENCHMARK_SUSPEND {
408 * v.resize(initialSize);
410 * FOR_EACH_RANGE (i, 0, n) {
414 * BENCHMARK_PARAM(pushBack, 0)
415 * BENCHMARK_PARAM(pushBack, 1000)
416 * BENCHMARK_PARAM(pushBack, 1000000)
418 * The benchmark above estimates the speed of push_back at different
419 * initial sizes of the vector. The framework will pass 0, 1000, and
420 * 1000000 for initialSize, and the iteration count for n.
// The parameter value doubles as the displayed name.
422 #define BENCHMARK_PARAM(name, param) \
423 BENCHMARK_NAMED_PARAM(name, param, param)
426 * Same as BENCHMARK_PARAM, but allows one to return the actual number of
427 * iterations that have been run.
429 #define BENCHMARK_PARAM_MULTI(name, param) \
430 BENCHMARK_NAMED_PARAM_MULTI(name, param, param)
433 * Like BENCHMARK_PARAM(), but allows a custom name to be specified for each
434 * parameter, rather than using the parameter value.
436 * Useful when the parameter value is not a valid token for string pasting,
437 * or when you want to specify multiple parameter arguments.
441 * void addValue(uint n, int64_t bucketSize, int64_t min, int64_t max) {
442 * Histogram<int64_t> hist(bucketSize, min, max);
444 * FOR_EACH_RANGE (i, 0, n) {
445 * hist.addValue(num);
447 * if (num > max) { num = min; }
451 * BENCHMARK_NAMED_PARAM(addValue, 0_to_100, 1, 0, 100)
452 * BENCHMARK_NAMED_PARAM(addValue, 0_to_1000, 10, 0, 1000)
453 * BENCHMARK_NAMED_PARAM(addValue, 5k_to_20k, 250, 5000, 20000)
// Generates a wrapper named name_param_name, displayed as "name(param_name)",
// that forwards iters plus the extra arguments to the target function.
// NOTE(review): continuation lines of this #define fall in gaps of this
// excerpt -- do not edit without the full file.
455 #define BENCHMARK_NAMED_PARAM(name, param_name, ...) \
457 FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
458 FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
462 name(iters, ## __VA_ARGS__); \
466 * Same as BENCHMARK_NAMED_PARAM, but allows one to return the actual number
467 * of iterations that have been run.
469 #define BENCHMARK_NAMED_PARAM_MULTI(name, param_name, ...) \
470 BENCHMARK_MULTI_IMPL( \
471 FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
472 FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
475 return name(iters, ## __VA_ARGS__); \
479 * Just like BENCHMARK, but prints the time relative to a
480 * baseline. The baseline is the most recent BENCHMARK() seen in
481 * the current scope. Example:
483 * // This is the baseline
484 * BENCHMARK(insertVectorBegin, n) {
486 * FOR_EACH_RANGE (i, 0, n) {
487 * v.insert(v.begin(), 42);
491 * BENCHMARK_RELATIVE(insertListBegin, n) {
493 * FOR_EACH_RANGE (i, 0, n) {
494 * s.insert(s.begin(), 42);
498 * Any number of relative benchmark can be associated with a
499 * baseline. Another BENCHMARK() occurrence effectively establishes a
// The leading "%" in the registered name is what marks the benchmark as
// relative to the preceding baseline.
502 #define BENCHMARK_RELATIVE(name, ...) \
505 "%" FB_STRINGIZE(name), \
506 FB_ARG_2_OR_1(1, ## __VA_ARGS__), \
507 FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__), \
511 * Same as BENCHMARK_RELATIVE, but allows one to return the actual number
512 * of iterations that have been run.
514 #define BENCHMARK_RELATIVE_MULTI(name, ...) \
515 BENCHMARK_MULTI_IMPL( \
517 "%" FB_STRINGIZE(name), \
518 FB_ONE_OR_NONE(unsigned, ## __VA_ARGS__), \
522 * A combination of BENCHMARK_RELATIVE and BENCHMARK_PARAM.
524 #define BENCHMARK_RELATIVE_PARAM(name, param) \
525 BENCHMARK_RELATIVE_NAMED_PARAM(name, param, param)
528 * Same as BENCHMARK_RELATIVE_PARAM, but allows one to return the actual
529 * number of iterations that have been run.
531 #define BENCHMARK_RELATIVE_PARAM_MULTI(name, param) \
532 BENCHMARK_RELATIVE_NAMED_PARAM_MULTI(name, param, param)
535 * A combination of BENCHMARK_RELATIVE and BENCHMARK_NAMED_PARAM.
537 #define BENCHMARK_RELATIVE_NAMED_PARAM(name, param_name, ...) \
539 FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
540 "%" FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
544 name(iters, ## __VA_ARGS__); \
548 * Same as BENCHMARK_RELATIVE_NAMED_PARAM, but allows one to return the
549 * actual number of iterations that have been run.
551 #define BENCHMARK_RELATIVE_NAMED_PARAM_MULTI(name, param_name, ...) \
552 BENCHMARK_MULTI_IMPL( \
553 FB_CONCATENATE(name, FB_CONCATENATE(_, param_name)), \
554 "%" FB_STRINGIZE(name) "(" FB_STRINGIZE(param_name) ")", \
557 return name(iters, ## __VA_ARGS__); \
561 * Draws a line of dashes.
// Registered under the reserved name "-"; the runner renders it as a
// separator row, and the 0-iteration lambda is never timed meaningfully.
563 #define BENCHMARK_DRAW_LINE() \
564 static bool FB_ANONYMOUS_VARIABLE(follyBenchmarkUnused) = ( \
565 ::folly::addBenchmark(__FILE__, "-", []() -> unsigned { return 0; }), \
569 * Allows execution of code that doesn't count toward the benchmark's
570 * time budget. Example:
572 * BENCHMARK_START_GROUP(insertVectorBegin, n) {
574 * BENCHMARK_SUSPEND {
577 * FOR_EACH_RANGE (i, 0, n) {
578 * v.insert(v.begin(), 42);
// Creates a scoped BenchmarkSuspender inside an if-condition; the braces
// following the macro run with timing suspended. The empty {} plus `else`
// idiom (continuation not visible here) makes the user's block the else-arm,
// relying on BenchmarkSuspender's explicit operator bool.
582 #define BENCHMARK_SUSPEND \
583 if (auto FB_ANONYMOUS_VARIABLE(BENCHMARK_SUSPEND) = \
584 ::folly::BenchmarkSuspender()) {} \