// folly/test/SharedMutexTest.cpp — snapshot from folly.git (commit "Fix SimpleBarrier")
1 /*
2  * Copyright 2016 Facebook, Inc.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *   http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 #include <folly/SharedMutex.h>
18
#include <stdlib.h>

#include <atomic>
#include <thread>
#include <vector>
22
23 #include <boost/optional.hpp>
24 #include <boost/thread/shared_mutex.hpp>
25
26 #include <folly/Benchmark.h>
27 #include <folly/MPMCQueue.h>
28 #include <folly/RWSpinLock.h>
29 #include <folly/Random.h>
30 #include <folly/portability/GFlags.h>
31 #include <folly/portability/GTest.h>
32 #include <folly/test/DeterministicSchedule.h>
33
34 using namespace folly;
35 using namespace folly::test;
36 using namespace std;
37 using namespace chrono;
38
39 typedef DeterministicSchedule DSched;
40 typedef SharedMutexImpl<true, void, DeterministicAtomic, true>
41     DSharedMutexReadPriority;
42 typedef SharedMutexImpl<false, void, DeterministicAtomic, true>
43     DSharedMutexWritePriority;
44
45 template <typename Lock>
46 void runBasicTest() {
47   Lock lock;
48   SharedMutexToken token1;
49   SharedMutexToken token2;
50   SharedMutexToken token3;
51
52   EXPECT_TRUE(lock.try_lock());
53   EXPECT_FALSE(lock.try_lock());
54   EXPECT_FALSE(lock.try_lock_shared(token1));
55   lock.unlock();
56
57   EXPECT_TRUE(lock.try_lock_shared(token1));
58   EXPECT_FALSE(lock.try_lock());
59   EXPECT_TRUE(lock.try_lock_shared(token2));
60   lock.lock_shared(token3);
61   lock.unlock_shared(token3);
62   lock.unlock_shared(token2);
63   lock.unlock_shared(token1);
64
65   lock.lock();
66   lock.unlock();
67
68   lock.lock_shared(token1);
69   lock.lock_shared(token2);
70   lock.unlock_shared(token1);
71   lock.unlock_shared(token2);
72
73   lock.lock();
74   lock.unlock_and_lock_shared(token1);
75   lock.lock_shared(token2);
76   lock.unlock_shared(token2);
77   lock.unlock_shared(token1);
78 }
79
// Run the basic lock/unlock combinations against both priority policies.
TEST(SharedMutex, basic) {
  runBasicTest<SharedMutexReadPriority>();
  runBasicTest<SharedMutexWritePriority>();
}
84
85 template <typename Lock>
86 void runBasicHoldersTest() {
87   Lock lock;
88   SharedMutexToken token;
89
90   {
91     // create an exclusive write lock via holder
92     typename Lock::WriteHolder holder(lock);
93     EXPECT_FALSE(lock.try_lock());
94     EXPECT_FALSE(lock.try_lock_shared(token));
95
96     // move ownership to another write holder via move constructor
97     typename Lock::WriteHolder holder2(std::move(holder));
98     EXPECT_FALSE(lock.try_lock());
99     EXPECT_FALSE(lock.try_lock_shared(token));
100
101     // move ownership to another write holder via assign operator
102     typename Lock::WriteHolder holder3;
103     holder3 = std::move(holder2);
104     EXPECT_FALSE(lock.try_lock());
105     EXPECT_FALSE(lock.try_lock_shared(token));
106
107     // downgrade from exclusive to upgrade lock via move constructor
108     typename Lock::UpgradeHolder holder4(std::move(holder3));
109
110     // ensure we can lock from a shared source
111     EXPECT_FALSE(lock.try_lock());
112     EXPECT_TRUE(lock.try_lock_shared(token));
113     lock.unlock_shared(token);
114
115     // promote from upgrade to exclusive lock via move constructor
116     typename Lock::WriteHolder holder5(std::move(holder4));
117     EXPECT_FALSE(lock.try_lock());
118     EXPECT_FALSE(lock.try_lock_shared(token));
119
120     // downgrade exclusive to shared lock via move constructor
121     typename Lock::ReadHolder holder6(std::move(holder5));
122
123     // ensure we can lock from another shared source
124     EXPECT_FALSE(lock.try_lock());
125     EXPECT_TRUE(lock.try_lock_shared(token));
126     lock.unlock_shared(token);
127   }
128
129   {
130     typename Lock::WriteHolder holder(lock);
131     EXPECT_FALSE(lock.try_lock());
132   }
133
134   {
135     typename Lock::ReadHolder holder(lock);
136     typename Lock::ReadHolder holder2(lock);
137     typename Lock::UpgradeHolder holder3(lock);
138   }
139
140   {
141     typename Lock::UpgradeHolder holder(lock);
142     typename Lock::ReadHolder holder2(lock);
143     typename Lock::ReadHolder holder3(std::move(holder));
144   }
145 }
146
// Holder/conversion coverage for both priority policies.
TEST(SharedMutex, basic_holders) {
  runBasicHoldersTest<SharedMutexReadPriority>();
  runBasicHoldersTest<SharedMutexWritePriority>();
}
151
152 template <typename Lock>
153 void runManyReadLocksTestWithTokens() {
154   Lock lock;
155
156   vector<SharedMutexToken> tokens;
157   for (int i = 0; i < 1000; ++i) {
158     tokens.emplace_back();
159     EXPECT_TRUE(lock.try_lock_shared(tokens.back()));
160   }
161   for (auto& token : tokens) {
162     lock.unlock_shared(token);
163   }
164   EXPECT_TRUE(lock.try_lock());
165   lock.unlock();
166 }
167
// Deep shared-lock recursion (token interface) for both policies.
TEST(SharedMutex, many_read_locks_with_tokens) {
  runManyReadLocksTestWithTokens<SharedMutexReadPriority>();
  runManyReadLocksTestWithTokens<SharedMutexWritePriority>();
}
172
// Same as the token variant, but through the tokenless shared interface.
template <typename Lock>
void runManyReadLocksTestWithoutTokens() {
  constexpr int kReaders = 1000;
  Lock mu;

  for (int n = 0; n < kReaders; ++n) {
    EXPECT_TRUE(mu.try_lock_shared());
  }
  for (int n = 0; n < kReaders; ++n) {
    mu.unlock_shared();
  }
  EXPECT_TRUE(mu.try_lock());
  mu.unlock();
}
186
// Deep shared-lock recursion (tokenless interface) for both policies.
TEST(SharedMutex, many_read_locks_without_tokens) {
  runManyReadLocksTestWithoutTokens<SharedMutexReadPriority>();
  runManyReadLocksTestWithoutTokens<SharedMutexWritePriority>();
}
191
// Acquisitions with a zero, negative, or already-elapsed deadline must
// still succeed immediately when the lock is uncontended.
template <typename Lock>
void runTimeoutInPastTest() {
  Lock lock;

  EXPECT_TRUE(lock.try_lock_for(milliseconds(0)));
  lock.unlock();
  EXPECT_TRUE(lock.try_lock_for(milliseconds(-1)));
  lock.unlock();
  EXPECT_TRUE(lock.try_lock_shared_for(milliseconds(0)));
  lock.unlock_shared();
  EXPECT_TRUE(lock.try_lock_shared_for(milliseconds(-1)));
  lock.unlock_shared();
  // Deadline-based variants, against both supported clocks.
  EXPECT_TRUE(lock.try_lock_until(system_clock::now() - milliseconds(1)));
  lock.unlock();
  EXPECT_TRUE(
      lock.try_lock_shared_until(system_clock::now() - milliseconds(1)));
  lock.unlock_shared();
  EXPECT_TRUE(lock.try_lock_until(steady_clock::now() - milliseconds(1)));
  lock.unlock();
  EXPECT_TRUE(
      lock.try_lock_shared_until(steady_clock::now() - milliseconds(1)));
  lock.unlock_shared();
}
215
// Past-deadline acquisitions succeed on an uncontended lock, both policies.
TEST(SharedMutex, timeout_in_past) {
  runTimeoutInPastTest<SharedMutexReadPriority>();
  runTimeoutInPastTest<SharedMutexWritePriority>();
}
220
// Returns true if some invocation of func takes within +/-25% of
// expectedDuration; retries up to 100 times to ride out scheduler noise.
template <class Func>
bool funcHasDuration(milliseconds expectedDuration, Func func) {
  const auto lowerBound = expectedDuration - expectedDuration / 4;
  const auto upperBound = expectedDuration + expectedDuration / 4;
  int attempts = 100;
  while (attempts-- > 0) {
    const auto begin = steady_clock::now();
    func();
    const auto took = steady_clock::now() - begin;
    if (took > lowerBound && took < upperBound) {
      return true;
    }
  }
  return false;
}
235
// With the lock already held, every timed acquisition variant should fail,
// and each attempt should take approximately (within +/-25% of) its
// requested timeout.
template <typename Lock>
void runFailingTryTimeoutTest() {
  Lock lock;
  lock.lock();
  // Duration-based variants while exclusively held.
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    EXPECT_FALSE(lock.try_lock_for(milliseconds(10)));
  }));
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    typename Lock::Token token;
    EXPECT_FALSE(lock.try_lock_shared_for(milliseconds(10), token));
  }));
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    EXPECT_FALSE(lock.try_lock_upgrade_for(milliseconds(10)));
  }));
  // Deadline variants against steady_clock...
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    EXPECT_FALSE(lock.try_lock_until(steady_clock::now() + milliseconds(10)));
  }));
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    typename Lock::Token token;
    EXPECT_FALSE(lock.try_lock_shared_until(
        steady_clock::now() + milliseconds(10), token));
  }));
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    EXPECT_FALSE(
        lock.try_lock_upgrade_until(steady_clock::now() + milliseconds(10)));
  }));
  // ...and against system_clock.
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    EXPECT_FALSE(lock.try_lock_until(system_clock::now() + milliseconds(10)));
  }));
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    typename Lock::Token token;
    EXPECT_FALSE(lock.try_lock_shared_until(
        system_clock::now() + milliseconds(10), token));
  }));
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    EXPECT_FALSE(
        lock.try_lock_upgrade_until(system_clock::now() + milliseconds(10)));
  }));
  lock.unlock();

  // Shared ownership must likewise repel timed exclusive attempts.
  lock.lock_shared();
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    EXPECT_FALSE(lock.try_lock_for(milliseconds(10)));
  }));
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    EXPECT_FALSE(lock.try_lock_until(steady_clock::now() + milliseconds(10)));
  }));
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    EXPECT_FALSE(lock.try_lock_until(system_clock::now() + milliseconds(10)));
  }));
  lock.unlock_shared();

  // Nanosecond-scale timeouts should fail promptly rather than hang.
  lock.lock();
  for (int p = 0; p < 8; ++p) {
    EXPECT_FALSE(lock.try_lock_for(nanoseconds(1 << p)));
  }
  lock.unlock();

  // Same, while readers (via holders) keep the lock shared.
  for (int p = 0; p < 8; ++p) {
    typename Lock::ReadHolder holder1(lock);
    typename Lock::ReadHolder holder2(lock);
    typename Lock::ReadHolder holder3(lock);
    EXPECT_FALSE(lock.try_lock_for(nanoseconds(1 << p)));
  }
}
301
// Timed acquisitions honor their timeouts under contention, both policies.
TEST(SharedMutex, failing_try_timeout) {
  runFailingTryTimeoutTest<SharedMutexReadPriority>();
  runFailingTryTimeoutTest<SharedMutexWritePriority>();
}
306
// Upgrade locks coexist with readers, exclude writers, and convert to and
// from both exclusive and shared ownership.
template <typename Lock>
void runBasicUpgradeTest() {
  Lock mu;
  typename Lock::Token ta;
  typename Lock::Token tb;

  // Upgrade blocks writers but not readers.
  mu.lock_upgrade();
  EXPECT_FALSE(mu.try_lock());
  EXPECT_TRUE(mu.try_lock_shared(ta));
  mu.unlock_shared(ta);
  mu.unlock_upgrade();

  // Upgrade -> exclusive promotion excludes readers.
  mu.lock_upgrade();
  mu.unlock_upgrade_and_lock();
  EXPECT_FALSE(mu.try_lock_shared(ta));
  mu.unlock();

  // Upgrade -> shared conversion, performed twice back to back.
  mu.lock_upgrade();
  mu.unlock_upgrade_and_lock_shared(ta);
  mu.lock_upgrade();
  mu.unlock_upgrade_and_lock_shared(tb);
  mu.unlock_shared(ta);
  mu.unlock_shared(tb);

  // Exclusive -> upgrade downgrade re-admits readers.
  mu.lock();
  mu.unlock_and_lock_upgrade();
  EXPECT_TRUE(mu.try_lock_shared(ta));
  mu.unlock_upgrade();
  mu.unlock_shared(ta);
}
337
// Upgrade-lock semantics for both priority policies.
TEST(SharedMutex, basic_upgrade_tests) {
  runBasicUpgradeTest<SharedMutexReadPriority>();
  runBasicUpgradeTest<SharedMutexWritePriority>();
}
342
343 TEST(SharedMutex, read_has_prio) {
344   SharedMutexReadPriority lock;
345   SharedMutexToken token1;
346   SharedMutexToken token2;
347   lock.lock_shared(token1);
348   bool exclusiveAcquired = false;
349   auto writer = thread([&] {
350     lock.lock();
351     exclusiveAcquired = true;
352     lock.unlock();
353   });
354
355   // lock() can't complete until we unlock token1, but it should stake
356   // its claim with regards to other exclusive or upgrade locks.  We can
357   // use try_lock_upgrade to poll for that eventuality.
358   while (lock.try_lock_upgrade()) {
359     lock.unlock_upgrade();
360     this_thread::yield();
361   }
362   EXPECT_FALSE(exclusiveAcquired);
363
364   // Even though lock() is stuck we should be able to get token2
365   EXPECT_TRUE(lock.try_lock_shared(token2));
366   lock.unlock_shared(token1);
367   lock.unlock_shared(token2);
368   writer.join();
369   EXPECT_TRUE(exclusiveAcquired);
370 }
371
// Under write priority, a queued writer eventually blocks new readers.
TEST(SharedMutex, write_has_prio) {
  SharedMutexWritePriority lock;
  SharedMutexToken token1;
  SharedMutexToken token2;
  lock.lock_shared(token1);
  auto writer = thread([&] {
    lock.lock();
    lock.unlock();
  });

  // eventually lock() should block readers
  while (lock.try_lock_shared(token2)) {
    lock.unlock_shared(token2);
    this_thread::yield();
  }

  // releasing the original reader lets the writer proceed and finish
  lock.unlock_shared(token1);
  writer.join();
}
391
// Locker policy for the benchmark drivers below: shared operations go
// through a stored SharedMutexToken; exclusive operations pass straight
// through to the lock.
struct TokenLocker {
  SharedMutexToken token;

  template <typename T>
  void lock(T* lock) {
    lock->lock();
  }

  template <typename T>
  void unlock(T* lock) {
    lock->unlock();
  }

  template <typename T>
  void lock_shared(T* lock) {
    lock->lock_shared(token);
  }

  template <typename T>
  void unlock_shared(T* lock) {
    lock->unlock_shared(token);
  }
};
415
// Locker policy that forwards to the lock's tokenless interface; works with
// any type exposing lock/unlock/lock_shared/unlock_shared.
struct Locker {
  template <typename T>
  void lock(T* lock) {
    lock->lock();
  }

  template <typename T>
  void unlock(T* lock) {
    lock->unlock();
  }

  template <typename T>
  void lock_shared(T* lock) {
    lock->lock_shared();
  }

  template <typename T>
  void unlock_shared(T* lock) {
    lock->unlock_shared();
  }
};
437
// Locker policy for lock types exposing lock(int)/enter(int)/leave().
// NOTE(review): the 0 argument's meaning isn't visible here — presumably a
// slot or timeout parameter of the adapted lock type; confirm against the
// lock types instantiated with this policy.
struct EnterLocker {
  template <typename T>
  void lock(T* lock) {
    lock->lock(0);
  }

  template <typename T>
  void unlock(T* lock) {
    lock->unlock();
  }

  template <typename T>
  void lock_shared(T* lock) {
    lock->enter(0);
  }

  template <typename T>
  void unlock_shared(T* lock) {
    lock->leave();
  }
};
459
// RAII wrapper exposing pthread_rwlock_t through the benchmark's
// lock/unlock + lock_shared/unlock_shared interface.
struct PosixRWLock {
  pthread_rwlock_t lock_;

  PosixRWLock() { pthread_rwlock_init(&lock_, nullptr); }

  ~PosixRWLock() { pthread_rwlock_destroy(&lock_); }

  // Copying would alias the native handle and double-destroy it.
  PosixRWLock(const PosixRWLock&) = delete;
  PosixRWLock& operator=(const PosixRWLock&) = delete;

  void lock() { pthread_rwlock_wrlock(&lock_); }

  void unlock() { pthread_rwlock_unlock(&lock_); }

  void lock_shared() { pthread_rwlock_rdlock(&lock_); }

  void unlock_shared() { pthread_rwlock_unlock(&lock_); }
};
475
// Baseline competitor: a plain pthread mutex where the "shared" operations
// simply take the exclusive lock (readers serialize).
struct PosixMutex {
  pthread_mutex_t lock_;

  PosixMutex() { pthread_mutex_init(&lock_, nullptr); }

  ~PosixMutex() { pthread_mutex_destroy(&lock_); }

  // Copying would alias the native handle and double-destroy it.
  PosixMutex(const PosixMutex&) = delete;
  PosixMutex& operator=(const PosixMutex&) = delete;

  void lock() { pthread_mutex_lock(&lock_); }

  void unlock() { pthread_mutex_unlock(&lock_); }

  void lock_shared() { pthread_mutex_lock(&lock_); }

  void unlock_shared() { pthread_mutex_unlock(&lock_); }
};
491
// Benchmark/test driver: numThreads readers repeatedly take the shared
// lock and read one protected int.  If useSeparateLocks, each thread uses
// a thread-private lock (no contention); otherwise all contend on
// globalLock.  Thread setup runs under BENCHMARK_SUSPEND so it is
// excluded from benchmark timing; the go flag releases all threads at
// once so the timed region measures steady-state contention.
template <template <typename> class Atom, typename Lock, typename Locker>
static void runContendedReaders(size_t numOps,
                                size_t numThreads,
                                bool useSeparateLocks) {
  char padding1[64]; // isolate the lock and protected value from
  (void)padding1;    // neighboring stack data (cache-line separation)
  Lock globalLock;
  int valueProtectedByLock = 10;
  char padding2[64];
  (void)padding2;
  Atom<bool> go(false);
  Atom<bool>* goPtr = &go; // workaround for clang bug
  vector<thread> threads(numThreads);

  BENCHMARK_SUSPEND {
    for (size_t t = 0; t < numThreads; ++t) {
      threads[t] = DSched::thread([&, t, numThreads] {
        Lock privateLock;
        Lock* lock = useSeparateLocks ? &privateLock : &globalLock;
        Locker locker;
        // spin until all threads are launched and released together
        while (!goPtr->load()) {
          this_thread::yield();
        }
        // strided partition: thread t handles ops t, t+numThreads, ...
        for (size_t op = t; op < numOps; op += numThreads) {
          locker.lock_shared(lock);
          // note: folly::doNotOptimizeAway reads and writes to its arg,
          // so the following two lines are very different than a call
          // to folly::doNotOptimizeAway(valueProtectedByLock);
          auto copy = valueProtectedByLock;
          folly::doNotOptimizeAway(copy);
          locker.unlock_shared(lock);
        }
      });
    }
  }

  go.store(true);
  for (auto& thr : threads) {
    DSched::join(thr);
  }
}
533
// Reader-contention benchmark: folly::RWSpinLock.
static void folly_rwspin_reads(uint32_t numOps,
                               size_t numThreads,
                               bool useSeparateLocks) {
  runContendedReaders<atomic, RWSpinLock, Locker>(
      numOps, numThreads, useSeparateLocks);
}
540
// Reader-contention benchmark: write-priority SharedMutex, token interface.
static void shmtx_wr_pri_reads(uint32_t numOps,
                               size_t numThreads,
                               bool useSeparateLocks) {
  runContendedReaders<atomic, SharedMutexWritePriority, TokenLocker>(
      numOps, numThreads, useSeparateLocks);
}
547
// Reader-contention benchmark: write-priority SharedMutex, tokenless.
static void shmtx_w_bare_reads(uint32_t numOps,
                               size_t numThreads,
                               bool useSeparateLocks) {
  runContendedReaders<atomic, SharedMutexWritePriority, Locker>(
      numOps, numThreads, useSeparateLocks);
}
554
// Reader-contention benchmark: read-priority SharedMutex, token interface.
static void shmtx_rd_pri_reads(uint32_t numOps,
                               size_t numThreads,
                               bool useSeparateLocks) {
  runContendedReaders<atomic, SharedMutexReadPriority, TokenLocker>(
      numOps, numThreads, useSeparateLocks);
}
561
// Reader-contention benchmark: read-priority SharedMutex, tokenless.
static void shmtx_r_bare_reads(uint32_t numOps,
                               size_t numThreads,
                               bool useSeparateLocks) {
  runContendedReaders<atomic, SharedMutexReadPriority, Locker>(
      numOps, numThreads, useSeparateLocks);
}
568
// Reader-contention benchmark: folly::RWTicketSpinLock64.
static void folly_ticket_reads(uint32_t numOps,
                               size_t numThreads,
                               bool useSeparateLocks) {
  runContendedReaders<atomic, RWTicketSpinLock64, Locker>(
      numOps, numThreads, useSeparateLocks);
}
575
// Reader-contention benchmark: boost::shared_mutex.
static void boost_shared_reads(uint32_t numOps,
                               size_t numThreads,
                               bool useSeparateLocks) {
  runContendedReaders<atomic, boost::shared_mutex, Locker>(
      numOps, numThreads, useSeparateLocks);
}
582
// Reader-contention benchmark: pthread_rwlock_t.
static void pthrd_rwlock_reads(uint32_t numOps,
                               size_t numThreads,
                               bool useSeparateLocks) {
  runContendedReaders<atomic, PosixRWLock, Locker>(
      numOps, numThreads, useSeparateLocks);
}
589
// Benchmark/test driver for a mixed reader/writer workload: threads split
// numOps between them, each op independently chosen to be a write with
// probability writeFraction using a per-thread reentrant PRNG (seeded by
// thread index, so the sequence is deterministic).  useSeparateLocks
// substitutes uncontended thread-private locks, as in runContendedReaders.
template <template <typename> class Atom, typename Lock, typename Locker>
static void runMixed(size_t numOps,
                     size_t numThreads,
                     double writeFraction,
                     bool useSeparateLocks) {
  char padding1[64]; // isolate lock + protected value from neighbors
  (void)padding1;
  Lock globalLock;
  int valueProtectedByLock = 0;
  char padding2[64];
  (void)padding2;
  Atom<bool> go(false);
  Atom<bool>* goPtr = &go; // workaround for clang bug
  vector<thread> threads(numThreads);

  BENCHMARK_SUSPEND {
    for (size_t t = 0; t < numThreads; ++t) {
      threads[t] = DSched::thread([&, t, numThreads] {
        struct drand48_data buffer;
        srand48_r(t, &buffer);
        // lrand48_r yields values in [0, 2^31); scale the fraction to match
        long writeThreshold = writeFraction * 0x7fffffff;
        Lock privateLock;
        Lock* lock = useSeparateLocks ? &privateLock : &globalLock;
        Locker locker;
        // spin until all threads are launched and released together
        while (!goPtr->load()) {
          this_thread::yield();
        }
        for (size_t op = t; op < numOps; op += numThreads) {
          long randVal;
          lrand48_r(&buffer, &randVal);
          bool writeOp = randVal < writeThreshold;
          if (writeOp) {
            locker.lock(lock);
            // only mutate when all threads share the same lock
            if (!useSeparateLocks) {
              ++valueProtectedByLock;
            }
            locker.unlock(lock);
          } else {
            locker.lock_shared(lock);
            auto v = valueProtectedByLock;
            folly::doNotOptimizeAway(v);
            locker.unlock_shared(lock);
          }
        }
      });
    }
  }

  go.store(true);
  for (auto& thr : threads) {
    DSched::join(thr);
  }
}
643
// Mixed read/write benchmark: folly::RWSpinLock.
static void folly_rwspin(size_t numOps,
                         size_t numThreads,
                         double writeFraction,
                         bool useSeparateLocks) {
  runMixed<atomic, RWSpinLock, Locker>(
      numOps, numThreads, writeFraction, useSeparateLocks);
}
651
// Mixed read/write benchmark: write-priority SharedMutex, token interface.
static void shmtx_wr_pri(uint32_t numOps,
                         size_t numThreads,
                         double writeFraction,
                         bool useSeparateLocks) {
  runMixed<atomic, SharedMutexWritePriority, TokenLocker>(
      numOps, numThreads, writeFraction, useSeparateLocks);
}
659
// Mixed read/write benchmark: write-priority SharedMutex, tokenless.
static void shmtx_w_bare(uint32_t numOps,
                         size_t numThreads,
                         double writeFraction,
                         bool useSeparateLocks) {
  runMixed<atomic, SharedMutexWritePriority, Locker>(
      numOps, numThreads, writeFraction, useSeparateLocks);
}
667
// Mixed read/write benchmark: read-priority SharedMutex, token interface.
static void shmtx_rd_pri(uint32_t numOps,
                         size_t numThreads,
                         double writeFraction,
                         bool useSeparateLocks) {
  runMixed<atomic, SharedMutexReadPriority, TokenLocker>(
      numOps, numThreads, writeFraction, useSeparateLocks);
}
675
// Mixed read/write benchmark: read-priority SharedMutex, tokenless.
static void shmtx_r_bare(uint32_t numOps,
                         size_t numThreads,
                         double writeFraction,
                         bool useSeparateLocks) {
  runMixed<atomic, SharedMutexReadPriority, Locker>(
      numOps, numThreads, writeFraction, useSeparateLocks);
}
683
// Mixed read/write benchmark: folly::RWTicketSpinLock64.
static void folly_ticket(size_t numOps,
                         size_t numThreads,
                         double writeFraction,
                         bool useSeparateLocks) {
  runMixed<atomic, RWTicketSpinLock64, Locker>(
      numOps, numThreads, writeFraction, useSeparateLocks);
}
691
// Mixed read/write benchmark: boost::shared_mutex.
static void boost_shared(size_t numOps,
                         size_t numThreads,
                         double writeFraction,
                         bool useSeparateLocks) {
  runMixed<atomic, boost::shared_mutex, Locker>(
      numOps, numThreads, writeFraction, useSeparateLocks);
}
699
// Mixed read/write benchmark: pthread_rwlock_t.
static void pthrd_rwlock(size_t numOps,
                         size_t numThreads,
                         double writeFraction,
                         bool useSeparateLocks) {
  runMixed<atomic, PosixRWLock, Locker>(
      numOps, numThreads, writeFraction, useSeparateLocks);
}
707
// Mixed read/write benchmark: plain pthread mutex (readers serialize too).
static void pthrd_mutex_(size_t numOps,
                         size_t numThreads,
                         double writeFraction,
                         bool useSeparateLocks) {
  runMixed<atomic, PosixMutex, Locker>(
      numOps, numThreads, writeFraction, useSeparateLocks);
}
715
// Randomized stress test: each thread walks a state machine of lock modes
// (none/shared/upgrade/exclusive) driven by a per-thread deterministic
// PRNG, while shared atomic counters track how many holders of each mode
// exist.  Invariants (at most one exclusive, one upgrade, exclusive
// excludes everything) are asserted on every iteration.
template <typename Lock, template <typename> class Atom>
static void runAllAndValidate(size_t numOps, size_t numThreads) {
  Lock globalLock;
  Atom<int> globalExclusiveCount(0);
  Atom<int> globalUpgradeCount(0);
  Atom<int> globalSharedCount(0);

  Atom<bool> go(false);

  // clang crashes on access to Atom<> captured by ref in closure
  Atom<int>* globalExclusiveCountPtr = &globalExclusiveCount;
  Atom<int>* globalUpgradeCountPtr = &globalUpgradeCount;
  Atom<int>* globalSharedCountPtr = &globalSharedCount;
  Atom<bool>* goPtr = &go;

  vector<thread> threads(numThreads);

  BENCHMARK_SUSPEND {
    for (size_t t = 0; t < numThreads; ++t) {
      threads[t] = DSched::thread([&, t, numThreads] {
        struct drand48_data buffer;
        srand48_r(t, &buffer);

        // this thread's current mode on globalLock (at most one is true)
        bool exclusive = false;
        bool upgrade = false;
        bool shared = false;
        bool ourGlobalTokenUsed = false;
        SharedMutexToken ourGlobalToken;

        Lock privateLock;
        vector<SharedMutexToken> privateTokens;

        while (!goPtr->load()) {
          this_thread::yield();
        }
        for (size_t op = t; op < numOps; op += numThreads) {
          // randVal in [0,1000)
          long randVal;
          lrand48_r(&buffer, &randVal);
          randVal = (long)((randVal * (uint64_t)1000) / 0x7fffffff);

          // make as many assertions as possible about the global state
          if (exclusive) {
            EXPECT_EQ(1, globalExclusiveCountPtr->load(memory_order_acquire));
            EXPECT_EQ(0, globalUpgradeCountPtr->load(memory_order_acquire));
            EXPECT_EQ(0, globalSharedCountPtr->load(memory_order_acquire));
          }
          if (upgrade) {
            EXPECT_EQ(0, globalExclusiveCountPtr->load(memory_order_acquire));
            EXPECT_EQ(1, globalUpgradeCountPtr->load(memory_order_acquire));
          }
          if (shared) {
            EXPECT_EQ(0, globalExclusiveCountPtr->load(memory_order_acquire));
            EXPECT_TRUE(globalSharedCountPtr->load(memory_order_acquire) > 0);
          } else {
            EXPECT_FALSE(ourGlobalTokenUsed);
          }

          // independent 20% chance we do something to the private lock
          if (randVal < 200) {
            // it's okay to take multiple private shared locks because
            // we never take an exclusive lock, so reader versus writer
            // priority doesn't cause deadlocks
            if (randVal < 100 && privateTokens.size() > 0) {
              auto i = randVal % privateTokens.size();
              privateLock.unlock_shared(privateTokens[i]);
              privateTokens.erase(privateTokens.begin() + i);
            } else {
              SharedMutexToken token;
              privateLock.lock_shared(token);
              privateTokens.push_back(token);
            }
            continue;
          }

          // if we've got a lock, the only thing we can do is release it
          // or transform it into a different kind of lock
          if (exclusive) {
            exclusive = false;
            --*globalExclusiveCountPtr;
            if (randVal < 500) {
              globalLock.unlock();
            } else if (randVal < 700) {
              globalLock.unlock_and_lock_shared();
              ++*globalSharedCountPtr;
              shared = true;
            } else if (randVal < 900) {
              globalLock.unlock_and_lock_shared(ourGlobalToken);
              ++*globalSharedCountPtr;
              shared = true;
              ourGlobalTokenUsed = true;
            } else {
              globalLock.unlock_and_lock_upgrade();
              ++*globalUpgradeCountPtr;
              upgrade = true;
            }
          } else if (upgrade) {
            upgrade = false;
            --*globalUpgradeCountPtr;
            if (randVal < 500) {
              globalLock.unlock_upgrade();
            } else if (randVal < 700) {
              globalLock.unlock_upgrade_and_lock_shared();
              ++*globalSharedCountPtr;
              shared = true;
            } else if (randVal < 900) {
              globalLock.unlock_upgrade_and_lock_shared(ourGlobalToken);
              ++*globalSharedCountPtr;
              shared = true;
              ourGlobalTokenUsed = true;
            } else {
              globalLock.unlock_upgrade_and_lock();
              ++*globalExclusiveCountPtr;
              exclusive = true;
            }
          } else if (shared) {
            shared = false;
            --*globalSharedCountPtr;
            if (ourGlobalTokenUsed) {
              globalLock.unlock_shared(ourGlobalToken);
              ourGlobalTokenUsed = false;
            } else {
              globalLock.unlock_shared();
            }
          } else if (randVal < 400) {
            // 40% chance of shared lock with token, 5 ways to get it
            // NOTE(review): randVal is in [200,400) here, so the inner
            // `randVal < 400` test below is always true and the try_lock
            // variants are dead code — confirm intended thresholds.

            // delta t goes from -1 millis to 7 millis
            auto dt = microseconds(10 * (randVal - 100));

            if (randVal < 400) {
              globalLock.lock_shared(ourGlobalToken);
              shared = true;
            } else if (randVal < 500) {
              shared = globalLock.try_lock_shared(ourGlobalToken);
            } else if (randVal < 600) {
              shared = globalLock.try_lock_shared_for(dt, ourGlobalToken);
            } else if (randVal < 800) {
              shared = globalLock.try_lock_shared_until(
                  system_clock::now() + dt, ourGlobalToken);
            }
            if (shared) {
              ourGlobalTokenUsed = true;
              ++*globalSharedCountPtr;
            }
          } else if (randVal < 800) {
            // 40% chance of shared lock without token
            // NOTE(review): randVal is in [400,800) here, so the
            // `randVal < 400` blocking lock_shared() branch is dead code.
            auto dt = microseconds(10 * (randVal - 100));
            if (randVal < 400) {
              globalLock.lock_shared();
              shared = true;
            } else if (randVal < 500) {
              shared = globalLock.try_lock_shared();
            } else if (randVal < 600) {
              shared = globalLock.try_lock_shared_for(dt);
            } else if (randVal < 800) {
              shared = globalLock.try_lock_shared_until(
                  system_clock::now() + dt);
            }
            if (shared) {
              ++*globalSharedCountPtr;
            }
          } else if (randVal < 900) {
            // 10% change of upgrade lock
            globalLock.lock_upgrade();
            upgrade = true;
            ++*globalUpgradeCountPtr;
          } else {
            // 10% chance of exclusive lock, 5 ways to get it
            // NOTE(review): randVal is in [900,1000) here, so only the
            // final system_clock try_lock_until branch can execute — the
            // other four acquisition paths are dead code.

            // delta t goes from -1 millis to 9 millis
            auto dt = microseconds(100 * (randVal - 910));

            if (randVal < 400) {
              globalLock.lock();
              exclusive = true;
            } else if (randVal < 500) {
              exclusive = globalLock.try_lock();
            } else if (randVal < 600) {
              exclusive = globalLock.try_lock_for(dt);
            } else if (randVal < 700) {
              exclusive = globalLock.try_lock_until(steady_clock::now() + dt);
            } else {
              exclusive = globalLock.try_lock_until(system_clock::now() + dt);
            }
            if (exclusive) {
              ++*globalExclusiveCountPtr;
            }
          }
        }

        // release whatever mode we ended the loop holding
        if (exclusive) {
          --*globalExclusiveCountPtr;
          globalLock.unlock();
        }
        if (upgrade) {
          --*globalUpgradeCountPtr;
          globalLock.unlock_upgrade();
        }
        if (shared) {
          --*globalSharedCountPtr;
          if (ourGlobalTokenUsed) {
            globalLock.unlock_shared(ourGlobalToken);
            ourGlobalTokenUsed = false;
          } else {
            globalLock.unlock_shared();
          }
        }
        for (auto& token : privateTokens) {
          privateLock.unlock_shared(token);
        }
      });
    }
  }

  go.store(true);
  for (auto& thr : threads) {
    DSched::join(thr);
  }
}
936
937 TEST(SharedMutex, deterministic_concurrent_readers_of_one_lock_read_prio) {
938   for (int pass = 0; pass < 3; ++pass) {
939     DSched sched(DSched::uniform(pass));
940     runContendedReaders<DeterministicAtomic,
941                         DSharedMutexReadPriority,
942                         Locker>(1000, 3, false);
943   }
944 }
945
946 TEST(SharedMutex, deterministic_concurrent_readers_of_one_lock_write_prio) {
947   for (int pass = 0; pass < 3; ++pass) {
948     DSched sched(DSched::uniform(pass));
949     runContendedReaders<DeterministicAtomic,
950                         DSharedMutexWritePriority,
951                         Locker>(1000, 3, false);
952   }
953 }
954
955 TEST(SharedMutex, concurrent_readers_of_one_lock_read_prio) {
956   for (int pass = 0; pass < 10; ++pass) {
957     runContendedReaders<atomic, SharedMutexReadPriority, Locker>(
958         100000, 32, false);
959   }
960 }
961
962 TEST(SharedMutex, concurrent_readers_of_one_lock_write_prio) {
963   for (int pass = 0; pass < 10; ++pass) {
964     runContendedReaders<atomic, SharedMutexWritePriority, Locker>(
965         100000, 32, false);
966   }
967 }
968
969 TEST(SharedMutex, deterministic_readers_of_concurrent_locks_read_prio) {
970   for (int pass = 0; pass < 3; ++pass) {
971     DSched sched(DSched::uniform(pass));
972     runContendedReaders<DeterministicAtomic,
973                         DSharedMutexReadPriority,
974                         Locker>(1000, 3, true);
975   }
976 }
977
978 TEST(SharedMutex, deterministic_readers_of_concurrent_locks_write_prio) {
979   for (int pass = 0; pass < 3; ++pass) {
980     DSched sched(DSched::uniform(pass));
981     runContendedReaders<DeterministicAtomic,
982                         DSharedMutexWritePriority,
983                         Locker>(1000, 3, true);
984   }
985 }
986
987 TEST(SharedMutex, readers_of_concurrent_locks_read_prio) {
988   for (int pass = 0; pass < 10; ++pass) {
989     runContendedReaders<atomic, SharedMutexReadPriority, TokenLocker>(
990         100000, 32, true);
991   }
992 }
993
994 TEST(SharedMutex, readers_of_concurrent_locks_write_prio) {
995   for (int pass = 0; pass < 10; ++pass) {
996     runContendedReaders<atomic, SharedMutexWritePriority, TokenLocker>(
997         100000, 32, true);
998   }
999 }
1000
1001 TEST(SharedMutex, deterministic_mixed_mostly_read_read_prio) {
1002   for (int pass = 0; pass < 3; ++pass) {
1003     DSched sched(DSched::uniform(pass));
1004     runMixed<DeterministicAtomic, DSharedMutexReadPriority, Locker>(
1005         1000, 3, 0.1, false);
1006   }
1007 }
1008
1009 TEST(SharedMutex, deterministic_mixed_mostly_read_write_prio) {
1010   for (int pass = 0; pass < 3; ++pass) {
1011     DSched sched(DSched::uniform(pass));
1012     runMixed<DeterministicAtomic, DSharedMutexWritePriority, Locker>(
1013         1000, 3, 0.1, false);
1014   }
1015 }
1016
1017 TEST(SharedMutex, mixed_mostly_read_read_prio) {
1018   for (int pass = 0; pass < 5; ++pass) {
1019     runMixed<atomic, SharedMutexReadPriority, TokenLocker>(
1020         10000, 32, 0.1, false);
1021   }
1022 }
1023
1024 TEST(SharedMutex, mixed_mostly_read_write_prio) {
1025   for (int pass = 0; pass < 5; ++pass) {
1026     runMixed<atomic, SharedMutexWritePriority, TokenLocker>(
1027         10000, 32, 0.1, false);
1028   }
1029 }
1030
1031 TEST(SharedMutex, deterministic_mixed_mostly_write_read_prio) {
1032   for (int pass = 0; pass < 1; ++pass) {
1033     DSched sched(DSched::uniform(pass));
1034     runMixed<DeterministicAtomic, DSharedMutexReadPriority, TokenLocker>(
1035         1000, 10, 0.9, false);
1036   }
1037 }
1038
1039 TEST(SharedMutex, deterministic_mixed_mostly_write_write_prio) {
1040   for (int pass = 0; pass < 1; ++pass) {
1041     DSched sched(DSched::uniform(pass));
1042     runMixed<DeterministicAtomic, DSharedMutexWritePriority, TokenLocker>(
1043         1000, 10, 0.9, false);
1044   }
1045 }
1046
1047 TEST(SharedMutex, deterministic_lost_wakeup_write_prio) {
1048   for (int pass = 0; pass < 10; ++pass) {
1049     DSched sched(DSched::uniformSubset(pass, 2, 200));
1050     runMixed<DeterministicAtomic, DSharedMutexWritePriority, TokenLocker>(
1051         1000, 3, 1.0, false);
1052   }
1053 }
1054
1055 TEST(SharedMutex, mixed_mostly_write_read_prio) {
1056   for (int pass = 0; pass < 5; ++pass) {
1057     runMixed<atomic, SharedMutexReadPriority, TokenLocker>(
1058         50000, 300, 0.9, false);
1059   }
1060 }
1061
1062 TEST(SharedMutex, mixed_mostly_write_write_prio) {
1063   for (int pass = 0; pass < 5; ++pass) {
1064     runMixed<atomic, SharedMutexWritePriority, TokenLocker>(
1065         50000, 300, 0.9, false);
1066   }
1067 }
1068
1069 TEST(SharedMutex, deterministic_all_ops_read_prio) {
1070   for (int pass = 0; pass < 5; ++pass) {
1071     DSched sched(DSched::uniform(pass));
1072     runAllAndValidate<DSharedMutexReadPriority, DeterministicAtomic>(1000, 8);
1073   }
1074 }
1075
1076 TEST(SharedMutex, deterministic_all_ops_write_prio) {
1077   for (int pass = 0; pass < 5; ++pass) {
1078     DSched sched(DSched::uniform(pass));
1079     runAllAndValidate<DSharedMutexWritePriority, DeterministicAtomic>(1000, 8);
1080   }
1081 }
1082
1083 TEST(SharedMutex, all_ops_read_prio) {
1084   for (int pass = 0; pass < 5; ++pass) {
1085     runAllAndValidate<SharedMutexReadPriority, atomic>(100000, 32);
1086   }
1087 }
1088
1089 TEST(SharedMutex, all_ops_write_prio) {
1090   for (int pass = 0; pass < 5; ++pass) {
1091     runAllAndValidate<SharedMutexWritePriority, atomic>(100000, 32);
1092   }
1093 }
1094
// Mark the queue element type as relocatable for folly containers
// (it is the value_type of the MPMCQueue used in runRemoteUnlock below).
FOLLY_ASSUME_FBVECTOR_COMPATIBLE(
    boost::optional<boost::optional<SharedMutexToken>>)
1097
// Setup is a set of threads that either grab a shared lock, or exclusive
// and then downgrade it, or upgrade then upgrade and downgrade, then
// enqueue the shared lock to a second set of threads that just perform
// unlocks.  Half of the shared locks use tokens, the others don't.
template <typename Lock, template <typename> class Atom>
static void runRemoteUnlock(size_t numOps,
                            double preWriteFraction,
                            double preUpgradeFraction,
                            size_t numSendingThreads,
                            size_t numReceivingThreads) {
  Lock globalLock;
  // Element is doubly-optional: the outer layer (added implicitly by
  // blockingWrite) is boost::none only for the EOF sentinel; the inner
  // optional distinguishes token-based shared locks from plain ones.
  MPMCQueue<boost::optional<boost::optional<SharedMutexToken>>, Atom>
    queue(10);
  auto queuePtr = &queue; // workaround for clang crash

  // Start flag: senders spin until all threads have been created.
  Atom<bool> go(false);
  auto goPtr = &go; // workaround for clang crash
  // Senders still running; the last one to finish enqueues the EOF.
  Atom<int> pendingSenders(numSendingThreads);
  auto pendingSendersPtr = &pendingSenders; // workaround for clang crash
  vector<thread> threads(numSendingThreads + numReceivingThreads);

  BENCHMARK_SUSPEND {
    for (size_t t = 0; t < threads.size(); ++t) {
      threads[t] = DSched::thread([&, t, numSendingThreads] {
        if (t >= numSendingThreads) {
          // we're a receiver
          typename decltype(queue)::value_type elem;
          while (true) {
            queuePtr->blockingRead(elem);
            if (!elem) {
              // EOF, pass the EOF token
              queuePtr->blockingWrite(std::move(elem));
              break;
            }
            if (*elem) {
              // token-based shared hold acquired by some sender
              globalLock.unlock_shared(**elem);
            } else {
              globalLock.unlock_shared();
            }
          }
          return;
        }
        // else we're a sender

        // reentrant PRNG seeded with the thread index for reproducibility
        struct drand48_data buffer;
        srand48_r(t, &buffer);

        while (!goPtr->load()) {
          this_thread::yield();
        }
        // ops are strided across senders so each op index is done once
        for (size_t op = t; op < numOps; op += numSendingThreads) {
          long unscaledRandVal;
          lrand48_r(&buffer, &unscaledRandVal);

          // randVal in [0,1]
          double randVal = ((double)unscaledRandVal) / 0x7fffffff;

          // extract a bit and rescale
          bool useToken = randVal >= 0.5;
          randVal = (randVal - (useToken ? 0.5 : 0.0)) * 2;

          boost::optional<SharedMutexToken> maybeToken;

          if (useToken) {
            SharedMutexToken token;
            if (randVal < preWriteFraction) {
              // exclusive first, then downgrade to shared
              globalLock.lock();
              globalLock.unlock_and_lock_shared(token);
            } else if (randVal < preWriteFraction + preUpgradeFraction / 2) {
              // upgrade first, then downgrade to shared
              globalLock.lock_upgrade();
              globalLock.unlock_upgrade_and_lock_shared(token);
            } else if (randVal < preWriteFraction + preUpgradeFraction) {
              // upgrade -> exclusive -> shared
              globalLock.lock_upgrade();
              globalLock.unlock_upgrade_and_lock();
              globalLock.unlock_and_lock_shared(token);
            } else {
              globalLock.lock_shared(token);
            }
            maybeToken = token;
          } else {
            // same four acquisition paths, without a token
            if (randVal < preWriteFraction) {
              globalLock.lock();
              globalLock.unlock_and_lock_shared();
            } else if (randVal < preWriteFraction + preUpgradeFraction / 2) {
              globalLock.lock_upgrade();
              globalLock.unlock_upgrade_and_lock_shared();
            } else if (randVal < preWriteFraction + preUpgradeFraction) {
              globalLock.lock_upgrade();
              globalLock.unlock_upgrade_and_lock();
              globalLock.unlock_and_lock_shared();
            } else {
              globalLock.lock_shared();
            }
          }

          // blockingWrite is emplace-like, so this automatically adds
          // another level of wrapping
          queuePtr->blockingWrite(maybeToken);
        }
        if (--*pendingSendersPtr == 0) {
          queuePtr->blockingWrite(boost::none);
        }
      });
    }
  }

  go.store(true);
  for (auto& thr : threads) {
    DSched::join(thr);
  }
}
1209
1210 TEST(SharedMutex, deterministic_remote_write_prio) {
1211   for (int pass = 0; pass < 1; ++pass) {
1212     DSched sched(DSched::uniform(pass));
1213     runRemoteUnlock<DSharedMutexWritePriority, DeterministicAtomic>(
1214         500, 0.1, 0.1, 5, 5);
1215   }
1216 }
1217
1218 TEST(SharedMutex, deterministic_remote_read_prio) {
1219   for (int pass = 0; pass < 1; ++pass) {
1220     DSched sched(DSched::uniform(pass));
1221     runRemoteUnlock<DSharedMutexReadPriority, DeterministicAtomic>(
1222         500, 0.1, 0.1, 5, 5);
1223   }
1224 }
1225
1226 TEST(SharedMutex, remote_write_prio) {
1227   for (int pass = 0; pass < 10; ++pass) {
1228     runRemoteUnlock<SharedMutexWritePriority, atomic>(100000, 0.1, 0.1, 5, 5);
1229   }
1230 }
1231
1232 TEST(SharedMutex, remote_read_prio) {
1233   for (int pass = 0; pass < 100; ++pass) {
1234     runRemoteUnlock<SharedMutexReadPriority, atomic>(100000, 0.1, 0.1, 5, 5);
1235   }
1236 }
1237
1238 static void burn(size_t n) {
1239   for (size_t i = 0; i < n; ++i) {
1240     folly::doNotOptimizeAway(i);
1241   }
1242 }
1243
// Two threads and three locks, arranged so that they have to proceed
// in turn with reader/writer conflict
template <typename Lock, template <typename> class Atom = atomic>
static void runPingPong(size_t numRounds, size_t burnCount) {
  // The char[56] padding (around and inside the array) presumably keeps
  // each lock away from its neighbors' cache lines — TODO confirm intent.
  char padding1[56];
  (void)padding1;
  pair<Lock, char[56]> locks[3];
  char padding2[56];
  (void)padding2;

  // avail counts ready threads; go releases them simultaneously.
  Atom<int> avail(0);
  auto availPtr = &avail; // workaround for clang crash
  Atom<bool> go(false);
  auto goPtr = &go; // workaround for clang crash
  vector<thread> threads(2);

  // Initial state: locks 0 and 1 held exclusively, lock 2 held shared.
  locks[0].first.lock();
  locks[1].first.lock();
  locks[2].first.lock_shared();

  BENCHMARK_SUSPEND {
    // Writer: each round releases lock i%3 (which the reader takes
    // shared) and then acquires lock (i+2)%3 exclusively.
    threads[0] = DSched::thread([&] {
      ++*availPtr;
      while (!goPtr->load()) {
        this_thread::yield();
      }
      for (size_t i = 0; i < numRounds; ++i) {
        locks[i % 3].first.unlock();
        locks[(i + 2) % 3].first.lock();
        burn(burnCount);
      }
    });
    // Reader: each round takes shared the lock the writer released and
    // gives up the shared hold the writer is waiting for.
    threads[1] = DSched::thread([&] {
      ++*availPtr;
      while (!goPtr->load()) {
        this_thread::yield();
      }
      for (size_t i = 0; i < numRounds; ++i) {
        locks[i % 3].first.lock_shared();
        burn(burnCount);
        locks[(i + 2) % 3].first.unlock_shared();
      }
    });

    // don't start timing until both threads are ready
    while (avail.load() < 2) {
      this_thread::yield();
    }
  }

  go.store(true);
  for (auto& thr : threads) {
    DSched::join(thr);
  }
  // Release whatever is still held after numRounds rounds.
  locks[numRounds % 3].first.unlock();
  locks[(numRounds + 1) % 3].first.unlock();
  locks[(numRounds + 2) % 3].first.unlock_shared();
}
1301
1302 static void folly_rwspin_ping_pong(size_t n, size_t scale, size_t burnCount) {
1303   runPingPong<RWSpinLock>(n / scale, burnCount);
1304 }
1305
1306 static void shmtx_w_bare_ping_pong(size_t n, size_t scale, size_t burnCount) {
1307   runPingPong<SharedMutexWritePriority>(n / scale, burnCount);
1308 }
1309
1310 static void shmtx_r_bare_ping_pong(size_t n, size_t scale, size_t burnCount) {
1311   runPingPong<SharedMutexReadPriority>(n / scale, burnCount);
1312 }
1313
1314 static void folly_ticket_ping_pong(size_t n, size_t scale, size_t burnCount) {
1315   runPingPong<RWTicketSpinLock64>(n / scale, burnCount);
1316 }
1317
1318 static void boost_shared_ping_pong(size_t n, size_t scale, size_t burnCount) {
1319   runPingPong<boost::shared_mutex>(n / scale, burnCount);
1320 }
1321
1322 static void pthrd_rwlock_ping_pong(size_t n, size_t scale, size_t burnCount) {
1323   runPingPong<PosixRWLock>(n / scale, burnCount);
1324 }
1325
1326 TEST(SharedMutex, deterministic_ping_pong_write_prio) {
1327   for (int pass = 0; pass < 1; ++pass) {
1328     DSched sched(DSched::uniform(pass));
1329     runPingPong<DSharedMutexWritePriority, DeterministicAtomic>(500, 0);
1330   }
1331 }
1332
1333 TEST(SharedMutex, deterministic_ping_pong_read_prio) {
1334   for (int pass = 0; pass < 1; ++pass) {
1335     DSched sched(DSched::uniform(pass));
1336     runPingPong<DSharedMutexReadPriority, DeterministicAtomic>(500, 0);
1337   }
1338 }
1339
1340 TEST(SharedMutex, ping_pong_write_prio) {
1341   for (int pass = 0; pass < 1; ++pass) {
1342     runPingPong<SharedMutexWritePriority, atomic>(50000, 0);
1343   }
1344 }
1345
1346 TEST(SharedMutex, ping_pong_read_prio) {
1347   for (int pass = 0; pass < 1; ++pass) {
1348     runPingPong<SharedMutexReadPriority, atomic>(50000, 0);
1349   }
1350 }
1351
1352 // This is here so you can tell how much of the runtime reported by the
1353 // more complex harnesses is due to the harness, although due to the
1354 // magic of compiler optimization it may also be slower
1355 BENCHMARK(single_thread_lock_shared_unlock_shared, iters) {
1356   SharedMutex lock;
1357   for (size_t n = 0; n < iters; ++n) {
1358     SharedMutex::Token token;
1359     lock.lock_shared(token);
1360     folly::doNotOptimizeAway(0);
1361     lock.unlock_shared(token);
1362   }
1363 }
1364
1365 BENCHMARK(single_thread_lock_unlock, iters) {
1366   SharedMutex lock;
1367   for (size_t n = 0; n < iters; ++n) {
1368     lock.lock();
1369     folly::doNotOptimizeAway(0);
1370     lock.unlock();
1371   }
1372 }
1373
// Shorthand: BENCH_BASE registers a named-parameter benchmark and
// BENCH_REL one whose results are reported relative to the preceding base.
#define BENCH_BASE(...) FB_VA_GLUE(BENCHMARK_NAMED_PARAM, (__VA_ARGS__))
#define BENCH_REL(...) FB_VA_GLUE(BENCHMARK_RELATIVE_NAMED_PARAM, (__VA_ARGS__))
1376
// 100% reads.  Best-case scenario for deferred locks.  Lock is colocated
// with read data, so inline lock takes cache miss every time but deferred
// lock has only cache hits and local access.
// One batch per thread count (1..64); eight lock implementations per batch.
BENCHMARK_DRAW_LINE()
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin_reads, 1thread, 1, false)
BENCH_REL (shmtx_wr_pri_reads, 1thread, 1, false)
BENCH_REL (shmtx_w_bare_reads, 1thread, 1, false)
BENCH_REL (shmtx_rd_pri_reads, 1thread, 1, false)
BENCH_REL (shmtx_r_bare_reads, 1thread, 1, false)
BENCH_REL (folly_ticket_reads, 1thread, 1, false)
BENCH_REL (boost_shared_reads, 1thread, 1, false)
BENCH_REL (pthrd_rwlock_reads, 1thread, 1, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin_reads, 2thread, 2, false)
BENCH_REL (shmtx_wr_pri_reads, 2thread, 2, false)
BENCH_REL (shmtx_w_bare_reads, 2thread, 2, false)
BENCH_REL (shmtx_rd_pri_reads, 2thread, 2, false)
BENCH_REL (shmtx_r_bare_reads, 2thread, 2, false)
BENCH_REL (folly_ticket_reads, 2thread, 2, false)
BENCH_REL (boost_shared_reads, 2thread, 2, false)
BENCH_REL (pthrd_rwlock_reads, 2thread, 2, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin_reads, 4thread, 4, false)
BENCH_REL (shmtx_wr_pri_reads, 4thread, 4, false)
BENCH_REL (shmtx_w_bare_reads, 4thread, 4, false)
BENCH_REL (shmtx_rd_pri_reads, 4thread, 4, false)
BENCH_REL (shmtx_r_bare_reads, 4thread, 4, false)
BENCH_REL (folly_ticket_reads, 4thread, 4, false)
BENCH_REL (boost_shared_reads, 4thread, 4, false)
BENCH_REL (pthrd_rwlock_reads, 4thread, 4, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin_reads, 8thread, 8, false)
BENCH_REL (shmtx_wr_pri_reads, 8thread, 8, false)
BENCH_REL (shmtx_w_bare_reads, 8thread, 8, false)
BENCH_REL (shmtx_rd_pri_reads, 8thread, 8, false)
BENCH_REL (shmtx_r_bare_reads, 8thread, 8, false)
BENCH_REL (folly_ticket_reads, 8thread, 8, false)
BENCH_REL (boost_shared_reads, 8thread, 8, false)
BENCH_REL (pthrd_rwlock_reads, 8thread, 8, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin_reads, 16thread, 16, false)
BENCH_REL (shmtx_wr_pri_reads, 16thread, 16, false)
BENCH_REL (shmtx_w_bare_reads, 16thread, 16, false)
BENCH_REL (shmtx_rd_pri_reads, 16thread, 16, false)
BENCH_REL (shmtx_r_bare_reads, 16thread, 16, false)
BENCH_REL (folly_ticket_reads, 16thread, 16, false)
BENCH_REL (boost_shared_reads, 16thread, 16, false)
BENCH_REL (pthrd_rwlock_reads, 16thread, 16, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin_reads, 32thread, 32, false)
BENCH_REL (shmtx_wr_pri_reads, 32thread, 32, false)
BENCH_REL (shmtx_w_bare_reads, 32thread, 32, false)
BENCH_REL (shmtx_rd_pri_reads, 32thread, 32, false)
BENCH_REL (shmtx_r_bare_reads, 32thread, 32, false)
BENCH_REL (folly_ticket_reads, 32thread, 32, false)
BENCH_REL (boost_shared_reads, 32thread, 32, false)
BENCH_REL (pthrd_rwlock_reads, 32thread, 32, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin_reads, 64thread, 64, false)
BENCH_REL (shmtx_wr_pri_reads, 64thread, 64, false)
BENCH_REL (shmtx_w_bare_reads, 64thread, 64, false)
BENCH_REL (shmtx_rd_pri_reads, 64thread, 64, false)
BENCH_REL (shmtx_r_bare_reads, 64thread, 64, false)
BENCH_REL (folly_ticket_reads, 64thread, 64, false)
BENCH_REL (boost_shared_reads, 64thread, 64, false)
BENCH_REL (pthrd_rwlock_reads, 64thread, 64, false)
1444
// 1 lock used by everybody, 100% writes.  Threads only hurt, but it is
// good to not fail catastrophically.  Compare to single_thread_lock_unlock
// to see the overhead of the generic driver (and its pseudo-random number
// generator).  pthrd_mutex_ is a pthread_mutex_t (default, not adaptive),
// which is better than any of the reader-writer locks for this scenario.
// Trailing args: thread count, write fraction (1.0), separate locks (false).
BENCHMARK_DRAW_LINE()
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 1thread_all_write, 1, 1.0, false)
BENCH_REL (shmtx_wr_pri, 1thread_all_write, 1, 1.0, false)
BENCH_REL (shmtx_rd_pri, 1thread_all_write, 1, 1.0, false)
BENCH_REL (folly_ticket, 1thread_all_write, 1, 1.0, false)
BENCH_REL (boost_shared, 1thread_all_write, 1, 1.0, false)
BENCH_REL (pthrd_rwlock, 1thread_all_write, 1, 1.0, false)
BENCH_REL (pthrd_mutex_, 1thread_all_write, 1, 1.0, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 2thread_all_write, 2, 1.0, false)
BENCH_REL (shmtx_wr_pri, 2thread_all_write, 2, 1.0, false)
BENCH_REL (shmtx_rd_pri, 2thread_all_write, 2, 1.0, false)
BENCH_REL (folly_ticket, 2thread_all_write, 2, 1.0, false)
BENCH_REL (boost_shared, 2thread_all_write, 2, 1.0, false)
BENCH_REL (pthrd_rwlock, 2thread_all_write, 2, 1.0, false)
BENCH_REL (pthrd_mutex_, 2thread_all_write, 2, 1.0, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 4thread_all_write, 4, 1.0, false)
BENCH_REL (shmtx_wr_pri, 4thread_all_write, 4, 1.0, false)
BENCH_REL (shmtx_rd_pri, 4thread_all_write, 4, 1.0, false)
BENCH_REL (folly_ticket, 4thread_all_write, 4, 1.0, false)
BENCH_REL (boost_shared, 4thread_all_write, 4, 1.0, false)
BENCH_REL (pthrd_rwlock, 4thread_all_write, 4, 1.0, false)
BENCH_REL (pthrd_mutex_, 4thread_all_write, 4, 1.0, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 8thread_all_write, 8, 1.0, false)
BENCH_REL (shmtx_wr_pri, 8thread_all_write, 8, 1.0, false)
BENCH_REL (shmtx_rd_pri, 8thread_all_write, 8, 1.0, false)
BENCH_REL (folly_ticket, 8thread_all_write, 8, 1.0, false)
BENCH_REL (boost_shared, 8thread_all_write, 8, 1.0, false)
BENCH_REL (pthrd_rwlock, 8thread_all_write, 8, 1.0, false)
BENCH_REL (pthrd_mutex_, 8thread_all_write, 8, 1.0, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 16thread_all_write, 16, 1.0, false)
BENCH_REL (shmtx_wr_pri, 16thread_all_write, 16, 1.0, false)
BENCH_REL (shmtx_rd_pri, 16thread_all_write, 16, 1.0, false)
BENCH_REL (folly_ticket, 16thread_all_write, 16, 1.0, false)
BENCH_REL (boost_shared, 16thread_all_write, 16, 1.0, false)
BENCH_REL (pthrd_rwlock, 16thread_all_write, 16, 1.0, false)
BENCH_REL (pthrd_mutex_, 16thread_all_write, 16, 1.0, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 32thread_all_write, 32, 1.0, false)
BENCH_REL (shmtx_wr_pri, 32thread_all_write, 32, 1.0, false)
BENCH_REL (shmtx_rd_pri, 32thread_all_write, 32, 1.0, false)
BENCH_REL (folly_ticket, 32thread_all_write, 32, 1.0, false)
BENCH_REL (boost_shared, 32thread_all_write, 32, 1.0, false)
BENCH_REL (pthrd_rwlock, 32thread_all_write, 32, 1.0, false)
BENCH_REL (pthrd_mutex_, 32thread_all_write, 32, 1.0, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 64thread_all_write, 64, 1.0, false)
BENCH_REL (shmtx_wr_pri, 64thread_all_write, 64, 1.0, false)
BENCH_REL (shmtx_rd_pri, 64thread_all_write, 64, 1.0, false)
BENCH_REL (folly_ticket, 64thread_all_write, 64, 1.0, false)
BENCH_REL (boost_shared, 64thread_all_write, 64, 1.0, false)
BENCH_REL (pthrd_rwlock, 64thread_all_write, 64, 1.0, false)
BENCH_REL (pthrd_mutex_, 64thread_all_write, 64, 1.0, false)
1507
// 1 lock used by everybody, 10% writes.  Not much scaling to be had.  Perf
// is best at 1 thread, once you've got multiple threads > 8 threads hurts.
// Trailing args: thread count, write fraction (0.10), separate locks (false).
BENCHMARK_DRAW_LINE()
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 1thread_10pct_write, 1, 0.10, false)
BENCH_REL (shmtx_wr_pri, 1thread_10pct_write, 1, 0.10, false)
BENCH_REL (shmtx_rd_pri, 1thread_10pct_write, 1, 0.10, false)
BENCH_REL (folly_ticket, 1thread_10pct_write, 1, 0.10, false)
BENCH_REL (boost_shared, 1thread_10pct_write, 1, 0.10, false)
BENCH_REL (pthrd_rwlock, 1thread_10pct_write, 1, 0.10, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 2thread_10pct_write, 2, 0.10, false)
BENCH_REL (shmtx_wr_pri, 2thread_10pct_write, 2, 0.10, false)
BENCH_REL (shmtx_rd_pri, 2thread_10pct_write, 2, 0.10, false)
BENCH_REL (folly_ticket, 2thread_10pct_write, 2, 0.10, false)
BENCH_REL (boost_shared, 2thread_10pct_write, 2, 0.10, false)
BENCH_REL (pthrd_rwlock, 2thread_10pct_write, 2, 0.10, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 4thread_10pct_write, 4, 0.10, false)
BENCH_REL (shmtx_wr_pri, 4thread_10pct_write, 4, 0.10, false)
BENCH_REL (shmtx_rd_pri, 4thread_10pct_write, 4, 0.10, false)
BENCH_REL (folly_ticket, 4thread_10pct_write, 4, 0.10, false)
BENCH_REL (boost_shared, 4thread_10pct_write, 4, 0.10, false)
BENCH_REL (pthrd_rwlock, 4thread_10pct_write, 4, 0.10, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 8thread_10pct_write, 8, 0.10, false)
BENCH_REL (shmtx_wr_pri, 8thread_10pct_write, 8, 0.10, false)
BENCH_REL (shmtx_rd_pri, 8thread_10pct_write, 8, 0.10, false)
BENCH_REL (folly_ticket, 8thread_10pct_write, 8, 0.10, false)
BENCH_REL (boost_shared, 8thread_10pct_write, 8, 0.10, false)
BENCH_REL (pthrd_rwlock, 8thread_10pct_write, 8, 0.10, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 16thread_10pct_write, 16, 0.10, false)
BENCH_REL (shmtx_wr_pri, 16thread_10pct_write, 16, 0.10, false)
BENCH_REL (shmtx_rd_pri, 16thread_10pct_write, 16, 0.10, false)
BENCH_REL (folly_ticket, 16thread_10pct_write, 16, 0.10, false)
BENCH_REL (boost_shared, 16thread_10pct_write, 16, 0.10, false)
BENCH_REL (pthrd_rwlock, 16thread_10pct_write, 16, 0.10, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 32thread_10pct_write, 32, 0.10, false)
BENCH_REL (shmtx_wr_pri, 32thread_10pct_write, 32, 0.10, false)
BENCH_REL (shmtx_rd_pri, 32thread_10pct_write, 32, 0.10, false)
BENCH_REL (folly_ticket, 32thread_10pct_write, 32, 0.10, false)
BENCH_REL (boost_shared, 32thread_10pct_write, 32, 0.10, false)
BENCH_REL (pthrd_rwlock, 32thread_10pct_write, 32, 0.10, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 64thread_10pct_write, 64, 0.10, false)
BENCH_REL (shmtx_wr_pri, 64thread_10pct_write, 64, 0.10, false)
BENCH_REL (shmtx_rd_pri, 64thread_10pct_write, 64, 0.10, false)
BENCH_REL (folly_ticket, 64thread_10pct_write, 64, 0.10, false)
BENCH_REL (boost_shared, 64thread_10pct_write, 64, 0.10, false)
BENCH_REL (pthrd_rwlock, 64thread_10pct_write, 64, 0.10, false)
1560
// 1 lock used by everybody, 1% writes.  This is a more realistic example
// than the concurrent_*_reads benchmark, but still shows SharedMutex locks
// winning over all of the others
// Trailing args: thread count, write fraction (0.01), separate locks (false).
BENCHMARK_DRAW_LINE()
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 1thread_1pct_write, 1, 0.01, false)
BENCH_REL (shmtx_wr_pri, 1thread_1pct_write, 1, 0.01, false)
BENCH_REL (shmtx_w_bare, 1thread_1pct_write, 1, 0.01, false)
BENCH_REL (shmtx_rd_pri, 1thread_1pct_write, 1, 0.01, false)
BENCH_REL (shmtx_r_bare, 1thread_1pct_write, 1, 0.01, false)
BENCH_REL (folly_ticket, 1thread_1pct_write, 1, 0.01, false)
BENCH_REL (boost_shared, 1thread_1pct_write, 1, 0.01, false)
BENCH_REL (pthrd_rwlock, 1thread_1pct_write, 1, 0.01, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 2thread_1pct_write, 2, 0.01, false)
BENCH_REL (shmtx_wr_pri, 2thread_1pct_write, 2, 0.01, false)
BENCH_REL (shmtx_w_bare, 2thread_1pct_write, 2, 0.01, false)
BENCH_REL (shmtx_rd_pri, 2thread_1pct_write, 2, 0.01, false)
BENCH_REL (shmtx_r_bare, 2thread_1pct_write, 2, 0.01, false)
BENCH_REL (folly_ticket, 2thread_1pct_write, 2, 0.01, false)
BENCH_REL (boost_shared, 2thread_1pct_write, 2, 0.01, false)
BENCH_REL (pthrd_rwlock, 2thread_1pct_write, 2, 0.01, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 4thread_1pct_write, 4, 0.01, false)
BENCH_REL (shmtx_wr_pri, 4thread_1pct_write, 4, 0.01, false)
BENCH_REL (shmtx_w_bare, 4thread_1pct_write, 4, 0.01, false)
BENCH_REL (shmtx_rd_pri, 4thread_1pct_write, 4, 0.01, false)
BENCH_REL (shmtx_r_bare, 4thread_1pct_write, 4, 0.01, false)
BENCH_REL (folly_ticket, 4thread_1pct_write, 4, 0.01, false)
BENCH_REL (boost_shared, 4thread_1pct_write, 4, 0.01, false)
BENCH_REL (pthrd_rwlock, 4thread_1pct_write, 4, 0.01, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 8thread_1pct_write, 8, 0.01, false)
BENCH_REL (shmtx_wr_pri, 8thread_1pct_write, 8, 0.01, false)
BENCH_REL (shmtx_w_bare, 8thread_1pct_write, 8, 0.01, false)
BENCH_REL (shmtx_rd_pri, 8thread_1pct_write, 8, 0.01, false)
BENCH_REL (shmtx_r_bare, 8thread_1pct_write, 8, 0.01, false)
BENCH_REL (folly_ticket, 8thread_1pct_write, 8, 0.01, false)
BENCH_REL (boost_shared, 8thread_1pct_write, 8, 0.01, false)
BENCH_REL (pthrd_rwlock, 8thread_1pct_write, 8, 0.01, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 16thread_1pct_write, 16, 0.01, false)
BENCH_REL (shmtx_wr_pri, 16thread_1pct_write, 16, 0.01, false)
BENCH_REL (shmtx_w_bare, 16thread_1pct_write, 16, 0.01, false)
BENCH_REL (shmtx_rd_pri, 16thread_1pct_write, 16, 0.01, false)
BENCH_REL (shmtx_r_bare, 16thread_1pct_write, 16, 0.01, false)
BENCH_REL (folly_ticket, 16thread_1pct_write, 16, 0.01, false)
BENCH_REL (boost_shared, 16thread_1pct_write, 16, 0.01, false)
BENCH_REL (pthrd_rwlock, 16thread_1pct_write, 16, 0.01, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 32thread_1pct_write, 32, 0.01, false)
BENCH_REL (shmtx_wr_pri, 32thread_1pct_write, 32, 0.01, false)
BENCH_REL (shmtx_w_bare, 32thread_1pct_write, 32, 0.01, false)
BENCH_REL (shmtx_rd_pri, 32thread_1pct_write, 32, 0.01, false)
BENCH_REL (shmtx_r_bare, 32thread_1pct_write, 32, 0.01, false)
BENCH_REL (folly_ticket, 32thread_1pct_write, 32, 0.01, false)
BENCH_REL (boost_shared, 32thread_1pct_write, 32, 0.01, false)
BENCH_REL (pthrd_rwlock, 32thread_1pct_write, 32, 0.01, false)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 64thread_1pct_write, 64, 0.01, false)
BENCH_REL (shmtx_wr_pri, 64thread_1pct_write, 64, 0.01, false)
BENCH_REL (shmtx_w_bare, 64thread_1pct_write, 64, 0.01, false)
BENCH_REL (shmtx_rd_pri, 64thread_1pct_write, 64, 0.01, false)
BENCH_REL (shmtx_r_bare, 64thread_1pct_write, 64, 0.01, false)
BENCH_REL (folly_ticket, 64thread_1pct_write, 64, 0.01, false)
BENCH_REL (boost_shared, 64thread_1pct_write, 64, 0.01, false)
BENCH_REL (pthrd_rwlock, 64thread_1pct_write, 64, 0.01, false)
1628
// Worst case scenario for deferred locks. No actual sharing, likely that
// read operations will have to first set the kDeferredReadersPossibleBit,
// and likely that writers will have to scan deferredReaders[].
// The trailing `true` gives each thread its own lock (hence Nthr_Nlock).
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 2thr_2lock_50pct_write, 2, 0.50, true)
BENCH_REL (shmtx_wr_pri, 2thr_2lock_50pct_write, 2, 0.50, true)
BENCH_REL (shmtx_rd_pri, 2thr_2lock_50pct_write, 2, 0.50, true)
BENCH_BASE(folly_rwspin, 4thr_4lock_50pct_write, 4, 0.50, true)
BENCH_REL (shmtx_wr_pri, 4thr_4lock_50pct_write, 4, 0.50, true)
BENCH_REL (shmtx_rd_pri, 4thr_4lock_50pct_write, 4, 0.50, true)
BENCH_BASE(folly_rwspin, 8thr_8lock_50pct_write, 8, 0.50, true)
BENCH_REL (shmtx_wr_pri, 8thr_8lock_50pct_write, 8, 0.50, true)
BENCH_REL (shmtx_rd_pri, 8thr_8lock_50pct_write, 8, 0.50, true)
BENCH_BASE(folly_rwspin, 16thr_16lock_50pct_write, 16, 0.50, true)
BENCH_REL (shmtx_wr_pri, 16thr_16lock_50pct_write, 16, 0.50, true)
BENCH_REL (shmtx_rd_pri, 16thr_16lock_50pct_write, 16, 0.50, true)
BENCH_BASE(folly_rwspin, 32thr_32lock_50pct_write, 32, 0.50, true)
BENCH_REL (shmtx_wr_pri, 32thr_32lock_50pct_write, 32, 0.50, true)
BENCH_REL (shmtx_rd_pri, 32thr_32lock_50pct_write, 32, 0.50, true)
BENCH_BASE(folly_rwspin, 64thr_64lock_50pct_write, 64, 0.50, true)
BENCH_REL (shmtx_wr_pri, 64thr_64lock_50pct_write, 64, 0.50, true)
BENCH_REL (shmtx_rd_pri, 64thr_64lock_50pct_write, 64, 0.50, true)
BENCHMARK_DRAW_LINE()
BENCH_BASE(folly_rwspin, 2thr_2lock_10pct_write, 2, 0.10, true)
BENCH_REL (shmtx_wr_pri, 2thr_2lock_10pct_write, 2, 0.10, true)
BENCH_REL (shmtx_rd_pri, 2thr_2lock_10pct_write, 2, 0.10, true)
BENCH_BASE(folly_rwspin, 4thr_4lock_10pct_write, 4, 0.10, true)
BENCH_REL (shmtx_wr_pri, 4thr_4lock_10pct_write, 4, 0.10, true)
BENCH_REL (shmtx_rd_pri, 4thr_4lock_10pct_write, 4, 0.10, true)
BENCH_BASE(folly_rwspin, 8thr_8lock_10pct_write, 8, 0.10, true)
BENCH_REL (shmtx_wr_pri, 8thr_8lock_10pct_write, 8, 0.10, true)
BENCH_REL (shmtx_rd_pri, 8thr_8lock_10pct_write, 8, 0.10, true)
BENCH_BASE(folly_rwspin, 16thr_16lock_10pct_write, 16, 0.10, true)
BENCH_REL (shmtx_wr_pri, 16thr_16lock_10pct_write, 16, 0.10, true)
BENCH_REL (shmtx_rd_pri, 16thr_16lock_10pct_write, 16, 0.10, true)
BENCH_BASE(folly_rwspin, 32thr_32lock_10pct_write, 32, 0.10, true)
BENCH_REL (shmtx_wr_pri, 32thr_32lock_10pct_write, 32, 0.10, true)
BENCH_REL (shmtx_rd_pri, 32thr_32lock_10pct_write, 32, 0.10, true)
BENCH_BASE(folly_rwspin, 64thr_64lock_10pct_write, 64, 0.10, true)
BENCH_REL (shmtx_wr_pri, 64thr_64lock_10pct_write, 64, 0.10, true)
BENCH_REL (shmtx_rd_pri, 64thr_64lock_10pct_write, 64, 0.10, true)
BENCHMARK_DRAW_LINE()
1671 BENCH_BASE(folly_rwspin, 2thr_2lock_1pct_write, 2, 0.01, true)
1672 BENCH_REL (shmtx_wr_pri, 2thr_2lock_1pct_write, 2, 0.01, true)
1673 BENCH_REL (shmtx_rd_pri, 2thr_2lock_1pct_write, 2, 0.01, true)
1674 BENCH_BASE(folly_rwspin, 4thr_4lock_1pct_write, 4, 0.01, true)
1675 BENCH_REL (shmtx_wr_pri, 4thr_4lock_1pct_write, 4, 0.01, true)
1676 BENCH_REL (shmtx_rd_pri, 4thr_4lock_1pct_write, 4, 0.01, true)
1677 BENCH_BASE(folly_rwspin, 8thr_8lock_1pct_write, 8, 0.01, true)
1678 BENCH_REL (shmtx_wr_pri, 8thr_8lock_1pct_write, 8, 0.01, true)
1679 BENCH_REL (shmtx_rd_pri, 8thr_8lock_1pct_write, 8, 0.01, true)
1680 BENCH_BASE(folly_rwspin, 16thr_16lock_1pct_write, 16, 0.01, true)
1681 BENCH_REL (shmtx_wr_pri, 16thr_16lock_1pct_write, 16, 0.01, true)
1682 BENCH_REL (shmtx_rd_pri, 16thr_16lock_1pct_write, 16, 0.01, true)
1683 BENCH_BASE(folly_rwspin, 32thr_32lock_1pct_write, 32, 0.01, true)
1684 BENCH_REL (shmtx_wr_pri, 32thr_32lock_1pct_write, 32, 0.01, true)
1685 BENCH_REL (shmtx_rd_pri, 32thr_32lock_1pct_write, 32, 0.01, true)
1686 BENCH_BASE(folly_rwspin, 64thr_64lock_1pct_write, 64, 0.01, true)
1687 BENCH_REL (shmtx_wr_pri, 64thr_64lock_1pct_write, 64, 0.01, true)
1688 BENCH_REL (shmtx_rd_pri, 64thr_64lock_1pct_write, 64, 0.01, true)
1689
1690 // Ping-pong tests have a scaled number of iterations, because their burn
1691 // loop would make them too slow otherwise.  Ping-pong with burn count of
1692 // 100k or 300k shows the advantage of soft-spin, reducing the cost of
1693 // each wakeup by about 20 usec.  (Take benchmark reported difference,
1694 // ~400 nanos, multiply by the scale of 100, then divide by 2 because
1695 // each round has two wakeups.)
1696 BENCHMARK_DRAW_LINE()
1697 BENCHMARK_DRAW_LINE()
// No burn loop (args: iteration scale, burn count) — raw wakeup latency.
1698 BENCH_BASE(folly_rwspin_ping_pong, burn0, 1, 0)
1699 BENCH_REL (shmtx_w_bare_ping_pong, burn0, 1, 0)
1700 BENCH_REL (shmtx_r_bare_ping_pong, burn0, 1, 0)
1701 BENCH_REL (folly_ticket_ping_pong, burn0, 1, 0)
1702 BENCH_REL (boost_shared_ping_pong, burn0, 1, 0)
1703 BENCH_REL (pthrd_rwlock_ping_pong, burn0, 1, 0)
1704 BENCHMARK_DRAW_LINE()
// 100k-cycle burn per round, iterations scaled by 100.
1705 BENCH_BASE(folly_rwspin_ping_pong, burn100k, 100, 100000)
1706 BENCH_REL (shmtx_w_bare_ping_pong, burn100k, 100, 100000)
1707 BENCH_REL (shmtx_r_bare_ping_pong, burn100k, 100, 100000)
1708 BENCH_REL (folly_ticket_ping_pong, burn100k, 100, 100000)
1709 BENCH_REL (boost_shared_ping_pong, burn100k, 100, 100000)
1710 BENCH_REL (pthrd_rwlock_ping_pong, burn100k, 100, 100000)
1711 BENCHMARK_DRAW_LINE()
// 300k-cycle burn per round, iterations scaled by 100.
1712 BENCH_BASE(folly_rwspin_ping_pong, burn300k, 100, 300000)
1713 BENCH_REL (shmtx_w_bare_ping_pong, burn300k, 100, 300000)
1714 BENCH_REL (shmtx_r_bare_ping_pong, burn300k, 100, 300000)
1715 BENCH_REL (folly_ticket_ping_pong, burn300k, 100, 300000)
1716 BENCH_REL (boost_shared_ping_pong, burn300k, 100, 300000)
1717 BENCH_REL (pthrd_rwlock_ping_pong, burn300k, 100, 300000)
1718 BENCHMARK_DRAW_LINE()
// 1M-cycle burn per round, iterations scaled by 1000.
1719 BENCH_BASE(folly_rwspin_ping_pong, burn1M, 1000, 1000000)
1720 BENCH_REL (shmtx_w_bare_ping_pong, burn1M, 1000, 1000000)
1721 BENCH_REL (shmtx_r_bare_ping_pong, burn1M, 1000, 1000000)
1722 BENCH_REL (folly_ticket_ping_pong, burn1M, 1000, 1000000)
1723 BENCH_REL (boost_shared_ping_pong, burn1M, 1000, 1000000)
1724 BENCH_REL (pthrd_rwlock_ping_pong, burn1M, 1000, 1000000)
1725
1726 // Reproduce with a 10 minute run and
1727 //   sudo nice -n -20
1728 //     shared_mutex_test --benchmark --bm_min_iters=1000000
1729 //
1730 // Comparisons use folly::RWSpinLock as the baseline, with the
1731 // following row being the default SharedMutex (using *Holder or
1732 // Token-ful methods).
1733 //
1734 // Following results on 2-socket Intel(R) Xeon(R) CPU E5-2660 0 @ 2.20GHz
1735 //
1736 // ============================================================================
1737 // folly/test/SharedMutexTest.cpp                  relative  time/iter  iters/s
1738 // ============================================================================
1739 // single_thread_lock_shared_unlock_shared                     25.17ns   39.74M
1740 // single_thread_lock_unlock                                   25.88ns   38.64M
1741 // ----------------------------------------------------------------------------
1742 // ----------------------------------------------------------------------------
1743 // folly_rwspin_reads(1thread)                                 15.16ns   65.95M
1744 // shmtx_wr_pri_reads(1thread)                       69.18%    21.92ns   45.63M
1745 // shmtx_w_bare_reads(1thread)                       56.07%    27.04ns   36.98M
1746 // shmtx_rd_pri_reads(1thread)                       69.06%    21.95ns   45.55M
1747 // shmtx_r_bare_reads(1thread)                       56.36%    26.90ns   37.17M
1748 // folly_ticket_reads(1thread)                       57.56%    26.34ns   37.96M
1749 // boost_shared_reads(1thread)                       10.55%   143.72ns    6.96M
1750 // pthrd_rwlock_reads(1thread)                       39.61%    38.28ns   26.12M
1751 // ----------------------------------------------------------------------------
1752 // folly_rwspin_reads(2thread)                                 45.05ns   22.20M
1753 // shmtx_wr_pri_reads(2thread)                      379.98%    11.86ns   84.34M
1754 // shmtx_w_bare_reads(2thread)                      319.27%    14.11ns   70.87M
1755 // shmtx_rd_pri_reads(2thread)                      385.59%    11.68ns   85.59M
1756 // shmtx_r_bare_reads(2thread)                      306.56%    14.70ns   68.04M
1757 // folly_ticket_reads(2thread)                       61.07%    73.78ns   13.55M
1758 // boost_shared_reads(2thread)                       13.54%   332.66ns    3.01M
1759 // pthrd_rwlock_reads(2thread)                       34.22%   131.65ns    7.60M
1760 // ----------------------------------------------------------------------------
1761 // folly_rwspin_reads(4thread)                                 62.19ns   16.08M
1762 // shmtx_wr_pri_reads(4thread)                     1022.82%     6.08ns  164.48M
1763 // shmtx_w_bare_reads(4thread)                      875.37%     7.10ns  140.76M
1764 // shmtx_rd_pri_reads(4thread)                     1060.46%     5.86ns  170.53M
1765 // shmtx_r_bare_reads(4thread)                      879.88%     7.07ns  141.49M
1766 // folly_ticket_reads(4thread)                       64.62%    96.23ns   10.39M
1767 // boost_shared_reads(4thread)                       14.86%   418.49ns    2.39M
1768 // pthrd_rwlock_reads(4thread)                       25.01%   248.65ns    4.02M
1769 // ----------------------------------------------------------------------------
1770 // folly_rwspin_reads(8thread)                                 64.09ns   15.60M
1771 // shmtx_wr_pri_reads(8thread)                     2191.99%     2.92ns  342.03M
1772 // shmtx_w_bare_reads(8thread)                     1804.92%     3.55ns  281.63M
1773 // shmtx_rd_pri_reads(8thread)                     2194.60%     2.92ns  342.44M
1774 // shmtx_r_bare_reads(8thread)                     1800.53%     3.56ns  280.95M
1775 // folly_ticket_reads(8thread)                       54.90%   116.74ns    8.57M
1776 // boost_shared_reads(8thread)                       18.25%   351.24ns    2.85M
1777 // pthrd_rwlock_reads(8thread)                       28.19%   227.31ns    4.40M
1778 // ----------------------------------------------------------------------------
1779 // folly_rwspin_reads(16thread)                                70.06ns   14.27M
1780 // shmtx_wr_pri_reads(16thread)                    4970.09%     1.41ns  709.38M
1781 // shmtx_w_bare_reads(16thread)                    4143.75%     1.69ns  591.44M
1782 // shmtx_rd_pri_reads(16thread)                    5009.31%     1.40ns  714.98M
1783 // shmtx_r_bare_reads(16thread)                    4067.36%     1.72ns  580.54M
1784 // folly_ticket_reads(16thread)                      46.78%   149.77ns    6.68M
1785 // boost_shared_reads(16thread)                      21.67%   323.37ns    3.09M
1786 // pthrd_rwlock_reads(16thread)                      35.05%   199.90ns    5.00M
1787 // ----------------------------------------------------------------------------
1788 // folly_rwspin_reads(32thread)                                58.83ns   17.00M
1789 // shmtx_wr_pri_reads(32thread)                    5158.37%     1.14ns  876.79M
1790 // shmtx_w_bare_reads(32thread)                    4246.03%     1.39ns  721.72M
1791 // shmtx_rd_pri_reads(32thread)                    4845.97%     1.21ns  823.69M
1792 // shmtx_r_bare_reads(32thread)                    4721.44%     1.25ns  802.52M
1793 // folly_ticket_reads(32thread)                      28.40%   207.15ns    4.83M
1794 // boost_shared_reads(32thread)                      17.08%   344.54ns    2.90M
1795 // pthrd_rwlock_reads(32thread)                      30.01%   196.02ns    5.10M
1796 // ----------------------------------------------------------------------------
1797 // folly_rwspin_reads(64thread)                                59.19ns   16.89M
1798 // shmtx_wr_pri_reads(64thread)                    3804.54%     1.56ns  642.76M
1799 // shmtx_w_bare_reads(64thread)                    3625.06%     1.63ns  612.43M
1800 // shmtx_rd_pri_reads(64thread)                    3418.19%     1.73ns  577.48M
1801 // shmtx_r_bare_reads(64thread)                    3416.98%     1.73ns  577.28M
1802 // folly_ticket_reads(64thread)                      30.53%   193.90ns    5.16M
1803 // boost_shared_reads(64thread)                      18.59%   318.47ns    3.14M
1804 // pthrd_rwlock_reads(64thread)                      31.35%   188.81ns    5.30M
1805 // ----------------------------------------------------------------------------
1806 // ----------------------------------------------------------------------------
1807 // folly_rwspin(1thread_all_write)                             23.77ns   42.06M
1808 // shmtx_wr_pri(1thread_all_write)                   85.09%    27.94ns   35.79M
1809 // shmtx_rd_pri(1thread_all_write)                   85.32%    27.87ns   35.89M
1810 // folly_ticket(1thread_all_write)                   88.11%    26.98ns   37.06M
1811 // boost_shared(1thread_all_write)                   16.49%   144.14ns    6.94M
1812 // pthrd_rwlock(1thread_all_write)                   53.99%    44.04ns   22.71M
1813 // pthrd_mutex_(1thread_all_write)                   86.05%    27.63ns   36.20M
1814 // ----------------------------------------------------------------------------
1815 // folly_rwspin(2thread_all_write)                             76.05ns   13.15M
1816 // shmtx_wr_pri(2thread_all_write)                   60.67%   125.35ns    7.98M
1817 // shmtx_rd_pri(2thread_all_write)                   60.36%   125.99ns    7.94M
1818 // folly_ticket(2thread_all_write)                  129.10%    58.91ns   16.98M
1819 // boost_shared(2thread_all_write)                   18.65%   407.74ns    2.45M
1820 // pthrd_rwlock(2thread_all_write)                   40.90%   185.92ns    5.38M
1821 // pthrd_mutex_(2thread_all_write)                  127.37%    59.71ns   16.75M
1822 // ----------------------------------------------------------------------------
1823 // folly_rwspin(4thread_all_write)                            207.17ns    4.83M
1824 // shmtx_wr_pri(4thread_all_write)                  119.42%   173.49ns    5.76M
1825 // shmtx_rd_pri(4thread_all_write)                  117.68%   176.05ns    5.68M
1826 // folly_ticket(4thread_all_write)                  182.39%   113.59ns    8.80M
1827 // boost_shared(4thread_all_write)                   11.98%     1.73us  578.46K
1828 // pthrd_rwlock(4thread_all_write)                   27.50%   753.25ns    1.33M
1829 // pthrd_mutex_(4thread_all_write)                  117.75%   175.95ns    5.68M
1830 // ----------------------------------------------------------------------------
1831 // folly_rwspin(8thread_all_write)                            326.50ns    3.06M
1832 // shmtx_wr_pri(8thread_all_write)                  125.47%   260.22ns    3.84M
1833 // shmtx_rd_pri(8thread_all_write)                  124.73%   261.76ns    3.82M
1834 // folly_ticket(8thread_all_write)                  253.39%   128.85ns    7.76M
1835 // boost_shared(8thread_all_write)                    6.36%     5.13us  194.87K
1836 // pthrd_rwlock(8thread_all_write)                   38.54%   847.09ns    1.18M
1837 // pthrd_mutex_(8thread_all_write)                  166.31%   196.32ns    5.09M
1838 // ----------------------------------------------------------------------------
1839 // folly_rwspin(16thread_all_write)                           729.89ns    1.37M
1840 // shmtx_wr_pri(16thread_all_write)                 219.91%   331.91ns    3.01M
1841 // shmtx_rd_pri(16thread_all_write)                 220.09%   331.62ns    3.02M
1842 // folly_ticket(16thread_all_write)                 390.06%   187.12ns    5.34M
1843 // boost_shared(16thread_all_write)                  10.27%     7.11us  140.72K
1844 // pthrd_rwlock(16thread_all_write)                 113.90%   640.84ns    1.56M
1845 // pthrd_mutex_(16thread_all_write)                 401.97%   181.58ns    5.51M
1846 // ----------------------------------------------------------------------------
1847 // folly_rwspin(32thread_all_write)                             1.55us  645.01K
1848 // shmtx_wr_pri(32thread_all_write)                 415.05%   373.54ns    2.68M
1849 // shmtx_rd_pri(32thread_all_write)                 258.45%   599.88ns    1.67M
1850 // folly_ticket(32thread_all_write)                 525.40%   295.09ns    3.39M
1851 // boost_shared(32thread_all_write)                  20.84%     7.44us  134.45K
1852 // pthrd_rwlock(32thread_all_write)                 254.16%   610.00ns    1.64M
1853 // pthrd_mutex_(32thread_all_write)                 852.51%   181.86ns    5.50M
1854 // ----------------------------------------------------------------------------
1855 // folly_rwspin(64thread_all_write)                             2.03us  492.00K
1856 // shmtx_wr_pri(64thread_all_write)                 517.65%   392.64ns    2.55M
1857 // shmtx_rd_pri(64thread_all_write)                 288.20%   705.24ns    1.42M
1858 // folly_ticket(64thread_all_write)                 638.22%   318.47ns    3.14M
1859 // boost_shared(64thread_all_write)                  27.56%     7.37us  135.61K
1860 // pthrd_rwlock(64thread_all_write)                 326.75%   622.04ns    1.61M
1861 // pthrd_mutex_(64thread_all_write)                1231.57%   165.04ns    6.06M
1862 // ----------------------------------------------------------------------------
1863 // ----------------------------------------------------------------------------
1864 // folly_rwspin(1thread_10pct_write)                           19.39ns   51.58M
1865 // shmtx_wr_pri(1thread_10pct_write)                 93.87%    20.65ns   48.42M
1866 // shmtx_rd_pri(1thread_10pct_write)                 93.60%    20.71ns   48.28M
1867 // folly_ticket(1thread_10pct_write)                 73.75%    26.29ns   38.04M
1868 // boost_shared(1thread_10pct_write)                 12.97%   149.53ns    6.69M
1869 // pthrd_rwlock(1thread_10pct_write)                 44.15%    43.92ns   22.77M
1870 // ----------------------------------------------------------------------------
1871 // folly_rwspin(2thread_10pct_write)                          227.88ns    4.39M
1872 // shmtx_wr_pri(2thread_10pct_write)                321.08%    70.98ns   14.09M
1873 // shmtx_rd_pri(2thread_10pct_write)                280.65%    81.20ns   12.32M
1874 // folly_ticket(2thread_10pct_write)                220.43%   103.38ns    9.67M
1875 // boost_shared(2thread_10pct_write)                 58.78%   387.71ns    2.58M
1876 // pthrd_rwlock(2thread_10pct_write)                112.68%   202.23ns    4.94M
1877 // ----------------------------------------------------------------------------
1878 // folly_rwspin(4thread_10pct_write)                          444.94ns    2.25M
1879 // shmtx_wr_pri(4thread_10pct_write)                470.35%    94.60ns   10.57M
1880 // shmtx_rd_pri(4thread_10pct_write)                349.08%   127.46ns    7.85M
1881 // folly_ticket(4thread_10pct_write)                305.64%   145.58ns    6.87M
1882 // boost_shared(4thread_10pct_write)                 44.43%     1.00us  998.57K
1883 // pthrd_rwlock(4thread_10pct_write)                100.59%   442.31ns    2.26M
1884 // ----------------------------------------------------------------------------
1885 // folly_rwspin(8thread_10pct_write)                          424.67ns    2.35M
1886 // shmtx_wr_pri(8thread_10pct_write)                337.53%   125.82ns    7.95M
1887 // shmtx_rd_pri(8thread_10pct_write)                232.32%   182.79ns    5.47M
1888 // folly_ticket(8thread_10pct_write)                206.59%   205.56ns    4.86M
1889 // boost_shared(8thread_10pct_write)                 19.45%     2.18us  457.90K
1890 // pthrd_rwlock(8thread_10pct_write)                 78.58%   540.42ns    1.85M
1891 // ----------------------------------------------------------------------------
1892 // folly_rwspin(16thread_10pct_write)                         727.04ns    1.38M
1893 // shmtx_wr_pri(16thread_10pct_write)               400.60%   181.49ns    5.51M
1894 // shmtx_rd_pri(16thread_10pct_write)               312.94%   232.33ns    4.30M
1895 // folly_ticket(16thread_10pct_write)               283.67%   256.30ns    3.90M
1896 // boost_shared(16thread_10pct_write)                15.87%     4.58us  218.32K
1897 // pthrd_rwlock(16thread_10pct_write)               131.28%   553.82ns    1.81M
1898 // ----------------------------------------------------------------------------
1899 // folly_rwspin(32thread_10pct_write)                         810.61ns    1.23M
1900 // shmtx_wr_pri(32thread_10pct_write)               429.61%   188.68ns    5.30M
1901 // shmtx_rd_pri(32thread_10pct_write)               321.13%   252.42ns    3.96M
1902 // folly_ticket(32thread_10pct_write)               247.65%   327.32ns    3.06M
1903 // boost_shared(32thread_10pct_write)                 8.34%     9.71us  102.94K
1904 // pthrd_rwlock(32thread_10pct_write)               144.28%   561.85ns    1.78M
1905 // ----------------------------------------------------------------------------
1906 // folly_rwspin(64thread_10pct_write)                           1.10us  912.30K
1907 // shmtx_wr_pri(64thread_10pct_write)               486.68%   225.22ns    4.44M
1908 // shmtx_rd_pri(64thread_10pct_write)               412.96%   265.43ns    3.77M
1909 // folly_ticket(64thread_10pct_write)               280.23%   391.15ns    2.56M
1910 // boost_shared(64thread_10pct_write)                 6.16%    17.79us   56.22K
1911 // pthrd_rwlock(64thread_10pct_write)               198.81%   551.34ns    1.81M
1912 // ----------------------------------------------------------------------------
1913 // ----------------------------------------------------------------------------
1914 // folly_rwspin(1thread_1pct_write)                            19.02ns   52.57M
1915 // shmtx_wr_pri(1thread_1pct_write)                  94.46%    20.14ns   49.66M
1916 // shmtx_w_bare(1thread_1pct_write)                  76.60%    24.83ns   40.27M
1917 // shmtx_rd_pri(1thread_1pct_write)                  93.83%    20.27ns   49.33M
1918 // shmtx_r_bare(1thread_1pct_write)                  77.04%    24.69ns   40.50M
1919 // folly_ticket(1thread_1pct_write)                  72.83%    26.12ns   38.29M
1920 // boost_shared(1thread_1pct_write)                  12.48%   152.44ns    6.56M
1921 // pthrd_rwlock(1thread_1pct_write)                  42.85%    44.39ns   22.53M
1922 // ----------------------------------------------------------------------------
1923 // folly_rwspin(2thread_1pct_write)                           110.63ns    9.04M
1924 // shmtx_wr_pri(2thread_1pct_write)                 442.12%    25.02ns   39.96M
1925 // shmtx_w_bare(2thread_1pct_write)                 374.65%    29.53ns   33.86M
1926 // shmtx_rd_pri(2thread_1pct_write)                 371.08%    29.81ns   33.54M
1927 // shmtx_r_bare(2thread_1pct_write)                 138.02%    80.15ns   12.48M
1928 // folly_ticket(2thread_1pct_write)                 131.34%    84.23ns   11.87M
1929 // boost_shared(2thread_1pct_write)                  30.35%   364.58ns    2.74M
1930 // pthrd_rwlock(2thread_1pct_write)                  95.48%   115.87ns    8.63M
1931 // ----------------------------------------------------------------------------
1932 // folly_rwspin(4thread_1pct_write)                           140.62ns    7.11M
1933 // shmtx_wr_pri(4thread_1pct_write)                 627.13%    22.42ns   44.60M
1934 // shmtx_w_bare(4thread_1pct_write)                 552.94%    25.43ns   39.32M
1935 // shmtx_rd_pri(4thread_1pct_write)                 226.06%    62.21ns   16.08M
1936 // shmtx_r_bare(4thread_1pct_write)                  77.61%   181.19ns    5.52M
1937 // folly_ticket(4thread_1pct_write)                 119.58%   117.60ns    8.50M
1938 // boost_shared(4thread_1pct_write)                  25.36%   554.54ns    1.80M
1939 // pthrd_rwlock(4thread_1pct_write)                  45.55%   308.72ns    3.24M
1940 // ----------------------------------------------------------------------------
1941 // folly_rwspin(8thread_1pct_write)                           166.23ns    6.02M
1942 // shmtx_wr_pri(8thread_1pct_write)                 687.09%    24.19ns   41.33M
1943 // shmtx_w_bare(8thread_1pct_write)                 611.80%    27.17ns   36.80M
1944 // shmtx_rd_pri(8thread_1pct_write)                 140.37%   118.43ns    8.44M
1945 // shmtx_r_bare(8thread_1pct_write)                  80.32%   206.97ns    4.83M
1946 // folly_ticket(8thread_1pct_write)                 117.06%   142.01ns    7.04M
1947 // boost_shared(8thread_1pct_write)                  22.29%   745.67ns    1.34M
1948 // pthrd_rwlock(8thread_1pct_write)                  49.84%   333.55ns    3.00M
1949 // ----------------------------------------------------------------------------
1950 // folly_rwspin(16thread_1pct_write)                          419.79ns    2.38M
1951 // shmtx_wr_pri(16thread_1pct_write)               1397.92%    30.03ns   33.30M
1952 // shmtx_w_bare(16thread_1pct_write)               1324.60%    31.69ns   31.55M
1953 // shmtx_rd_pri(16thread_1pct_write)                278.12%   150.94ns    6.63M
1954 // shmtx_r_bare(16thread_1pct_write)                194.25%   216.11ns    4.63M
1955 // folly_ticket(16thread_1pct_write)                255.38%   164.38ns    6.08M
1956 // boost_shared(16thread_1pct_write)                 33.71%     1.25us  803.01K
1957 // pthrd_rwlock(16thread_1pct_write)                131.96%   318.12ns    3.14M
1958 // ----------------------------------------------------------------------------
1959 // folly_rwspin(32thread_1pct_write)                          395.99ns    2.53M
1960 // shmtx_wr_pri(32thread_1pct_write)               1332.76%    29.71ns   33.66M
1961 // shmtx_w_bare(32thread_1pct_write)               1208.86%    32.76ns   30.53M
1962 // shmtx_rd_pri(32thread_1pct_write)                252.97%   156.54ns    6.39M
1963 // shmtx_r_bare(32thread_1pct_write)                193.79%   204.35ns    4.89M
1964 // folly_ticket(32thread_1pct_write)                173.16%   228.69ns    4.37M
1965 // boost_shared(32thread_1pct_write)                 17.00%     2.33us  429.40K
1966 // pthrd_rwlock(32thread_1pct_write)                129.88%   304.89ns    3.28M
1967 // ----------------------------------------------------------------------------
1968 // folly_rwspin(64thread_1pct_write)                          424.07ns    2.36M
1969 // shmtx_wr_pri(64thread_1pct_write)               1297.89%    32.67ns   30.61M
1970 // shmtx_w_bare(64thread_1pct_write)               1228.88%    34.51ns   28.98M
1971 // shmtx_rd_pri(64thread_1pct_write)                270.40%   156.83ns    6.38M
1972 // shmtx_r_bare(64thread_1pct_write)                218.05%   194.48ns    5.14M
1973 // folly_ticket(64thread_1pct_write)                171.44%   247.36ns    4.04M
1974 // boost_shared(64thread_1pct_write)                 10.60%     4.00us  249.95K
1975 // pthrd_rwlock(64thread_1pct_write)                143.80%   294.91ns    3.39M
1976 // ----------------------------------------------------------------------------
1977 // folly_rwspin(2thr_2lock_50pct_write)                        10.87ns   91.99M
1978 // shmtx_wr_pri(2thr_2lock_50pct_write)              83.71%    12.99ns   77.01M
1979 // shmtx_rd_pri(2thr_2lock_50pct_write)              84.08%    12.93ns   77.34M
1980 // folly_rwspin(4thr_4lock_50pct_write)                         5.32ns  188.12M
1981 // shmtx_wr_pri(4thr_4lock_50pct_write)              82.21%     6.47ns  154.65M
1982 // shmtx_rd_pri(4thr_4lock_50pct_write)              81.20%     6.55ns  152.75M
1983 // folly_rwspin(8thr_8lock_50pct_write)                         2.64ns  379.06M
1984 // shmtx_wr_pri(8thr_8lock_50pct_write)              81.26%     3.25ns  308.03M
1985 // shmtx_rd_pri(8thr_8lock_50pct_write)              80.95%     3.26ns  306.86M
1986 // folly_rwspin(16thr_16lock_50pct_write)                       1.52ns  656.77M
1987 // shmtx_wr_pri(16thr_16lock_50pct_write)            86.24%     1.77ns  566.41M
1988 // shmtx_rd_pri(16thr_16lock_50pct_write)            83.72%     1.82ns  549.82M
1989 // folly_rwspin(32thr_32lock_50pct_write)                       1.19ns  841.03M
1990 // shmtx_wr_pri(32thr_32lock_50pct_write)            85.08%     1.40ns  715.55M
1991 // shmtx_rd_pri(32thr_32lock_50pct_write)            86.44%     1.38ns  727.00M
1992 // folly_rwspin(64thr_64lock_50pct_write)                       1.46ns  684.28M
1993 // shmtx_wr_pri(64thr_64lock_50pct_write)            84.53%     1.73ns  578.43M
1994 // shmtx_rd_pri(64thr_64lock_50pct_write)            82.80%     1.76ns  566.58M
1995 // ----------------------------------------------------------------------------
1996 // folly_rwspin(2thr_2lock_10pct_write)                        10.01ns   99.85M
1997 // shmtx_wr_pri(2thr_2lock_10pct_write)              92.02%    10.88ns   91.88M
1998 // shmtx_rd_pri(2thr_2lock_10pct_write)              92.35%    10.84ns   92.22M
1999 // folly_rwspin(4thr_4lock_10pct_write)                         4.81ns  207.87M
2000 // shmtx_wr_pri(4thr_4lock_10pct_write)              89.32%     5.39ns  185.67M
2001 // shmtx_rd_pri(4thr_4lock_10pct_write)              88.96%     5.41ns  184.93M
2002 // folly_rwspin(8thr_8lock_10pct_write)                         2.39ns  417.62M
2003 // shmtx_wr_pri(8thr_8lock_10pct_write)              91.17%     2.63ns  380.76M
2004 // shmtx_rd_pri(8thr_8lock_10pct_write)              89.53%     2.67ns  373.92M
2005 // folly_rwspin(16thr_16lock_10pct_write)                       1.16ns  860.47M
2006 // shmtx_wr_pri(16thr_16lock_10pct_write)            74.35%     1.56ns  639.77M
2007 // shmtx_rd_pri(16thr_16lock_10pct_write)            91.34%     1.27ns  785.97M
2008 // folly_rwspin(32thr_32lock_10pct_write)                       1.15ns  866.23M
2009 // shmtx_wr_pri(32thr_32lock_10pct_write)            92.32%     1.25ns  799.72M
2010 // shmtx_rd_pri(32thr_32lock_10pct_write)            94.40%     1.22ns  817.71M
2011 // folly_rwspin(64thr_64lock_10pct_write)                       1.41ns  710.54M
2012 // shmtx_wr_pri(64thr_64lock_10pct_write)            94.14%     1.50ns  668.88M
2013 // shmtx_rd_pri(64thr_64lock_10pct_write)            94.80%     1.48ns  673.56M
2014 // ----------------------------------------------------------------------------
2015 // folly_rwspin(2thr_2lock_1pct_write)                          9.58ns  104.36M
2016 // shmtx_wr_pri(2thr_2lock_1pct_write)               92.00%    10.42ns   96.01M
2017 // shmtx_rd_pri(2thr_2lock_1pct_write)               91.79%    10.44ns   95.79M
2018 // folly_rwspin(4thr_4lock_1pct_write)                          4.71ns  212.30M
2019 // shmtx_wr_pri(4thr_4lock_1pct_write)               90.37%     5.21ns  191.85M
2020 // shmtx_rd_pri(4thr_4lock_1pct_write)               89.94%     5.24ns  190.95M
2021 // folly_rwspin(8thr_8lock_1pct_write)                          2.33ns  429.91M
2022 // shmtx_wr_pri(8thr_8lock_1pct_write)               90.67%     2.57ns  389.80M
2023 // shmtx_rd_pri(8thr_8lock_1pct_write)               90.61%     2.57ns  389.55M
2024 // folly_rwspin(16thr_16lock_1pct_write)                        1.10ns  905.23M
2025 // shmtx_wr_pri(16thr_16lock_1pct_write)             91.96%     1.20ns  832.46M
2026 // shmtx_rd_pri(16thr_16lock_1pct_write)             92.29%     1.20ns  835.42M
2027 // folly_rwspin(32thr_32lock_1pct_write)                        1.14ns  879.85M
2028 // shmtx_wr_pri(32thr_32lock_1pct_write)             93.41%     1.22ns  821.86M
2029 // shmtx_rd_pri(32thr_32lock_1pct_write)             94.18%     1.21ns  828.66M
2030 // folly_rwspin(64thr_64lock_1pct_write)                        1.34ns  748.83M
2031 // shmtx_wr_pri(64thr_64lock_1pct_write)             94.39%     1.41ns  706.84M
2032 // shmtx_rd_pri(64thr_64lock_1pct_write)             94.02%     1.42ns  704.06M
2033 // ----------------------------------------------------------------------------
2034 // ----------------------------------------------------------------------------
2035 // folly_rwspin_ping_pong(burn0)                              605.63ns    1.65M
2036 // shmtx_w_bare_ping_pong(burn0)                    102.17%   592.76ns    1.69M
2037 // shmtx_r_bare_ping_pong(burn0)                     88.75%   682.44ns    1.47M
2038 // folly_ticket_ping_pong(burn0)                     63.92%   947.56ns    1.06M
2039 // boost_shared_ping_pong(burn0)                      8.52%     7.11us  140.73K
2040 // pthrd_rwlock_ping_pong(burn0)                      7.88%     7.68us  130.15K
2041 // ----------------------------------------------------------------------------
2042 // folly_rwspin_ping_pong(burn100k)                           727.76ns    1.37M
2043 // shmtx_w_bare_ping_pong(burn100k)                 100.79%   722.09ns    1.38M
2044 // shmtx_r_bare_ping_pong(burn100k)                 101.98%   713.61ns    1.40M
2045 // folly_ticket_ping_pong(burn100k)                 102.80%   707.95ns    1.41M
2046 // boost_shared_ping_pong(burn100k)                  81.49%   893.02ns    1.12M
2047 // pthrd_rwlock_ping_pong(burn100k)                  71.05%     1.02us  976.30K
2048 // ----------------------------------------------------------------------------
2049 // folly_rwspin_ping_pong(burn300k)                             2.11us  473.46K
2050 // shmtx_w_bare_ping_pong(burn300k)                 100.06%     2.11us  473.72K
2051 // shmtx_r_bare_ping_pong(burn300k)                  98.93%     2.13us  468.39K
2052 // folly_ticket_ping_pong(burn300k)                  96.68%     2.18us  457.73K
2053 // boost_shared_ping_pong(burn300k)                  84.72%     2.49us  401.13K
2054 // pthrd_rwlock_ping_pong(burn300k)                  84.62%     2.50us  400.66K
2055 // ----------------------------------------------------------------------------
2056 // folly_rwspin_ping_pong(burn1M)                             709.70ns    1.41M
2057 // shmtx_w_bare_ping_pong(burn1M)                   100.28%   707.73ns    1.41M
2058 // shmtx_r_bare_ping_pong(burn1M)                    99.63%   712.37ns    1.40M
2059 // folly_ticket_ping_pong(burn1M)                   100.09%   709.05ns    1.41M
2060 // boost_shared_ping_pong(burn1M)                    94.09%   754.29ns    1.33M
2061 // pthrd_rwlock_ping_pong(burn1M)                    96.32%   736.82ns    1.36M
2062 // ============================================================================
2063
2064 int main(int argc, char** argv) {
2065   (void)folly_rwspin_reads;
2066   (void)shmtx_wr_pri_reads;
2067   (void)shmtx_w_bare_reads;
2068   (void)shmtx_rd_pri_reads;
2069   (void)shmtx_r_bare_reads;
2070   (void)folly_ticket_reads;
2071   (void)boost_shared_reads;
2072   (void)pthrd_rwlock_reads;
2073   (void)folly_rwspin;
2074   (void)shmtx_wr_pri;
2075   (void)shmtx_w_bare;
2076   (void)shmtx_rd_pri;
2077   (void)shmtx_r_bare;
2078   (void)folly_ticket;
2079   (void)boost_shared;
2080   (void)pthrd_rwlock;
2081   (void)pthrd_mutex_;
2082   (void)folly_rwspin_ping_pong;
2083   (void)shmtx_w_bare_ping_pong;
2084   (void)shmtx_r_bare_ping_pong;
2085   (void)folly_ticket_ping_pong;
2086   (void)boost_shared_ping_pong;
2087   (void)pthrd_rwlock_ping_pong;
2088
2089   testing::InitGoogleTest(&argc, argv);
2090   gflags::ParseCommandLineFlags(&argc, &argv, true);
2091   int rv = RUN_ALL_TESTS();
2092   folly::runBenchmarksOnFlag();
2093   return rv;
2094 }