+#endif
+
+// Regression test: runs the TestClobber scenario (defined earlier in this
+// file). Presumably verifies that the lock's inline-asm fast path does not
+// clobber registers outside its declared clobber list — confirm against
+// TestClobber's definition.
+TEST(SmallLocks, RegClobber) {
+ TestClobber().go();
+}
+
+// Compile-time guard: MicroLock is expected to occupy a single byte (it is
+// designed to be embedded in packed host words — see the MicroLock test
+// below, which tucks it between adjacent bytes).
+// NOTE(review): under GCC (not clang) with AddressSanitizer the expected
+// size is 4 — presumably due to ASan-related padding/instrumentation of the
+// packed struct; confirm against MicroLock's definition.
+FOLLY_PACK_PUSH
+#if defined(__SANITIZE_ADDRESS__) && !defined(__clang__) && \
+ (defined(__GNUC__) || defined(__GNUG__))
+static_assert(sizeof(MicroLock) == 4, "Size check failed");
+#else
+static_assert(sizeof(MicroLock) == 1, "Size check failed");
+#endif
+FOLLY_PACK_POP
+
+namespace {
+
+// One-shot start gate: every thread that calls wait() blocks until run()
+// has been invoked once; after that, wait() returns immediately. Used to
+// line up worker threads so they all start hammering the lock together.
+struct SimpleBarrier {
+ // Block the caller until run() has opened the gate.
+ void wait() {
+ std::unique_lock<std::mutex> held(mutex_);
+ cond_.wait(held, [this] { return released_; });
+ }
+
+ // Open the gate and wake all current (and future) waiters.
+ void run() {
+ {
+ std::lock_guard<std::mutex> held(mutex_);
+ released_ = true;
+ }
+ cond_.notify_all();
+ }
+
+ private:
+ std::mutex mutex_;
+ std::condition_variable cond_;
+ bool released_ = false;
+};
+
+} // namespace
+
+// Stress test: 20 threads contend on all four MicroLock slots while another
+// thread mutates the adjacent bytes of the same word. Checks both mutual
+// exclusion (per-slot counters are exact) and that the lock never touches
+// bits outside its own byte.
+TEST(SmallLocks, MicroLock) {
+ // Per-slot totals; each counters[i] is only ever modified while slot i is
+ // held. NOTE(review): volatile here presumably just defeats compiler
+ // caching across the lock calls — the lock itself provides the ordering.
+ volatile uint64_t counters[4] = {0, 0, 0, 0};
+ std::vector<std::thread> threads;
+ static const unsigned nrThreads = 20;
+ static const unsigned iterPerThread = 10000;
+ SimpleBarrier startBarrier;
+
+ // Each thread cycles through the 4 slots evenly (slotNo = iter % 4 below),
+ // so iterPerThread must be divisible by 4 for the final per-slot math.
+ assert(iterPerThread % 4 == 0);
+
+ // Embed the lock in a larger structure to ensure that we do not
+ // affect bits outside the ones MicroLock is defined to affect.
+ struct {
+ uint8_t a;
+ volatile uint8_t b;
+ MicroLock alock;
+ volatile uint8_t d;
+ } x;
+
+ uint8_t origB = 'b';
+ uint8_t origD = 'd';
+
+ x.a = 'a';
+ x.b = origB;
+ x.alock.init();
+ x.d = origD;
+
+ // This thread touches other parts of the host word to show that
+ // MicroLock does not interfere with memory outside of the byte
+ // it owns.
+ std::thread adjacentMemoryToucher = std::thread([&] {
+ startBarrier.wait();
+ for (unsigned iter = 0; iter < iterPerThread; ++iter) {
+ if (iter % 2) {
+ x.b++;
+ } else {
+ x.d++;
+ }
+ }
+ });
+
+ for (unsigned i = 0; i < nrThreads; ++i) {
+ threads.emplace_back([&] {
+ startBarrier.wait();
+ for (unsigned iter = 0; iter < iterPerThread; ++iter) {
+ unsigned slotNo = iter % 4;
+ x.alock.lock(slotNo);
+ counters[slotNo] += 1;
+ // The occasional sleep makes it more likely that we'll
+ // exercise the futex-wait path inside MicroLock.
+ if (iter % 1000 == 0) {
+ struct timespec ts = {0, 10000};
+ (void)nanosleep(&ts, nullptr);
+ }
+ x.alock.unlock(slotNo);
+ }
+ });
+ }
+
+ // Release all workers (and the toucher) at once to maximize contention.
+ startBarrier.run();
+
+ for (auto it = threads.begin(); it != threads.end(); ++it) {
+ it->join();
+ }
+
+ adjacentMemoryToucher.join();
+
+ // The bytes around the lock must be exactly what the toucher left them
+ // (b and d each incremented on alternating iterations, i.e. half of them);
+ // casts allow for well-defined uint8_t wraparound.
+ EXPECT_EQ(x.a, 'a');
+ EXPECT_EQ(x.b, (uint8_t)(origB + iterPerThread / 2));
+ EXPECT_EQ(x.d, (uint8_t)(origD + iterPerThread / 2));
+ // Every increment must have been observed: each slot was taken exactly
+ // nrThreads * iterPerThread / 4 times.
+ for (unsigned i = 0; i < 4; ++i) {
+ EXPECT_EQ(counters[i], ((uint64_t)nrThreads * iterPerThread) / 4);
+ }
+}
+
+// try_lock contract: the first attempt on a fresh lock succeeds, and a
+// second attempt while it is held fails without blocking.
+TEST(SmallLocks, MicroLockTryLock) {
+ MicroLock lock;
+ lock.init();
+ EXPECT_TRUE(lock.try_lock());
+ EXPECT_FALSE(lock.try_lock());
+ lock.unlock();
+}