/*
- * Copyright 2012 Facebook, Inc.
+ * Copyright 2015 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include <cstdlib>
#include <pthread.h>
#include <mutex>
+#include <atomic>
#include <glog/logging.h>
+#include <folly/Portability.h>
-#ifndef __x86_64__
+#if !FOLLY_X64
# error "SmallLocks.h is currently x64-only."
#endif
-#include "folly/Portability.h"
-
namespace folly {
//////////////////////////////////////////////////////////////////////
namespace detail {
/*
- * A helper object for the condended case. Starts off with eager
+ * A helper object for the contended case. Starts off with eager
* spinning, and falls back to sleeping for small quantums.
*/
class Sleeper {
* linux this varies by kernel version from 1ms to 10ms).
*/
struct timespec ts = { 0, 500000 };
- nanosleep(&ts, NULL);
+ nanosleep(&ts, nullptr);
}
}
};
* init(), since the free state is guaranteed to be all-bits zero.
*
* This class should be kept a POD, so we can use it in other packed
- * structs (gcc does not allow __attribute__((packed)) on structs that
+ * structs (gcc does not allow __attribute__((__packed__)) on structs that
* contain non-POD data). This means avoid adding a constructor, or
* making some members private, etc.
*/
struct MicroSpinLock {
enum { FREE = 0, LOCKED = 1 };
+ // lock_ can't be std::atomic<> to preserve POD-ness.
uint8_t lock_;
- /*
- * Atomically move lock_ from "compare" to "newval". Return boolean
- * success. Do not play on or around.
- */
- bool cas(uint8_t compare, uint8_t newVal) {
- bool out;
- asm volatile("lock; cmpxchgb %2, (%3);"
- "setz %0;"
- : "=r" (out)
- : "a" (compare), // cmpxchgb constrains this to be in %al
- "q" (newVal), // Needs to be byte-accessible
- "r" (&lock_)
- : "memory", "flags");
- return out;
- }
-
// Initialize this MSL. It is unnecessary to call this if you
// zero-initialize the MicroSpinLock.
void init() {
- lock_ = FREE;
+ payload()->store(FREE);
}
bool try_lock() {
void lock() {
detail::Sleeper sleeper;
do {
- while (lock_ != FREE) {
- asm volatile("" : : : "memory");
+ while (payload()->load() != FREE) {
sleeper.wait();
}
} while (!try_lock());
- DCHECK(lock_ == LOCKED);
+ DCHECK(payload()->load() == LOCKED);
}
void unlock() {
- CHECK(lock_ == LOCKED);
- asm volatile("" : : : "memory");
- lock_ = FREE; // release barrier on x86
+ CHECK(payload()->load() == LOCKED);
+ payload()->store(FREE, std::memory_order_release);
+ }
+
+ private:
+ std::atomic<uint8_t>* payload() {
+ return reinterpret_cast<std::atomic<uint8_t>*>(&this->lock_);
+ }
+
+ bool cas(uint8_t compare, uint8_t newVal) {
+ return std::atomic_compare_exchange_strong_explicit(payload(), &compare, newVal,
+ std::memory_order_acquire,
+ std::memory_order_relaxed);
}
};
char padding_[FOLLY_CACHE_LINE_SIZE];
std::array<PaddedSpinLock, N> data_;
-} __attribute__((aligned));
+} __attribute__((__aligned__));
//////////////////////////////////////////////////////////////////////