/*
- * Copyright 2012 Facebook, Inc.
+ * Copyright 2015 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include <cstdlib>
#include <pthread.h>
#include <mutex>
+#include <atomic>
#include <glog/logging.h>
+#include <folly/Portability.h>
-#ifndef __x86_64__
-# error "SmallLocks.h is currently x64-only."
+#if !FOLLY_X64 && !FOLLY_A64
+# error "SmallLocks.h is currently x64 and aarch64 only."
#endif
-#include "folly/Portability.h"
-
namespace folly {
//////////////////////////////////////////////////////////////////////
namespace detail {
/*
- * A helper object for the condended case. Starts off with eager
+ * A helper object for the contended case. Starts off with eager
* spinning, and falls back to sleeping for small quantums.
*/
class Sleeper {
void wait() {
if (spinCount < kMaxActiveSpin) {
++spinCount;
- asm volatile("pause");
+ asm_volatile_pause();
} else {
/*
* Always sleep 0.5ms, assuming this will make the kernel put
* us down for whatever its minimum timer resolution is (in
* linux this varies by kernel version from 1ms to 10ms).
*/
struct timespec ts = { 0, 500000 };
- nanosleep(&ts, NULL);
+ nanosleep(&ts, nullptr);
}
}
};
* init(), since the free state is guaranteed to be all-bits zero.
*
* This class should be kept a POD, so we can use it in other packed
- * structs (gcc does not allow __attribute__((packed)) on structs that
+ * structs (gcc does not allow __attribute__((__packed__)) on structs that
* contain non-POD data). This means avoid adding a constructor, or
* making some members private, etc.
*/
struct MicroSpinLock {
enum { FREE = 0, LOCKED = 1 };
+ // lock_ can't be std::atomic<> to preserve POD-ness.
uint8_t lock_;
- /*
- * Atomically move lock_ from "compare" to "newval". Return boolean
- * success. Do not play on or around.
- */
- bool cas(uint8_t compare, uint8_t newVal) {
- bool out;
- asm volatile("lock; cmpxchgb %2, (%3);"
- "setz %0;"
- : "=r" (out)
- : "a" (compare), // cmpxchgb constrains this to be in %al
- "q" (newVal), // Needs to be byte-accessible
- "r" (&lock_)
- : "memory", "flags");
- return out;
- }
-
// Initialize this MSL. It is unnecessary to call this if you
// zero-initialize the MicroSpinLock.
void init() {
- lock_ = FREE;
+ payload()->store(FREE);
}
bool try_lock() {
void lock() {
detail::Sleeper sleeper;
do {
- while (lock_ != FREE) {
- asm volatile("" : : : "memory");
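+ // Test-and-test-and-set: spin on loads and only re-attempt the CAS in
+ // try_lock() once the lock looks free, so waiters keep the cache line in
+ // shared state instead of hammering it with writes.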
+ while (payload()->load() != FREE) {
sleeper.wait();
}
} while (!try_lock());
- DCHECK(lock_ == LOCKED);
+ DCHECK(payload()->load() == LOCKED);
}
void unlock() {
- CHECK(lock_ == LOCKED);
- asm volatile("" : : : "memory");
- lock_ = FREE; // release barrier on x86
+ CHECK(payload()->load() == LOCKED);
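+ // The release store pairs with the acquire CAS in cas(), making writes
+ // done inside the critical section visible to the next thread that locks.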
+ payload()->store(FREE, std::memory_order_release);
+ }
+
+ private:
+ std::atomic<uint8_t>* payload() {
+ return reinterpret_cast<std::atomic<uint8_t>*>(&this->lock_);
+ }
+
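+ // Atomically change lock_ from "compare" to "newVal"; returns whether it
+ // succeeded. Acquire on success pairs with the release store in unlock();
+ // a failed exchange needs no ordering, so relaxed suffices there.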
+ bool cas(uint8_t compare, uint8_t newVal) {
+ return std::atomic_compare_exchange_strong_explicit(payload(), &compare, newVal,
+ std::memory_order_acquire,
+ std::memory_order_relaxed);
}
};
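+
+/*
+ * Minimal usage sketch (the Counter struct below is illustrative only): a
+ * zero-initialized MicroSpinLock is already FREE, so init() is only needed
+ * when the containing memory was not zero-initialized.
+ *
+ *   struct Counter {
+ *     MicroSpinLock lock_; // keep the containing struct a POD
+ *     uint64_t value_;
+ *   };
+ *
+ *   Counter c = {}; // zero-init leaves lock_ in the FREE state
+ *   c.lock_.lock();
+ *   ++c.value_;
+ *   c.lock_.unlock();
+ */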
bool try_lock() const {
bool ret = false;
+#if FOLLY_X64
#define FB_DOBTS(size) \
asm volatile("lock; bts" #size " %1, (%2); setnc %0" \
: "=r" (ret) \
}
#undef FB_DOBTS
+#elif FOLLY_A64
+ // __atomic_fetch_or returns the previous value of lock_, so the lock was
+ // acquired only if the lock bit was clear before the fetch_or.
+ ret = !(__atomic_fetch_or(&lock_, 1 << Bit, __ATOMIC_SEQ_CST) & (1 << Bit));
+#else
+# error "x64 aarch64 only"
+#endif
return ret;
}
* integer.
*/
void unlock() const {
+#if FOLLY_X64
#define FB_DOBTR(size) \
asm volatile("lock; btr" #size " %0, (%1)" \
: \
}
#undef FB_DOBTR
+#elif FOLLY_A64
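+ // fetch_and with ~(1 << Bit) clears only the lock bit; data stored in the
+ // remaining bits of the integer is preserved.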
+ __atomic_fetch_and(&lock_, ~(1 << Bit), __ATOMIC_SEQ_CST);
+#else
+# error "x64 aarch64 only"
+#endif
}
};
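+
+/*
+ * Minimal usage sketch, assuming the enclosing bit-in-an-integer spin lock
+ * template (PicoSpinLock in folly; its declaration falls outside this hunk).
+ * The lock occupies a single bit, so the other bits remain usable as data.
+ *
+ *   PicoSpinLock<uint32_t> l;
+ *   l.init(); // zeroed data, lock bit clear
+ *   l.lock();
+ *   // ... critical section ...
+ *   l.unlock();
+ */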
private:
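+ // Pad each lock out to a full cache line so locks at adjacent indices in
+ // data_ do not false-share; padding_ similarly keeps data_ off the cache
+ // line of whatever precedes this object in memory.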
struct PaddedSpinLock {
- PaddedSpinLock() : lock() { }
+ PaddedSpinLock() : lock() {}
T lock;
char padding[FOLLY_CACHE_LINE_SIZE - sizeof(T)];
};
char padding_[FOLLY_CACHE_LINE_SIZE];
std::array<PaddedSpinLock, N> data_;
-} __attribute__((aligned));
+} __attribute__((__aligned__));
//////////////////////////////////////////////////////////////////////