/*
- * Copyright 2016 Facebook, Inc.
+ * Copyright 2017 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* @author Xin Liu <xliux@fb.com>
*/
-#ifndef FOLLY_RWSPINLOCK_H_
-#define FOLLY_RWSPINLOCK_H_
+#pragma once
#include <folly/Portability.h>
+#include <folly/portability/Asm.h>
#if defined(__GNUC__) && \
    (defined(__i386) || FOLLY_X64 || \
     defined(ARCH_K8))
#define RW_SPINLOCK_USE_X86_INTRINSIC_
#include <x86intrin.h>
#elif defined(_MSC_VER) && defined(FOLLY_X64)
#define RW_SPINLOCK_USE_X86_INTRINSIC_
#else
#undef RW_SPINLOCK_USE_X86_INTRINSIC_
#endif
// iOS doesn't define _mm_cvtsi64_si128 and friends
-#if (FOLLY_SSE >= 2) && !TARGET_OS_IPHONE
+#if (FOLLY_SSE >= 2) && !FOLLY_MOBILE
#define RW_SPINLOCK_USE_SSE_INSTRUCTIONS_
#else
#undef RW_SPINLOCK_USE_SSE_INSTRUCTIONS_
#endif
+#include <algorithm>
#include <atomic>
#include <string>
-#include <algorithm>
+#include <thread>
-#include <sched.h>
#include <glog/logging.h>
#include <folly/Likely.h>
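// The blocking acquire methods below all share one backoff pattern: spin on
// the corresponding non-blocking try_* call, and after ~1000 failed attempts
// start yielding the timeslice so a descheduled lock holder can run and
// release the lock instead of being starved by the spinners.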
void lock() {
int count = 0;
while (!LIKELY(try_lock())) {
- if (++count > 1000) sched_yield();
+ if (++count > 1000) std::this_thread::yield();
}
}
void lock_shared() {
int count = 0;
while (!LIKELY(try_lock_shared())) {
- if (++count > 1000) sched_yield();
+ if (++count > 1000) std::this_thread::yield();
}
}
void lock_upgrade() {
int count = 0;
while (!try_lock_upgrade()) {
- if (++count > 1000) sched_yield();
+ if (++count > 1000) std::this_thread::yield();
}
}
void unlock_upgrade_and_lock() {
int64_t count = 0;
while (!try_unlock_upgrade_and_lock()) {
- if (++count > 1000) sched_yield();
+ if (++count > 1000) std::this_thread::yield();
}
}
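// A minimal usage sketch of the upgrade path above (hypothetical caller
// code; assumes the surrounding folly::RWSpinLock class):
//
//   folly::RWSpinLock l;
//   l.lock_upgrade();               // at most one thread holds UPGRADED
//   if (/* a write turns out to be needed */ true) {
//     l.unlock_upgrade_and_lock();  // trade UPGRADED for WRITER, spinning
//     /* ... mutate shared state ... */ // until current readers drain
//     l.unlock();
//   } else {
//     l.unlock_upgrade();           // no write needed after all
//   }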
class ReadHolder {
public:
- explicit ReadHolder(RWSpinLock* lock = nullptr) : lock_(lock) {
+ explicit ReadHolder(RWSpinLock* lock) : lock_(lock) {
if (lock_) lock_->lock_shared();
}
class UpgradedHolder {
public:
- explicit UpgradedHolder(RWSpinLock* lock = nullptr) : lock_(lock) {
+ explicit UpgradedHolder(RWSpinLock* lock) : lock_(lock) {
if (lock_) lock_->lock_upgrade();
}
class WriteHolder {
public:
- explicit WriteHolder(RWSpinLock* lock = nullptr) : lock_(lock) {
+ explicit WriteHolder(RWSpinLock* lock) : lock_(lock) {
if (lock_) lock_->lock();
}
RWSpinLock* lock_;
};
- // Synchronized<> adaptors
- friend void acquireRead(RWSpinLock& l) { return l.lock_shared(); }
- friend void acquireReadWrite(RWSpinLock& l) { return l.lock(); }
- friend void releaseRead(RWSpinLock& l) { return l.unlock_shared(); }
- friend void releaseReadWrite(RWSpinLock& l) { return l.unlock(); }
-
private:
std::atomic<int32_t> bits_;
};
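// A minimal usage sketch of the RAII holders above (hypothetical caller
// code; assumes the holders' destructors release the lock, as in folly, and
// `sharedValue` is an illustrative name, not part of this header):
//
//   folly::RWSpinLock spinlock;
//   int sharedValue = 0;
//
//   {
//     folly::RWSpinLock::ReadHolder guard(&spinlock);   // lock_shared()
//     int copy = sharedValue;       // concurrent readers are fine
//     (void)copy;
//   }                               // guard releases the shared lock
//   {
//     folly::RWSpinLock::WriteHolder guard(&spinlock);  // exclusive lock()
//     ++sharedValue;
//   }                               // guard releases the write lock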
#ifdef RW_SPINLOCK_USE_SSE_INSTRUCTIONS_
static __m128i make128(const uint16_t v[4]) {
- return _mm_set_epi16(0, 0, 0, 0, v[3], v[2], v[1], v[0]);
+ return _mm_set_epi16(0, 0, 0, 0,
+ short(v[3]), short(v[2]), short(v[1]), short(v[0]));
}
static inline __m128i fromInteger(uint64_t from) {
- return _mm_cvtsi64_si128(from);
+ return _mm_cvtsi64_si128(int64_t(from));
}
static inline uint64_t toInteger(__m128i in) {
- return _mm_cvtsi128_si64(in);
+ return uint64_t(_mm_cvtsi128_si64(in));
}
static inline uint64_t addParallel(__m128i in, __m128i kDelta) {
return toInteger(_mm_add_epi16(in, kDelta));
#ifdef RW_SPINLOCK_USE_SSE_INSTRUCTIONS_
static __m128i make128(const uint8_t v[4]) {
- return _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, v[3], v[2], v[1], v[0]);
+ return _mm_set_epi8(
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ char(v[3]), char(v[2]), char(v[1]), char(v[0]));
}
static inline __m128i fromInteger(uint32_t from) {
- return _mm_cvtsi32_si128(from);
+ return _mm_cvtsi32_si128(int32_t(from));
}
static inline uint32_t toInteger(__m128i in) {
- return _mm_cvtsi128_si32(in);
+ return uint32_t(_mm_cvtsi128_si32(in));
}
static inline uint32_t addParallel(__m128i in, __m128i kDelta) {
return toInteger(_mm_add_epi8(in, kDelta));
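// Why SIMD here: _mm_add_epi16/_mm_add_epi8 add each lane independently,
// with wraparound and no carry between lanes, so a single instruction can
// bump several packed ticket fields at once. A scalar sketch of what the
// 8-bit addParallel() computes (illustrative only, not part of the API):
//
//   uint32_t addParallelScalar(uint32_t in, uint32_t delta) {
//     uint32_t out = 0;
//     for (int i = 0; i < 4; ++i) {
//       uint32_t a = (in >> (8 * i)) & 0xffu;    // extract one lane
//       uint32_t b = (delta >> (8 * i)) & 0xffu;
//       out |= ((a + b) & 0xffu) << (8 * i);     // add, discard the carry
//     }
//     return out;
//   }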
* turns.
*/
void writeLockAggressive() {
- // sched_yield() is needed here to avoid a pathology if the number
+ // std::this_thread::yield() is needed here to avoid a pathology if the number
// of threads attempting concurrent writes is >= the number of real
// cores allocated to this process. This is less likely than the
// corresponding situation in lock_shared(), but we still want to
QuarterInt val = __sync_fetch_and_add(&ticket.users, 1);
while (val != load_acquire(&ticket.write)) {
asm_volatile_pause();
- if (UNLIKELY(++count > 1000)) sched_yield();
+ if (UNLIKELY(++count > 1000)) std::this_thread::yield();
}
}
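// Concretely: with N real cores and >= N threads spinning in the loop above,
// the thread whose ticket is next may be descheduled while every core is
// busy spinning; yielding periodically lets the scheduler run it so the
// ticket queue can drain.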
// there are a lot of competing readers. The aggressive spinning
// can help to avoid starving writers.
//
- // We don't worry about sched_yield() here because the caller
+ // We don't worry about std::this_thread::yield() here because the caller
// has already explicitly abandoned fairness.
while (!try_lock()) {}
}
}
void lock_shared() {
- // sched_yield() is important here because we can't grab the
+ // std::this_thread::yield() is important here because we can't grab the
// shared lock if there is a pending writeLockAggressive, so we
// need to let threads that already have a shared lock complete
int count = 0;
while (!LIKELY(try_lock_shared())) {
asm_volatile_pause();
- if (UNLIKELY((++count & 1023) == 0)) sched_yield();
+ if (UNLIKELY((++count & 1023) == 0)) std::this_thread::yield();
}
}
ReadHolder(ReadHolder const&) = delete;
ReadHolder& operator=(ReadHolder const&) = delete;
- explicit ReadHolder(RWSpinLock *lock = nullptr) :
- lock_(lock) {
+ explicit ReadHolder(RWSpinLock* lock) : lock_(lock) {
if (lock_) lock_->lock_shared();
}
WriteHolder(WriteHolder const&) = delete;
WriteHolder& operator=(WriteHolder const&) = delete;
- explicit WriteHolder(RWSpinLock *lock = nullptr) : lock_(lock) {
+ explicit WriteHolder(RWSpinLock* lock) : lock_(lock) {
if (lock_) lock_->lock();
}
explicit WriteHolder(RWSpinLock& lock) : lock_(&lock) {
friend class ReadHolder;
RWSpinLock* lock_;
};
-
- // Synchronized<> adaptors.
- friend void acquireRead(RWTicketSpinLockT& mutex) {
- mutex.lock_shared();
- }
- friend void acquireReadWrite(RWTicketSpinLockT& mutex) {
- mutex.lock();
- }
- friend void releaseRead(RWTicketSpinLockT& mutex) {
- mutex.unlock_shared();
- }
- friend void releaseReadWrite(RWTicketSpinLockT& mutex) {
- mutex.unlock();
- }
};
typedef RWTicketSpinLockT<32> RWTicketSpinLock32;
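// A short usage sketch for the ticket lock (hypothetical caller code, using
// the RWTicketSpinLock32 typedef above):
//
//   folly::RWTicketSpinLock32 tlock;
//   tlock.lock_shared();          // FIFO ticket acquire, shared mode
//   /* ... read ... */
//   tlock.unlock_shared();
//   tlock.writeLockAggressive();  // writer path that trades fairness for
//   /* ... write ... */           // throughput, as discussed above
//   tlock.unlock();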
#ifdef RW_SPINLOCK_USE_X86_INTRINSIC_
#undef RW_SPINLOCK_USE_X86_INTRINSIC_
#endif
-
-#endif // FOLLY_RWSPINLOCK_H_