diff --git a/folly/SpinLock.h b/folly/SpinLock.h
index 2747a4db..b87e58a6 100644
--- a/folly/SpinLock.h
+++ b/folly/SpinLock.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2014 Facebook, Inc.
+ * Copyright 2017 Facebook, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,17 +14,27 @@
  * limitations under the License.
  */
 
-#pragma once
+/*
+ * N.B. You most likely do _not_ want to use SpinLock or any other
+ * kind of spinlock. Use std::mutex instead.
+ *
+ * In short, spinlocks in preemptive multi-tasking operating systems
+ * have serious problems and fast mutexes like std::mutex are almost
+ * certainly the better choice, because letting the OS scheduler put a
+ * thread to sleep is better for system responsiveness and throughput
+ * than wasting a timeslice repeatedly querying a lock held by a
+ * thread that's blocked, and you can't prevent userspace
+ * programs blocking.
+ *
+ * Spinlocks in an operating system kernel make much more sense than
+ * they do in userspace.
+ */
 
-#include <boost/noncopyable.hpp>
-#include <folly/Portability.h>
+#pragma once
 
-// This is a wrapper SpinLock implementation that works around the
-// x64 limitation of the base folly MicroSpinLock. If that is available, this
-// simply thinly wraps it. Otherwise, it uses the simplest analog available on
-// iOS (or 32-bit Mac) or, failing that, POSIX (on Android et. al.)
+#include <boost/noncopyable.hpp>
 
-#if __x86_64__
+#include <folly/Portability.h>
 #include <folly/SmallLocks.h>
 
 namespace folly {
@@ -40,83 +50,28 @@ class SpinLock {
   FOLLY_ALWAYS_INLINE void unlock() const {
     lock_.unlock();
   }
-  FOLLY_ALWAYS_INLINE bool trylock() const {
+  FOLLY_ALWAYS_INLINE bool try_lock() const {
     return lock_.try_lock();
   }
- private:
-  mutable folly::MicroSpinLock lock_;
-};
-
-}
-
-#elif __APPLE__
-#include <libkern/OSAtomic.h>
-
-namespace folly {
-
-class SpinLock {
- public:
-  FOLLY_ALWAYS_INLINE SpinLock() : lock_(0) {}
-  FOLLY_ALWAYS_INLINE void lock() const {
-    OSSpinLockLock(&lock_);
-  }
-  FOLLY_ALWAYS_INLINE void unlock() const {
-    OSSpinLockUnlock(&lock_);
-  }
-  FOLLY_ALWAYS_INLINE bool trylock() const {
-    return OSSpinLockTry(&lock_);
-  }
- private:
-  mutable OSSpinLock lock_;
-};
-
-}
-
-#else
-#include <pthread.h>
-#include <glog/logging.h>
-namespace folly {
-
-class SpinLock {
- public:
-  FOLLY_ALWAYS_INLINE SpinLock() {
-    pthread_mutex_init(&lock_, nullptr);
-  }
-  void lock() const {
-    int rc = pthread_mutex_lock(&lock_);
-    CHECK_EQ(0, rc);
-  }
-  FOLLY_ALWAYS_INLINE void unlock() const {
-    int rc = pthread_mutex_unlock(&lock_);
-    CHECK_EQ(0, rc);
-  }
-  FOLLY_ALWAYS_INLINE bool trylock() const {
-    int rc = pthread_mutex_trylock(&lock_);
-    CHECK_GE(rc, 0);
-    return rc == 0;
-  }
  private:
-  mutable pthread_mutex_t lock_;
+  mutable folly::MicroSpinLock lock_;
 };
 
-}
-
-#endif
-
-namespace folly {
-
-class SpinLockGuard : private boost::noncopyable {
+template <typename LOCK>
+class SpinLockGuardImpl : private boost::noncopyable {
  public:
-  FOLLY_ALWAYS_INLINE explicit SpinLockGuard(SpinLock& lock) :
+  FOLLY_ALWAYS_INLINE explicit SpinLockGuardImpl(LOCK& lock) :
     lock_(lock) {
     lock_.lock();
   }
-  FOLLY_ALWAYS_INLINE ~SpinLockGuard() {
+  FOLLY_ALWAYS_INLINE ~SpinLockGuardImpl() {
     lock_.unlock();
   }
 
  private:
-  SpinLock& lock_;
+  LOCK& lock_;
 };
 
+typedef SpinLockGuardImpl<SpinLock> SpinLockGuard;
+
 }
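
For reference, a minimal usage sketch of the API after this patch, not part of the patch itself. It assumes folly is built and <folly/SpinLock.h> is on the include path; the counters and helper functions below are hypothetical names for illustration. It exercises the renamed try_lock() (callers still spelling trylock() will need updating), the SpinLockGuard typedef (now SpinLockGuardImpl<SpinLock>), and the std::mutex/std::lock_guard alternative that the new header comment recommends for most userspace code.

// Minimal usage sketch (illustrative only, not part of folly or this patch).
#include <mutex>
#include <thread>
#include <vector>

#include <folly/SpinLock.h>

folly::SpinLock spinLock;   // after this patch, always wraps folly::MicroSpinLock
std::mutex mtx;             // the alternative the new header comment recommends
int spinCounter = 0;        // guarded by spinLock
int mutexCounter = 0;       // guarded by mtx

void incrementWithSpinLock() {
  // SpinLockGuard is now a typedef for SpinLockGuardImpl<SpinLock>: it locks
  // in its constructor and unlocks in its destructor (RAII).
  folly::SpinLockGuard guard(spinLock);
  ++spinCounter;
}

void incrementWithMutex() {
  // Equivalent critical section with std::mutex, per the N.B. comment's
  // advice for general userspace code.
  std::lock_guard<std::mutex> guard(mtx);
  ++mutexCounter;
}

bool tryIncrementOnce() {
  // try_lock() (renamed from trylock() in this patch) returns true only if
  // the lock was acquired; the caller is then responsible for unlocking.
  if (!spinLock.try_lock()) {
    return false;
  }
  ++spinCounter;
  spinLock.unlock();
  return true;
}

int main() {
  std::vector<std::thread> threads;
  for (int i = 0; i < 4; ++i) {
    threads.emplace_back([] {
      for (int j = 0; j < 1000; ++j) {
        incrementWithSpinLock();
        incrementWithMutex();
      }
    });
  }
  for (auto& t : threads) {
    t.join();
  }
  tryIncrementOnce();  // uncontended here, so it succeeds
  return (spinCounter == 4001 && mutexCounter == 4000) ? 0 : 1;
}

Because the guard is now templated on the lock type, existing SpinLockGuard callers keep compiling unchanged through the typedef, while the same RAII guard can wrap any lock exposing lock()/unlock().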