/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*************************************************

IF YOU HAVE TO ASK, NO, YOU DO NOT WANT A SPINLOCK.

Yes, even if a microbench shows that a spinlock is faster, you still probably
don't want one.

Spinlocks in general are a big problem on systems for which you cannot disable
preemption, like normal user-space code running on POSIX and Windows
platforms. If the thread holding a spinlock is preempted, another thread
trying to acquire the lock and pounding the lock variable has no idea that
it's spinning in vain. Some spinlock implementations use sched_yield or
similar to try to make this problem less severe --- so we don't use the rest
of our timeslice to pointlessly read a variable --- but the overall result is
still poor: if the thread holding the lock sleeps (which any thread can do on
a demand-paging system), we'll sched_yield over and over again, still
pointlessly pounding on the lock.

You really want a plain old mutex. Regular mutexes will spin for a little
while, then go to sleep. While sleeping, threads use no CPU resources and do
not cause scheduler contention.

There are exceptions to the above warning. If you have to ask, no, your
code doesn't qualify.

STOP USING SPINLOCKS IN ORDINARY CODE.

**************************************************/

/*
 * Two Read-Write spin lock implementations.
 *
 * Ref: http://locklessinc.com/articles/locks
 *
 * Both locks here are faster than pthread_rwlock and have very low
 * overhead (usually 20-30ns). They don't use any system mutexes and
 * are very compact (4/8 bytes), so they are suitable for per-instance
 * locking, particularly when contention is not expected.
 *
 * In most cases, RWSpinLock is a reasonable choice. It has minimal
 * overhead, and comparable contention performance when the number of
 * competing threads is less than or equal to the number of logical
 * CPUs. Even as the number of threads gets larger, RWSpinLock can
 * still be very competitive in READ, although it is slower on WRITE,
 * and also inherently unfair to writers.
 *
 * RWTicketSpinLock shows more balanced READ/WRITE performance. If
 * your application really needs a lot more threads, and a
 * higher-priority writer, prefer one of the RWTicketSpinLock locks.
 *
 * Caveats:
 *
 *   RWTicketSpinLock locks can only be used with GCC on x86/x86-64
 *   based systems.
 *
 *   RWTicketSpinLock<32> only allows up to 2^8 - 1 concurrent
 *   readers and writers.
 *
 *   RWTicketSpinLock<64> only allows up to 2^16 - 1 concurrent
 *   readers and writers.
 *
 *   RWTicketSpinLock<..., true> (kFavorWriter = true, that is, strict
 *   writer priority) is NOT reentrant, even for lock_shared().
 *
 *   The lock will not grant any new shared (read) accesses while a thread
 *   attempting to acquire the lock in write mode is blocked. (That is,
 *   if the lock is held in shared mode by N threads, and a thread attempts
 *   to acquire it in write mode, no one else can acquire it in shared mode
 *   until these N threads release the lock and then the blocked thread
 *   acquires and releases the exclusive lock.) This also applies to
 *   attempts to reacquire the lock in shared mode by threads that already
 *   hold it in shared mode, making the lock non-reentrant.
 *
 *   RWSpinLock handles 2^30 - 1 concurrent readers.
 *
 * @author Xin Liu <xliux@fb.com>
 */

#ifndef FOLLY_RWSPINLOCK_H_
#define FOLLY_RWSPINLOCK_H_

/*
========================================================================
Benchmark on (Intel(R) Xeon(R) CPU L5630 @ 2.13GHz) 8 cores(16 HTs)
========================================================================

------------------------------------------------------------------------------
1. Single thread benchmark (read/write lock + unlock overhead)
Benchmark                                   Iters   Total t   t/iter  iter/sec
-------------------------------------------------------------------------------
*      BM_RWSpinLockRead                    100000  1.786 ms  17.86 ns   53.4M
+30.5% BM_RWSpinLockWrite                   100000  2.331 ms  23.31 ns  40.91M
+85.7% BM_RWTicketSpinLock32Read            100000  3.317 ms  33.17 ns  28.75M
+96.0% BM_RWTicketSpinLock32Write           100000    3.5 ms     35 ns  27.25M
+85.6% BM_RWTicketSpinLock64Read            100000  3.315 ms  33.15 ns  28.77M
+96.0% BM_RWTicketSpinLock64Write           100000    3.5 ms     35 ns  27.25M
+85.7% BM_RWTicketSpinLock32FavorWriterRead 100000  3.317 ms  33.17 ns  28.75M
+29.7% BM_RWTicketSpinLock32FavorWriterWrite 100000 2.316 ms  23.16 ns  41.18M
+85.3% BM_RWTicketSpinLock64FavorWriterRead 100000  3.309 ms  33.09 ns  28.82M
+30.2% BM_RWTicketSpinLock64FavorWriterWrite 100000 2.325 ms  23.25 ns  41.02M
+ 175% BM_PThreadRWMutexRead                100000  4.917 ms  49.17 ns   19.4M
+ 166% BM_PThreadRWMutexWrite               100000  4.757 ms  47.57 ns  20.05M

------------------------------------------------------------------------------
2. Contention Benchmark      90% read  10% write
Benchmark                    hits       average    min       max        sigma
------------------------------------------------------------------------------
---------- 8  threads ------------
RWSpinLock       Write       142666     220ns      78ns      40.8us     269ns
RWSpinLock       Read        1282297    222ns      80ns      37.7us     248ns
RWTicketSpinLock Write       85692      209ns      71ns      17.9us     252ns
RWTicketSpinLock Read        769571     215ns      78ns      33.4us     251ns
pthread_rwlock_t Write       84248      2.48us     99ns      269us      8.19us
pthread_rwlock_t Read        761646     933ns      101ns     374us      3.25us

---------- 16 threads ------------
RWSpinLock       Write       124236     237ns      78ns      261us      801ns
RWSpinLock       Read        1115807    236ns      78ns      2.27ms     2.17us
RWTicketSpinLock Write       81781      231ns      71ns      31.4us     351ns
RWTicketSpinLock Read        734518     238ns      78ns      73.6us     379ns
pthread_rwlock_t Write       83363      7.12us     99ns      785us      28.1us
pthread_rwlock_t Read        754978     2.18us     101ns     1.02ms     14.3us

---------- 50 threads ------------
RWSpinLock       Write       131142     1.37us     82ns      7.53ms     68.2us
RWSpinLock       Read        1181240    262ns      78ns      6.62ms     12.7us
RWTicketSpinLock Write       83045      397ns      73ns      7.01ms     31.5us
RWTicketSpinLock Read        744133     386ns      78ns      11ms       31.4us
pthread_rwlock_t Write       80849      112us      103ns     4.52ms     263us
pthread_rwlock_t Read        728698     24us       101ns     7.28ms     194us
*/

#include <folly/Portability.h>

#if defined(__GNUC__) && \
    (defined(__i386) || FOLLY_X64 || \
     defined(ARCH_K8))
# define RW_SPINLOCK_USE_X86_INTRINSIC_
# include <x86intrin.h>
#elif defined(_MSC_VER) && defined(FOLLY_X64)
# define RW_SPINLOCK_USE_X86_INTRINSIC_
#else
# undef RW_SPINLOCK_USE_X86_INTRINSIC_
#endif

// iOS doesn't define _mm_cvtsi64_si128 and friends
#if (FOLLY_SSE >= 2) && !TARGET_OS_IPHONE
#define RW_SPINLOCK_USE_SSE_INSTRUCTIONS_
#else
#undef RW_SPINLOCK_USE_SSE_INSTRUCTIONS_
#endif

#include <algorithm>
#include <atomic>
#include <cstdint>

#include <sched.h>

#include <glog/logging.h>

#include <folly/Likely.h>

namespace folly {

/*
 * A simple, small (4-bytes), but unfair rwlock. Use it when you want
 * a nice writer and don't expect a lot of write/read contention, or
 * when you need small rwlocks since you are creating a large number
 * of them.
 *
 * Note that the unfairness here is extreme: if the lock is
 * continually accessed for read, writers will never get a chance. If
 * the lock can be that highly contended this class is probably not an
 * ideal choice anyway.
 *
 * It currently implements most of the Lockable, SharedLockable and
 * UpgradeLockable concepts except the TimedLockable related
 * locking/unlocking interfaces.
 */
class RWSpinLock {
  enum : int32_t { READER = 4, UPGRADED = 2, WRITER = 1 };
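
  // Layout of bits_, implied by the enum values above: bit 0 is the WRITER
  // bit, bit 1 is the UPGRADED bit, and the remaining 30 bits count readers
  // in units of READER (4). A value of zero means the lock is free.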

 public:
  constexpr RWSpinLock() : bits_(0) {}

  RWSpinLock(RWSpinLock const&) = delete;
  RWSpinLock& operator=(RWSpinLock const&) = delete;

  // Lockable Concept
  void lock() {
    int count = 0;
    while (!LIKELY(try_lock())) {
      if (++count > 1000) sched_yield();
    }
  }

  // Writer is responsible for clearing up both the UPGRADED and WRITER bits.
  void unlock() {
    static_assert(READER > WRITER + UPGRADED, "wrong bits!");
    bits_.fetch_and(~(WRITER | UPGRADED), std::memory_order_release);
  }

  // SharedLockable Concept
  void lock_shared() {
    int count = 0;
    while (!LIKELY(try_lock_shared())) {
      if (++count > 1000) sched_yield();
    }
  }

  void unlock_shared() {
    bits_.fetch_add(-READER, std::memory_order_release);
  }

  // Downgrade the lock from writer status to reader status.
  void unlock_and_lock_shared() {
    bits_.fetch_add(READER, std::memory_order_acquire);
    unlock();
  }
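
  // Downgrade usage sketch (illustrative; not part of the original header):
  //
  //   RWSpinLock lock;
  //   lock.lock();                    // write some state exclusively
  //   lock.unlock_and_lock_shared();  // atomically become a reader: other
  //                                   // readers may now enter, writers can't
  //   // ... read the state that was just written ...
  //   lock.unlock_shared();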

  // UpgradeLockable Concept
  void lock_upgrade() {
    int count = 0;
    while (!try_lock_upgrade()) {
      if (++count > 1000) sched_yield();
    }
  }

  void unlock_upgrade() {
    bits_.fetch_add(-UPGRADED, std::memory_order_acq_rel);
  }

  // unlock upgrade and try to acquire write lock
  void unlock_upgrade_and_lock() {
    int count = 0;
    while (!try_unlock_upgrade_and_lock()) {
      if (++count > 1000) sched_yield();
    }
  }

  // unlock upgrade and read lock atomically
  void unlock_upgrade_and_lock_shared() {
    bits_.fetch_add(READER - UPGRADED, std::memory_order_acq_rel);
  }

  // write unlock and upgrade lock atomically
  void unlock_and_lock_upgrade() {
    // need to do it in two steps here -- as the UPGRADED bit might be OR-ed
    // at the same time when other threads are trying to do
    // try_lock_upgrade().
    bits_.fetch_or(UPGRADED, std::memory_order_acquire);
    bits_.fetch_add(-WRITER, std::memory_order_release);
  }
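
  // Upgrade usage sketch (illustrative; checkCache()/fillCache() are
  // hypothetical helpers, not part of this header):
  //
  //   RWSpinLock lock;
  //   lock.lock_upgrade();               // blocks new readers and writers,
  //                                      // but lets existing readers drain
  //   if (!checkCache()) {               // inspect the shared state
  //     lock.unlock_upgrade_and_lock();  // promote to exclusive once the
  //                                      // remaining readers have left
  //     fillCache();                     // mutate under the write lock
  //     lock.unlock();                   // clears WRITER and UPGRADED bits
  //   } else {
  //     lock.unlock_upgrade();
  //   }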

  // Attempt to acquire writer permission. Return false if we didn't get it.
  bool try_lock() {
    int32_t expect = 0;
    return bits_.compare_exchange_strong(expect, WRITER,
      std::memory_order_acq_rel);
  }

  // Try to get reader permission on the lock. This can fail if we
  // find out someone is a writer or upgrader.
  // Setting the UPGRADED bit would allow a writer-to-be to indicate
  // its intention to write and block any new readers while waiting
  // for existing readers to finish and release their read locks. This
  // helps avoid starving writers (promoted from upgraders).
  bool try_lock_shared() {
    // fetch_add is considerably (100%) faster than compare_exchange,
    // so here we are optimizing for the common (lock success) case.
    int32_t value = bits_.fetch_add(READER, std::memory_order_acquire);
    if (UNLIKELY(value & (WRITER|UPGRADED))) {
      bits_.fetch_add(-READER, std::memory_order_release);
      return false;
    }
    return true;
  }

  // try to unlock upgrade and write lock atomically
  bool try_unlock_upgrade_and_lock() {
    int32_t expect = UPGRADED;
    return bits_.compare_exchange_strong(expect, WRITER,
      std::memory_order_acq_rel);
  }

  // try to acquire an upgradable lock.
  bool try_lock_upgrade() {
    int32_t value = bits_.fetch_or(UPGRADED, std::memory_order_acquire);

    // Note: when failed, we cannot flip the UPGRADED bit back,
    // as in this case there is either another upgrade lock or a write lock.
    // If it's a write lock, the bit will get cleared up when that lock's
    // done with unlock().
    return ((value & (UPGRADED | WRITER)) == 0);
  }

  // mainly for debugging purposes.
  int32_t bits() const { return bits_.load(std::memory_order_acquire); }
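
  // Worked example of the bit arithmetic above: two readers make bits() read
  // 2 * READER == 8; a lone writer reads WRITER == 1; an upgrade holder plus
  // one draining reader reads UPGRADED + READER == 6. try_lock_shared()
  // succeeds only if neither of the two low bits was set before its
  // optimistic fetch_add.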

  class ReadHolder;
  class UpgradedHolder;
  class WriteHolder;

  class ReadHolder {
   public:
    explicit ReadHolder(RWSpinLock* lock = nullptr) : lock_(lock) {
      if (lock_) lock_->lock_shared();
    }

    explicit ReadHolder(RWSpinLock& lock) : lock_(&lock) {
      lock_->lock_shared();
    }

    ReadHolder(ReadHolder&& other) noexcept : lock_(other.lock_) {
      other.lock_ = nullptr;
    }

    // down-grade from an upgrade lock holder
    explicit ReadHolder(UpgradedHolder&& upgraded) : lock_(upgraded.lock_) {
      upgraded.lock_ = nullptr;
      if (lock_) lock_->unlock_upgrade_and_lock_shared();
    }

    explicit ReadHolder(WriteHolder&& writer) : lock_(writer.lock_) {
      writer.lock_ = nullptr;
      if (lock_) lock_->unlock_and_lock_shared();
    }

    ReadHolder& operator=(ReadHolder&& other) {
      using std::swap;
      swap(lock_, other.lock_);
      return *this;
    }

    ReadHolder(const ReadHolder& other) = delete;
    ReadHolder& operator=(const ReadHolder& other) = delete;

    ~ReadHolder() { if (lock_) lock_->unlock_shared(); }

    void reset(RWSpinLock* lock = nullptr) {
      if (lock == lock_) return;
      if (lock_) lock_->unlock_shared();
      lock_ = lock;
      if (lock_) lock_->lock_shared();
    }

    void swap(ReadHolder* other) {
      std::swap(lock_, other->lock_);
    }

   private:
    friend class UpgradedHolder;
    friend class WriteHolder;
    RWSpinLock* lock_;
  };

  class UpgradedHolder {
   public:
    explicit UpgradedHolder(RWSpinLock* lock = nullptr) : lock_(lock) {
      if (lock_) lock_->lock_upgrade();
    }

    explicit UpgradedHolder(RWSpinLock& lock) : lock_(&lock) {
      lock_->lock_upgrade();
    }

    // down-grade from a write lock holder
    explicit UpgradedHolder(WriteHolder&& writer) {
      lock_ = writer.lock_;
      writer.lock_ = nullptr;
      if (lock_) lock_->unlock_and_lock_upgrade();
    }

    UpgradedHolder(UpgradedHolder&& other) noexcept : lock_(other.lock_) {
      other.lock_ = nullptr;
    }

    UpgradedHolder& operator=(UpgradedHolder&& other) {
      using std::swap;
      swap(lock_, other.lock_);
      return *this;
    }

    UpgradedHolder(const UpgradedHolder& other) = delete;
    UpgradedHolder& operator=(const UpgradedHolder& other) = delete;

    ~UpgradedHolder() { if (lock_) lock_->unlock_upgrade(); }

    void reset(RWSpinLock* lock = nullptr) {
      if (lock == lock_) return;
      if (lock_) lock_->unlock_upgrade();
      lock_ = lock;
      if (lock_) lock_->lock_upgrade();
    }

    void swap(UpgradedHolder* other) {
      using std::swap;
      swap(lock_, other->lock_);
    }

   private:
    friend class WriteHolder;
    friend class ReadHolder;
    RWSpinLock* lock_;
  };

  class WriteHolder {
   public:
    explicit WriteHolder(RWSpinLock* lock = nullptr) : lock_(lock) {
      if (lock_) lock_->lock();
    }

    explicit WriteHolder(RWSpinLock& lock) : lock_(&lock) {
      lock_->lock();
    }

    // promoted from an upgrade lock holder
    explicit WriteHolder(UpgradedHolder&& upgraded) {
      lock_ = upgraded.lock_;
      upgraded.lock_ = nullptr;
      if (lock_) lock_->unlock_upgrade_and_lock();
    }

    WriteHolder(WriteHolder&& other) noexcept : lock_(other.lock_) {
      other.lock_ = nullptr;
    }

    WriteHolder& operator=(WriteHolder&& other) {
      using std::swap;
      swap(lock_, other.lock_);
      return *this;
    }

    WriteHolder(const WriteHolder& other) = delete;
    WriteHolder& operator=(const WriteHolder& other) = delete;

    ~WriteHolder() { if (lock_) lock_->unlock(); }

    void reset(RWSpinLock* lock = nullptr) {
      if (lock == lock_) return;
      if (lock_) lock_->unlock();
      lock_ = lock;
      if (lock_) lock_->lock();
    }

    void swap(WriteHolder* other) {
      using std::swap;
      swap(lock_, other->lock_);
    }

   private:
    friend class ReadHolder;
    friend class UpgradedHolder;
    RWSpinLock* lock_;
  };

  // Synchronized<> adaptors
  friend void acquireRead(RWSpinLock& l) { return l.lock_shared(); }
  friend void acquireReadWrite(RWSpinLock& l) { return l.lock(); }
  friend void releaseRead(RWSpinLock& l) { return l.unlock_shared(); }
  friend void releaseReadWrite(RWSpinLock& l) { return l.unlock(); }

 private:
  std::atomic<int32_t> bits_;
};
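
/*
 * Usage sketch (illustrative; the guarded counter is hypothetical, not part
 * of this header):
 *
 *   folly::RWSpinLock lock;
 *   int64_t counter = 0;
 *
 *   void readerThread() {
 *     folly::RWSpinLock::ReadHolder guard(&lock);  // lock_shared()
 *     int64_t snapshot = counter;   // many threads may read concurrently
 *     (void)snapshot;
 *   }                               // unlock_shared() in ~ReadHolder
 *
 *   void writerThread() {
 *     folly::RWSpinLock::WriteHolder guard(&lock); // lock()
 *     ++counter;                    // exclusive access
 *   }                               // unlock() in ~WriteHolder
 */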

#ifdef RW_SPINLOCK_USE_X86_INTRINSIC_
// A more balanced Read-Write spin lock implemented with GCC intrinsics.

namespace detail {
template <size_t kBitWidth> struct RWTicketIntTrait {
  static_assert(kBitWidth == 32 || kBitWidth == 64,
      "bit width has to be either 32 or 64");
};

template <>
struct RWTicketIntTrait<64> {
  typedef uint64_t FullInt;
  typedef uint32_t HalfInt;
  typedef uint16_t QuarterInt;

#ifdef RW_SPINLOCK_USE_SSE_INSTRUCTIONS_
  static __m128i make128(const uint16_t v[4]) {
    return _mm_set_epi16(0, 0, 0, 0, v[3], v[2], v[1], v[0]);
  }
  static inline __m128i fromInteger(uint64_t from) {
    return _mm_cvtsi64_si128(from);
  }
  static inline uint64_t toInteger(__m128i in) {
    return _mm_cvtsi128_si64(in);
  }
  static inline uint64_t addParallel(__m128i in, __m128i kDelta) {
    return toInteger(_mm_add_epi16(in, kDelta));
  }
#endif
};

template <>
struct RWTicketIntTrait<32> {
  typedef uint32_t FullInt;
  typedef uint16_t HalfInt;
  typedef uint8_t QuarterInt;

#ifdef RW_SPINLOCK_USE_SSE_INSTRUCTIONS_
  static __m128i make128(const uint8_t v[4]) {
    return _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, v[3], v[2], v[1], v[0]);
  }
  static inline __m128i fromInteger(uint32_t from) {
    return _mm_cvtsi32_si128(from);
  }
  static inline uint32_t toInteger(__m128i in) {
    return _mm_cvtsi128_si32(in);
  }
  static inline uint32_t addParallel(__m128i in, __m128i kDelta) {
    return toInteger(_mm_add_epi8(in, kDelta));
  }
#endif
};
}  // namespace detail
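
// How the SSE helpers above get used (a sketch; `state` is a hypothetical
// snapshot of the lock word): the write/read/users counters live in adjacent
// QuarterInts, so a single vector add can bump several of them at once.
//
//   const uint8_t delta[4] = { 0, 1, 1, 0 };  // +1 read, +1 users
//   __m128i d = detail::RWTicketIntTrait<32>::make128(delta);
//   uint32_t next = detail::RWTicketIntTrait<32>::addParallel(
//       detail::RWTicketIntTrait<32>::fromInteger(state), d);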

template<size_t kBitWidth, bool kFavorWriter=false>
class RWTicketSpinLockT {
  typedef detail::RWTicketIntTrait<kBitWidth> IntTraitType;
  typedef typename detail::RWTicketIntTrait<kBitWidth>::FullInt FullInt;
  typedef typename detail::RWTicketIntTrait<kBitWidth>::HalfInt HalfInt;
  typedef typename detail::RWTicketIntTrait<kBitWidth>::QuarterInt
    QuarterInt;

  union RWTicket {
    constexpr RWTicket() : whole(0) {}
    FullInt whole;
    HalfInt readWrite;
    __extension__ struct {
      QuarterInt write;
      QuarterInt read;
      QuarterInt users;
    };
  } ticket;

 private: // Some x64-specific utilities for atomic access to ticket.
  template<class T> static T load_acquire(T* addr) {
    T t = *addr; // acquire barrier
    asm_volatile_memory();
    return t;
  }

  template<class T>
  static void store_release(T* addr, T v) {
    asm_volatile_memory();
    *addr = v; // release barrier
  }

 public:
  constexpr RWTicketSpinLockT() {}

  RWTicketSpinLockT(RWTicketSpinLockT const&) = delete;
  RWTicketSpinLockT& operator=(RWTicketSpinLockT const&) = delete;
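
  // Ticket protocol in brief (our variant of the locklessinc.com algorithm
  // referenced above): 'users' is the next ticket to hand out. A writer
  // takes a ticket (users++) and spins until 'write' reaches its ticket. A
  // reader enters only when no earlier ticket is still pending (read ==
  // users), bumping 'read' and 'users' together in one CAS. unlock_shared()
  // bumps 'write'; a writer's unlock() bumps both 'read' and 'write'.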

  void lock() {
    if (kFavorWriter) {
      writeLockAggressive();
    } else {
      writeLockNice();
    }
  }

  /*
   * Both try_lock and try_lock_shared diverge in our implementation from the
   * lock algorithm described in the link above.
   *
   * In the read case, it is undesirable that the readers could wait
   * for another reader (before increasing ticket.read in the other
   * implementation). Our approach gives up on
   * first-come-first-serve, but our benchmarks showed improved
   * performance for both readers and writers under heavily contended
   * cases, particularly when the number of threads exceeds the number
   * of logical CPUs.
   *
   * We have writeLockAggressive() using the original implementation
   * for a writer, which gives some advantage to the writer over the
   * readers---for that path it is guaranteed that the writer will
   * acquire the lock after all the existing readers exit.
   */
  bool try_lock() {
    RWTicket t;
    FullInt old = t.whole = load_acquire(&ticket.whole);
    if (t.users != t.write) return false;
    ++t.users;
    return __sync_bool_compare_and_swap(&ticket.whole, old, t.whole);
  }

  /*
   * Call this if you want to prioritize the writer to avoid starvation.
   * Unlike writeLockNice, it immediately acquires the write lock when
   * the existing readers (arriving before the writer) finish their
   * reads.
   */
  void writeLockAggressive() {
    // sched_yield() is needed here to avoid a pathology if the number
    // of threads attempting concurrent writes is >= the number of real
    // cores allocated to this process. This is less likely than the
    // corresponding situation in lock_shared(), but we still want to
    // avoid it in case it happens.
    int count = 0;
    QuarterInt val = __sync_fetch_and_add(&ticket.users, 1);
    while (val != load_acquire(&ticket.write)) {
      asm_volatile_pause();
      if (UNLIKELY(++count > 1000)) sched_yield();
    }
  }

  // Call this when the writer should be nicer to the readers.
  void writeLockNice() {
    // Here we don't cpu-relax the writer.
    //
    // This is because usually we have many more readers than
    // writers, so the writer has less chance to get the lock when
    // there are a lot of competing readers. The aggressive spinning
    // can help to avoid starving writers.
    //
    // We don't worry about sched_yield() here because the caller
    // has already explicitly abandoned fairness.
    while (!try_lock()) {}
  }

  // Atomically unlock the write-lock from writer and acquire the read-lock.
  void unlock_and_lock_shared() {
    QuarterInt val = __sync_fetch_and_add(&ticket.read, 1);
    (void)val; // silence unused-variable warnings
  }

  // Release writer permission on the lock.
  void unlock() {
    RWTicket t;
    t.whole = load_acquire(&ticket.whole);
    FullInt old = t.whole;

#ifdef RW_SPINLOCK_USE_SSE_INSTRUCTIONS_
    // SSE2 can reduce the lock and unlock overhead by 10%
    static const QuarterInt kDeltaBuf[4] = { 1, 1, 0, 0 };   // write/read/user
    static const __m128i kDelta = IntTraitType::make128(kDeltaBuf);
    __m128i m = IntTraitType::fromInteger(old);
    t.whole = IntTraitType::addParallel(m, kDelta);
#else
    ++t.read;
    ++t.write;
#endif
    store_release(&ticket.readWrite, t.readWrite);
  }
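
  // Worked example (illustrative): a writer holding ticket 3 sees state
  // (write=3, read=3, users=4); unlock() bumps read and write, giving
  // (4, 4, 4), i.e. no tickets outstanding, so the next reader or writer
  // may proceed.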

  void lock_shared() {
    // sched_yield() is important here because we can't grab the
    // shared lock if there is a pending writeLockAggressive, so we
    // need to let threads that already have a shared lock complete.
    int count = 0;
    while (!LIKELY(try_lock_shared())) {
      asm_volatile_pause();
      if (UNLIKELY((++count & 1023) == 0)) sched_yield();
    }
  }

  bool try_lock_shared() {
    RWTicket t, old;
    old.whole = t.whole = load_acquire(&ticket.whole);
    old.users = old.read;
#ifdef RW_SPINLOCK_USE_SSE_INSTRUCTIONS_
    // SSE2 may reduce the total lock and unlock overhead by 10%
    static const QuarterInt kDeltaBuf[4] = { 0, 1, 1, 0 };   // write/read/user
    static const __m128i kDelta = IntTraitType::make128(kDeltaBuf);
    __m128i m = IntTraitType::fromInteger(old.whole);
    t.whole = IntTraitType::addParallel(m, kDelta);
#else
    ++t.read;
    ++t.users;
#endif
    return __sync_bool_compare_and_swap(&ticket.whole, old.whole, t.whole);
  }

  void unlock_shared() {
    QuarterInt val = __sync_fetch_and_add(&ticket.write, 1);
    (void)val; // silence unused-variable warnings
  }

  class WriteHolder;

  typedef RWTicketSpinLockT<kBitWidth, kFavorWriter> RWSpinLock;

  class ReadHolder {
   public:
    ReadHolder(ReadHolder const&) = delete;
    ReadHolder& operator=(ReadHolder const&) = delete;

    explicit ReadHolder(RWSpinLock* lock = nullptr) : lock_(lock) {
      if (lock_) lock_->lock_shared();
    }

    explicit ReadHolder(RWSpinLock& lock) : lock_(&lock) {
      if (lock_) lock_->lock_shared();
    }

    // atomically unlock the write-lock from writer and acquire the read-lock
    explicit ReadHolder(WriteHolder* writer) : lock_(nullptr) {
      std::swap(this->lock_, writer->lock_);
      if (lock_) {
        lock_->unlock_and_lock_shared();
      }
    }

    ~ReadHolder() {
      if (lock_) lock_->unlock_shared();
    }

    void reset(RWSpinLock* lock = nullptr) {
      if (lock_) lock_->unlock_shared();
      lock_ = lock;
      if (lock_) lock_->lock_shared();
    }

    void swap(ReadHolder* other) {
      std::swap(this->lock_, other->lock_);
    }

   private:
    RWSpinLock* lock_;
  };

  class WriteHolder {
   public:
    WriteHolder(WriteHolder const&) = delete;
    WriteHolder& operator=(WriteHolder const&) = delete;

    explicit WriteHolder(RWSpinLock* lock = nullptr) : lock_(lock) {
      if (lock_) lock_->lock();
    }

    explicit WriteHolder(RWSpinLock& lock) : lock_(&lock) {
      if (lock_) lock_->lock();
    }

    ~WriteHolder() {
      if (lock_) lock_->unlock();
    }

    void reset(RWSpinLock* lock = nullptr) {
      if (lock == lock_) return;
      if (lock_) lock_->unlock();
      lock_ = lock;
      if (lock_) lock_->lock();
    }

    void swap(WriteHolder* other) {
      std::swap(this->lock_, other->lock_);
    }

   private:
    friend class ReadHolder;
    RWSpinLock* lock_;
  };

  // Synchronized<> adaptors.
  friend void acquireRead(RWTicketSpinLockT& mutex) {
    mutex.lock_shared();
  }
  friend void acquireReadWrite(RWTicketSpinLockT& mutex) {
    mutex.lock();
  }
  friend void releaseRead(RWTicketSpinLockT& mutex) {
    mutex.unlock_shared();
  }
  friend void releaseReadWrite(RWTicketSpinLockT& mutex) {
    mutex.unlock();
  }
};

typedef RWTicketSpinLockT<32> RWTicketSpinLock32;
typedef RWTicketSpinLockT<64> RWTicketSpinLock64;
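
/*
 * Usage sketch (illustrative; not part of the original header):
 *
 *   folly::RWTicketSpinLock32 lock;
 *
 *   void reader() {
 *     folly::RWTicketSpinLock32::ReadHolder guard(&lock);
 *     // ... shared access ...
 *   }
 *
 *   void writer() {
 *     folly::RWTicketSpinLock32::WriteHolder guard(&lock);
 *     // ... exclusive access ...
 *   }
 */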

#endif  // RW_SPINLOCK_USE_X86_INTRINSIC_

}  // namespace folly

#ifdef RW_SPINLOCK_USE_X86_INTRINSIC_
#undef RW_SPINLOCK_USE_X86_INTRINSIC_
#endif

#endif  // FOLLY_RWSPINLOCK_H_