2 * Copyright 2013 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 * This module implements a Synchronized abstraction useful in
19 * mutex-based concurrency.
21 * @author: Andrei Alexandrescu (andrei.alexandrescu@fb.com)
24 #ifndef SYNCHRONIZED_H_
25 #define SYNCHRONIZED_H_
27 #include <type_traits>
29 #include <boost/thread.hpp>
30 #include "folly/Preprocessor.h"
31 #include "folly/Traits.h"
// Empty tag type used to select the "do not actually lock" constructor
// overloads of LockedPtr/ConstLockedPtr (used only by SYNCHRONIZED_DUAL,
// which performs the locking itself via lockInOrder). Not for client code.
36 enum InternalDoNotUse {};
39 * Free function adaptors for std:: and boost::
43 * Yields true iff T has .lock() and .unlock() member functions. This
44 * is done by simply enumerating the mutexes with this interface in
// Compile-time trait: value is nonzero iff T is one of the mutex types
// listed below. Timed mutexes are excluded on OSX, which lacks them.
// NOTE(review): the `template <class T>` header and the closing
// `>::value };` / `#endif` of this trait fall outside the visible excerpt.
48 struct HasLockUnlock {
49 enum { value = IsOneOf<T,
50 std::mutex, std::recursive_mutex,
51 boost::mutex, boost::recursive_mutex, boost::shared_mutex,
52 #ifndef __APPLE__ // OSX doesn't have timed mutexes
53 std::timed_mutex, std::recursive_timed_mutex,
54 boost::timed_mutex, boost::recursive_timed_mutex
// --- detail::acquireRead / acquireReadWrite overload set ----------------
// "Read" acquisition degenerates to an exclusive .lock() for every mutex
// except boost::shared_mutex, which has a true shared mode.
// NOTE(review): function bodies, closing braces and the `template <class T>`
// headers of these overloads are outside the visible excerpt.
60 * Acquires a mutex for reading by calling .lock(). The exception is
61 * boost::shared_mutex, which has a special read-lock primitive called
65 typename std::enable_if<
66 HasLockUnlock<T>::value && !std::is_same<T, boost::shared_mutex>::value>::type
67 acquireRead(T& mutex) {
72 * Special case for boost::shared_mutex.
75 typename std::enable_if<std::is_same<T, boost::shared_mutex>::value>::type
76 acquireRead(T& mutex) {
81 * Acquires a mutex for reading with timeout by calling .timed_lock(). This
82 * applies to three of the boost mutex classes as enumerated below.
// Timed shared acquisition for boost::shared_mutex; returns true on success.
// (The `acquireRead(T& mutex,` signature line is missing from the excerpt.)
85 typename std::enable_if<std::is_same<T, boost::shared_mutex>::value, bool>::type
87 unsigned int milliseconds) {
88 return mutex.timed_lock_shared(boost::posix_time::milliseconds(milliseconds));
// Exclusive (read-write) acquisition: plain .lock() for all supported mutexes.
92 * Acquires a mutex for reading and writing by calling .lock().
95 typename std::enable_if<HasLockUnlock<T>::value>::type
96 acquireReadWrite(T& mutex) {
100 #ifndef __APPLE__ // OSX doesn't have timed mutexes
// Timed exclusive acquisition, std flavor: std::timed_mutex and
// std::recursive_timed_mutex expose .try_lock_for() taking a
// std::chrono duration. Returns true iff the lock was obtained.
102 * Acquires a mutex for reading and writing with timeout by calling
103 * .try_lock_for(). This applies to two of the std mutex classes as
107 typename std::enable_if<
108 IsOneOf<T, std::timed_mutex, std::recursive_timed_mutex>::value, bool>::type
109 acquireReadWrite(T& mutex,
110 unsigned int milliseconds) {
111 return mutex.try_lock_for(std::chrono::milliseconds(milliseconds));
// Timed exclusive acquisition, boost flavor: these classes expose
// .timed_lock() taking a boost::posix_time duration instead.
// NOTE(review): closing braces of both overloads are outside the excerpt.
115 * Acquires a mutex for reading and writing with timeout by calling
116 * .timed_lock(). This applies to three of the boost mutex classes as
120 typename std::enable_if<
121 IsOneOf<T, boost::shared_mutex, boost::timed_mutex,
122 boost::recursive_timed_mutex>::value, bool>::type
123 acquireReadWrite(T& mutex,
124 unsigned int milliseconds) {
125 return mutex.timed_lock(boost::posix_time::milliseconds(milliseconds));
// --- detail::releaseRead / releaseReadWrite -----------------------------
// Mirrors the acquire functions above: generic mutexes release a "read"
// hold with .unlock(); boost::shared_mutex must use .unlock_shared().
// NOTE(review): bodies/braces and `template <class T>` headers of these
// overloads fall outside the visible excerpt.
130 * Releases a mutex previously acquired for reading by calling
131 * .unlock(). The exception is boost::shared_mutex, which has a
132 * special primitive called .unlock_shared().
135 typename std::enable_if<
136 HasLockUnlock<T>::value && !std::is_same<T, boost::shared_mutex>::value>::type
137 releaseRead(T& mutex) {
142 * Special case for boost::shared_mutex.
145 typename std::enable_if<std::is_same<T, boost::shared_mutex>::value>::type
146 releaseRead(T& mutex) {
147 mutex.unlock_shared();
// Exclusive release is .unlock() for every supported mutex.
151 * Releases a mutex previously acquired for reading-writing by calling
155 typename std::enable_if<HasLockUnlock<T>::value>::type
156 releaseReadWrite(T& mutex) {
160 } // namespace detail
163 * Synchronized<T> encapsulates an object of type T (a "datum") paired
164 * with a mutex. The only way to access the datum is while the mutex
165 * is locked, and Synchronized makes it virtually impossible to do
166 * otherwise. The code that would access the datum in unsafe ways
167 * would look odd and convoluted, thus readily alerting the human
168 * reviewer. In contrast, the code that uses Synchronized<T> correctly
169 * looks simple and intuitive.
171 * The second parameter must be a mutex type. Supported mutexes are
172 * std::mutex, std::recursive_mutex, std::timed_mutex,
173 * std::recursive_timed_mutex, boost::mutex, boost::recursive_mutex,
174 * boost::shared_mutex, boost::timed_mutex,
175 * boost::recursive_timed_mutex, and the folly/RWSpinLock.h
178 * You may define Synchronized support by defining 4-6 primitives in
179 * the same namespace as the mutex class (found via ADL). The
180 * primitives are: acquireRead, acquireReadWrite, releaseRead, and
181 * releaseReadWrite. Two optional primitives for timeout operations are
182 * overloads of acquireRead and acquireReadWrite. For signatures,
183 * refer to the namespace detail below, which implements the
184 * primitives for mutexes in std and boost.
186 template <class T, class Mutex = boost::shared_mutex>
187 struct Synchronized {
// Datum + mutex pair; all access to datum_ goes through Locked/ConstLockedPtr
// guards returned by operator->(), so the data is only touched under lock.
189 * Default constructor lets both members run their own default
192 Synchronized() = default;
// Copy ctor: read-locks the source via its const operator->() guard,
// then copies only datum_ (this object's mutex_ is freshly constructed).
// NOTE(review): the `datum_ = rhs.datum_;` line and closing brace are
// outside the visible excerpt.
195 * Copy constructor copies the data (with locking the source and
196 * all) but does NOT copy the mutex. Doing so would result in
199 Synchronized(const Synchronized& rhs) {
200 auto guard = rhs.operator->();
// Move ctor: locks the source (rhs is non-const, so this is a write lock)
// and moves datum_ out; the mutex itself is never moved.
205 * Move constructor moves the data (with locking the source and all)
206 * but does not move the mutex.
208 Synchronized(Synchronized&& rhs) {
209 auto guard = rhs.operator->();
210 datum_ = std::move(rhs.datum_);
// Construction from a bare datum needs no locking: no other thread can
// see the object until the constructor returns.
214 * Constructor taking a datum as argument copies it. There is no
215 * need to lock the constructing object.
217 explicit Synchronized(const T& rhs) : datum_(rhs) {}
220 * Constructor taking a datum rvalue as argument moves it. Again,
221 * there is no need to lock the constructing object.
223 explicit Synchronized(T && rhs) : datum_(std::move(rhs)) {}
// Assignment locks *both* objects, in ascending address order, to avoid
// deadlock against a concurrent assignment in the opposite direction.
// NOTE(review): the address-comparison branch selecting between the two
// guard orderings below is outside the visible excerpt.
226 * The canonical assignment operator only assigns the data, NOT the
227 * mutex. It locks the two objects in ascending order of their
230 Synchronized& operator=(const Synchronized& rhs) {
232 auto guard1 = operator->();
233 auto guard2 = rhs.operator->();
236 auto guard1 = rhs.operator->();
237 auto guard2 = operator->();
// Assignment from a bare datum: write-lock self, then assign datum_.
244 * Lock object, assign datum.
246 Synchronized& operator=(const T& rhs) {
247 auto guard = operator->();
253 * A LockedPtr lp keeps a modifiable (i.e. non-const)
254 * Synchronized<T> object locked for the duration of lp's
255 * existence. Because of this, you get to access the datum's methods
256 * directly by using lp->fun().
// RAII write-lock guard over a Synchronized. parent_ == NULL means
// "holds no lock" (timed acquisition failed, or moved-through states).
// NOTE(review): the `struct LockedPtr {` opener and several bodies and
// closing braces are outside the visible excerpt.
260 * Found no reason to leave this hanging.
262 LockedPtr() = delete;
// Write-locks *parent for the guard's lifetime.
265 * Takes a Synchronized and locks it.
267 explicit LockedPtr(Synchronized* parent) : parent_(parent) {
// Timed acquisition: on failure parent_ is left NULL so operator->()
// below returns NULL instead of touching the datum.
272 * Takes a Synchronized and attempts to lock it for some
273 * milliseconds. If not, the LockedPtr will be subsequently null.
275 LockedPtr(Synchronized* parent, unsigned int milliseconds) {
276 using namespace detail;
277 if (acquireReadWrite(parent->mutex_, milliseconds)) {
281 // Could not acquire the resource, pointer is null
// Tag-dispatched ctor: records parent_ but does NOT lock; the caller
// (SYNCHRONIZED_DUAL via lockInOrder) is responsible for locking.
286 * This is used ONLY inside SYNCHRONIZED_DUAL. It initializes
287 * everything properly, but does not lock the parent because it
288 * "knows" someone else will lock it. Please do not use.
290 LockedPtr(Synchronized* parent, detail::InternalDoNotUse)
// Copying a guard re-locks the same parent (recursive-lock semantics
// are required of the underlying mutex for this to be safe).
295 * Copy ctor adds one lock.
297 LockedPtr(const LockedPtr& rhs) : parent_(rhs.parent_) {
// Self-assignment-safe: only swaps locks when parents differ.
302 * Assigning from another LockedPtr results in freeing the former
303 * lock and acquiring the new one. The method works with
304 * self-assignment (does nothing).
306 LockedPtr& operator=(const LockedPtr& rhs) {
307 if (parent_ != rhs.parent_) {
308 if (parent_) parent_->mutex_.unlock();
309 parent_ = rhs.parent_;
// Destructor body (the `~LockedPtr() {` line is outside the excerpt):
// releases the write lock iff one is held.
316 * Destructor releases.
319 using namespace detail;
320 if (parent_) releaseReadWrite(parent_->mutex_);
// operator-> body (signature line outside the excerpt): NULL when the
// guard holds no lock, otherwise the address of the protected datum.
324 * Safe to access the data. Don't save the obtained pointer by
325 * invoking lp.operator->() by hand. Also, if the method returns a
326 * handle stored inside the datum, don't use this idiom - use
327 * SYNCHRONIZED below.
330 return parent_ ? &parent_->datum_ : NULL;
// Scoped "unlock window": ctor releases the parent LockedPtr's lock,
// and (per the UNSYNCHRONIZED macro) it is re-acquired afterwards.
334 * This class temporarily unlocks a LockedPtr in a scoped
335 * manner. It is used inside of the UNSYNCHRONIZED macro.
337 struct Unsynchronizer {
338 explicit Unsynchronizer(LockedPtr* p) : parent_(p) {
339 using namespace detail;
340 releaseReadWrite(parent_->parent_->mutex_);
342 Unsynchronizer(const Unsynchronizer&) = delete;
343 Unsynchronizer& operator=(const Unsynchronizer&) = delete;
347 LockedPtr* operator->() const {
// typeHackDoNotUse is declared but never defined: it exists only so
// UNSYNCHRONIZED can spell the Unsynchronizer type via decltype.
353 friend struct Unsynchronizer;
354 Unsynchronizer typeHackDoNotUse();
356 template <class P1, class P2>
357 friend void lockInOrder(P1& p1, P2& p2);
// acquire() body fragment (signature outside the excerpt): used by
// lockInOrder to take the lock the tag-dispatched ctor skipped.
361 using namespace detail;
362 if (parent_) acquireReadWrite(parent_->mutex_);
365 // This is the entire state of LockedPtr.
366 Synchronized* parent_;
370 * ConstLockedPtr does exactly what LockedPtr does, but for const
371 * Synchronized objects. Of interest is that ConstLockedPtr only
372 * uses a read lock, which is faster but more restrictive - you only
373 * get to call const methods of the datum.
375 * Much of the code between LockedPtr and
376 * ConstLockedPtr is identical and could be factored out, but there
377 * are enough nagging little differences to not justify the trouble.
// RAII read-lock guard; parent_ == NULL means "holds no lock".
// NOTE(review): several bodies and closing braces within this struct
// are outside the visible excerpt.
379 struct ConstLockedPtr {
380 ConstLockedPtr() = delete;
// Read-locks *parent for the guard's lifetime.
381 explicit ConstLockedPtr(const Synchronized* parent) : parent_(parent) {
// Tag-dispatched no-lock ctor for SYNCHRONIZED_DUAL (see LockedPtr).
384 ConstLockedPtr(const Synchronized* parent, detail::InternalDoNotUse)
// Copying re-acquires the read lock on the same parent.
387 ConstLockedPtr(const ConstLockedPtr& rhs) : parent_(rhs.parent_) {
// Converting from a LockedPtr: adds a *read* lock on top of the
// write lock the source still holds.
390 explicit ConstLockedPtr(const LockedPtr& rhs) : parent_(rhs.parent_) {
// Timed acquisition; on failure parent_ stays NULL.
// NOTE(review): this calls the *exclusive* timed_lock(), yet the
// destructor/assignment release via releaseRead/unlock_shared —
// looks like it should be timed_lock_shared(); confirm against
// upstream before relying on the timed const path.
393 ConstLockedPtr(const Synchronized* parent, unsigned int milliseconds) {
394 if (parent->mutex_.timed_lock(
395 boost::posix_time::milliseconds(milliseconds))) {
399 // Could not acquire the resource, pointer is null
// Self-assignment-safe, mirroring LockedPtr::operator= but with
// shared (read) lock primitives.
403 ConstLockedPtr& operator=(const ConstLockedPtr& rhs) {
404 if (parent_ != rhs.parent_) {
405 if (parent_) parent_->mutex_.unlock_shared();
406 parent_ = rhs.parent_;
// Destructor body (signature line outside the excerpt): releases the
// read lock iff one is held.
411 using namespace detail;
412 if (parent_) releaseRead(parent_->mutex_);
// Const-only access to the datum; NULL when no lock is held.
415 const T* operator->() const {
416 return parent_ ? &parent_->datum_ : NULL;
// Scoped unlock window for the read lock (see UNSYNCHRONIZED).
419 struct Unsynchronizer {
420 explicit Unsynchronizer(ConstLockedPtr* p) : parent_(p) {
421 using namespace detail;
422 releaseRead(parent_->parent_->mutex_);
424 Unsynchronizer(const Unsynchronizer&) = delete;
425 Unsynchronizer& operator=(const Unsynchronizer&) = delete;
// Unsynchronizer dtor body fragment: re-acquires the read lock.
427 using namespace detail;
428 acquireRead(parent_->parent_->mutex_);
430 ConstLockedPtr* operator->() const {
434 ConstLockedPtr* parent_;
// Declared-only hook for UNSYNCHRONIZED's decltype trick (see LockedPtr).
436 friend struct Unsynchronizer;
437 Unsynchronizer typeHackDoNotUse();
439 template <class P1, class P2>
440 friend void lockInOrder(P1& p1, P2& p2);
// acquire() body fragment: used by lockInOrder after the no-lock ctor.
444 using namespace detail;
445 if (parent_) acquireRead(parent_->mutex_);
448 const Synchronized* parent_;
452 * This accessor offers a LockedPtr. In turn, LockedPtr offers
453 * operator-> returning a pointer to T. The operator-> keeps
454 * expanding until it reaches a pointer, so syncobj->foo() will lock
455 * the object and call foo() against it.
// Non-const access: returns a write-lock guard.
457 LockedPtr operator->() {
458 return LockedPtr(this);
// Const access: returns a read-lock guard.
462 * Same, for constant objects. You will be able to invoke only const
465 ConstLockedPtr operator->() const {
466 return ConstLockedPtr(this);
// Timed variants: the returned guard compares NULL-like (its
// operator-> yields NULL) when acquisition timed out.
470 * Attempts to acquire for a given number of milliseconds. If
471 * acquisition is unsuccessful, the returned LockedPtr is NULL.
473 LockedPtr timedAcquire(unsigned int milliseconds) {
474 return LockedPtr(this, milliseconds);
478 * As above, for a constant object.
480 ConstLockedPtr timedAcquire(unsigned int milliseconds) const {
481 return ConstLockedPtr(this, milliseconds);
// No-lock guard factories for SYNCHRONIZED_DUAL; lockInOrder does the
// actual locking in address order.
485 * Used by SYNCHRONIZED_DUAL.
487 LockedPtr internalDoNotUse() {
488 return LockedPtr(this, detail::InternalDoNotUse());
494 ConstLockedPtr internalDoNotUse() const {
495 return ConstLockedPtr(this, detail::InternalDoNotUse());
// Cheap const view of a mutable Synchronized so callers can opt into
// the (shared) read lock: obj.asConst()->constMethod().
499 * Sometimes, although you have a mutable object, you only want to
500 * call a const method against it. The most efficient way to achieve
501 * that is by using a read lock. You get to do so by using
502 * obj.asConst()->method() instead of obj->method().
504 const Synchronized& asConst() const {
// swap: self-swap-safe; locks both objects in address order (the
// address-comparison branch is outside the visible excerpt) and swaps
// only datum_, never the mutexes.
509 * Swaps with another Synchronized. Protected against
510 * self-swap. Only data is swapped. Locks are acquired in increasing
513 void swap(Synchronized& rhs) {
518 return rhs.swap(*this);
520 auto guard1 = operator->();
521 auto guard2 = rhs.operator->();
522 datum_.swap(rhs.datum_);
// Swap with a bare datum: only this object's mutex is involved.
526 * Swap with another datum. Recommended because it keeps the mutex
530 LockedPtr guard = operator->();
// copy-out under a read lock.
535 * Copies datum to a given target.
537 void copy(T* target) const {
538 ConstLockedPtr guard = operator->();
// value-returning copy, also under a read lock (signature line is
// outside the visible excerpt).
543 * Returns a fresh copy of the datum.
546 ConstLockedPtr guard = operator->();
// mutable: even const member functions must be able to lock/unlock.
552 mutable Mutex mutex_;
555 // Non-member swap primitive
// ADL-friendly swap: forwards to the member swap (body lines are
// outside the visible excerpt), so `using std::swap; swap(a, b);` works.
556 template <class T, class M>
557 void swap(Synchronized<T, M>& lhs, Synchronized<T, M>& rhs) {
562 * SYNCHRONIZED is the main facility that makes Synchronized<T>
563 * helpful. It is a pseudo-statement that introduces a scope where the
564 * object is locked. Inside that scope you get to access the unadorned
569 * Synchronized<vector<int>> svector;
571 * SYNCHRONIZED (svector) { ... use svector as a vector<int> ... }
573 * SYNCHRONIZED (v, svector) { ... use v as a vector<int> ... }
575 * Refer to folly/docs/Synchronized.md for a detailed explanation and more
// Implementation notes: the dead `if` introduces SYNCHRONIZED_state
// without shadow warnings; the two single-iteration `for` loops hold the
// LockedPtr guard and bind the user-chosen name to the locked datum.
// The names SYNCHRONIZED_state / SYNCHRONIZED_lockedPtr are deliberately
// stable: UNSYNCHRONIZED below refers to them by spelling.
578 #define SYNCHRONIZED(...) \
579 if (bool SYNCHRONIZED_state = false) {} else \
580 for (auto SYNCHRONIZED_lockedPtr = \
581 (FB_ARG_2_OR_1(__VA_ARGS__)).operator->(); \
582 !SYNCHRONIZED_state; SYNCHRONIZED_state = true) \
583 for (auto& FB_ARG_1(__VA_ARGS__) = \
584 *SYNCHRONIZED_lockedPtr.operator->(); \
585 !SYNCHRONIZED_state; SYNCHRONIZED_state = true)
// Like SYNCHRONIZED but via timedAcquire(timeout). Unlike SYNCHRONIZED,
// the bound name is the LockedPtr's raw pointer (no dereference), which
// is NULL when acquisition timed out — callers must check it.
587 #define TIMED_SYNCHRONIZED(timeout, ...) \
588 if (bool SYNCHRONIZED_state = false) {} else \
589 for (auto SYNCHRONIZED_lockedPtr = \
590 (FB_ARG_2_OR_1(__VA_ARGS__)).timedAcquire(timeout); \
591 !SYNCHRONIZED_state; SYNCHRONIZED_state = true) \
592 for (auto FB_ARG_1(__VA_ARGS__) = \
593 SYNCHRONIZED_lockedPtr.operator->(); \
594 !SYNCHRONIZED_state; SYNCHRONIZED_state = true)
597 * Similar to SYNCHRONIZED, but only uses a read lock.
// Routes through asConst() so the const operator->()/timedAcquire()
// overloads are picked, yielding a ConstLockedPtr (shared lock).
599 #define SYNCHRONIZED_CONST(...) \
600 SYNCHRONIZED(FB_ARG_1(__VA_ARGS__), \
601 (FB_ARG_2_OR_1(__VA_ARGS__)).asConst())
604 * Similar to TIMED_SYNCHRONIZED, but only uses a read lock.
606 #define TIMED_SYNCHRONIZED_CONST(timeout, ...) \
607 TIMED_SYNCHRONIZED(timeout, FB_ARG_1(__VA_ARGS__), \
608 (FB_ARG_2_OR_1(__VA_ARGS__)).asConst())
611 * Temporarily disables synchronization inside a SYNCHRONIZED block.
// Only valid lexically inside SYNCHRONIZED: it names the enclosing
// macro's SYNCHRONIZED_lockedPtr/SYNCHRONIZED_state directly. The
// decltype of the never-defined typeHackDoNotUse() spells the guard's
// Unsynchronizer type, whose ctor/dtor unlock and re-lock the mutex.
613 #define UNSYNCHRONIZED(name) \
614 for (decltype(SYNCHRONIZED_lockedPtr.typeHackDoNotUse()) \
615 SYNCHRONIZED_state3(&SYNCHRONIZED_lockedPtr); \
616 !SYNCHRONIZED_state; SYNCHRONIZED_state = true) \
617 for (auto name = *SYNCHRONIZED_state3.operator->(); \
618 !SYNCHRONIZED_state; SYNCHRONIZED_state = true)
621 * Locks two objects in increasing order of their addresses.
// Deadlock-avoidance helper for SYNCHRONIZED_DUAL: compares the two
// datum addresses and acquires in ascending order. The acquire calls
// and closing brace are outside the visible excerpt. Note that
// operator->() here is the guard's pointer access (no locking), since
// P1/P2 are Locked/ConstLockedPtr created via internalDoNotUse().
623 template <class P1, class P2>
624 void lockInOrder(P1& p1, P2& p2) {
625 if (static_cast<const void*>(p1.operator->()) >
626 static_cast<const void*>(p2.operator->())) {
636 * Synchronizes two Synchronized objects (they may encapsulate
637 * different data). Synchronization is done in increasing address of
638 * object order, so there is no deadlock risk.
// Creates two unlocked guards via internalDoNotUse(), binds both names,
// then lets lockInOrder acquire both locks in address order. The final
// `true))` of the comma expression is outside the visible excerpt.
640 #define SYNCHRONIZED_DUAL(n1, e1, n2, e2) \
641 if (bool SYNCHRONIZED_state = false) {} else \
642 for (auto SYNCHRONIZED_lp1 = (e1).internalDoNotUse(); \
643 !SYNCHRONIZED_state; SYNCHRONIZED_state = true) \
644 for (auto& n1 = *SYNCHRONIZED_lp1.operator->(); \
645 !SYNCHRONIZED_state; SYNCHRONIZED_state = true) \
646 for (auto SYNCHRONIZED_lp2 = (e2).internalDoNotUse(); \
647 !SYNCHRONIZED_state; SYNCHRONIZED_state = true) \
648 for (auto& n2 = *SYNCHRONIZED_lp2.operator->(); \
649 !SYNCHRONIZED_state; SYNCHRONIZED_state = true) \
650 if ((::folly::lockInOrder( \
651 SYNCHRONIZED_lp1, SYNCHRONIZED_lp2), \
655 } /* namespace folly */
657 #endif // SYNCHRONIZED_H_