* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
#pragma once
#include <atomic>
-
-#include <glog/logging.h>
+#include <cassert>
namespace folly {
  // Non-copyable: elements are linked through hooks embedded in the
  // elements themselves, so two lists can never share the same nodes.
  AtomicIntrusiveLinkedList& operator=(const AtomicIntrusiveLinkedList&) =
      delete;
AtomicIntrusiveLinkedList(AtomicIntrusiveLinkedList&& other) noexcept {
- *this = std::move(other);
+ auto tmp = other.head_.load();
+ other.head_ = head_.load();
+ head_ = tmp;
}
AtomicIntrusiveLinkedList& operator=(
AtomicIntrusiveLinkedList&& other) noexcept {
/**
* Note: list must be empty on destruction.
*/
- ~AtomicIntrusiveLinkedList() { DCHECK(empty()); }
+ ~AtomicIntrusiveLinkedList() {
+ assert(empty());
+ }
- bool empty() const { return head_ == nullptr; }
+ bool empty() const {
+ return head_.load() == nullptr;
+ }
/**
* Atomically insert t at the head of the list.
* after the call.
*/
bool insertHead(T* t) {
- DCHECK(next(t) == nullptr);
+ assert(next(t) == nullptr);
auto oldHead = head_.load(std::memory_order_relaxed);
do {
compiler bugs (GCC prior to 4.8.3 (bug 60272), clang (bug 18899),
MSVC (bug 819819); source:
http://en.cppreference.com/w/cpp/atomic/atomic/compare_exchange */
- } while (!head_.compare_exchange_weak(
- oldHead, t, std::memory_order_release, std::memory_order_relaxed));
+ } while (!head_.compare_exchange_weak(oldHead, t,
+ std::memory_order_release,
+ std::memory_order_relaxed));
return oldHead == nullptr;
}
private:
  // Head of the singly-linked list; nullptr means the list is empty.
  std::atomic<T*> head_{nullptr};
- static T*& next(T* t) { return (t->*HookMember).next; }
+ static T*& next(T* t) {
+ return (t->*HookMember).next;
+ }
/* Reverses a linked list, returning the pointer to the new head
(old tail) */