/*
- * Copyright 2015 Facebook, Inc.
+ * Copyright 2017 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
// @author Bo Hu (bhu@fb.com)
// @author Jordan DeLong (delong.j@fb.com)
-#ifndef PRODUCER_CONSUMER_QUEUE_H_
-#define PRODUCER_CONSUMER_QUEUE_H_
+#pragma once
#include <atomic>
#include <cassert>
#include <cstdlib>
+#include <memory>
#include <stdexcept>
#include <type_traits>
#include <utility>
+#include <folly/concurrency/CacheLocality.h>
+
namespace folly {
/*
* ProducerConsumerQueue is a one producer and one consumer queue
* without locks.
*/
-template<class T>
+template <class T>
struct ProducerConsumerQueue {
typedef T value_type;
// (No real synchronization needed at destructor time: only one
// thread can be doing this.)
if (!std::is_trivially_destructible<T>::value) {
- size_t read = readIndex_;
- size_t end = writeIndex_;
- while (read != end) {
- records_[read].~T();
- if (++read == size_) {
- read = 0;
+ size_t readIndex = readIndex_;
+ size_t endIndex = writeIndex_;
+ while (readIndex != endIndex) {
+ records_[readIndex].~T();
+ if (++readIndex == size_) {
+ readIndex = 0;
}
}
}
std::free(records_);
}
- template<class ...Args>
+ template <class... Args>
bool write(Args&&... recordArgs) {
auto const currentWrite = writeIndex_.load(std::memory_order_relaxed);
auto nextRecord = currentWrite + 1;
}
bool isEmpty() const {
- return readIndex_.load(std::memory_order_consume) ==
- writeIndex_.load(std::memory_order_consume);
+ return readIndex_.load(std::memory_order_acquire) ==
+ writeIndex_.load(std::memory_order_acquire);
}
bool isFull() const {
- auto nextRecord = writeIndex_.load(std::memory_order_consume) + 1;
+ auto nextRecord = writeIndex_.load(std::memory_order_acquire) + 1;
if (nextRecord == size_) {
nextRecord = 0;
}
- if (nextRecord != readIndex_.load(std::memory_order_consume)) {
+ if (nextRecord != readIndex_.load(std::memory_order_acquire)) {
return false;
}
// queue is full
// be removing items concurrently).
// * It is undefined to call this from any other thread.
size_t sizeGuess() const {
- int ret = writeIndex_.load(std::memory_order_consume) -
- readIndex_.load(std::memory_order_consume);
+ int ret = writeIndex_.load(std::memory_order_acquire) -
+ readIndex_.load(std::memory_order_acquire);
if (ret < 0) {
ret += size_;
}
return ret;
}
-private:
+ // Maximum number of items the queue can hold: size_ - 1, because one
+ // slot is always left empty to distinguish a full queue from an empty one.
+ size_t capacity() const {
+ return size_ - 1;
+ }
+
+ private:
+ char pad0_[CacheLocality::kFalseSharingRange];
const uint32_t size_;
T* const records_;
- std::atomic<unsigned int> readIndex_;
- std::atomic<unsigned int> writeIndex_;
-};
+ FOLLY_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<unsigned int> readIndex_;
+ FOLLY_ALIGN_TO_AVOID_FALSE_SHARING std::atomic<unsigned int> writeIndex_;
-}
+ char pad1_[CacheLocality::kFalseSharingRange - sizeof(writeIndex_)];
+};
-#endif
+} // namespace folly