X-Git-Url: http://plrg.eecs.uci.edu/git/?p=folly.git;a=blobdiff_plain;f=folly%2FMPMCQueue.h;h=d4c0ccc7c8307dbfec970f75fa8b6bd676da6339;hp=08b2e1b8f7e31cd86b9881c30311fcb1fe08ed7f;hb=ef20f6380813110434dae416f0abe964e476c8c6;hpb=35fcff936a0ba58986269fb05689843f99e89eb5 diff --git a/folly/MPMCQueue.h b/folly/MPMCQueue.h index 08b2e1b8..d4c0ccc7 100644 --- a/folly/MPMCQueue.h +++ b/folly/MPMCQueue.h @@ -1,5 +1,5 @@ /* - * Copyright 2016 Facebook, Inc. + * Copyright 2017 Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,14 +18,15 @@ #include #include -#include -#include +#include +#include #include -#include #include +#include + #include -#include +#include #include #include @@ -33,11 +34,14 @@ namespace folly { namespace detail { -template class Atom> +template class Atom> struct SingleElementQueue; template class MPMCPipelineStageImpl; +/// MPMCQueue base CRTP template +template class MPMCQueueBase; + } // namespace detail /// MPMCQueue is a high-performance bounded concurrent queue that @@ -83,7 +87,7 @@ template class MPMCPipelineStageImpl; /// use noexcept, you will have to wrap it in something that provides /// the guarantee. We provide an alternate safe implementation for types /// that don't use noexcept but that are marked folly::IsRelocatable -/// and boost::has_nothrow_constructor, which is common for folly types. +/// and std::is_nothrow_constructible, which is common for folly types. /// In particular, if you can declare FOLLY_ASSUME_FBVECTOR_COMPATIBLE /// then your type can be put in MPMCQueue. /// @@ -93,49 +97,575 @@ template class MPMCPipelineStageImpl; /// are you can enqueue one sentinel and then have each consumer requeue /// two sentinels after it receives it (by requeuing 2 the shutdown can /// complete in O(log P) time instead of O(P)). -template class Atom = std::atomic> -class MPMCQueue : boost::noncopyable { +template < + typename T, + template class Atom = std::atomic, + bool Dynamic = false> +class MPMCQueue : public detail::MPMCQueueBase> { + friend class detail::MPMCPipelineStageImpl; + using Slot = detail::SingleElementQueue; + public: + + explicit MPMCQueue(size_t queueCapacity) + : detail::MPMCQueueBase>(queueCapacity) + { + this->stride_ = this->computeStride(queueCapacity); + this->slots_ = new Slot[queueCapacity + 2 * this->kSlotPadding]; + } + + MPMCQueue() noexcept { } +}; + +/// The dynamic version of MPMCQueue allows dynamic expansion of queue +/// capacity, such that a queue may start with a smaller capacity than +/// specified and expand only if needed. Users may optionally specify +/// the initial capacity and the expansion multiplier. +/// +/// The design uses a seqlock to enforce mutual exclusion among +/// expansion attempts. Regular operations read up-to-date queue +/// information (slots array, capacity, stride) inside read-only +/// seqlock sections, which are unimpeded when no expansion is in +/// progress. +/// +/// An expansion computes a new capacity, allocates a new slots array, +/// and updates stride. No information needs to be copied from the +/// current slots array to the new one. When this happens, new slots +/// will not have sequence numbers that match ticket numbers. The +/// expansion needs to compute a ticket offset such that operations +/// that use new arrays can adjust the calculations of slot indexes +/// and sequence numbers that take into account that the new slots +/// start with sequence numbers of zero. 
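+///
+/// (Illustrative arithmetic, an editor's sketch using the idx()/turn()
+/// mapping defined later in this diff; the numbers are invented: with
+/// stride 1, an expansion that records ticket offset 26 and grows the
+/// array to capacity 100 serves ticket 130 through the adjusted ticket
+/// 130 - 26 = 104, i.e., slot index (104 * stride) % 100 = 4 (plus
+/// kSlotPadding) at turn 104 / 100 = 1, exactly as if the new array had
+/// started at ticket 26 with sequence numbers of zero.)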
+/// The current ticket offset is
+/// packed with the seqlock in an atomic 64-bit integer. The initial
+/// offset is zero.
+///
+/// Lagging write and read operations with tickets lower than the
+/// ticket offset of the current slots array (i.e., the minimum ticket
+/// number that can be served by the current array) must use earlier
+/// closed arrays instead of the current one. Information about closed
+/// slots arrays (array address, capacity, stride, and offset) is
+/// maintained in a logarithmic-sized structure. Each entry in that
+/// structure never needs to be changed once set. The number of closed
+/// arrays is half the value of the seqlock (when unlocked).
+///
+/// The acquisition of the seqlock to perform an expansion does not
+/// prevent the issuing of new push and pop tickets concurrently. The
+/// expansion must set the new ticket offset to a value that couldn't
+/// have been issued to an operation that has already gone through a
+/// seqlock read-only section (and hence obtained information for
+/// older closed arrays).
+///
+/// Note that the total queue capacity can temporarily exceed the
+/// specified capacity when there are lagging consumers that haven't
+/// yet consumed all the elements in closed arrays. Users should not
+/// rely on the capacity of dynamic queues for synchronization, e.g.,
+/// they should not expect that a thread will definitely block on a
+/// call to blockingWrite() when the queue size is known to be equal
+/// to its capacity.
+///
+/// Note that some writeIfNotFull() and tryWriteUntil() operations may
+/// fail even if the size of the queue is less than its maximum
+/// capacity and despite the success of expansion, if the operation
+/// happens to acquire a ticket that belongs to a closed array. This
+/// is a transient condition. Typically, one or two ticket values may
+/// be subject to this condition per expansion.
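+///
+/// Illustrative usage (an editor's sketch, not lines from this commit;
+/// it relies only on constructors and operations declared in this file):
+///
+///   folly::MPMCQueue<int, std::atomic, true> q(1000, 10, 10);
+///   q.blockingWrite(1);   // may trigger an expansion once the initial
+///                         // capacity of 10 fills up
+///   int v;
+///   q.blockingRead(v);    // a lagging read may be served from a
+///                         // closed array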
+/// +/// The dynamic version is a partial specialization of MPMCQueue with +/// Dynamic == true +template class Atom> +class MPMCQueue : + public detail::MPMCQueueBase> { + friend class detail::MPMCQueueBase>; + using Slot = detail::SingleElementQueue; + + struct ClosedArray { + uint64_t offset_ {0}; + Slot* slots_ {nullptr}; + size_t capacity_ {0}; + int stride_ {0}; + }; + + public: + + explicit MPMCQueue(size_t queueCapacity) + : detail::MPMCQueueBase>(queueCapacity) + { + size_t cap = std::min(kDefaultMinDynamicCapacity, queueCapacity); + initQueue(cap, kDefaultExpansionMultiplier); + } + + explicit MPMCQueue(size_t queueCapacity, + size_t minCapacity, + size_t expansionMultiplier) + : detail::MPMCQueueBase>(queueCapacity) + { + minCapacity = std::max(1, minCapacity); + size_t cap = std::min(minCapacity, queueCapacity); + expansionMultiplier = std::max(2, expansionMultiplier); + initQueue(cap, expansionMultiplier); + } + + MPMCQueue() noexcept { + dmult_ = 0; + closed_ = nullptr; + } + + MPMCQueue(MPMCQueue&& rhs) noexcept { + this->capacity_ = rhs.capacity_; + this->slots_ = rhs.slots_; + this->stride_ = rhs.stride_; + this->dstate_.store(rhs.dstate_.load(std::memory_order_relaxed), + std::memory_order_relaxed); + this->dcapacity_.store(rhs.dcapacity_.load(std::memory_order_relaxed), + std::memory_order_relaxed); + this->pushTicket_.store(rhs.pushTicket_.load(std::memory_order_relaxed), + std::memory_order_relaxed); + this->popTicket_.store(rhs.popTicket_.load(std::memory_order_relaxed), + std::memory_order_relaxed); + this->pushSpinCutoff_.store( + rhs.pushSpinCutoff_.load(std::memory_order_relaxed), + std::memory_order_relaxed); + this->popSpinCutoff_.store( + rhs.popSpinCutoff_.load(std::memory_order_relaxed), + std::memory_order_relaxed); + dmult_ = rhs.dmult_; + closed_ = rhs.closed_; + + rhs.capacity_ = 0; + rhs.slots_ = nullptr; + rhs.stride_ = 0; + rhs.dstate_.store(0, std::memory_order_relaxed); + rhs.dcapacity_.store(0, std::memory_order_relaxed); + rhs.pushTicket_.store(0, std::memory_order_relaxed); + rhs.popTicket_.store(0, std::memory_order_relaxed); + rhs.pushSpinCutoff_.store(0, std::memory_order_relaxed); + rhs.popSpinCutoff_.store(0, std::memory_order_relaxed); + rhs.dmult_ = 0; + rhs.closed_ = nullptr; + } + + MPMCQueue const& operator= (MPMCQueue&& rhs) { + if (this != &rhs) { + this->~MPMCQueue(); + new (this) MPMCQueue(std::move(rhs)); + } + return *this; + } + + ~MPMCQueue() { + if (closed_ != nullptr) { + for (int i = getNumClosed(this->dstate_.load()) - 1; i >= 0; --i) { + delete[] closed_[i].slots_; + } + delete[] closed_; + } + } + + size_t allocatedCapacity() const noexcept { + return this->dcapacity_.load(std::memory_order_relaxed); + } + + template + void blockingWrite(Args&&... args) noexcept { + uint64_t ticket = this->pushTicket_++; + Slot* slots; + size_t cap; + int stride; + uint64_t state; + uint64_t offset; + do { + if (!trySeqlockReadSection(state, slots, cap, stride)) { + asm_volatile_pause(); + continue; + } + if (maybeUpdateFromClosed(state, ticket, offset, slots, cap, stride)) { + // There was an expansion after this ticket was issued. + break; + } + if (slots[this->idx((ticket - offset), cap, stride)].mayEnqueue( + this->turn(ticket - offset, cap))) { + // A slot is ready. No need to expand. + break; + } else if ( + this->popTicket_.load(std::memory_order_relaxed) + cap > ticket) { + // May block, but a pop is in progress. No need to expand. 
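+        // (popTicket_ + cap > ticket means that pop ticket ticket - cap,
+        // the one that recycles this slot, has already been issued to
+        // some consumer, so the wait here is bounded by that consumer's
+        // progress rather than by future arrivals.)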
+ // Get seqlock read section info again in case an expansion + // occurred with an equal or higher ticket. + continue; + } else { + // May block. See if we can expand. + if (tryExpand(state, cap)) { + // This or another thread started an expansion. Get updated info. + continue; + } else { + // Can't expand. + break; + } + } + } while (true); + this->enqueueWithTicketBase(ticket-offset, slots, cap, stride, + std::forward(args)...); + } + + void blockingReadWithTicket(uint64_t& ticket, T& elem) noexcept { + ticket = this->popTicket_++; + Slot* slots; + size_t cap; + int stride; + uint64_t state; + uint64_t offset; + while (!trySeqlockReadSection(state, slots, cap, stride)) { + asm_volatile_pause(); + } + // If there was an expansion after the corresponding push ticket + // was issued, adjust accordingly + maybeUpdateFromClosed(state, ticket, offset, slots, cap, stride); + this->dequeueWithTicketBase(ticket-offset, slots, cap, stride, elem); + } + + private: + enum { + kSeqlockBits = 6, + kDefaultMinDynamicCapacity = 10, + kDefaultExpansionMultiplier = 10, + }; + + size_t dmult_; + + // Info about closed slots arrays for use by lagging operations + ClosedArray* closed_; + + void initQueue(const size_t cap, const size_t mult) { + this->stride_ = this->computeStride(cap); + this->slots_ = new Slot[cap + 2 * this->kSlotPadding]; + this->dstate_.store(0); + this->dcapacity_.store(cap); + dmult_ = mult; + size_t maxClosed = 0; + for (size_t expanded = cap; + expanded < this->capacity_; + expanded *= mult) { + ++maxClosed; + } + closed_ = (maxClosed > 0) ? new ClosedArray[maxClosed] : nullptr; + } + + bool tryObtainReadyPushTicket( + uint64_t& ticket, Slot*& slots, size_t& cap, int& stride + ) noexcept { + uint64_t state; + do { + ticket = this->pushTicket_.load(std::memory_order_acquire); // A + if (!trySeqlockReadSection(state, slots, cap, stride)) { + asm_volatile_pause(); + continue; + } + + // If there was an expansion with offset greater than this ticket, + // adjust accordingly + uint64_t offset; + maybeUpdateFromClosed(state, ticket, offset, slots, cap, stride); + + if (slots[this->idx((ticket - offset), cap, stride)].mayEnqueue( + this->turn(ticket - offset, cap))) { + // A slot is ready. + if (this->pushTicket_.compare_exchange_strong(ticket, ticket + 1)) { + // Adjust ticket + ticket -= offset; + return true; + } else { + continue; + } + } else { + if (ticket != this->pushTicket_.load(std::memory_order_relaxed)) { // B + // Try again. Ticket changed. + continue; + } + // Likely to block. + // Try to expand unless the ticket is for a closed array + if (offset == getOffset(state)) { + if (tryExpand(state, cap)) { + // This or another thread started an expansion. Get up-to-date info. + continue; + } + } + return false; + } + } while (true); + } + + bool tryObtainPromisedPushTicket( + uint64_t& ticket, Slot*& slots, size_t& cap, int& stride + ) noexcept { + uint64_t state; + do { + ticket = this->pushTicket_.load(std::memory_order_acquire); + auto numPops = this->popTicket_.load(std::memory_order_acquire); + if (!trySeqlockReadSection(state, slots, cap, stride)) { + asm_volatile_pause(); + continue; + } + + const auto curCap = cap; + // If there was an expansion with offset greater than this ticket, + // adjust accordingly + uint64_t offset; + maybeUpdateFromClosed(state, ticket, offset, slots, cap, stride); + + int64_t n = ticket - numPops; + + if (n >= static_cast(cap)) { + if ((cap == curCap) && tryExpand(state, cap)) { + // This or another thread started an expansion. Start over. 
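+          // (The next seqlock read section will observe the new slots
+          // array, capacity, and stride.)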
+          continue;
+        }
+        // Can't expand.
+        ticket -= offset;
+        return false;
+      }
+
+      if (this->pushTicket_.compare_exchange_strong(ticket, ticket + 1)) {
+        // Adjust ticket
+        ticket -= offset;
+        return true;
+      }
+    } while (true);
+  }
+
+  bool tryObtainReadyPopTicket(
+    uint64_t& ticket, Slot*& slots, size_t& cap, int& stride
+  ) noexcept {
+    uint64_t state;
+    do {
+      ticket = this->popTicket_.load(std::memory_order_relaxed);
+      if (!trySeqlockReadSection(state, slots, cap, stride)) {
+        asm_volatile_pause();
+        continue;
+      }
+
+      // If there was an expansion after the corresponding push ticket
+      // was issued, adjust accordingly
+      uint64_t offset;
+      maybeUpdateFromClosed(state, ticket, offset, slots, cap, stride);
+
+      if (slots[this->idx((ticket - offset), cap, stride)].mayDequeue(
+            this->turn(ticket - offset, cap))) {
+        if (this->popTicket_.compare_exchange_strong(ticket, ticket + 1)) {
+          // Adjust ticket
+          ticket -= offset;
+          return true;
+        }
+      } else {
+        return false;
+      }
+    } while (true);
+  }
+
+  bool tryObtainPromisedPopTicket(
+    uint64_t& ticket, Slot*& slots, size_t& cap, int& stride
+  ) noexcept {
+    uint64_t state;
+    do {
+      ticket = this->popTicket_.load(std::memory_order_acquire);
+      auto numPushes = this->pushTicket_.load(std::memory_order_acquire);
+      if (!trySeqlockReadSection(state, slots, cap, stride)) {
+        asm_volatile_pause();
+        continue;
+      }
+
+      uint64_t offset;
+      // If there was an expansion after the corresponding push
+      // ticket was issued, adjust accordingly
+      maybeUpdateFromClosed(state, ticket, offset, slots, cap, stride);
+
+      if (ticket >= numPushes) {
+        ticket -= offset;
+        return false;
+      }
+      if (this->popTicket_.compare_exchange_strong(ticket, ticket + 1)) {
+        ticket -= offset;
+        return true;
+      }
+    } while (true);
+  }
+
+  /// Enqueues an element with a specific ticket number
+  template <typename ...Args>
+  void enqueueWithTicket(const uint64_t ticket, Args&&... args) noexcept {
+    Slot* slots;
+    size_t cap;
+    int stride;
+    uint64_t state;
+    uint64_t offset;
+
+    while (!trySeqlockReadSection(state, slots, cap, stride)) {
+    }
+
+    // If there was an expansion after this ticket was issued, adjust
+    // accordingly
+    maybeUpdateFromClosed(state, ticket, offset, slots, cap, stride);
+
+    this->enqueueWithTicketBase(ticket-offset, slots, cap, stride,
+                                std::forward<Args>(args)...);
+  }
+
+  uint64_t getOffset(const uint64_t state) const noexcept {
+    return state >> kSeqlockBits;
+  }
+
+  int getNumClosed(const uint64_t state) const noexcept {
+    return (state & ((1 << kSeqlockBits) - 1)) >> 1;
+  }
+
+  /// Try to expand the queue. Returns true if this expansion was
+  /// successful or a concurrent expansion is in progress. Returns
+  /// false if the queue has reached its maximum capacity or
+  /// allocation has failed.
+  bool tryExpand(const uint64_t state, const size_t cap) noexcept {
+    if (cap == this->capacity_) {
+      return false;
+    }
+    // Acquire seqlock
+    uint64_t oldval = state;
+    assert((state & 1) == 0);
+    if (this->dstate_.compare_exchange_strong(oldval, state + 1)) {
+      assert(cap == this->dcapacity_.load());
+      uint64_t ticket =
+        1 + std::max(this->pushTicket_.load(), this->popTicket_.load());
+      size_t newCapacity = std::min(dmult_ * cap, this->capacity_);
+      Slot* newSlots =
+        new (std::nothrow) Slot[newCapacity + 2 * this->kSlotPadding];
+      if (newSlots == nullptr) {
+        // Expansion failed. Restore the seqlock
+        this->dstate_.store(state);
+        return false;
+      }
+      // Successful expansion
+      // calculate the current ticket offset
+      uint64_t offset = getOffset(state);
+      // calculate index in closed array
+      int index = getNumClosed(state);
+      assert((index << 1) < (1 << kSeqlockBits));
+      // fill the info for the closed slots array
+      closed_[index].offset_ = offset;
+      closed_[index].slots_ = this->dslots_.load();
+      closed_[index].capacity_ = cap;
+      closed_[index].stride_ = this->dstride_.load();
+      // update the new slots array info
+      this->dslots_.store(newSlots);
+      this->dcapacity_.store(newCapacity);
+      this->dstride_.store(this->computeStride(newCapacity));
+      // Release the seqlock and record the new ticket offset
+      this->dstate_.store((ticket << kSeqlockBits) + (2 * (index + 1)));
+      return true;
+    } else { // failed to acquire seqlock
+      // Someone acquired the seqlock. Go back to the caller and get
+      // up-to-date info.
+      return true;
+    }
+  }
+
+  /// Seqlock read-only section
+  bool trySeqlockReadSection(
+    uint64_t& state, Slot*& slots, size_t& cap, int& stride
+  ) noexcept {
+    state = this->dstate_.load(std::memory_order_acquire);
+    if (state & 1) {
+      // Locked.
+      return false;
+    }
+    // Start read-only section.
+    slots = this->dslots_.load(std::memory_order_relaxed);
+    cap = this->dcapacity_.load(std::memory_order_relaxed);
+    stride = this->dstride_.load(std::memory_order_relaxed);
+    // End of read-only section. Validate seqlock.
+    std::atomic_thread_fence(std::memory_order_acquire);
+    return (state == this->dstate_.load(std::memory_order_relaxed));
+  }
+
+  /// If there was an expansion after ticket was issued, update local variables
+  /// of the lagging operation using the most recent closed array with
+  /// offset <= ticket and return true.
Otherwise, return false; + bool maybeUpdateFromClosed( + const uint64_t state, + const uint64_t ticket, + uint64_t& offset, + Slot*& slots, + size_t& cap, + int& stride) noexcept { + offset = getOffset(state); + if (ticket >= offset) { + return false; + } + for (int i = getNumClosed(state) - 1; i >= 0; --i) { + offset = closed_[i].offset_; + if (offset <= ticket) { + slots = closed_[i].slots_; + cap = closed_[i].capacity_; + stride = closed_[i].stride_; + return true; + } + } + // A closed array with offset <= ticket should have been found + assert(false); + return false; + } +}; + +namespace detail { + +/// CRTP specialization of MPMCQueueBase +template < + template class Atom, bool Dynamic> + class Derived, + typename T, + template class Atom, + bool Dynamic> +class MPMCQueueBase> : boost::noncopyable { + +// Note: Using CRTP static casts in several functions of this base +// template instead of making called functions virtual or duplicating +// the code of calling functions in the derived partially specialized +// template static_assert(std::is_nothrow_constructible::value || folly::IsRelocatable::value, "T must be relocatable or have a noexcept move constructor"); - friend class detail::MPMCPipelineStageImpl; public: typedef T value_type; - explicit MPMCQueue(size_t queueCapacity) + using Slot = detail::SingleElementQueue; + + explicit MPMCQueueBase(size_t queueCapacity) : capacity_(queueCapacity) , pushTicket_(0) , popTicket_(0) , pushSpinCutoff_(0) , popSpinCutoff_(0) { - if (queueCapacity == 0) + if (queueCapacity == 0) { throw std::invalid_argument( "MPMCQueue with explicit capacity 0 is impossible" + // Stride computation in derived classes would sigfpe if capacity is 0 ); - - // would sigfpe if capacity is 0 - stride_ = computeStride(queueCapacity); - slots_ = new detail::SingleElementQueue[queueCapacity + - 2 * kSlotPadding]; + } // ideally this would be a static assert, but g++ doesn't allow it - assert(alignof(MPMCQueue) - >= detail::CacheLocality::kFalseSharingRange); - assert(static_cast(static_cast(&popTicket_)) - - static_cast(static_cast(&pushTicket_)) - >= detail::CacheLocality::kFalseSharingRange); + assert(alignof(MPMCQueue) >= CacheLocality::kFalseSharingRange); + assert( + static_cast(static_cast(&popTicket_)) - + static_cast(static_cast(&pushTicket_)) >= + CacheLocality::kFalseSharingRange); } /// A default-constructed queue is useful because a usable (non-zero /// capacity) queue can be moved onto it or swapped with it - MPMCQueue() noexcept + MPMCQueueBase() noexcept : capacity_(0) , slots_(nullptr) , stride_(0) + , dstate_(0) + , dcapacity_(0) , pushTicket_(0) , popTicket_(0) , pushSpinCutoff_(0) @@ -145,10 +675,12 @@ class MPMCQueue : boost::noncopyable { /// IMPORTANT: The move constructor is here to make it easier to perform /// the initialization phase, it is not safe to use when there are any /// concurrent accesses (this is not checked). 
-  MPMCQueue(MPMCQueue<T,Atom>&& rhs) noexcept
+  MPMCQueueBase(MPMCQueueBase<Derived<T, Atom, Dynamic>>&& rhs) noexcept
     : capacity_(rhs.capacity_)
     , slots_(rhs.slots_)
     , stride_(rhs.stride_)
+    , dstate_(rhs.dstate_.load(std::memory_order_relaxed))
+    , dcapacity_(rhs.dcapacity_.load(std::memory_order_relaxed))
     , pushTicket_(rhs.pushTicket_.load(std::memory_order_relaxed))
     , popTicket_(rhs.popTicket_.load(std::memory_order_relaxed))
     , pushSpinCutoff_(rhs.pushSpinCutoff_.load(std::memory_order_relaxed))
@@ -161,6 +693,8 @@ class MPMCQueue : boost::noncopyable {
     rhs.capacity_ = 0;
     rhs.slots_ = nullptr;
     rhs.stride_ = 0;
+    rhs.dstate_.store(0, std::memory_order_relaxed);
+    rhs.dcapacity_.store(0, std::memory_order_relaxed);
     rhs.pushTicket_.store(0, std::memory_order_relaxed);
     rhs.popTicket_.store(0, std::memory_order_relaxed);
     rhs.pushSpinCutoff_.store(0, std::memory_order_relaxed);
@@ -170,23 +704,30 @@ class MPMCQueue : boost::noncopyable {
   /// IMPORTANT: The move operator is here to make it easier to perform
   /// the initialization phase, it is not safe to use when there are any
   /// concurrent accesses (this is not checked).
-  MPMCQueue<T,Atom> const& operator= (MPMCQueue<T,Atom>&& rhs) {
+  MPMCQueueBase<Derived<T, Atom, Dynamic>> const& operator=
+    (MPMCQueueBase<Derived<T, Atom, Dynamic>>&& rhs) {
     if (this != &rhs) {
-      this->~MPMCQueue();
-      new (this) MPMCQueue(std::move(rhs));
+      this->~MPMCQueueBase();
+      new (this) MPMCQueueBase(std::move(rhs));
     }
     return *this;
   }

   /// MPMCQueue can only be safely destroyed when there are no
   /// pending enqueuers or dequeuers (this is not checked).
-  ~MPMCQueue() {
+  ~MPMCQueueBase() {
     delete[] slots_;
   }

-  /// Returns the number of successful reads minus the number of successful
-  /// writes. Waiting blockingRead and blockingWrite calls are included,
-  /// so this value can be negative.
+  /// Returns the number of writes (including threads that are blocked waiting
+  /// to write) minus the number of reads (including threads that are blocked
+  /// waiting to read). So effectively, it becomes:
+  /// elements in queue + pending(calls to write) - pending(calls to read).
+  /// If nothing is pending, then the method returns the actual number of
+  /// elements in the queue.
+  /// The returned value can be negative if there are no writers and the queue
+  /// is empty, but there is one reader that is blocked waiting to read (in
+  /// which case, the returned size will be -1).
   ssize_t size() const noexcept {
     // since both pushes and pops increase monotonically, we can get a
     // consistent snapshot either by bracketing a read of popTicket_ with
@@ -200,14 +741,14 @@ class MPMCQueue : boost::noncopyable {
       if (pushes == nextPushes) {
         // pushTicket_ didn't change from A (or the previous C) to C,
         // so we can linearize at B (or D)
-        return pushes - pops;
+        return ssize_t(pushes - pops);
       }
       pushes = nextPushes;
       uint64_t nextPops = popTicket_.load(std::memory_order_acquire); // D
       if (pops == nextPops) {
         // popTicket_ didn't change from B (or the previous D), so we
         // can linearize at C
-        return pushes - pops;
+        return ssize_t(pushes - pops);
       }
       pops = nextPops;
     }
   }
@@ -225,7 +766,11 @@ class MPMCQueue : boost::noncopyable {
   }

   /// Returns a guess at size() for contexts that don't need a precise
-  /// value, such as stats.
+  /// value, such as stats. More specifically, it returns the number of writes
+  /// minus the number of reads, but after reading the number of writes, more
+  /// writers could have come before the number of reads was sampled,
+  /// and this method doesn't protect against such a case.
+  /// The returned value can be negative.
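+  /// (For example, three threads blocked in blockingRead() on an empty
+  /// queue make a subsequent sizeGuess() return -3.)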
  ssize_t sizeGuess() const noexcept {
     return writeCount() - readCount();
   }
@@ -235,6 +780,11 @@ class MPMCQueue : boost::noncopyable {
     return capacity_;
   }

+  /// Doesn't change for non-dynamic
+  size_t allocatedCapacity() const noexcept {
+    return capacity_;
+  }
+
   /// Returns the total number of calls to blockingWrite or successful
   /// calls to write, including those blockingWrite calls that are
   /// currently blocking
@@ -256,7 +806,8 @@ class MPMCQueue : boost::noncopyable {
   /// to a T constructor.
   template <typename ...Args>
   void blockingWrite(Args&&... args) noexcept {
-    enqueueWithTicket(pushTicket_++, std::forward<Args>(args)...);
+    enqueueWithTicketBase(pushTicket_++, slots_, capacity_, stride_,
+                          std::forward<Args>(args)...);
   }

   /// If an item can be enqueued with no blocking, does so and returns
@@ -275,9 +826,14 @@ class MPMCQueue : boost::noncopyable {
   template <typename ...Args>
   bool write(Args&&... args) noexcept {
     uint64_t ticket;
-    if (tryObtainReadyPushTicket(ticket)) {
+    Slot* slots;
+    size_t cap;
+    int stride;
+    if (static_cast<Derived<T, Atom, Dynamic>*>(this)->
+        tryObtainReadyPushTicket(ticket, slots, cap, stride)) {
       // we have pre-validated that the ticket won't block
-      enqueueWithTicket(ticket, std::forward<Args>(args)...);
+      enqueueWithTicketBase(ticket, slots, cap, stride,
+                            std::forward<Args>(args)...);
       return true;
     } else {
       return false;
@@ -288,11 +844,15 @@ class MPMCQueue : boost::noncopyable {
   bool tryWriteUntil(const std::chrono::time_point<Clock>& when,
                      Args&&... args) noexcept {
     uint64_t ticket;
-    if (tryObtainPromisedPushTicketUntil(ticket, when)) {
-      // we have pre-validated that the ticket won't block, or rather that
-      // it won't block longer than it takes another thread to dequeue an
-      // element from the slot it identifies.
-      enqueueWithTicket(ticket, std::forward<Args>(args)...);
+    Slot* slots;
+    size_t cap;
+    int stride;
+    if (tryObtainPromisedPushTicketUntil(ticket, slots, cap, stride, when)) {
+      // we have pre-validated that the ticket won't block, or rather that
+      // it won't block longer than it takes another thread to dequeue an
+      // element from the slot it identifies.
+      enqueueWithTicketBase(ticket, slots, cap, stride,
+                            std::forward<Args>(args)...);
       return true;
     } else {
       return false;
@@ -315,10 +875,15 @@ class MPMCQueue : boost::noncopyable {
   template <typename ...Args>
   bool writeIfNotFull(Args&&... args) noexcept {
     uint64_t ticket;
-    if (tryObtainPromisedPushTicket(ticket)) {
+    Slot* slots;
+    size_t cap;
+    int stride;
+    if (static_cast<Derived<T, Atom, Dynamic>*>(this)->
+        tryObtainPromisedPushTicket(ticket, slots, cap, stride)) {
       // some other thread is already dequeuing the slot into which we
       // are going to enqueue, but we might have to wait for them to finish
-      enqueueWithTicket(ticket, std::forward<Args>(args)...);
+      enqueueWithTicketBase(ticket, slots, cap, stride,
+                            std::forward<Args>(args)...);
       return true;
     } else {
       return false;
@@ -328,16 +893,53 @@ class MPMCQueue : boost::noncopyable {
   /// Moves a dequeued element onto elem, blocking until an element
   /// is available
   void blockingRead(T& elem) noexcept {
-    dequeueWithTicket(popTicket_++, elem);
+    uint64_t ticket;
+    static_cast<Derived<T, Atom, Dynamic>*>(this)->
+      blockingReadWithTicket(ticket, elem);
+  }
+
+  /// Same as blockingRead() but also records the ticket number
+  void blockingReadWithTicket(uint64_t& ticket, T& elem) noexcept {
+    assert(capacity_ != 0);
+    ticket = popTicket_++;
+    dequeueWithTicketBase(ticket, slots_, capacity_, stride_, elem);
   }

   /// If an item can be dequeued with no blocking, does so and returns
   /// true, otherwise returns false.
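   /// A typical non-blocking drain loop (an illustrative sketch; q is
   /// an MPMCQueue<int> and consume() stands for arbitrary caller code):
   ///
   ///   int item;
   ///   while (q.read(item)) {
   ///     consume(item);
   ///   }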
  bool read(T& elem) noexcept {
     uint64_t ticket;
-    if (tryObtainReadyPopTicket(ticket)) {
+    return readAndGetTicket(ticket, elem);
+  }
+
+  /// Same as read() but also records the ticket number
+  bool readAndGetTicket(uint64_t& ticket, T& elem) noexcept {
+    Slot* slots;
+    size_t cap;
+    int stride;
+    if (static_cast<Derived<T, Atom, Dynamic>*>(this)->
+        tryObtainReadyPopTicket(ticket, slots, cap, stride)) {
       // the ticket has been pre-validated to not block
-      dequeueWithTicket(ticket, elem);
+      dequeueWithTicketBase(ticket, slots, cap, stride, elem);
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  template <class Clock, typename... Args>
+  bool tryReadUntil(
+      const std::chrono::time_point<Clock>& when,
+      T& elem) noexcept {
+    uint64_t ticket;
+    Slot* slots;
+    size_t cap;
+    int stride;
+    if (tryObtainPromisedPopTicketUntil(ticket, slots, cap, stride, when)) {
+      // we have pre-validated that the ticket won't block, or rather that
+      // it won't block longer than it takes another thread to enqueue an
+      // element on the slot it identifies.
+      dequeueWithTicketBase(ticket, slots, cap, stride, elem);
       return true;
     } else {
       return false;
@@ -351,16 +953,20 @@ class MPMCQueue : boost::noncopyable {
   /// prefer read.
   bool readIfNotEmpty(T& elem) noexcept {
     uint64_t ticket;
-    if (tryObtainPromisedPopTicket(ticket)) {
+    Slot* slots;
+    size_t cap;
+    int stride;
+    if (static_cast<Derived<T, Atom, Dynamic>*>(this)->
+        tryObtainPromisedPopTicket(ticket, slots, cap, stride)) {
       // the matching enqueue already has a ticket, but might not be done
-      dequeueWithTicket(ticket, elem);
+      dequeueWithTicketBase(ticket, slots, cap, stride, elem);
       return true;
     } else {
       return false;
     }
   }

-  private:
+  protected:
   enum {
     /// Once every kAdaptationFreq we will spin longer, to try to estimate
     /// the proper spin backoff
@@ -369,22 +975,41 @@ class MPMCQueue : boost::noncopyable {
     /// To avoid false sharing in slots_ with neighboring memory
     /// allocations, we pad it with this many SingleElementQueue-s at
     /// each end
-    kSlotPadding = (detail::CacheLocality::kFalseSharingRange - 1)
-        / sizeof(detail::SingleElementQueue<T,Atom>) + 1
+    kSlotPadding = (CacheLocality::kFalseSharingRange - 1) / sizeof(Slot) + 1
   };

   /// The maximum number of items in the queue at once
   size_t FOLLY_ALIGN_TO_AVOID_FALSE_SHARING capacity_;

+  /// Anonymous union for use when Dynamic = false and true, respectively
+  union {
     /// An array of capacity_ SingleElementQueue-s, each of which holds
     /// either 0 or 1 item. We over-allocate by 2 * kSlotPadding and don't
     /// touch the slots at either end, to avoid false sharing
-  detail::SingleElementQueue<T,Atom>* slots_;
+    Slot* slots_;
+    /// Current dynamic slots array of dcapacity_ SingleElementQueue-s
+    Atom<Slot*> dslots_;
+  };

-  /// The number of slots_ indices that we advance for each ticket, to
-  /// avoid false sharing. Ideally slots_[i] and slots_[i + stride_]
-  /// aren't on the same cache line
-  int stride_;
+  /// Anonymous union for use when Dynamic = false and true, respectively
+  union {
+    /// The number of slots_ indices that we advance for each ticket, to
+    /// avoid false sharing. Ideally slots_[i] and slots_[i + stride_]
+    /// aren't on the same cache line
+    int stride_;
+    /// Current stride
+    Atom<int> dstride_;
+  };
+
+  /// The following two members are used by dynamic MPMCQueue.
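+  /// (dstate_ packs three fields into one 64-bit word: the current
+  /// ticket offset in the high bits, recovered as state >> kSeqlockBits;
+  /// the closed-array count in bits 1 .. kSeqlockBits - 1; and the
+  /// seqlock write bit in bit 0.)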
+ /// Ideally they should be in MPMCQueue, but we get + /// better cache locality if they are in the same cache line as + /// dslots_ and dstride_. + /// + /// Dynamic state. A packed seqlock and ticket offset + Atom dstate_; + /// Dynamic capacity + Atom dcapacity_; /// Enqueuers get tickets from here Atom FOLLY_ALIGN_TO_AVOID_FALSE_SHARING pushTicket_; @@ -402,9 +1027,7 @@ class MPMCQueue : boost::noncopyable { /// Alignment doesn't prevent false sharing at the end of the struct, /// so fill out the last cache line - char padding_[detail::CacheLocality::kFalseSharingRange - - sizeof(Atom)]; - + char padding_[CacheLocality::kFalseSharingRange - sizeof(Atom)]; /// We assign tickets in increasing order, but we don't want to /// access neighboring elements of slots_ because that will lead to @@ -446,23 +1069,30 @@ class MPMCQueue : boost::noncopyable { /// Returns the index into slots_ that should be used when enqueuing or /// dequeuing with the specified ticket - size_t idx(uint64_t ticket) noexcept { - return ((ticket * stride_) % capacity_) + kSlotPadding; + size_t idx(uint64_t ticket, size_t cap, int stride) noexcept { + return ((ticket * stride) % cap) + kSlotPadding; } /// Maps an enqueue or dequeue ticket to the turn should be used at the /// corresponding SingleElementQueue - uint32_t turn(uint64_t ticket) noexcept { - return ticket / capacity_; + uint32_t turn(uint64_t ticket, size_t cap) noexcept { + assert(cap != 0); + return uint32_t(ticket / cap); } /// Tries to obtain a push ticket for which SingleElementQueue::enqueue /// won't block. Returns true on immediate success, false on immediate /// failure. - bool tryObtainReadyPushTicket(uint64_t& rv) noexcept { - auto ticket = pushTicket_.load(std::memory_order_acquire); // A + bool tryObtainReadyPushTicket( + uint64_t& ticket, Slot*& slots, size_t& cap, int& stride + ) noexcept { + ticket = pushTicket_.load(std::memory_order_acquire); // A + slots = slots_; + cap = capacity_; + stride = stride_; while (true) { - if (!slots_[idx(ticket)].mayEnqueue(turn(ticket))) { + if (!slots[idx(ticket, cap, stride)] + .mayEnqueue(turn(ticket, cap))) { // if we call enqueue(ticket, ...) on the SingleElementQueue // right now it would block, but this might no longer be the next // ticket. We can increase the chance of tryEnqueue success under @@ -479,7 +1109,6 @@ class MPMCQueue : boost::noncopyable { // or prev failing CAS) and the following CAS. If the CAS fails // it will effect a load of pushTicket_ if (pushTicket_.compare_exchange_strong(ticket, ticket + 1)) { - rv = ticket; return true; } } @@ -492,18 +1121,22 @@ class MPMCQueue : boost::noncopyable { /// ticket is filled on success AND failure. template bool tryObtainPromisedPushTicketUntil( - uint64_t& ticket, const std::chrono::time_point& when) noexcept { + uint64_t& ticket, Slot*& slots, size_t& cap, int& stride, + const std::chrono::time_point& when + ) noexcept { bool deadlineReached = false; while (!deadlineReached) { - if (tryObtainPromisedPushTicket(ticket)) { + if (static_cast*>(this)-> + tryObtainPromisedPushTicket(ticket, slots, cap, stride)) { return true; } // ticket is a blocking ticket until the preceding ticket has been // processed: wait until this ticket's turn arrives. We have not reserved // this ticket so we will have to re-attempt to get a non-blocking ticket // if we wake up before we time-out. 
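      // The even turn (turn * 2) waited on below is the slot's enqueue
      // turn; it arrives once the slot's previous occupant has been
      // dequeued.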
- deadlineReached = !slots_[idx(ticket)].tryWaitForEnqueueTurnUntil( - turn(ticket), pushSpinCutoff_, (ticket % kAdaptationFreq) == 0, when); + deadlineReached = !slots[idx(ticket, cap, stride)] + .tryWaitForEnqueueTurnUntil(turn(ticket, cap), pushSpinCutoff_, + (ticket % kAdaptationFreq) == 0, when); } return false; } @@ -513,13 +1146,18 @@ class MPMCQueue : boost::noncopyable { /// blocking may be required when using the returned ticket if some /// other thread's pop is still in progress (ticket has been granted but /// pop has not yet completed). - bool tryObtainPromisedPushTicket(uint64_t& rv) noexcept { + bool tryObtainPromisedPushTicket( + uint64_t& ticket, Slot*& slots, size_t& cap, int& stride + ) noexcept { auto numPushes = pushTicket_.load(std::memory_order_acquire); // A + slots = slots_; + cap = capacity_; + stride = stride_; while (true) { - auto numPops = popTicket_.load(std::memory_order_acquire); // B + ticket = numPushes; + const auto numPops = popTicket_.load(std::memory_order_acquire); // B // n will be negative if pops are pending - int64_t n = numPushes - numPops; - rv = numPushes; + const int64_t n = int64_t(numPushes - numPops); if (n >= static_cast(capacity_)) { // Full, linearize at B. We don't need to recheck the read we // performed at A, because if numPushes was stale at B then the @@ -535,10 +1173,16 @@ class MPMCQueue : boost::noncopyable { /// Tries to obtain a pop ticket for which SingleElementQueue::dequeue /// won't block. Returns true on immediate success, false on immediate /// failure. - bool tryObtainReadyPopTicket(uint64_t& rv) noexcept { - auto ticket = popTicket_.load(std::memory_order_acquire); + bool tryObtainReadyPopTicket( + uint64_t& ticket, Slot*& slots, size_t& cap, int& stride + ) noexcept { + ticket = popTicket_.load(std::memory_order_acquire); + slots = slots_; + cap = capacity_; + stride = stride_; while (true) { - if (!slots_[idx(ticket)].mayDequeue(turn(ticket))) { + if (!slots[idx(ticket, cap, stride)] + .mayDequeue(turn(ticket, cap))) { auto prev = ticket; ticket = popTicket_.load(std::memory_order_acquire); if (prev == ticket) { @@ -546,13 +1190,43 @@ class MPMCQueue : boost::noncopyable { } } else { if (popTicket_.compare_exchange_strong(ticket, ticket + 1)) { - rv = ticket; return true; } } } } + /// Tries until when to obtain a pop ticket for which + /// SingleElementQueue::dequeue won't block. Returns true on success, false + /// on failure. + /// ticket is filled on success AND failure. + template + bool tryObtainPromisedPopTicketUntil( + uint64_t& ticket, + Slot*& slots, + size_t& cap, + int& stride, + const std::chrono::time_point& when) noexcept { + bool deadlineReached = false; + while (!deadlineReached) { + if (static_cast*>(this) + ->tryObtainPromisedPopTicket(ticket, slots, cap, stride)) { + return true; + } + // ticket is a blocking ticket until the preceding ticket has been + // processed: wait until this ticket's turn arrives. We have not reserved + // this ticket so we will have to re-attempt to get a non-blocking ticket + // if we wake up before we time-out. 
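      // The odd turn (turn * 2 + 1) waited on below is the slot's dequeue
      // turn; it arrives once the matching enqueue has completed. (The
      // call passes pushSpinCutoff_, so the adaptive spin estimate here
      // is the one shared with the enqueue side.)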
+ deadlineReached = + !slots[idx(ticket, cap, stride)].tryWaitForDequeueTurnUntil( + turn(ticket, cap), + pushSpinCutoff_, + (ticket % kAdaptationFreq) == 0, + when); + } + return false; + } + /// Similar to tryObtainReadyPopTicket, but returns a pop ticket whose /// corresponding push ticket has already been handed out, rather than /// returning one whose corresponding push ticket has already been @@ -563,10 +1237,16 @@ class MPMCQueue : boost::noncopyable { /// to block waiting for someone to call enqueue, although we might /// have to block waiting for them to finish executing code inside the /// MPMCQueue itself. - bool tryObtainPromisedPopTicket(uint64_t& rv) noexcept { + bool tryObtainPromisedPopTicket( + uint64_t& ticket, Slot*& slots, size_t& cap, int& stride + ) noexcept { auto numPops = popTicket_.load(std::memory_order_acquire); // A + slots = slots_; + cap = capacity_; + stride = stride_; while (true) { - auto numPushes = pushTicket_.load(std::memory_order_acquire); // B + ticket = numPops; + const auto numPushes = pushTicket_.load(std::memory_order_acquire); // B if (numPops >= numPushes) { // Empty, or empty with pending pops. Linearize at B. We don't // need to recheck the read we performed at A, because if numPops @@ -574,7 +1254,6 @@ class MPMCQueue : boost::noncopyable { return false; } if (popTicket_.compare_exchange_strong(numPops, numPops + 1)) { - rv = numPops; return true; } } @@ -582,25 +1261,36 @@ class MPMCQueue : boost::noncopyable { // Given a ticket, constructs an enqueued item using args template + void enqueueWithTicketBase( + uint64_t ticket, Slot* slots, size_t cap, int stride, Args&&... args + ) noexcept { + slots[idx(ticket, cap, stride)] + .enqueue(turn(ticket, cap), + pushSpinCutoff_, + (ticket % kAdaptationFreq) == 0, + std::forward(args)...); + } + + // To support tracking ticket numbers in MPMCPipelineStageImpl + template void enqueueWithTicket(uint64_t ticket, Args&&... args) noexcept { - slots_[idx(ticket)].enqueue(turn(ticket), - pushSpinCutoff_, - (ticket % kAdaptationFreq) == 0, - std::forward(args)...); + enqueueWithTicketBase(ticket, slots_, capacity_, stride_, + std::forward(args)...); } // Given a ticket, dequeues the corresponding element - void dequeueWithTicket(uint64_t ticket, T& elem) noexcept { - slots_[idx(ticket)].dequeue(turn(ticket), - popSpinCutoff_, - (ticket % kAdaptationFreq) == 0, - elem); + void dequeueWithTicketBase( + uint64_t ticket, Slot* slots, size_t cap, int stride, T& elem + ) noexcept { + assert(cap != 0); + slots[idx(ticket, cap, stride)] + .dequeue(turn(ticket, cap), + popSpinCutoff_, + (ticket % kAdaptationFreq) == 0, + elem); } }; - -namespace detail { - /// SingleElementQueue implements a blocking queue that holds at most one /// item, and that requires its users to assign incrementing identifiers /// (turns) to each enqueue and dequeue operation. Note that the turns @@ -616,9 +1306,10 @@ struct SingleElementQueue { } /// enqueue using in-place noexcept construction - template ::value>::type> + template < + typename... 
Args, + typename = typename std::enable_if< + std::is_nothrow_constructible::value>::type> void enqueue(const uint32_t turn, Atom& spinCutoff, const bool updateSpinCutoff, @@ -630,15 +1321,17 @@ struct SingleElementQueue { /// enqueue using move construction, either real (if /// is_nothrow_move_constructible) or simulated using relocation and - /// default construction (if IsRelocatable and has_nothrow_constructor) - template ::value && - boost::has_nothrow_constructor::value) || - std::is_nothrow_constructible::value>::type> - void enqueue(const uint32_t turn, - Atom& spinCutoff, - const bool updateSpinCutoff, - T&& goner) noexcept { + /// default construction (if IsRelocatable and is_nothrow_constructible) + template < + typename = typename std::enable_if< + (folly::IsRelocatable::value && + std::is_nothrow_constructible::value) || + std::is_nothrow_constructible::value>::type> + void enqueue( + const uint32_t turn, + Atom& spinCutoff, + const bool updateSpinCutoff, + T&& goner) noexcept { enqueueImpl( turn, spinCutoff, @@ -659,7 +1352,8 @@ struct SingleElementQueue { const bool updateSpinCutoff, const std::chrono::time_point& when) noexcept { return sequencer_.tryWaitForTurn( - turn * 2, spinCutoff, updateSpinCutoff, &when); + turn * 2, spinCutoff, updateSpinCutoff, &when) != + TurnSequencer::TryWaitResult::TIMEDOUT; } bool mayEnqueue(const uint32_t turn) const noexcept { @@ -679,6 +1373,21 @@ struct SingleElementQueue { ImplByMove>::type()); } + /// Waits until either: + /// 1: the enqueue turn preceding the given dequeue turn has arrived + /// 2: the given deadline has arrived + /// Case 1 returns true, case 2 returns false. + template + bool tryWaitForDequeueTurnUntil( + const uint32_t turn, + Atom& spinCutoff, + const bool updateSpinCutoff, + const std::chrono::time_point& when) noexcept { + return sequencer_.tryWaitForTurn( + turn * 2 + 1, spinCutoff, updateSpinCutoff, &when) != + TurnSequencer::TryWaitResult::TIMEDOUT; + } + bool mayDequeue(const uint32_t turn) const noexcept { return sequencer_.isTurn(turn * 2 + 1); }
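
// Illustrative sketch (an editor's example, not part of this commit) of
// the deadline-based operations declared above; both take an absolute
// std::chrono time_point and return false if no ticket could be secured
// by the deadline:
//
//   folly::MPMCQueue<int> q(64);
//   auto deadline = std::chrono::steady_clock::now() +
//       std::chrono::milliseconds(10);
//   if (!q.tryWriteUntil(deadline, 7)) { /* still full at the deadline */ }
//   int v;
//   if (!q.tryReadUntil(deadline, v)) { /* still empty at the deadline */ }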