diff --git a/folly/IndexedMemPool.h b/folly/IndexedMemPool.h
index 3a49b634..82ae6295 100644
--- a/folly/IndexedMemPool.h
+++ b/folly/IndexedMemPool.h
@@ -16,14 +16,16 @@
 #pragma once
 
-#include <type_traits>
 #include <assert.h>
 #include <errno.h>
 #include <stdint.h>
+
+#include <type_traits>
+
 #include <boost/noncopyable.hpp>
 
 #include <folly/AtomicStruct.h>
 #include <folly/Portability.h>
-#include <folly/detail/CacheLocality.h>
+#include <folly/concurrency/CacheLocality.h>
 #include <folly/portability/SysMman.h>
 #include <folly/portability/Unistd.h>
 
@@ -36,7 +38,7 @@ namespace folly {
 namespace detail {
 template <typename Pool>
 struct IndexedMemPoolRecycler;
-}
+} // namespace detail
 
 template <
     typename T,
@@ -259,7 +261,6 @@ struct IndexedMemPool : boost::noncopyable {
   /// Gives up ownership previously granted by alloc()
   void recycleIndex(uint32_t idx) {
     assert(isAllocated(idx));
-    Traits::onRecycle(&slot(idx).elem);
     localPush(localHead(), idx);
   }
 
@@ -422,7 +423,8 @@
     Slot& s = slot(idx);
     TaggedPtr h = head.load(std::memory_order_acquire);
     while (true) {
-      s.localNext.store(h.idx, std::memory_order_relaxed);
+      s.localNext.store(h.idx, std::memory_order_release);
+      Traits::onRecycle(&slot(idx).elem);
 
       if (h.size() == LocalListLimit) {
         // push will overflow local list, steal it instead
@@ -497,13 +499,16 @@
   }
 
   AtomicStruct<TaggedPtr, Atom>& localHead() {
-    auto stripe = detail::AccessSpreader<Atom>::current(NumLocalLists);
+    auto stripe = AccessSpreader<Atom>::current(NumLocalLists);
     return local_[stripe].head;
   }
 
   void markAllocated(Slot& slot) {
     slot.localNext.store(uint32_t(-1), std::memory_order_release);
   }
+
+ public:
+  static constexpr std::size_t kSlotSize = sizeof(Slot);
 };
 
 namespace detail {
@@ -527,7 +532,7 @@ struct IndexedMemPoolRecycler {
   }
 };
 
-}
+} // namespace detail
 
 } // namespace folly
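
Review note on the recycleIndex()/localPush() hunks: the Traits::onRecycle()
hook moves from recycleIndex() into localPush(), and the store to s.localNext
is upgraded from memory_order_relaxed to memory_order_release, so the recycle
hook's writes are ordered before the slot is published to the free list. A
minimal sketch of that publish pattern follows; Node, g_head, scrub(), push(),
and pop() are hypothetical stand-ins, not folly code.

    #include <atomic>

    struct Node {
      int payload;
      std::atomic<Node*> next{nullptr};
    };

    std::atomic<Node*> g_head{nullptr};

    void scrub(Node* n) {
      n->payload = 0; // stand-in for the Traits::onRecycle() cleanup hook
    }

    void push(Node* n) {
      scrub(n); // must finish before any other thread can reach `n`
      Node* h = g_head.load(std::memory_order_acquire);
      do {
        // release pairs with the acquire load of `next` in pop(), making
        // scrub()'s writes visible to whichever thread pops `n`
        n->next.store(h, std::memory_order_release);
      } while (!g_head.compare_exchange_weak(
          h, n, std::memory_order_release, std::memory_order_acquire));
    }

    Node* pop() {
      Node* h = g_head.load(std::memory_order_acquire);
      while (h != nullptr) {
        Node* next = h->next.load(std::memory_order_acquire);
        // NB: raw-pointer pop is ABA-prone; the pool avoids this by CASing
        // a TaggedPtr (index plus tag/size bits) instead of a bare pointer
        if (g_head.compare_exchange_weak(
                h, next, std::memory_order_acq_rel, std::memory_order_acquire)) {
          break;
        }
      }
      return h;
    }

With the relaxed store and the hook running in recycleIndex(), nothing forced
the cleanup writes to be visible to the thread that next pops the slot; the
release store plus the acquire load on the pop side closes that gap.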
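
Review note on the localHead() hunk: dropping the detail:: qualifier from
AccessSpreader matches the include change from folly/detail/CacheLocality.h to
folly/concurrency/CacheLocality.h, where AccessSpreader now lives directly in
the folly namespace. The stripe it returns spreads callers across the pool's
NumLocalLists per-core free lists. Below is a rough sketch of the striping
idea only; currentStripe() and StripedCounter are hypothetical stand-ins (the
real AccessSpreader maps the calling CPU, not a thread-id hash, to a stripe).

    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <thread>

    std::size_t currentStripe(std::size_t numStripes) {
      // portable stand-in: hash the thread id onto one of numStripes slots
      auto h = std::hash<std::thread::id>{}(std::this_thread::get_id());
      return h % numStripes;
    }

    // Why striping helps: concurrent writers mostly land on different cache
    // lines instead of all hammering one hot line.
    struct StripedCounter {
      static constexpr std::size_t kStripes = 32;
      struct alignas(64) Stripe { // 64 bytes assumed as the false-sharing range
        std::atomic<std::uint64_t> value{0};
      };
      Stripe stripes_[kStripes];

      void add(std::uint64_t n) {
        stripes_[currentStripe(kStripes)].value.fetch_add(
            n, std::memory_order_relaxed);
      }

      std::uint64_t read() const { // approximate while writers are active
        std::uint64_t sum = 0;
        for (auto const& s : stripes_) {
          sum += s.value.load(std::memory_order_relaxed);
        }
        return sum;
      }
    };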
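
Review note on the new public kSlotSize constant: exposing sizeof(Slot) lets
callers reason about the pool's per-element footprint (the element plus the
pool's per-slot link words) without access to the private Slot type. A
hypothetical sizing estimate using it follows; Widget and the capacity figure
are illustrative, and the pool may round its reservation up internally, so
treat the result as an estimate rather than an exact figure.

    #include <cstddef>
    #include <cstdio>

    #include <folly/IndexedMemPool.h>

    struct Widget {
      char buf[48];
    };

    int main() {
      using Pool = folly::IndexedMemPool<Widget>;
      constexpr std::size_t slotSize = Pool::kSlotSize; // sizeof(Slot)
      constexpr std::size_t capacity = 1 << 20;
      // rough upper bound for the slot array backing `capacity` elements
      std::size_t approxBytes = capacity * slotSize;
      std::printf("one slot = %zu bytes, ~%zu MiB for %zu slots\n",
                  slotSize, approxBytes >> 20, capacity);
      return 0;
    }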
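
Review note on the IndexedMemPoolRecycler hunks (namespace-closing comments
only): this is the stateful deleter that lets the pool hand out a unique_ptr
whose destructor returns the element to the owning pool instead of deleting
it. A generic sketch of that deleter pattern, assuming a pool with a recycle()
entry point; PoolRecycler and FakePool are hypothetical stand-ins, not folly
API.

    #include <cassert>
    #include <memory>

    template <typename Pool>
    struct PoolRecycler {
      Pool* pool;
      explicit PoolRecycler(Pool* p) : pool(p) {}
      void operator()(typename Pool::value_type* elem) const {
        pool->recycle(elem); // hand the slot back; no delete
      }
    };

    struct FakePool {
      using value_type = int;

      int storage[4];
      bool inUse[4] = {};

      int* alloc() {
        for (int i = 0; i < 4; ++i) {
          if (!inUse[i]) {
            inUse[i] = true;
            return &storage[i];
          }
        }
        return nullptr;
      }
      void recycle(int* p) { inUse[p - storage] = false; }
    };

    using PoolPtr = std::unique_ptr<int, PoolRecycler<FakePool>>;

    PoolPtr allocElem(FakePool& pool) {
      return PoolPtr(pool.alloc(), PoolRecycler<FakePool>(&pool));
    }

    int main() {
      FakePool pool;
      {
        PoolPtr p = allocElem(pool);
        *p = 42; // slot 0 is live here
      } // deleter runs: the slot goes back to the pool, not to the heap
      assert(!pool.inUse[0]);
      return 0;
    }

Because the deleter carries the pool pointer, the handle is one word larger
than a plain pointer; folly's recycler likewise stores the pool and maps the
element pointer back to its slot index before recycling.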