-//$$CDS-header$$
-
-#ifndef __CDS_INTRUSIVE_MOIR_QUEUE_H
-#define __CDS_INTRUSIVE_MOIR_QUEUE_H
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSLIB_INTRUSIVE_MOIR_QUEUE_H
+#define CDSLIB_INTRUSIVE_MOIR_QUEUE_H
#include <cds/intrusive/msqueue.h>
This is slightly optimized Michael & Scott's queue algorithm that overloads \ref dequeue function.
Source:
- \li [2000] Simon Doherty, Lindsay Groves, Victor Luchangco, Mark Moir
+ - [2000] Simon Doherty, Lindsay Groves, Victor Luchangco, Mark Moir
"Formal Verification of a practical lock-free queue algorithm"
Cite from this work about difference from Michael & Scott algo:
- "Our algorithm differs from Michael and Scott\92s [MS98] in that we test whether \p Tail points to the header
+ "Our algorithm differs from Michael and Scott's [MS98] in that we test whether \p Tail points to the header
node only <b>after</b> \p Head has been updated, so a dequeuing process reads \p Tail only once. The dequeue in
[MS98] performs this test before checking whether the next pointer in the dummy node is null, which
means that it reads \p Tail every time a dequeuing process loops. Under high load, when operations retry
frequently, our modification will reduce the number of accesses to global memory. This modification, however,
- introduces the possibility of \p Head and \p Tail \93crossing\94."
-
- Type of node: \ref single_link::node
+ introduces the possibility of \p Head and \p Tail 'crossing'."
- Explanation of template arguments see intrusive::MSQueue.
+ Explanation of template arguments see \p intrusive::MSQueue.
\par Examples
\code
typedef cds::gc::HP hp_gc;
// MoirQueue with Hazard Pointer garbage collector, base hook + item disposer:
- struct Foo: public ci::single_link::node< hp_gc >
+ struct Foo: public ci::msqueue::node< hp_gc >
{
// Your data
...
typedef ci::MoirQueue<
hp_gc
,Foo
- ,ci::opt::hook<
- ci::single_link::base_hook< ci::opt::gc<hp_gc> >
- >
- ,ci::opt::disposer< fooDisposer >
+ ,typename ci::msqueue::make_traits<
+ ci::opt::hook<
+ ci::msqueue::base_hook< ci::opt::gc<hp_gc> >
+ >
+ ,ci::opt::disposer< fooDisposer >
+ >::type
> fooQueue;
// MoirQueue with Hazard Pointer garbage collector,
// member hook + item disposer + item counter,
- // without alignment of internal queue data:
+ // without padding of internal queue data:
struct Bar
{
// Your data
...
- ci::single_link::node< hp_gc > hMember;
+ ci::msqueue::node< hp_gc > hMember;
};
- typedef ci::MoirQueue<
- hp_gc
- ,Foo
- ,ci::opt::hook<
- ci::single_link::member_hook<
- offsetof(Bar, hMember)
- ,ci::opt::gc<hp_gc>
- >
- >
- ,ci::opt::disposer< fooDisposer >
- ,cds::opt::item_counter< cds::atomicity::item_counter >
- ,cds::opt::alignment< cds::opt::no_special_alignment >
- > barQueue;
-
+ struct barQueueTraits: public ci::msqueue::traits
+ {
+ typedef ci::msqueue::member_hook< offsetof(Bar, hMember), ci::opt::gc<hp_gc> > hook;
+ typedef fooDisposer disposer;
+ typedef cds::atomicity::item_counter item_counter;
+ enum { padding = cds::opt::no_special_padding };
+ };
+ typedef ci::MoirQueue< hp_gc, Bar, barQueueTraits > barQueue;
\endcode
*/
- template <typename GC, typename T, typename... Options>
- class MoirQueue: public MSQueue< GC, T, Options... >
+ template <typename GC, typename T, typename Traits = cds::intrusive::msqueue::traits>
+ class MoirQueue: public MSQueue< GC, T, Traits >
{
//@cond
- typedef MSQueue< GC, T, Options... > base_class;
+ typedef MSQueue< GC, T, Traits > base_class;
typedef typename base_class::node_type node_type;
//@endcond
//@endcond
/// Rebind template arguments
- template <typename GC2, typename T2, typename... Options2>
+ template < typename GC2, typename T2, typename Traits2 >
struct rebind {
- typedef MoirQueue< GC2, T2, Options2...> other ; ///< Rebinding result
+ typedef MoirQueue< GC2, T2, Traits2> other ; ///< Rebinding result
};
protected:
//@cond
typedef typename base_class::dequeue_result dequeue_result;
- typedef typename base_class::node_to_value node_to_value;
bool do_dequeue( dequeue_result& res )
{
node_type * pNext;
node_type * h;
while ( true ) {
- h = res.guards.protect( 0, base_class::m_pHead, node_to_value() );
- pNext = res.guards.protect( 1, h->m_pNext, node_to_value() );
+ h = res.guards.protect( 0, base_class::m_pHead, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );});
+ pNext = res.guards.protect( 1, h->m_pNext, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );});
- if ( pNext == nullptr )
- return false ; // queue is empty
+ if ( pNext == nullptr ) {
+ base_class::m_Stat.onEmptyDequeue();
+ return false; // queue is empty
+ }
if ( base_class::m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
node_type * t = base_class::m_pTail.load(memory_model::memory_order_acquire);
public:
/// Dequeues a value from the queue
/** @anchor cds_intrusive_MoirQueue_dequeue
- See warning about item disposing in \ref MSQueue::dequeue.
+ See warning about item disposing in \p MSQueue::dequeue.
*/
value_type * dequeue()
{
}} // namespace cds::intrusive
-#endif // #ifndef __CDS_INTRUSIVE_MOIR_QUEUE_H
+#endif // #ifndef CDSLIB_INTRUSIVE_MOIR_QUEUE_H