-//$$CDS-header$$
+/*
+ This file is a part of libcds - Concurrent Data Structures library
-#ifndef __CDS_INTRUSIVE_MOIR_QUEUE_H
-#define __CDS_INTRUSIVE_MOIR_QUEUE_H
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSLIB_INTRUSIVE_MOIR_QUEUE_H
+#define CDSLIB_INTRUSIVE_MOIR_QUEUE_H
#include <cds/intrusive/msqueue.h>
"Formal Verification of a practical lock-free queue algorithm"
Cite from this work about difference from Michael & Scott algo:
- "Our algorithm differs from Michael and Scott\92s [MS98] in that we test whether \p Tail points to the header
+ "Our algorithm differs from Michael and Scott's [MS98] in that we test whether \p Tail points to the header
node only <b>after</b> \p Head has been updated, so a dequeuing process reads \p Tail only once. The dequeue in
[MS98] performs this test before checking whether the next pointer in the dummy node is null, which
means that it reads \p Tail every time a dequeuing process loops. Under high load, when operations retry
frequently, our modification will reduce the number of accesses to global memory. This modification, however,
- introduces the possibility of \p Head and \p Tail \93crossing\94."
+ introduces the possibility of \p Head and \p Tail "crossing"."
- Explanation of template arguments see intrusive::MSQueue.
+ For an explanation of the template arguments, see \p intrusive::MSQueue.
\par Examples
\code
// MoirQueue with Hazard Pointer garbage collector,
// member hook + item disposer + item counter,
- // without alignment of internal queue data:
+ // without padding of internal queue data:
struct Bar
{
// Your data
typedef ci::msqueue::member_hook< offsetof(Bar, hMember), ci::opt::gc<hp_gc> > hook;
typedef fooDisposer disposer;
typedef cds::atomicity::item_counter item_counter;
- enum { aligment = cds::opt::no_special_alignment alignment };
+ enum { padding = cds::opt::no_special_padding };
};
typedef ci::MoirQueue< hp_gc, Bar, barQueueTraits > barQueue;
\endcode
protected:
//@cond
typedef typename base_class::dequeue_result dequeue_result;
- typedef typename base_class::node_to_value node_to_value;
bool do_dequeue( dequeue_result& res )
{
node_type * pNext;
node_type * h;
while ( true ) {
- h = res.guards.protect( 0, base_class::m_pHead, node_to_value() );
- pNext = res.guards.protect( 1, h->m_pNext, node_to_value() );
+ h = res.guards.protect( 0, base_class::m_pHead, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );});
+ pNext = res.guards.protect( 1, h->m_pNext, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );});
- if ( pNext == nullptr )
- return false ; // queue is empty
+ if ( pNext == nullptr ) {
+ base_class::m_Stat.onEmptyDequeue();
+ return false; // queue is empty
+ }
if ( base_class::m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
node_type * t = base_class::m_pTail.load(memory_model::memory_order_acquire);
}} // namespace cds::intrusive
-#endif // #ifndef __CDS_INTRUSIVE_MOIR_QUEUE_H
+#endif // #ifndef CDSLIB_INTRUSIVE_MOIR_QUEUE_H