-//$$CDS-header$$
-
-#ifndef __CDS_INTRUSIVE_OPTIMISTIC_QUEUE_H
-#define __CDS_INTRUSIVE_OPTIMISTIC_QUEUE_H
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSLIB_INTRUSIVE_OPTIMISTIC_QUEUE_H
+#define CDSLIB_INTRUSIVE_OPTIMISTIC_QUEUE_H
#include <type_traits>
#include <cds/intrusive/details/base.h>
typedef typename gc::template atomic_ref<node> atomic_node_ptr ; ///< atomic pointer
- atomic_node_ptr m_pPrev ; ///< Pointer to previous node
atomic_node_ptr m_pNext ; ///< Pointer to next node
+ atomic_node_ptr m_pPrev ; ///< Pointer to previous node
CDS_CONSTEXPR node() CDS_NOEXCEPT
- : m_pPrev( nullptr )
- , m_pNext( nullptr )
+ : m_pNext( nullptr )
+ , m_pPrev( nullptr )
{}
};
/// Link checking, see \p cds::opt::link_checker
static CDS_CONSTEXPR const opt::link_check_type link_checker = opt::debug_check_link;
- /// Alignment for internal queue data. Default is \p opt::cache_line_alignment
- enum { alignment = opt::cache_line_alignment };
+ /// Padding for internal critical atomic data. Default is \p opt::cache_line_padding
+ enum { padding = opt::cache_line_padding };
};
/// Metafunction converting option list to \p optimistic_queue::traits
/**
Supported \p Options are:
- - opt::hook - hook used. Possible hooks are: \p optimistic_queue::base_hook, \p optimistic_queue::member_hook, \p optimistic_queue::traits_hook.
+ - \p opt::hook - hook used. Possible hooks are: \p optimistic_queue::base_hook, \p optimistic_queue::member_hook, \p optimistic_queue::traits_hook.
If the option is not specified, \p %optimistic_queue::base_hook<> is used.
- - opt::back_off - back-off strategy used, default is \p cds::backoff::empty.
- - opt::disposer - the functor used for dispose removed items. Default is \p opt::v::empty_disposer. This option is used
+ - \p opt::back_off - back-off strategy used, default is \p cds::backoff::empty.
+ - \p opt::disposer - the functor used for disposing removed items. Default is \p opt::v::empty_disposer. This option is used
when dequeuing.
- - opt::link_checker - the type of node's link fields checking. Default is \p opt::debug_check_link
- - opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter (item counting disabled)
+ - \p opt::link_checker - the type of node's link fields checking. Default is \p opt::debug_check_link
+ - \p opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter (item counting disabled)
To enable item counting use \p cds::atomicity::item_counter
- - opt::stat - the type to gather internal statistics.
+ - \p opt::stat - the type to gather internal statistics.
Possible statistics types are: \p optimistic_queue::stat, \p optimistic_queue::empty_stat,
user-provided class that supports \p %optimistic_queue::stat interface.
Default is \p %optimistic_queue::empty_stat (internal statistics disabled).
- - opt::alignment - the alignment for internal queue data. Default is \p opt::cache_line_alignment
- - opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
+ - \p opt::padding - padding for internal critical atomic data. Default is \p opt::cache_line_padding
+ - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
 or \p opt::v::sequential_consistent (sequentially consistent memory model).
Example: declare \p %OptimisticQueue with item counting and internal statistics
protected:
//@cond
- typedef intrusive::node_to_value<OptimisticQueue> node_to_value;
- typedef typename opt::details::alignment_setter< typename node_type::atomic_node_ptr, traits::alignment >::type aligned_node_ptr;
+ typedef typename node_type::atomic_node_ptr atomic_node_ptr;
// GC and node_type::gc must be the same
static_assert((std::is_same<gc, typename node_type::gc>::value), "GC and node_type::gc must be the same");
-
//@endcond
- aligned_node_ptr m_pTail ; ///< Pointer to tail node
- aligned_node_ptr m_pHead ; ///< Pointer to head node
- node_type m_Dummy ; ///< dummy node
+ atomic_node_ptr m_pTail; ///< Pointer to tail node
+ //@cond
+ typename opt::details::apply_padding< atomic_node_ptr, traits::padding >::padding_type pad1_;
+ //@endcond
+ atomic_node_ptr m_pHead; ///< Pointer to head node
+ //@cond
+ typename opt::details::apply_padding< atomic_node_ptr, traits::padding >::padding_type pad2_;
+ //@endcond
+ node_type m_Dummy ; ///< dummy node
+ //@cond
+ typename opt::details::apply_padding< atomic_node_ptr, traits::padding >::padding_type pad3_;
+ //@endcond
item_counter m_ItemCounter ; ///< Item counter
stat m_Stat ; ///< Internal statistics
back_off bkoff;
while ( true ) { // Try till success or empty
- pHead = res.guards.protect( 0, m_pHead, node_to_value() );
- pTail = res.guards.protect( 1, m_pTail, node_to_value() );
+ pHead = res.guards.protect( 0, m_pHead, [](node_type * p) -> value_type * {return node_traits::to_value_ptr(p);});
+ pTail = res.guards.protect( 1, m_pTail, [](node_type * p) -> value_type * {return node_traits::to_value_ptr(p);});
assert( pHead != nullptr );
- pFirstNodePrev = res.guards.protect( 2, pHead->m_pPrev, node_to_value() );
+ pFirstNodePrev = res.guards.protect( 2, pHead->m_pPrev, [](node_type * p) -> value_type * {return node_traits::to_value_ptr(p);});
- if ( pHead == m_pHead.load(memory_model::memory_order_relaxed)) {
+ if ( pHead == m_pHead.load(memory_model::memory_order_acquire)) {
if ( pTail != pHead ) {
if ( pFirstNodePrev == nullptr
- || pFirstNodePrev->m_pNext.load(memory_model::memory_order_relaxed) != pHead )
+ || pFirstNodePrev->m_pNext.load(memory_model::memory_order_acquire) != pHead )
{
fix_list( pTail, pHead );
continue;
}
- if ( m_pHead.compare_exchange_weak( pHead, pFirstNodePrev, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
+ if ( m_pHead.compare_exchange_weak( pHead, pFirstNodePrev, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
// dequeue success
break;
}
pCurNode = pTail;
while ( pCurNode != pHead ) { // While not at head
- pCurNodeNext = guards.protect(0, pCurNode->m_pNext, node_to_value() );
- if ( pHead != m_pHead.load(memory_model::memory_order_relaxed) )
+ pCurNodeNext = guards.protect(0, pCurNode->m_pNext, [](node_type * p) -> value_type * { return node_traits::to_value_ptr(p);});
+ if ( pHead != m_pHead.load(memory_model::memory_order_acquire))
break;
pCurNodeNext->m_pPrev.store( pCurNode, memory_model::memory_order_release );
guards.assign( 1, node_traits::to_value_ptr( pCurNode = pCurNodeNext ));
back_off bkoff;
guards.assign( 1, &val );
- node_type * pTail = guards.protect( 0, m_pTail, node_to_value() ) ; // Read the tail
+ node_type * pTail = guards.protect( 0, m_pTail, [](node_type * p) -> value_type * {return node_traits::to_value_ptr(p);} ); // Read the tail
while( true ) {
pNew->m_pNext.store( pTail, memory_model::memory_order_release );
- if ( m_pTail.compare_exchange_strong( pTail, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed ) ) { // Try to CAS the tail
- pTail->m_pPrev.store( pNew, memory_model::memory_order_release ) ; // Success, write prev
+ if ( m_pTail.compare_exchange_strong( pTail, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed )) { // Try to CAS the tail
+ pTail->m_pPrev.store( pNew, memory_model::memory_order_release ); // Success, write prev
++m_ItemCounter;
m_Stat.onEnqueue();
- break ; // Enqueue done!
+ break; // Enqueue done!
}
- guards.assign( 0, node_traits::to_value_ptr( pTail ) ) ; // pTail has been changed by CAS above
+ guards.assign( 0, node_traits::to_value_ptr( pTail )); // pTail has been changed by CAS above
m_Stat.onEnqueueRace();
bkoff();
}
}} // namespace cds::intrusive
-#endif // #ifndef __CDS_INTRUSIVE_OPTIMISTIC_QUEUE_H
+#endif // #ifndef CDSLIB_INTRUSIVE_OPTIMISTIC_QUEUE_H