SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_ALGO_SPLIT_BITSTRING_H
The splitter stores a const reference to bit-string, not a copy.
The maximum count of bits that can be cut in a single call is <tt> sizeof(UInt) * 8 </tt>
*/
- template <typename BitString, typename UInt = size_t >
+ template <typename BitString, typename UInt = typename std::conditional< sizeof(BitString) % sizeof(size_t) == 0, size_t, unsigned >::type >
class split_bitstring
{
public:
};
};
- template <typename GC, typename T, typename Traits>
- struct is_iterable_list< IterableKVList<GC, T, Traits >>
+ template <typename GC, typename K, typename V, typename Traits>
+ struct is_iterable_list< IterableKVList<GC, K, V, Traits >>
{
enum {
value = true
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_IMPL_ELLEN_BINTREE_MAP_H
*/
guarded_ptr extract_min()
{
- guarded_ptr gp;
- base_class::extract_min_( gp.guard() );
- return gp;
+ return guarded_ptr( base_class::extract_min_());
}
/// Extracts an item with maximal key from the map
*/
guarded_ptr extract_max()
{
- guarded_ptr gp;
- base_class::extract_max_( gp.guard() );
- return gp;
+ return guarded_ptr( base_class::extract_max_());
}
/// Extracts an item from the tree
template <typename Q>
guarded_ptr extract( Q const& key )
{
- guarded_ptr gp;
- base_class::extract_( gp.guard(), key );
- return gp;
+ return guarded_ptr( base_class::extract_( key ));
}
/// Extracts an item from the map using \p pred for searching
guarded_ptr extract_with( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- base_class::extract_with_( gp.guard(), key,
- cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >());
- return gp;
+ return guarded_ptr( base_class::extract_with_( key,
+ cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >()));
}
/// Find the key \p key
template <typename Q>
guarded_ptr get( Q const& key )
{
- guarded_ptr gp;
- base_class::get_( gp.guard(), key );
- return gp;
+ return guarded_ptr( base_class::get_( key ));
}
/// Finds \p key with predicate \p pred and returns the item found
guarded_ptr get_with( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- base_class::get_with_( gp.guard(), key,
- cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >() );
- return gp;
+ return guarded_ptr( base_class::get_with_( key,
+ cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >() ));
}
/// Clears the map (not atomic)
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_IMPL_ELLEN_BINTREE_SET_H
*/
guarded_ptr extract_min()
{
- guarded_ptr gp;
- base_class::extract_min_( gp.guard() );
- return gp;
+ return guarded_ptr( base_class::extract_min_());
}
/// Extracts an item with maximal key from the set
*/
guarded_ptr extract_max()
{
- guarded_ptr gp;
- base_class::extract_max_( gp.guard() );
- return gp;
+ return guarded_ptr( base_class::extract_max_());
}
/// Extracts an item from the tree
template <typename Q>
guarded_ptr extract( Q const& key )
{
- guarded_ptr gp;
- base_class::extract_( gp.guard(), key );
- return gp;
+ return base_class::extract_( key );
}
/// Extracts an item from the set using \p pred for searching
guarded_ptr extract_with( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- base_class::extract_with_( gp.guard(), key,
+ return base_class::extract_with_( key,
cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >());
- return gp;
}
/// Find the key \p key
template <typename Q>
guarded_ptr get( Q const& key )
{
- guarded_ptr gp;
- base_class::get_( gp.guard(), key );
- return gp;
+ return base_class::get_( key );
}
/// Finds \p key with predicate \p pred and returns the item found
guarded_ptr get_with( Q const& key, Less pred )
{
CDS_UNUSED(pred);
- guarded_ptr gp;
- base_class::get_with_( gp.guard(), key,
+ return base_class::get_with_( key,
cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >() );
- return gp;
}
/// Clears the set (not atomic)
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_IMPL_FELDMAN_HASHMAP_H
template <typename K>
guarded_ptr extract( K const& key )
{
- guarded_ptr gp;
- typename gc::Guard guard;
- node_type * p = base_class::do_erase( m_Hasher( key_type( key )), guard, []( node_type const&) -> bool {return true;} );
-
- // p is guarded by HP
- if ( p )
- gp.reset( p );
- return gp;
+ return base_class::extract( m_Hasher( key_type( key )));
}
/// Checks whether the map contains \p key
template <typename K>
guarded_ptr get( K const& key )
{
- guarded_ptr gp;
- {
- typename gc::Guard guard;
- gp.reset( base_class::search( m_Hasher( key_type( key )), guard ));
- }
- return gp;
+ return base_class::get( m_Hasher( key_type( key )));
}
/// Clears the map (non-atomic)
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_IMPL_FELDMAN_HASHSET_H
typedef typename maker::mapped_type mapped_type;
typedef typename maker::value_type value_type;
#endif
-
+ typedef Traits traits; ///< List traits
typedef typename base_class::gc gc; ///< Garbage collector used
typedef typename base_class::back_off back_off; ///< Back-off strategy used
typedef typename maker::data_allocator_type allocator_type; ///< Allocator type used for allocate/deallocate data
/// Guarded pointer
typedef typename base_class::guarded_ptr guarded_ptr;
+ //@cond
+ // Rebind traits (split-list support)
+ template <typename... Options>
+ struct rebind_traits {
+ typedef IterableKVList<
+ gc
+ , key_type, mapped_type
+ , typename cds::opt::make_options< traits, Options...>::type
+ > type;
+ };
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >;
+ //@endcond
+
protected:
//@cond
typedef typename base_class::head_type head_type;
The functor may change non-key fields of \p val; however, \p func must guarantee
that during changing no any other modifications could be made on this item by concurrent threads.
- Returns <tt> std::pair<bool, bool> </tt> where \p first is true if operation is successful,
+ @return <tt> std::pair<bool, bool> </tt> where \p first is true if operation is successful,
\p second is true if new item has been added or \p false if the item with such \p key
already exists.
// Split-list support
template <typename K>
- bool insert_at( head_type& refHead, K const& key )
+ bool insert_at( head_type& refHead, K&& key )
{
- return base_class::insert_at( refHead, value_type( key_type( key ), mapped_type() ));
+ return base_class::insert_at( refHead, value_type( key_type( std::forward<K>( key )), mapped_type() ));
}
template <typename K, typename V>
- bool insert_at( head_type& refHead, const K& key, V const& val )
+ bool insert_at( head_type& refHead, K&& key, V&& val )
{
- return base_class::insert_at( refHead, value_type( key_type( key ), val ));
+ return base_class::insert_at( refHead, value_type( key_type( std::forward<K>( key )), std::forward<V>( val )));
}
template <typename K, typename Func>
- bool insert_with_at( head_type& refHead, K const& key, Func f )
+ bool insert_with_at( head_type& refHead, K&& key, Func f )
{
- return base_class::insert_at( refHead, value_type( key_type( key ), mapped_type()), f );
+ return base_class::insert_at( refHead, value_type( key_type( std::forward<K>( key )), mapped_type()), f );
}
template <typename K, typename... Args>
}
template <typename K, typename Func>
- std::pair<bool, bool> update_at( head_type& refHead, const K& key, Func f, bool bAllowInsert )
+ std::pair<bool, bool> update_at( head_type& refHead, K&& key, Func f, bool bAllowInsert )
{
- return base_class::update_at( refHead, value_type( key_type( key ), mapped_type()), f, bAllowInsert );
+ return base_class::update_at( refHead, value_type( key_type( std::forward<K>( key )), mapped_type()), f, bAllowInsert );
}
template <typename K, typename Compare>
return base_class::erase_at( refHead, key, cmp, f );
}
template <typename K, typename Compare>
- bool extract_at( head_type& refHead, typename guarded_ptr::native_guard& guard, K const& key, Compare cmp )
+ guarded_ptr extract_at( head_type& refHead, K const& key, Compare cmp )
{
- return base_class::extract_at( refHead, guard, key, cmp );
+ return base_class::extract_at( refHead, key, cmp );
}
template <typename K, typename Compare>
}
template <typename K, typename Compare>
- bool get_at( head_type& refHead, typename guarded_ptr::native_guard& guard, K const& key, Compare cmp )
+ guarded_ptr get_at( head_type& refHead, K const& key, Compare cmp )
{
- return base_class::get_at( refHead, guard, key, cmp );
+ return base_class::get_at( refHead, key, cmp );
}
//@endcond
template <typename Q>
guarded_ptr extract( Q const& key )
{
- guarded_ptr gp;
- extract_at( head(), gp.guard(), key, key_comparator() );
- return gp;
+ return extract_at( head(), key, key_comparator() );
}
/// Extracts the item from the list with comparing functor \p pred
guarded_ptr extract_with( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- extract_at( head(), gp.guard(), key, typename maker::template less_wrapper<Less>::type() );
- return gp;
+ return extract_at( head(), key, typename maker::template less_wrapper<Less>::type() );
}
/// Checks whether the list contains \p key
template <typename Q>
guarded_ptr get( Q const& key ) const
{
- guarded_ptr gp;
- get_at( head(), gp.guard(), key, key_comparator() );
- return gp;
+ return get_at( head(), key, key_comparator() );
}
/// Finds \p key and return the item found
guarded_ptr get_with( Q const& key, Less pred ) const
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- get_at( head(), gp.guard(), key, typename maker::template less_wrapper<Less>::type() );
- return gp;
+ return get_at( head(), key, typename maker::template less_wrapper<Less>::type() );
}
/// Checks if the list is empty
}
template <typename Q, typename Compare>
- bool extract_at( head_type& refHead, typename guarded_ptr::native_guard& guard, Q const& key, Compare cmp )
+ guarded_ptr extract_at( head_type& refHead, Q const& key, Compare cmp )
{
- return base_class::extract_at( refHead, guard, key, cmp );
+ return base_class::extract_at( refHead, key, cmp );
}
template <typename Q, typename Compare>
}
template <typename Q, typename Compare>
- bool get_at( head_type const& refHead, typename guarded_ptr::native_guard& guard, Q const& key, Compare cmp ) const
+ guarded_ptr get_at( head_type const& refHead, Q const& key, Compare cmp ) const
{
- return base_class::get_at( refHead, guard, key, cmp );
+ return base_class::get_at( refHead, key, cmp );
}
//@endcond
//@endcond
public:
- typedef GC gc; ///< Garbage collector
+ typedef GC gc; ///< Garbage collector
+ typedef Traits traits; ///< Traits
#ifdef CDS_DOXYGEN_INVOKED
typedef Key key_type ; ///< Key type
typedef Value mapped_type ; ///< Type of value stored in the list
static CDS_CONSTEXPR const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm
+ //@cond
+ // Rebind traits (split-list support)
+ template <typename... Options>
+ struct rebind_traits {
+ typedef LazyKVList<
+ gc
+ , key_type, mapped_type
+ , typename cds::opt::make_options< traits, Options...>::type
+ > type;
+ };
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >;
+ //@endcond
+
protected:
//@cond
typedef typename base_class::value_type node_type;
Returns \p true if inserting successful, \p false otherwise.
*/
template <typename K>
- bool insert( const K& key )
+ bool insert( K&& key )
{
- return insert_at( head(), key );
+ return insert_at( head(), std::forward<K>( key ));
}
/// Inserts new node with a key and a value
Returns \p true if inserting successful, \p false otherwise.
*/
template <typename K, typename V>
- bool insert( const K& key, const V& val )
+ bool insert( K&& key, V&& val )
{
// We cannot use insert with functor here
// because we cannot lock inserted node for updating
// Therefore, we use separate function
- return insert_at( head(), key, val );
+ return insert_at( head(), std::forward<K>( key ), std::forward<V>( val ));
}
/// Inserts new node and initializes it by a functor
it is preferable that the initialization should be completed only if inserting is successful.
*/
template <typename K, typename Func>
- bool insert_with( const K& key, Func func )
+ bool insert_with( K&& key, Func func )
{
- return insert_with_at( head(), key, func );
+ return insert_with_at( head(), std::forward<K>( key ), func );
}
/// Inserts data of type \ref mapped_type constructed with <tt>std::forward<Args>(args)...</tt>
already exists.
*/
template <typename K, typename Func>
- std::pair<bool, bool> update( const K& key, Func f, bool bAllowInsert = true )
+ std::pair<bool, bool> update( K&& key, Func f, bool bAllowInsert = true )
{
- return update_at( head(), key, f, bAllowInsert );
+ return update_at( head(), std::forward<K>( key ), f, bAllowInsert );
}
//@cond
template <typename K, typename Func>
template <typename K>
guarded_ptr extract( K const& key )
{
- guarded_ptr gp;
- extract_at( head(), gp.guard(), key, intrusive_key_comparator() );
- return gp;
+ return extract_at( head(), key, intrusive_key_comparator() );
}
/// Extracts the item from the list with comparing functor \p pred
guarded_ptr extract_with( K const& key, Less pred )
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- extract_at( head(), gp.guard(), key, typename maker::template less_wrapper<Less>::type() );
- return gp;
+ return extract_at( head(), key, typename maker::template less_wrapper<Less>::type() );
}
/// Checks whether the list contains \p key
template <typename K>
guarded_ptr get( K const& key )
{
- guarded_ptr gp;
- get_at( head(), gp.guard(), key, intrusive_key_comparator() );
- return gp;
+ return get_at( head(), key, intrusive_key_comparator() );
}
/// Finds the key \p val and return the item found
guarded_ptr get_with( K const& key, Less pred )
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- get_at( head(), gp.guard(), key, typename maker::template less_wrapper<Less>::type() );
- return gp;
+ return get_at( head(), key, typename maker::template less_wrapper<Less>::type() );
}
/// Checks if the list is empty
}
template <typename K>
- bool insert_at( head_type& refHead, const K& key )
+ bool insert_at( head_type& refHead, K&& key )
{
- return insert_node_at( refHead, alloc_node( key ));
+ return insert_node_at( refHead, alloc_node( std::forward<K>( key )));
}
template <typename K, typename V>
- bool insert_at( head_type& refHead, const K& key, const V& val )
+ bool insert_at( head_type& refHead, K&& key, V&& val )
{
- return insert_node_at( refHead, alloc_node( key, val ));
+ return insert_node_at( refHead, alloc_node( std::forward<K>( key ), std::forward<V>( val )));
}
template <typename K, typename Func>
- bool insert_with_at( head_type& refHead, const K& key, Func f )
+ bool insert_with_at( head_type& refHead, K&& key, Func f )
{
- scoped_node_ptr pNode( alloc_node( key ));
+ scoped_node_ptr pNode( alloc_node( std::forward<K>( key )));
if ( base_class::insert_at( &refHead, *pNode, [&f](node_type& node){ f( node.m_Data ); } )) {
pNode.release();
}
template <typename K, typename Compare>
- bool extract_at( head_type& refHead, typename guarded_ptr::native_guard& guard, K const& key, Compare cmp )
+ guarded_ptr extract_at( head_type& refHead, K const& key, Compare cmp )
{
- return base_class::extract_at( &refHead, guard, key, cmp );
+ return base_class::extract_at( &refHead, key, cmp );
}
template <typename K, typename Func>
- std::pair<bool, bool> update_at( head_type& refHead, const K& key, Func f, bool bAllowInsert )
+ std::pair<bool, bool> update_at( head_type& refHead, K&& key, Func f, bool bAllowInsert )
{
- scoped_node_ptr pNode( alloc_node( key ));
+ scoped_node_ptr pNode( alloc_node( std::forward<K>( key )));
std::pair<bool, bool> ret = base_class::update_at( &refHead, *pNode,
[&f]( bool bNew, node_type& node, node_type& ){ f( bNew, node.m_Data ); },
}
template <typename K, typename Compare>
- bool get_at( head_type& refHead, typename guarded_ptr::native_guard& guard, K const& key, Compare cmp )
+ guarded_ptr get_at( head_type& refHead, K const& key, Compare cmp )
{
- return base_class::get_at( &refHead, guard, key, cmp );
+ return base_class::get_at( &refHead, key, cmp );
}
//@endcond
template <typename Q>
guarded_ptr extract( Q const& key )
{
- guarded_ptr gp;
- extract_at( head(), gp.guard(), key, intrusive_key_comparator() );
- return gp;
+ return extract_at( head(), key, intrusive_key_comparator() );
}
/// Extracts the item from the list with comparing functor \p pred
guarded_ptr extract_with( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- extract_at( head(), gp.guard(), key, typename maker::template less_wrapper<Less>::type() );
- return gp;
+ return extract_at( head(), key, typename maker::template less_wrapper<Less>::type() );
}
/// Checks whether the list contains \p key
template <typename Q>
guarded_ptr get( Q const& key )
{
- guarded_ptr gp;
- get_at( head(), gp.guard(), key, intrusive_key_comparator() );
- return gp;
+ return get_at( head(), key, intrusive_key_comparator() );
}
/// Finds the key \p key and return the item found
guarded_ptr get_with( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- get_at( head(), gp.guard(), key, typename maker::template less_wrapper<Less>::type() );
- return gp;
+ return get_at( head(), key, typename maker::template less_wrapper<Less>::type() );
}
/// Checks whether the list is empty
}
template <typename Q, typename Compare>
- bool extract_at( head_type& refHead, typename guarded_ptr::native_guard& guard, Q const& key, Compare cmp )
+ guarded_ptr extract_at( head_type& refHead, Q const& key, Compare cmp )
{
- return base_class::extract_at( &refHead, guard, key, cmp );
+ return base_class::extract_at( &refHead, key, cmp );
}
template <typename Q, typename Func>
}
template <typename Q, typename Compare>
- bool get_at( head_type& refHead, typename guarded_ptr::native_guard& guard, Q const& key, Compare cmp )
+ guarded_ptr get_at( head_type& refHead, Q const& key, Compare cmp )
{
- return base_class::get_at( &refHead, guard, key, cmp );
+ return base_class::get_at( &refHead, key, cmp );
}
//@endcond
#endif
typedef typename base_class::gc gc; ///< Garbage collector used
+ typedef Traits traits; ///< List traits
typedef typename base_class::back_off back_off; ///< Back-off strategy used
typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes
typedef typename base_class::item_counter item_counter; ///< Item counting policy used
static CDS_CONSTEXPR const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm
+ //@cond
+ // Rebind traits (split-list support)
+ template <typename... Options>
+ struct rebind_traits {
+ typedef MichaelKVList<
+ gc
+ , key_type, mapped_type
+ , typename cds::opt::make_options< traits, Options...>::type
+ > type;
+ };
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >;
+ //@endcond
+
protected:
//@cond
typedef typename base_class::value_type node_type;
Returns \p true if inserting successful, \p false otherwise.
*/
template <typename K>
- bool insert( const K& key )
+ bool insert( K&& key )
{
- return insert_at( head(), key );
+ return insert_at( head(), std::forward<K>( key ));
}
/// Inserts new node with a key and a value
Returns \p true if inserting successful, \p false otherwise.
*/
template <typename K, typename V>
- bool insert( const K& key, const V& val )
+ bool insert( K&& key, V&& val )
{
// We cannot use insert with functor here
// because we cannot lock inserted node for updating
// Therefore, we use separate function
- return insert_at( head(), key, val );
+ return insert_at( head(), std::forward<K>( key ), std::forward<V>( val ));
}
/// Inserts new node and initialize it by a functor
@warning See \ref cds_intrusive_item_creating "insert item troubleshooting"
*/
template <typename K, typename Func>
- bool insert_with( const K& key, Func func )
+ bool insert_with( K&& key, Func func )
{
- return insert_with_at( head(), key, func );
+ return insert_with_at( head(), std::forward<K>( key ), func );
}
/// Updates data by \p key
@warning See \ref cds_intrusive_item_creating "insert item troubleshooting"
*/
template <typename K, typename Func>
- std::pair<bool, bool> update( K const& key, Func f, bool bAllowInsert = true )
+ std::pair<bool, bool> update( K&& key, Func f, bool bAllowInsert = true )
{
- return update_at( head(), key, f, bAllowInsert );
+ return update_at( head(), std::forward<K>( key ), f, bAllowInsert );
}
//@cond
template <typename K, typename Func>
template <typename K>
guarded_ptr extract( K const& key )
{
- guarded_ptr gp;
- extract_at( head(), gp.guard(), key, intrusive_key_comparator() );
- return gp;
+ return extract_at( head(), key, intrusive_key_comparator() );
}
/// Extracts the item from the list with comparing functor \p pred
guarded_ptr extract_with( K const& key, Less pred )
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- extract_at( head(), gp.guard(), key, typename maker::template less_wrapper<Less>::type() );
- return gp;
+ return extract_at( head(), key, typename maker::template less_wrapper<Less>::type() );
}
/// Checks whether the list contains \p key
template <typename K>
guarded_ptr get( K const& key )
{
- guarded_ptr gp;
- get_at( head(), gp.guard(), key, intrusive_key_comparator() );
- return gp;
+ return get_at( head(), key, intrusive_key_comparator() );
}
/// Finds the \p key and return the item found
guarded_ptr get_with( K const& key, Less pred )
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- get_at( head(), gp.guard(), key, typename maker::template less_wrapper<Less>::type() );
- return gp;
+ return get_at( head(), key, typename maker::template less_wrapper<Less>::type() );
}
/// Checks if the list is empty
}
template <typename K>
- bool insert_at( head_type& refHead, const K& key )
+ bool insert_at( head_type& refHead, K&& key )
{
- return insert_node_at( refHead, alloc_node( key ));
+ return insert_node_at( refHead, alloc_node( std::forward<K>( key )));
}
template <typename K, typename V>
- bool insert_at( head_type& refHead, const K& key, const V& val )
+ bool insert_at( head_type& refHead, K&& key, V&& val )
{
- return insert_node_at( refHead, alloc_node( key, val ));
+ return insert_node_at( refHead, alloc_node( std::forward<K>( key ), std::forward<V>( val )));
}
template <typename K, typename Func>
- bool insert_with_at( head_type& refHead, const K& key, Func f )
+ bool insert_with_at( head_type& refHead, K&& key, Func f )
{
- scoped_node_ptr pNode( alloc_node( key ));
+ scoped_node_ptr pNode( alloc_node( std::forward<K>( key )));
if ( base_class::insert_at( refHead, *pNode, [&f](node_type& node){ f( node.m_Data ); })) {
pNode.release();
}
template <typename K, typename Func>
- std::pair<bool, bool> update_at( head_type& refHead, const K& key, Func f, bool bAllowInsert )
+ std::pair<bool, bool> update_at( head_type& refHead, K&& key, Func f, bool bAllowInsert )
{
- scoped_node_ptr pNode( alloc_node( key ));
+ scoped_node_ptr pNode( alloc_node( std::forward<K>( key )));
std::pair<bool, bool> ret = base_class::update_at( refHead, *pNode,
[&f]( bool bNew, node_type& node, node_type& ){ f( bNew, node.m_Data ); },
return base_class::erase_at( refHead, key, cmp, [&f]( node_type const & node ){ f( const_cast<value_type&>(node.m_Data)); });
}
template <typename K, typename Compare>
- bool extract_at( head_type& refHead, typename guarded_ptr::native_guard& guard, K const& key, Compare cmp )
+ guarded_ptr extract_at( head_type& refHead, K const& key, Compare cmp )
{
- return base_class::extract_at( refHead, guard, key, cmp );
+ return base_class::extract_at( refHead, key, cmp );
}
template <typename K, typename Compare>
}
template <typename K, typename Compare>
- bool get_at( head_type& refHead, typename guarded_ptr::native_guard& guard, K const& key, Compare cmp )
+ guarded_ptr get_at( head_type& refHead, K const& key, Compare cmp )
{
- return base_class::get_at( refHead, guard, key, cmp );
+ return base_class::get_at( refHead, key, cmp );
}
//@endcond
template <typename Q>
guarded_ptr extract( Q const& key )
{
- guarded_ptr gp;
- extract_at( head(), gp.guard(), key, intrusive_key_comparator() );
- return gp;
+ return extract_at( head(), key, intrusive_key_comparator() );
}
/// Extracts the item from the list with comparing functor \p pred
guarded_ptr extract_with( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- extract_at( head(), gp.guard(), key, typename maker::template less_wrapper<Less>::type() );
- return gp;
+ return extract_at( head(), key, typename maker::template less_wrapper<Less>::type() );
}
/// Checks whether the list contains \p key
template <typename Q>
guarded_ptr get( Q const& key )
{
- guarded_ptr gp;
- get_at( head(), gp.guard(), key, intrusive_key_comparator() );
- return gp;
+ return get_at( head(), key, intrusive_key_comparator() );
}
/// Finds \p key and return the item found
guarded_ptr get_with( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- get_at( head(), gp.guard(), key, typename maker::template less_wrapper<Less>::type() );
- return gp;
+ return get_at( head(), key, typename maker::template less_wrapper<Less>::type() );
}
/// Check if the list is empty
}
template <typename Q, typename Compare>
- bool extract_at( head_type& refHead, typename guarded_ptr::native_guard& guard, Q const& key, Compare cmp )
+ guarded_ptr extract_at( head_type& refHead, Q const& key, Compare cmp )
{
- return base_class::extract_at( refHead, guard, key, cmp );
+ return base_class::extract_at( refHead, key, cmp );
}
template <typename Q, typename Func>
}
template <typename Q, typename Compare>
- bool get_at( head_type& refHead, typename guarded_ptr::native_guard& guard, Q const& key, Compare cmp )
+ guarded_ptr get_at( head_type& refHead, Q const& key, Compare cmp )
{
- return base_class::get_at( refHead, guard, key, cmp );
+ return base_class::get_at( refHead, key, cmp );
}
//@endcond
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_IMPL_SKIP_LIST_MAP_H
template <typename K>
guarded_ptr extract( K const& key )
{
- guarded_ptr gp;
- base_class::extract_( gp.guard(), key, typename base_class::key_comparator() );
- return gp;
+ return base_class::extract_( key, typename base_class::key_comparator() );
}
/// Extracts the item from the map with comparing functor \p pred
{
CDS_UNUSED( pred );
typedef cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor > wrapped_less;
- guarded_ptr gp;
- base_class::extract_( gp.guard(), key, cds::opt::details::make_comparator_from_less<wrapped_less>() );
- return gp;
+ return base_class::extract_( key, cds::opt::details::make_comparator_from_less<wrapped_less>() );
}
/// Extracts an item with minimal key from the map
*/
guarded_ptr extract_min()
{
- guarded_ptr gp;
- base_class::extract_min_( gp.guard() );
- return gp;
+ return base_class::extract_min_();
}
/// Extracts an item with maximal key from the map
*/
guarded_ptr extract_max()
{
- guarded_ptr gp;
- base_class::extract_max_( gp.guard() );
- return gp;
+ return base_class::extract_max_();
}
/// Find the key \p key
template <typename K>
guarded_ptr get( K const& key )
{
- guarded_ptr gp;
- base_class::get_with_( gp.guard(), key, typename base_class::key_comparator() );
- return gp;
+ return base_class::get_with_( key, typename base_class::key_comparator() );
}
/// Finds the key \p key and return the item found
{
CDS_UNUSED( pred );
typedef cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor > wrapped_less;
- guarded_ptr gp;
- base_class::get_with_( gp.guard(), key, cds::opt::details::make_comparator_from_less< wrapped_less >());
- return gp;
+ return base_class::get_with_( key, cds::opt::details::make_comparator_from_less< wrapped_less >());
}
/// Clears the map
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_IMPL_SKIP_LIST_SET_H
template <typename Q>
guarded_ptr extract( Q const& key )
{
- guarded_ptr gp;
- base_class::extract_( gp.guard(), key, typename base_class::key_comparator() );
- return gp;
+ return base_class::extract_( key, typename base_class::key_comparator() );
}
/// Extracts the item from the set with comparing functor \p pred
{
CDS_UNUSED( pred );
typedef cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor > wrapped_less;
- guarded_ptr gp;
- base_class::extract_( gp.guard(), key, cds::opt::details::make_comparator_from_less<wrapped_less>() );
- return gp;
+ return base_class::extract_( key, cds::opt::details::make_comparator_from_less<wrapped_less>() );
}
/// Extracts an item with minimal key from the set
*/
guarded_ptr extract_min()
{
- guarded_ptr gp;
- base_class::extract_min_( gp.guard() );
- return gp;
+ return base_class::extract_min_();
}
/// Extracts an item with maximal key from the set
*/
guarded_ptr extract_max()
{
- guarded_ptr gp;
- base_class::extract_max_( gp.guard() );
- return gp;
+ return base_class::extract_max_();
}
/// Find the \p key
template <typename Q>
guarded_ptr get( Q const& key )
{
- guarded_ptr gp;
- base_class::get_with_( gp.guard(), key, typename base_class::key_comparator() );
- return gp;
+ return base_class::get_with_( key, typename base_class::key_comparator() );
}
/// Finds \p key and return the item found
{
CDS_UNUSED( pred );
typedef cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor > wrapped_less;
- guarded_ptr gp;
- base_class::get_with_( gp.guard(), key, cds::opt::details::make_comparator_from_less< wrapped_less >());
- return gp;
+ return base_class::get_with_( key, cds::opt::details::make_comparator_from_less< wrapped_less >());
}
/// Clears the set (not atomic).
typedef typename base_class::stat stat; ///< Internal statistics
static CDS_CONSTEXPR bool const c_bSort = base_class::c_bSort; ///< List type: ordered (\p true) or unordered (\p false)
+ //@cond
+ // Rebind traits (split-list support)
+ template <typename... Options>
+ struct rebind_traits {
+ typedef LazyKVList<
+ gc
+ , key_type, mapped_type
+ , typename cds::opt::make_options< traits, Options...>::type
+ > type;
+ };
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >;
+ //@endcond
+
protected:
//@cond
typedef typename base_class::value_type node_type;
public:
typedef cds::urcu::gc<RCU> gc; ///< Garbage collector
+ typedef Traits traits; ///< List traits
#ifdef CDS_DOXYGEN_INVOKED
typedef Key key_type ; ///< Key type
typedef Value mapped_type ; ///< Type of value stored in the list
typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock
static CDS_CONSTEXPR const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions require external locking
+ //@cond
+ // Rebind traits (split-list support)
+ template <typename... Options>
+ struct rebind_traits {
+ typedef LazyKVList<
+ gc
+ , key_type, mapped_type
+ , typename cds::opt::make_options< traits, Options...>::type
+ > type;
+ };
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >;
+ //@endcond
+
protected:
//@cond
typedef typename base_class::value_type node_type;
typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
typedef typename base_class::stat stat; ///< Internal statistics
+ //@cond
+ // Rebind traits (split-list support)
+ template <typename... Options>
+ struct rebind_traits {
+ typedef MichaelKVList<
+ gc
+ , key_type, mapped_type
+ , typename cds::opt::make_options< traits, Options...>::type
+ > type;
+ };
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >;
+ //@endcond
+
protected:
//@cond
typedef typename base_class::value_type node_type;
typedef typename maker::intrusive_traits::compare intrusive_key_comparator;
typedef typename base_class::atomic_node_ptr head_type;
- //@endcond
-
- protected:
- //@cond
- template <typename K>
- static node_type * alloc_node(const K& key)
- {
- return cxx_allocator().New( key );
- }
-
- template <typename K, typename V>
- static node_type * alloc_node( const K& key, const V& val )
- {
- return cxx_allocator().New( key, val );
- }
-
- template <typename K, typename... Args>
- static node_type * alloc_node( K&& key, Args&&... args )
- {
- return cxx_allocator().MoveNew( std::forward<K>(key), std::forward<Args>(args)... );
- }
-
- static void free_node( node_type * pNode )
- {
- cxx_allocator().Delete( pNode );
- }
struct node_disposer {
void operator()( node_type * pNode )
}
};
typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr;
-
- head_type& head()
- {
- return base_class::m_pHead;
- }
-
- head_type const& head() const
- {
- return base_class::m_pHead;
- }
//@endcond
protected:
}
//@}
- protected:
- //@cond
- iterator node_to_iterator( node_type * pNode )
- {
- if ( pNode )
- return iterator( *pNode );
- return end();
- }
- //@endcond
-
public:
/// Default constructor
/**
{
return base_class::find_at( refHead, key, cmp );
}
+
+ template <typename K>
+ static node_type * alloc_node( const K& key )
+ {
+ return cxx_allocator().New( key );
+ }
+
+ template <typename K, typename V>
+ static node_type * alloc_node( const K& key, const V& val )
+ {
+ return cxx_allocator().New( key, val );
+ }
+
+ template <typename K, typename... Args>
+ static node_type * alloc_node( K&& key, Args&&... args )
+ {
+ return cxx_allocator().MoveNew( std::forward<K>( key ), std::forward<Args>( args )... );
+ }
+
+ static void free_node( node_type * pNode )
+ {
+ cxx_allocator().Delete( pNode );
+ }
+
+ head_type& head()
+ {
+ return base_class::m_pHead;
+ }
+
+ head_type const& head() const
+ {
+ return base_class::m_pHead;
+ }
+
+ iterator node_to_iterator( node_type * pNode )
+ {
+ if ( pNode )
+ return iterator( *pNode );
+ return end();
+ }
//@endcond
};
typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock
static CDS_CONSTEXPR const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions do not require external locking
+ //@cond
+ // Rebind traits (split-list support)
+ template <typename... Options>
+ struct rebind_traits {
+ typedef MichaelKVList<
+ gc
+ , key_type, mapped_type
+ , typename cds::opt::make_options< traits, Options...>::type
+ > type;
+ };
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >;
+ //@endcond
+
protected:
//@cond
typedef typename base_class::value_type node_type;
typedef typename maker::intrusive_traits::compare intrusive_key_comparator;
typedef typename base_class::atomic_node_ptr head_type;
+
+ struct node_disposer {
+ void operator()( node_type * pNode )
+ {
+ free_node( pNode );
+ }
+ };
+ typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr;
//@endcond
public:
/// Result of \p get(), \p get_with() functions - pointer to the node found
typedef cds::urcu::raw_ptr_adaptor< value_type, typename base_class::raw_ptr, raw_ptr_converter > raw_ptr;
- protected:
- //@cond
- template <typename K>
- static node_type * alloc_node(const K& key)
- {
- return cxx_allocator().New( key );
- }
-
- template <typename K, typename V>
- static node_type * alloc_node( const K& key, const V& val )
- {
- return cxx_allocator().New( key, val );
- }
-
- template <typename K, typename... Args>
- static node_type * alloc_node( K&& key, Args&&... args )
- {
- return cxx_allocator().MoveNew( std::forward<K>(key), std::forward<Args>(args)...);
- }
-
- static void free_node( node_type * pNode )
- {
- cxx_allocator().Delete( pNode );
- }
-
- struct node_disposer {
- void operator()( node_type * pNode )
- {
- free_node( pNode );
- }
- };
- typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr;
-
- head_type& head()
- {
- return base_class::m_pHead;
- }
-
- head_type& head() const
- {
- return const_cast<head_type&>( base_class::m_pHead );
- }
- //@endcond
protected:
//@cond
return raw_ptr( base_class::get_at( refHead, val, cmp ));
}
+ template <typename K>
+ static node_type * alloc_node( const K& key )
+ {
+ return cxx_allocator().New( key );
+ }
+
+ template <typename K, typename V>
+ static node_type * alloc_node( const K& key, const V& val )
+ {
+ return cxx_allocator().New( key, val );
+ }
+
+ template <typename K, typename... Args>
+ static node_type * alloc_node( K&& key, Args&&... args )
+ {
+ return cxx_allocator().MoveNew( std::forward<K>( key ), std::forward<Args>( args )... );
+ }
+
+ static void free_node( node_type * pNode )
+ {
+ cxx_allocator().Delete( pNode );
+ }
+
+ head_type& head()
+ {
+ return base_class::m_pHead;
+ }
+
+ head_type& head() const
+ {
+ return const_cast<head_type&>(base_class::m_pHead);
+ }
//@endcond
};
#define CDSLIB_CONTAINER_MICHAEL_MAP_H
#include <cds/container/details/michael_map_base.h>
+#include <cds/container/details/iterable_list_base.h>
#include <cds/details/allocator.h>
namespace cds { namespace container {
- \p GC - Garbage collector used. You may use any \ref cds_garbage_collector "Garbage collector"
from the \p libcds library.
Note the \p GC must be the same as the GC used for \p OrderedList
- - \p OrderedList - ordered key-value list implementation used as bucket for hash map, for example, \p MichaelKVList
- or \p LazyKVList. The ordered list implementation specifies the \p Key and \p Value types stored in the hash-map,
- the reclamation schema \p GC used by hash-map, the comparison functor for the type \p Key and other features
- specific for the ordered list.
+ - \p OrderedList - ordered key-value list implementation used as bucket for hash map, for example, \p MichaelKVList,
+ \p LazyKVList, \p IterableKVList. The ordered list implementation specifies the \p Key and \p Value types
+ stored in the hash-map, the reclamation schema \p GC used by hash-map, the comparison functor for the type \p Key
+ and other features specific for the ordered list.
- \p Traits - map traits, default is \p michael_map::traits.
Instead of defining \p Traits struct you may use option-based syntax with \p michael_map::make_traits metafunction.
class MichaelHashMap
{
public:
- typedef GC gc; ///< Garbage collector
- typedef OrderedList bucket_type; ///< type of ordered list to be used as a bucket
- typedef Traits traits; ///< Map traits
+ typedef GC gc; ///< Garbage collector
+ typedef OrderedList ordered_list; ///< type of ordered list to be used as a bucket
+ typedef Traits traits; ///< Map traits
- typedef typename bucket_type::key_type key_type; ///< key type
- typedef typename bucket_type::mapped_type mapped_type; ///< value type
- typedef typename bucket_type::value_type value_type; ///< key/value pair stored in the map
+ typedef typename ordered_list::key_type key_type; ///< key type
+ typedef typename ordered_list::mapped_type mapped_type; ///< value type
+ typedef typename ordered_list::value_type value_type; ///< key/value pair stored in the map
+ typedef typename traits::allocator allocator; ///< Bucket table allocator
- typedef typename bucket_type::key_comparator key_comparator; ///< key compare functor
+ typedef typename ordered_list::key_comparator key_comparator; ///< key compare functor
+#ifdef CDS_DOXYGEN_INVOKED
+ typedef typename ordered_list::stat stat; ///< Internal statistics
+ /// Guarded pointer - a result of \p get() and \p extract() functions
+ typedef typename ordered_list::guarded_ptr guarded_ptr;
+#endif
/// Hash functor for \ref key_type and all its derivatives that you use
typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
typedef typename traits::item_counter item_counter; ///< Item counter type
- /// Bucket table allocator
- typedef cds::details::Allocator< bucket_type, typename traits::allocator > bucket_table_allocator;
- typedef typename bucket_type::guarded_ptr guarded_ptr; ///< Guarded pointer
+ // GC and OrderedList::gc must be the same
+ static_assert( std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
- static CDS_CONSTEXPR const size_t c_nHazardPtrCount = bucket_type::c_nHazardPtrCount; ///< Count of hazard pointer required
+ // atomicity::empty_item_counter is not allowed as an item counter
+ static_assert( !std::is_same<item_counter, atomicity::empty_item_counter>::value,
+ "atomicity::empty_item_counter is not allowed as a item counter");
- protected:
- item_counter m_ItemCounter; ///< Item counter
- hash m_HashFunctor; ///< Hash functor
- bucket_type * m_Buckets; ///< bucket table
+ static CDS_CONSTEXPR const size_t c_nHazardPtrCount = ordered_list::c_nHazardPtrCount; ///< Count of hazard pointer required
- private:
//@cond
- const size_t m_nHashBitmask;
+ typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat;
+
+ typedef typename ordered_list::template rebind_traits<
+ cds::opt::item_counter< cds::atomicity::empty_item_counter >
+ , cds::opt::stat< typename bucket_stat::wrapped_stat >
+ >::type internal_bucket_type;
+
+ typedef typename internal_bucket_type::guarded_ptr guarded_ptr;
+ typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator;
+ typedef typename bucket_stat::stat stat;
//@endcond
protected:
//@cond
- /// Calculates hash value of \p key
- template <typename Q>
- size_t hash_value( Q const& key ) const
- {
- return m_HashFunctor( key ) & m_nHashBitmask;
- }
-
- /// Returns the bucket (ordered list) for \p key
- template <typename Q>
- bucket_type& bucket( Q const& key )
- {
- return m_Buckets[ hash_value( key ) ];
- }
+ const size_t m_nHashBitmask;
+ internal_bucket_type* m_Buckets; ///< bucket table
+ item_counter m_ItemCounter; ///< Item counter
+ hash m_HashFunctor; ///< Hash functor
+ stat m_Stat; ///< Internal statistics
//@endcond
protected:
//@cond
/// Forward iterator
template <bool IsConst>
- class iterator_type: private cds::intrusive::michael_set::details::iterator< bucket_type, IsConst >
+ class iterator_type: private cds::intrusive::michael_set::details::iterator< internal_bucket_type, IsConst >
{
- typedef cds::intrusive::michael_set::details::iterator< bucket_type, IsConst > base_class;
+ typedef cds::intrusive::michael_set::details::iterator< internal_bucket_type, IsConst > base_class;
friend class MichaelHashMap;
protected:
//@{
/// Forward iterator
/**
- The iteration is unordered.
- The iterator object is thread-safe: the element pointed by the iterator object is guarded,
- so, the element cannot be reclaimed while the iterator object is alive.
- However, passing an iterator object between threads is dangerous.
-
- @warning Due to concurrent nature of Michael's map it is not guarantee that you can iterate
- all elements in the map: any concurrent deletion can exclude the element
- pointed by the iterator from the map, and your iteration can be terminated
- before end of the map. Therefore, such iteration is more suitable for debugging purpose only.
-
- Remember, each iterator object requires an additional hazard pointer, that may be
- a limited resource for \p GC like \p gc::HP (for \p gc::DHP the count of
- guards is unlimited).
-
- The iterator class supports the following minimalistic interface:
+ The forward iterator for Michael's map has some features:
+ - it has no post-increment operator
+ - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator.
+ For some GC (like as \p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard"
+ may be thrown if the limit of guard count per thread is exceeded.
+ - The iterator cannot be moved across thread boundary because it contains thread-private GC's guard.
+
+ Iterator thread safety depends on type of \p OrderedList:
+ - for \p MichaelKVList and \p LazyKVList: iterator guarantees safety even if you delete the item that iterator points to
+ because that item is guarded by hazard pointer.
+ However, in case of concurrent deleting operations there is no guarantee that you iterate all items in the map.
+ Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread.
+ Use this iterator on the concurrent container for debugging purpose only.
+ - for \p IterableList: iterator is thread-safe. You may use it freely in concurrent environment.
+
+ The iterator interface:
\code
- struct iterator {
- // Default ctor
+ class iterator {
+ public:
+ // Default constructor
iterator();
- // Copy ctor
- iterator( iterator const& s);
+ // Copy constructor
+ iterator( iterator const& src );
+ // Dereference operator
value_type * operator ->() const;
+
+ // Dereference operator
value_type& operator *() const;
- // Pre-increment
+ // Preincrement operator
iterator& operator ++();
- // Copy assignment
+ // Assignment operator
iterator& operator = (iterator const& src);
+ // Equality operators
bool operator ==(iterator const& i ) const;
bool operator !=(iterator const& i ) const;
};
*/
iterator begin()
{
- return iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count() );
+ return iterator( bucket_begin()->begin(), bucket_begin(), bucket_end() );
}
/// Returns an iterator that addresses the location succeeding the last element in a map
*/
iterator end()
{
- return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() );
+ return iterator( bucket_end()[-1].end(), bucket_end() - 1, bucket_end() );
}
/// Returns a forward const iterator addressing the first element in a map
}
//@}
- private:
- //@cond
- const_iterator get_const_begin() const
- {
- return const_iterator( const_cast<bucket_type const&>(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count() );
- }
- const_iterator get_const_end() const
- {
- return const_iterator( const_cast<bucket_type const&>(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() );
- }
- //@endcond
-
public:
/// Initializes the map
/** @anchor cds_nonintrusive_MichaelHashMap_hp_ctor
MichaelHashMap(
size_t nMaxItemCount, ///< estimation of max item count in the hash map
size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket
- ) : m_nHashBitmask( michael_map::details::init_hash_bitmask( nMaxItemCount, nLoadFactor ))
+ )
+ : m_nHashBitmask( michael_map::details::init_hash_bitmask( nMaxItemCount, nLoadFactor ))
+ , m_Buckets( bucket_table_allocator().allocate( bucket_count() ) )
{
- // GC and OrderedList::gc must be the same
- static_assert( std::is_same<gc, typename bucket_type::gc>::value, "GC and OrderedList::gc must be the same");
-
- // atomicity::empty_item_counter is not allowed as a item counter
- static_assert( !std::is_same<item_counter, atomicity::empty_item_counter>::value,
- "atomicity::empty_item_counter is not allowed as a item counter");
-
- m_Buckets = bucket_table_allocator().NewArray( bucket_count() );
+ for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it )
+ construct_bucket<bucket_stat>( it );
}
/// Clears hash map and destroys it
~MichaelHashMap()
{
clear();
- bucket_table_allocator().Delete( m_Buckets, bucket_count() );
+
+ for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it )
+ it->~internal_bucket_type();
+ bucket_table_allocator().deallocate( m_Buckets, bucket_count() );
}
/// Inserts new node with key and default value
Returns \p true if inserting successful, \p false otherwise.
*/
template <typename K>
- bool insert( const K& key )
+ bool insert( K&& key )
{
- const bool bRet = bucket( key ).insert( key );
+ const bool bRet = bucket( key ).insert( std::forward<K>( key ));
if ( bRet )
++m_ItemCounter;
return bRet;
Returns \p true if \p val is inserted into the map, \p false otherwise.
*/
template <typename K, typename V>
- bool insert( K const& key, V const& val )
+ bool insert( K&& key, V&& val )
{
- const bool bRet = bucket( key ).insert( key, val );
+ const bool bRet = bucket( key ).insert( std::forward<K>( key ), std::forward<V>( val ));
if ( bRet )
++m_ItemCounter;
return bRet;
synchronization.
*/
template <typename K, typename Func>
- bool insert_with( const K& key, Func func )
+ bool insert_with( K&& key, Func func )
{
- const bool bRet = bucket( key ).insert_with( key, func );
+ const bool bRet = bucket( key ).insert_with( std::forward<K>( key ), func );
if ( bRet )
++m_ItemCounter;
return bRet;
(note that in this case the \ref key_type should be constructible from type \p K).
Otherwise, if \p key is found, the functor \p func is called with item found.
- The functor \p Func signature is:
+ The functor \p func signature depends of \p OrderedList:
+
+ <b>for \p MichaelKVList, \p LazyKVList</b>
\code
struct my_functor {
void operator()( bool bNew, value_type& item );
The functor may change any fields of the \p item.second that is \p mapped_type.
- Returns <tt> std::pair<bool, bool> </tt> where \p first is true if operation is successful,
+ <b>for \p IterableKVList</b>
+ \code
+ void func( value_type& val, value_type * old );
+ \endcode
+ where
+ - \p val - new data constructed from \p key
+ - \p old - old value that will be retired. If new item has been inserted then \p old is \p nullptr.
+
+ The functor may change non-key fields of \p val; however, \p func must guarantee
+ that no other modifications are made to this item by concurrent threads while it is being changed.
+
+ @return <tt> std::pair<bool, bool> </tt> where \p first is true if operation is successful,
\p second is true if new item has been added or \p false if the item with \p key
already exists.
- @warning For \ref cds_nonintrusive_MichaelKVList_gc "MichaelKVList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting".
+ @warning For \ref cds_nonintrusive_MichaelKVList_gc "MichaelKVList" and \ref cds_nonintrusive_IterableKVList_gc "IterableKVList"
+ as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting".
\ref cds_nonintrusive_LazyKVList_gc "LazyKVList" provides exclusive access to inserted item and does not require any node-level
synchronization.
*/
template <typename K, typename Func >
- std::pair<bool, bool> update( K const& key, Func func, bool bAllowInsert = true )
+ std::pair<bool, bool> update( K&& key, Func func, bool bAllowInsert = true )
{
- std::pair<bool, bool> bRet = bucket( key ).update( key, func, bAllowInsert );
+ std::pair<bool, bool> bRet = bucket( key ).update( std::forward<K>( key ), func, bAllowInsert );
if ( bRet.first && bRet.second )
++m_ItemCounter;
return bRet;
}
//@endcond
+ /// Inserts or updates the node (only for \p IterableKVList)
+ /**
+ The operation performs inserting or changing data in a lock-free manner.
+
+ If the item \p val is not found in the map, then \p val is inserted iff \p bAllowInsert is \p true.
+ Otherwise, the current element is changed to \p val, the old element will be retired later.
+
+ Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successful,
+ \p second is \p true if \p val has been added or \p false if the item with that key
+ already in the map.
+ */
+ template <typename Q, typename V>
+#ifdef CDS_DOXYGEN_INVOKED
+ std::pair<bool, bool>
+#else
+ typename std::enable_if<
+ std::is_same< Q, Q>::value && is_iterable_list< ordered_list >::value,
+ std::pair<bool, bool>
+ >::type
+#endif
+ upsert( Q&& key, V&& val, bool bAllowInsert = true )
+ {
+ // The bucket must be selected by the key's hash: hashing \p val here would
+ // place the pair into a wrong bucket whenever hash(val) != hash(key),
+ // making the inserted item invisible to subsequent find()/contains() by key.
+ std::pair<bool, bool> bRet = bucket( key ).upsert( std::forward<Q>( key ), std::forward<V>( val ), bAllowInsert );
+ if ( bRet.second )
+ ++m_ItemCounter;
+ return bRet;
+ }
+
/// For key \p key inserts data of type \p mapped_type created from \p args
/**
\p key_type should be constructible from type \p K
{
return m_nHashBitmask + 1;
}
+
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return m_Stat;
+ }
+
+ protected:
+ //@cond
+ /// Calculates hash value of \p key
+ template <typename Q>
+ size_t hash_value( Q const& key ) const
+ {
+ return m_HashFunctor( key ) & m_nHashBitmask;
+ }
+
+ /// Returns the bucket (ordered list) for \p key
+ template <typename Q>
+ internal_bucket_type& bucket( Q const& key )
+ {
+ return m_Buckets[hash_value( key )];
+ }
+ //@endcond
+
+ private:
+ //@cond
+ internal_bucket_type* bucket_begin() const
+ {
+ return m_Buckets;
+ }
+
+ internal_bucket_type* bucket_end() const
+ {
+ return m_Buckets + bucket_count();
+ }
+
+ const_iterator get_const_begin() const
+ {
+ return const_iterator( bucket_begin()->cbegin(), bucket_begin(), bucket_end() );
+ }
+ const_iterator get_const_end() const
+ {
+ return const_iterator( (bucket_end() - 1)->cend(), bucket_end() - 1, bucket_end() );
+ }
+
+ template <typename Stat>
+ typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type* bucket )
+ {
+ new (bucket) internal_bucket_type;
+ }
+
+ template <typename Stat>
+ typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type* bucket )
+ {
+ new (bucket) internal_bucket_type( m_Stat );
+ }
+ //@endcond
};
}} // namespace cds::container
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_MICHAEL_MAP_NOGC_H
{
public:
typedef cds::gc::nogc gc; ///< No garbage collector
- typedef OrderedList bucket_type; ///< type of ordered list used as a bucket implementation
+ typedef OrderedList ordered_list; ///< type of ordered list used as a bucket implementation
typedef Traits traits; ///< Map traits
- typedef typename bucket_type::key_type key_type; ///< key type
- typedef typename bucket_type::mapped_type mapped_type; ///< type of value to be stored in the map
- typedef typename bucket_type::value_type value_type; ///< Pair used as the some functor's argument
+ typedef typename ordered_list::key_type key_type; ///< key type
+ typedef typename ordered_list::mapped_type mapped_type; ///< type of value to be stored in the map
+ typedef typename ordered_list::value_type value_type; ///< Pair used as the some functor's argument
- typedef typename bucket_type::key_comparator key_comparator; ///< key comparing functor
+ typedef typename ordered_list::key_comparator key_comparator; ///< key comparing functor
/// Hash functor for \ref key_type and all its derivatives that you use
typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
- typedef typename traits::item_counter item_counter; ///< Item counter type
+ typedef typename traits::item_counter item_counter; ///< Item counter type
+ typedef typename traits::allocator allocator; ///< Bucket table allocator
- /// Bucket table allocator
- typedef cds::details::Allocator< bucket_type, typename traits::allocator > bucket_table_allocator;
+#ifdef CDS_DOXYGEN_INVOKED
+ typedef typename ordered_list::stat stat; ///< Internal statistics
+#endif
+
+ // GC and OrderedList::gc must be the same
+ static_assert(std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
+
+ // atomicity::empty_item_counter is not allowed as an item counter
+ static_assert(!std::is_same<item_counter, atomicity::empty_item_counter>::value,
+ "cds::atomicity::empty_item_counter is not allowed as a item counter");
protected:
//@cond
- typedef typename bucket_type::iterator bucket_iterator;
- typedef typename bucket_type::const_iterator bucket_const_iterator;
- //@endcond
+ typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat;
- protected:
- item_counter m_ItemCounter; ///< Item counter
- hash m_HashFunctor; ///< Hash functor
- bucket_type * m_Buckets; ///< bucket table
+ typedef typename ordered_list::template rebind_traits<
+ cds::opt::item_counter< cds::atomicity::empty_item_counter >
+ , cds::opt::stat< typename bucket_stat::wrapped_stat >
+ >::type internal_bucket_type;
- private:
+ /// Bucket table allocator
+ typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator;
+
+ typedef typename internal_bucket_type::iterator bucket_iterator;
+ typedef typename internal_bucket_type::const_iterator bucket_const_iterator;
+ //@endcond
+
+ public:
//@cond
- const size_t m_nHashBitmask;
+ typedef typename bucket_stat::stat stat;
//@endcond
protected:
//@cond
- /// Calculates hash value of \p key
- template <typename K>
- size_t hash_value( K const & key ) const
- {
- return m_HashFunctor( key ) & m_nHashBitmask;
- }
-
- /// Returns the bucket (ordered list) for \p key
- template <typename K>
- bucket_type& bucket( K const& key )
- {
- return m_Buckets[ hash_value( key ) ];
- }
+ const size_t m_nHashBitmask;
+ item_counter m_ItemCounter; ///< Item counter
+ hash m_HashFunctor; ///< Hash functor
+ internal_bucket_type* m_Buckets; ///< bucket table
+ stat m_Stat; ///< Internal statistics
//@endcond
protected:
//@cond
template <bool IsConst>
- class iterator_type: private cds::intrusive::michael_set::details::iterator< bucket_type, IsConst >
+ class iterator_type: private cds::intrusive::michael_set::details::iterator< internal_bucket_type, IsConst >
{
- typedef cds::intrusive::michael_set::details::iterator< bucket_type, IsConst > base_class;
+ typedef cds::intrusive::michael_set::details::iterator< internal_bucket_type, IsConst > base_class;
friend class MichaelHashMap;
protected:
}
//@}
- private:
- //@cond
- const_iterator get_const_begin() const
- {
- return const_iterator( const_cast<bucket_type const&>(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count() );
- }
- const_iterator get_const_end() const
- {
- return const_iterator( const_cast<bucket_type const&>(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() );
- }
- //@endcond
-
public:
/// Initialize the map
/**
size_t nMaxItemCount, ///< estimation of max item count in the hash set
size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket
) : m_nHashBitmask( michael_map::details::init_hash_bitmask( nMaxItemCount, nLoadFactor ))
+ , m_Buckets( bucket_table_allocator().allocate( bucket_count()))
{
- // GC and OrderedList::gc must be the same
- static_assert( std::is_same<gc, typename bucket_type::gc>::value, "GC and OrderedList::gc must be the same");
-
- // atomicity::empty_item_counter is not allowed as a item counter
- static_assert( !std::is_same<item_counter, atomicity::empty_item_counter>::value,
- "cds::atomicity::empty_item_counter is not allowed as a item counter");
-
- m_Buckets = bucket_table_allocator().NewArray( bucket_count() );
+ for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it )
+ construct_bucket<bucket_stat>( it );
}
/// Clears hash set and destroys it
~MichaelHashMap()
{
clear();
- bucket_table_allocator().Delete( m_Buckets, bucket_count() );
+ for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it )
+ it->~internal_bucket_type();
+ bucket_table_allocator().deallocate( m_Buckets, bucket_count() );
}
/// Inserts new node with key and default value
template <typename K>
iterator insert( const K& key )
{
- bucket_type& refBucket = bucket( key );
+ internal_bucket_type& refBucket = bucket( key );
bucket_iterator it = refBucket.insert( key );
if ( it != refBucket.end() ) {
template <typename K, typename V>
iterator insert( K const& key, V const& val )
{
- bucket_type& refBucket = bucket( key );
+ internal_bucket_type& refBucket = bucket( key );
bucket_iterator it = refBucket.insert( key, val );
if ( it != refBucket.end() ) {
template <typename K, typename Func>
iterator insert_with( const K& key, Func func )
{
- bucket_type& refBucket = bucket( key );
+ internal_bucket_type& refBucket = bucket( key );
bucket_iterator it = refBucket.insert_with( key, func );
if ( it != refBucket.end() ) {
template <typename K, typename... Args>
iterator emplace( K&& key, Args&&... args )
{
- bucket_type& refBucket = bucket( key );
+ internal_bucket_type& refBucket = bucket( key );
bucket_iterator it = refBucket.emplace( std::forward<K>(key), std::forward<Args>(args)... );
if ( it != refBucket.end() ) {
template <typename K>
std::pair<iterator, bool> update( const K& key, bool bAllowInsert = true )
{
- bucket_type& refBucket = bucket( key );
+ internal_bucket_type& refBucket = bucket( key );
std::pair<bucket_iterator, bool> ret = refBucket.update( key, bAllowInsert );
if ( ret.second )
template <typename K>
iterator contains( K const& key )
{
- bucket_type& refBucket = bucket( key );
+ internal_bucket_type& refBucket = bucket( key );
bucket_iterator it = refBucket.contains( key );
if ( it != refBucket.end() )
template <typename K, typename Less>
iterator contains( K const& key, Less pred )
{
- bucket_type& refBucket = bucket( key );
+ internal_bucket_type& refBucket = bucket( key );
bucket_iterator it = refBucket.contains( key, pred );
if ( it != refBucket.end() )
return m_ItemCounter;
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return m_Stat;
+ }
+
/// Returns the size of hash table
/**
Since \p %MichaelHashMap cannot dynamically extend the hash table size,
{
return m_nHashBitmask + 1;
}
+
+ protected:
+ //@cond
+ /// Calculates hash value of \p key
+ template <typename K>
+ size_t hash_value( K const & key ) const
+ {
+ return m_HashFunctor( key ) & m_nHashBitmask;
+ }
+
+ /// Returns the bucket (ordered list) for \p key
+ template <typename K>
+ internal_bucket_type& bucket( K const& key )
+ {
+ return m_Buckets[hash_value( key )];
+ }
+ //@endcond
+
+ private:
+ //@cond
+ const_iterator get_const_begin() const
+ {
+ return const_iterator( const_cast<internal_bucket_type const&>(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count() );
+ }
+ const_iterator get_const_end() const
+ {
+ return const_iterator( const_cast<internal_bucket_type const&>(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() );
+ }
+
+ template <typename Stat>
+ typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type* bucket )
+ {
+ new (bucket) internal_bucket_type;
+ }
+
+ template <typename Stat>
+ typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type* bucket )
+ {
+ new (bucket) internal_bucket_type( m_Stat );
+ }
+ //@endcond
};
}} // namespace cds::container
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_MICHAEL_MAP_RCU_H
{
public:
typedef cds::urcu::gc< RCU > gc; ///< RCU used as garbage collector
- typedef OrderedList bucket_type; ///< type of ordered list used as a bucket implementation
+ typedef OrderedList ordered_list; ///< type of ordered list used as a bucket implementation
typedef Traits traits; ///< Map traits
- typedef typename bucket_type::key_type key_type ; ///< key type
- typedef typename bucket_type::mapped_type mapped_type ; ///< value type
- typedef typename bucket_type::value_type value_type ; ///< key/value pair stored in the list
- typedef typename bucket_type::key_comparator key_comparator ; ///< key comparison functor
+ typedef typename ordered_list::key_type key_type; ///< key type
+ typedef typename ordered_list::mapped_type mapped_type; ///< value type
+ typedef typename ordered_list::value_type value_type; ///< key/value pair stored in the list
+ typedef typename ordered_list::key_comparator key_comparator;///< key comparison functor
+#ifdef CDS_DOXYGEN_INVOKED
+ typedef typename ordered_list::stat stat; ///< Internal statistics
+ typedef typename ordered_list::exempt_ptr exempt_ptr; ///< pointer to extracted node
+ /// Type of \p get() member function return value
+ typedef typename ordered_list::raw_ptr raw_ptr;
+ typedef typename ordered_list::rcu_lock rcu_lock; ///< RCU scoped lock
+#endif
/// Hash functor for \ref key_type and all its derivatives that you use
typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
typedef typename traits::item_counter item_counter; ///< Item counter type
+ typedef typename traits::allocator allocator; ///< Bucket table allocator
- /// Bucket table allocator
- typedef cds::details::Allocator< bucket_type, typename traits::allocator > bucket_table_allocator;
-
- typedef typename bucket_type::rcu_lock rcu_lock; ///< RCU scoped lock
- typedef typename bucket_type::exempt_ptr exempt_ptr; ///< pointer to extracted node
/// Group of \p extract_xxx functions require external locking if underlying ordered list requires that
- static CDS_CONSTEXPR const bool c_bExtractLockExternal = bucket_type::c_bExtractLockExternal;
- /// Type of \p get() member function return value
- typedef typename bucket_type::raw_ptr raw_ptr;
+ static CDS_CONSTEXPR const bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal;
+
+ // GC and OrderedList::gc must be the same
+ static_assert(std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
+
+ // atomicity::empty_item_counter is not allowed as an item counter
+ static_assert(!std::is_same<item_counter, cds::atomicity::empty_item_counter>::value,
+ "cds::atomicity::empty_item_counter is not allowed as a item counter");
protected:
- item_counter m_ItemCounter; ///< Item counter
- hash m_HashFunctor; ///< Hash functor
- bucket_type * m_Buckets; ///< bucket table
+ //@cond
+ typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat;
- private:
+ typedef typename ordered_list::template rebind_traits<
+ cds::opt::item_counter< cds::atomicity::empty_item_counter >
+ , cds::opt::stat< typename bucket_stat::wrapped_stat >
+ >::type internal_bucket_type;
+
+ /// Bucket table allocator
+ typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator;
+ //@endcond
+
+ public:
//@cond
- const size_t m_nHashBitmask;
+ typedef typename bucket_stat::stat stat;
+ typedef typename internal_bucket_type::exempt_ptr exempt_ptr;
+ typedef typename internal_bucket_type::raw_ptr raw_ptr;
+ typedef typename internal_bucket_type::rcu_lock rcu_lock;
//@endcond
protected:
//@cond
- /// Calculates hash value of \p key
- template <typename Q>
- size_t hash_value( Q const& key ) const
- {
- return m_HashFunctor( key ) & m_nHashBitmask;
- }
-
- /// Returns the bucket (ordered list) for \p key
- template <typename Q>
- bucket_type& bucket( Q const& key )
- {
- return m_Buckets[ hash_value( key ) ];
- }
- template <typename Q>
- bucket_type const& bucket( Q const& key ) const
- {
- return m_Buckets[ hash_value( key ) ];
- }
+ const size_t m_nHashBitmask;
+ item_counter m_ItemCounter; ///< Item counter
+ hash m_HashFunctor; ///< Hash functor
+ internal_bucket_type * m_Buckets; ///< bucket table
+ stat m_Stat; ///< Internal statistics
//@endcond
protected:
//@cond
template <bool IsConst>
- class iterator_type: private cds::intrusive::michael_set::details::iterator< bucket_type, IsConst >
+ class iterator_type: private cds::intrusive::michael_set::details::iterator< internal_bucket_type, IsConst >
{
- typedef cds::intrusive::michael_set::details::iterator< bucket_type, IsConst > base_class;
+ typedef cds::intrusive::michael_set::details::iterator< internal_bucket_type, IsConst > base_class;
friend class MichaelHashMap;
protected:
}
//@}
- private:
- //@cond
- const_iterator get_const_begin() const
- {
- return const_iterator( const_cast<bucket_type const&>(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count() );
- }
- const_iterator get_const_end() const
- {
- return const_iterator( const_cast<bucket_type const&>(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() );
- }
- //@endcond
-
public:
/// Initializes the map
/**
size_t nMaxItemCount, ///< estimation of max item count in the hash map
size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket
) : m_nHashBitmask( michael_map::details::init_hash_bitmask( nMaxItemCount, nLoadFactor ))
+ , m_Buckets( bucket_table_allocator().allocate( bucket_count()))
{
- // GC and OrderedList::gc must be the same
- static_assert( std::is_same<gc, typename bucket_type::gc>::value, "GC and OrderedList::gc must be the same");
-
- // atomicity::empty_item_counter is not allowed as a item counter
- static_assert( !std::is_same<item_counter, cds::atomicity::empty_item_counter>::value,
- "cds::atomicity::empty_item_counter is not allowed as a item counter");
-
- m_Buckets = bucket_table_allocator().NewArray( bucket_count() );
+ for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it )
+ construct_bucket<bucket_stat>( it );
}
/// Clears hash map and destroys it
~MichaelHashMap()
{
clear();
- bucket_table_allocator().Delete( m_Buckets, bucket_count() );
+ for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it )
+ it->~internal_bucket_type();
+ bucket_table_allocator().deallocate( m_Buckets, bucket_count() );
}
/// Inserts new node with key and default value
If the item is not found the function return an empty \p exempt_ptr.
The function just excludes the key from the map and returns a pointer to item found.
- Depends on \p bucket_type you should or should not lock RCU before calling of this function:
+ Depending on \p ordered_list, you should or should not lock RCU before calling this function:
- for the set based on \ref cds_nonintrusive_MichaelList_rcu "MichaelList" RCU should not be locked
- for the set based on \ref cds_nonintrusive_LazyList_rcu "LazyList" RCU should be locked
See ordered list implementation for details.
/** \anchor cds_nonintrusive_MichaelHashMap_rcu_get
The function searches the item with key equal to \p key and returns the pointer to item found.
If \p key is not found it returns \p nullptr.
- Note the type of returned value depends on underlying \p bucket_type.
+ Note the type of returned value depends on underlying \p ordered_list.
For details, see documentation of ordered list you use.
Note the compare functor should accept a parameter of type \p K that can be not the same as \p key_type.
return m_ItemCounter;
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return m_Stat;
+ }
+
/// Returns the size of hash table
/**
Since \p %MichaelHashMap cannot dynamically extend the hash table size,
{
return m_nHashBitmask + 1;
}
+
+ protected:
+ //@cond
+ /// Calculates hash value of \p key
+ template <typename Q>
+ size_t hash_value( Q const& key ) const
+ {
+ return m_HashFunctor( key ) & m_nHashBitmask;
+ }
+
+ /// Returns the bucket (ordered list) for \p key
+ template <typename Q>
+ internal_bucket_type& bucket( Q const& key )
+ {
+ return m_Buckets[hash_value( key )];
+ }
+ template <typename Q>
+ internal_bucket_type const& bucket( Q const& key ) const
+ {
+ return m_Buckets[hash_value( key )];
+ }
+ //@endcond
+ private:
+ //@cond
+ const_iterator get_const_begin() const
+ {
+ return const_iterator( const_cast<internal_bucket_type const&>(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count() );
+ }
+ const_iterator get_const_end() const
+ {
+ return const_iterator( const_cast<internal_bucket_type const&>(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() );
+ }
+
+ template <typename Stat>
+ typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type* bucket )
+ {
+ new (bucket) internal_bucket_type;
+ }
+
+ template <typename Stat>
+ typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type* bucket )
+ {
+ new (bucket) internal_bucket_type( m_Stat );
+ }
+ //@endcond
};
}} // namespace cds::container
typedef typename ordered_list::value_type value_type; ///< type of value to be stored in the list
typedef typename ordered_list::key_comparator key_comparator; ///< key comparison functor
+#ifdef CDS_DOXYGEN_INVOKED
typedef typename ordered_list::stat stat; ///< Internal statistics
+#endif
/// Hash functor for \ref value_type and all its derivatives that you use
typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
static_assert( !std::is_same<item_counter, atomicity::empty_item_counter>::value,
"cds::atomicity::empty_item_counter is not allowed as a item counter");
-#ifdef CDS_DOXYGEN_INVOKED
- /// Wrapped internal statistics for \p ordered_list
- typedef implementatin_specific bucket_stat;
-#else
+ //@cond
typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat;
-#endif
-#ifdef CDS_DOXYGEN_INVOKED
- /// Internal bucket type - rebind \p ordered_list with empty item counter and wrapped internal statistics
- typedef modified_ordered_list internal_bucket_type;
-#else
typedef typename ordered_list::template rebind_traits<
cds::opt::item_counter< cds::atomicity::empty_item_counter >
, cds::opt::stat< typename bucket_stat::wrapped_stat >
>::type internal_bucket_type;
-#endif
- /// Guarded pointer - a result of \p get() and \p extract() functions
- typedef typename internal_bucket_type::guarded_ptr guarded_ptr;
-
- //@cond
/// Bucket table allocator
typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator;
+
+ typedef typename bucket_stat::stat stat;
//@endcond
+ /// Guarded pointer - a result of \p get() and \p extract() functions
+ typedef typename internal_bucket_type::guarded_ptr guarded_ptr;
+
protected:
//@cond
size_t const m_nHashBitmask;
internal_bucket_type * m_Buckets; ///< bucket table
item_counter m_ItemCounter; ///< Item counter
hash m_HashFunctor; ///< Hash functor
- typename bucket_stat::stat m_Stat; ///< Internal statistics
+ stat m_Stat; ///< Internal statistics
//@endcond
public:
The operation performs inserting or changing data with lock-free manner.
If the item \p val is not found in the set, then \p val is inserted iff \p bAllowInsert is \p true.
- Otherwise, the current element is changed to \p val, the old element will be retired later
- by call \p Traits::disposer.
+ Otherwise, the current element is changed to \p val; the old element will be retired later.
Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
\p second is \p true if \p val has been added or \p false if the item with that key
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_MICHAEL_SET_NOGC_H
typedef typename ordered_list::value_type value_type; ///< type of value stored in the list
typedef typename ordered_list::key_comparator key_comparator; ///< key comparison functor
+#ifdef CDS_DOXYGEN_INVOKED
typedef typename ordered_list::stat stat; ///< Internal statistics
+#endif
/// Hash functor for \ref value_type and all its derivatives that you use
typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
typedef typename internal_bucket_type::const_iterator bucket_const_iterator;
//@endcond
+ public:
+ //@cond
+ typedef typename bucket_stat::stat stat;
+ //@endcond
+
protected:
//@cond
const size_t m_nHashBitmask;
item_counter m_ItemCounter; ///< Item counter
hash m_HashFunctor; ///< Hash functor
internal_bucket_type* m_Buckets; ///< bucket table
- typename bucket_stat::stat m_Stat; ///< Internal statistics
+ stat m_Stat; ///< Internal statistics
//@endcond
public:
typedef typename ordered_list::value_type value_type; ///< type of value to be stored in the list
typedef typename ordered_list::key_comparator key_comparator; ///< key comparing functor
- typedef typename ordered_list::stat stat; ///< Internal statistics
+#ifdef CDS_DOXYGEN_INVOKED
+ typedef typename ordered_list::stat stat; ///< Internal statistics
+ typedef typename ordered_list::exempt_ptr exempt_ptr; ///< pointer to extracted node
+ typedef typename ordered_list::raw_ptr raw_ptr; ///< Return type of \p get() member function and its derivatives
+#endif
/// Hash functor for \ref value_type and all its derivatives that you use
typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
static_assert(!std::is_same<item_counter, atomicity::empty_item_counter>::value,
"atomicity::empty_item_counter is not allowed as a item counter");
-#ifdef CDS_DOXYGEN_INVOKED
- /// Wrapped internal statistics for \p ordered_list
- typedef implementatin_specific bucket_stat;
-
- /// Internal bucket type - rebind \p ordered_list with empty item counter and wrapped internal statistics
- typedef modified_ordered_list internal_bucket_type;
-#else
+ //@cond
typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat;
typedef typename ordered_list::template rebind_traits<
using base_class::insert_node;
using base_class::node_to_value;
};
-#endif
- typedef typename internal_bucket_type::exempt_ptr exempt_ptr; ///< pointer to extracted node
- typedef typename internal_bucket_type::raw_ptr raw_ptr; ///< Return type of \p get() member function and its derivatives
+ typedef typename internal_bucket_type::exempt_ptr exempt_ptr;
+ typedef typename internal_bucket_type::raw_ptr raw_ptr;
+ typedef typename bucket_stat::stat stat;
+ //@endcond
protected:
//@cond
item_counter m_ItemCounter; ///< Item counter
hash m_HashFunctor; ///< Hash functor
internal_bucket_type* m_Buckets; ///< bucket table
- typename bucket_stat::stat m_Stat; ///< Internal statistics
+ stat m_Stat; ///< Internal statistics
//@endcond
public:
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_SPLIT_LIST_MAP_H
template <typename K>
guarded_ptr extract( K const& key )
{
- guarded_ptr gp;
- base_class::extract_( gp.guard(), key );
- return gp;
+ return base_class::extract_( key );
}
/// Extracts the item using compare functor \p pred
guarded_ptr extract_with( K const& key, Less pred )
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- base_class::extract_with_( gp.guard(), key, cds::details::predicate_wrapper<value_type, Less, key_accessor>());
- return gp;
+ return base_class::extract_with_( key, cds::details::predicate_wrapper<value_type, Less, key_accessor>());
}
/// Finds the key \p key
template <typename K>
guarded_ptr get( K const& key )
{
- guarded_ptr gp;
- base_class::get_( gp.guard(), key );
- return gp;
+ return base_class::get_( key );
}
/// Finds \p key and return the item found
guarded_ptr get_with( K const& key, Less pred )
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- base_class::get_with_( gp.guard(), key, cds::details::predicate_wrapper<value_type, Less, key_accessor>());
- return gp;
+ return base_class::get_with_( key, cds::details::predicate_wrapper<value_type, Less, key_accessor>());
}
/// Clears the map (not atomic)
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_SPLIT_LIST_SET_H
template <typename Q>
guarded_ptr extract( Q const& key )
{
- guarded_ptr gp;
- extract_( gp.guard(), key );
- return gp;
+ return extract_( key );
}
/// Extracts the item using compare functor \p pred
template <typename Q, typename Less>
guarded_ptr extract_with( Q const& key, Less pred )
{
- guarded_ptr gp;
- extract_with_( gp.guard(), key, pred );
- return gp;
+ return extract_with_( key, pred );
}
/// Finds the key \p key
template <typename Q>
guarded_ptr get( Q const& key )
{
- guarded_ptr gp;
- get_( gp.guard(), key );
- return gp;
+ return get_( key );
}
/// Finds \p key and return the item found
template <typename Q, typename Less>
guarded_ptr get_with( Q const& key, Less pred )
{
- guarded_ptr gp;
- get_with_( gp.guard(), key, pred );
- return gp;
+ return get_with_( key, pred );
}
/// Clears the set (not atomic)
using base_class::get_;
template <typename Q, typename Less>
- bool extract_with_( typename guarded_ptr::native_guard& guard, Q const& key, Less pred )
+ guarded_ptr extract_with_( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- return base_class::extract_with_( guard, key, typename maker::template predicate_wrapper<Less>::type());
+ return base_class::extract_with_( key, typename maker::template predicate_wrapper<Less>::type());
}
template <typename Q, typename Less>
- bool get_with_( typename guarded_ptr::native_guard& guard, Q const& key, Less pred )
+ guarded_ptr get_with_( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- return base_class::get_with_( guard, key, typename maker::template predicate_wrapper<Less>::type());
+ return base_class::get_with_( key, typename maker::template predicate_wrapper<Less>::type());
}
//@endcond
The test projects depends on the following static library from \p boost:
- \p boost.thread
- - \p boost.date_time
+ - \p boost.system
\par Windows build
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_GC_DETAILS_DHP_H
{
return pPost.load( atomics::memory_order_acquire ) == nullptr;
}
+
+ guarded_ptr get( atomics::memory_order order = atomics::memory_order_acquire )
+ {
+ return pPost.load( order );
+ }
+
+ void set( guarded_ptr p, atomics::memory_order order = atomics::memory_order_release )
+ {
+ pPost.store( p, order );
+ }
};
/// Guard allocator
template <class Alloc = CDS_DEFAULT_ALLOCATOR>
class guard_allocator
{
- cds::details::Allocator<details::guard_data> m_GuardAllocator ; ///< guard allocator
+ cds::details::Allocator<details::guard_data> m_GuardAllocator; ///< guard allocator
atomics::atomic<guard_data *> m_GuardList; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
atomics::atomic<guard_data *> m_FreeGuardList; ///< Head of free guard list (linked by guard_data::pNextFree field)
}
/// Allocates a guard from free list or from heap if free list is empty
- guard_data * alloc()
+ guard_data* alloc()
{
// Try to pop a guard from free-list
details::guard_data * pGuard;
/**
The function places the guard \p pGuard into free-list
*/
- void free( guard_data * pGuard ) CDS_NOEXCEPT
+ void free( guard_data* pGuard ) CDS_NOEXCEPT
{
pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
// Item counter is needed only as a threshold for \p scan() function
// So, we may clear the item counter without synchronization with m_pHead
res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
-
res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );
-
return res;
}
} while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
}
};
-
- /// Uninitialized guard
- class guard
- {
- friend class dhp::ThreadGC;
- protected:
- details::guard_data * m_pGuard ; ///< Pointer to guard data
-
- public:
- /// Initialize empty guard.
- CDS_CONSTEXPR guard() CDS_NOEXCEPT
- : m_pGuard( nullptr )
- {}
-
- /// Copy-ctor is disabled
- guard( guard const& ) = delete;
-
- /// Move-ctor is disabled
- guard( guard&& ) = delete;
-
- /// Object destructor, does nothing
- ~guard() CDS_NOEXCEPT
- {}
-
- /// Get current guarded pointer
- void * get( atomics::memory_order order = atomics::memory_order_acquire ) const CDS_NOEXCEPT
- {
- assert( m_pGuard != nullptr );
- return m_pGuard->pPost.load( order );
- }
-
- /// Guards pointer \p p
- void set( void * p, atomics::memory_order order = atomics::memory_order_release ) CDS_NOEXCEPT
- {
- assert( m_pGuard != nullptr );
- m_pGuard->pPost.store( p, order );
- }
-
- /// Clears the guard
- void clear( atomics::memory_order order = atomics::memory_order_relaxed ) CDS_NOEXCEPT
- {
- assert( m_pGuard != nullptr );
- m_pGuard->pPost.store( nullptr, order );
- }
-
- /// Guards pointer \p p
- template <typename T>
- T * operator =(T * p) CDS_NOEXCEPT
- {
- set( reinterpret_cast<void *>( const_cast<T *>(p) ));
- return p;
- }
-
- std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
- {
- clear();
- return nullptr;
- }
-
- public: // for ThreadGC.
- /*
- GCC cannot compile code for template versions of ThreadGC::allocGuard/freeGuard,
- the compiler produces error: 'cds::gc::dhp::details::guard_data* cds::gc::dhp::details::guard::m_pGuard' is protected
- despite the fact that ThreadGC is declared as friend for guard class.
- Therefore, we have to add set_guard/get_guard public functions
- */
- /// Set guard data
- void set_guard( details::guard_data * pGuard ) CDS_NOEXCEPT
- {
- assert( m_pGuard == nullptr );
- m_pGuard = pGuard;
- }
-
- /// Get current guard data
- details::guard_data * get_guard() CDS_NOEXCEPT
- {
- return m_pGuard;
- }
- /// Get current guard data
- details::guard_data * get_guard() const CDS_NOEXCEPT
- {
- return m_pGuard;
- }
-
- details::guard_data * release_guard() CDS_NOEXCEPT
- {
- details::guard_data * p = m_pGuard;
- m_pGuard = nullptr;
- return p;
- }
-
- bool is_initialized() const
- {
- return m_pGuard != nullptr;
- }
- };
-
} // namespace details
- /// Guard
- /**
- This class represents auto guard: ctor allocates a guard from guard pool,
- dtor returns the guard back to the pool of free guard.
- */
- class Guard: public details::guard
- {
- typedef details::guard base_class;
- friend class ThreadGC;
- public:
- /// Allocates a guard from \p gc GC. \p gc must be ThreadGC object of current thread
- Guard(); // inline in dhp_impl.h
-
- /// Returns guard allocated back to pool of free guards
- ~Guard(); // inline in dhp_impl.h
-
- /// Guards pointer \p p
- template <typename T>
- T * operator =(T * p) CDS_NOEXCEPT
- {
- return base_class::operator =<T>( p );
- }
-
- std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
- {
- return base_class::operator =(nullptr);
- }
- };
-
- /// Array of guards
- /**
- This class represents array of auto guards: ctor allocates \p Count guards from guard pool,
- dtor returns the guards allocated back to the pool.
- */
- template <size_t Count>
- class GuardArray
- {
- details::guard m_arr[Count] ; ///< array of guard
- const static size_t c_nCapacity = Count ; ///< Array capacity (equal to \p Count template parameter)
-
- public:
- /// Rebind array for other size \p OtherCount
- template <size_t OtherCount>
- struct rebind {
- typedef GuardArray<OtherCount> other ; ///< rebinding result
- };
-
- public:
- /// Allocates array of guards from \p gc which must be the ThreadGC object of current thread
- GuardArray(); // inline in dhp_impl.h
-
- /// The object is not copy-constructible
- GuardArray( GuardArray const& ) = delete;
-
- /// The object is not move-constructible
- GuardArray( GuardArray&& ) = delete;
-
- /// Returns guards allocated back to pool
- ~GuardArray(); // inline in dh_impl.h
-
- /// Returns the capacity of array
- CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
- {
- return c_nCapacity;
- }
-
- /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
- details::guard& operator []( size_t nIndex ) CDS_NOEXCEPT
- {
- assert( nIndex < capacity() );
- return m_arr[nIndex];
- }
-
- /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
- const details::guard& operator []( size_t nIndex ) const CDS_NOEXCEPT
- {
- assert( nIndex < capacity() );
- return m_arr[nIndex];
- }
-
- /// Set the guard \p nIndex. 0 <= \p nIndex < \p Count
- template <typename T>
- void set( size_t nIndex, T * p ) CDS_NOEXCEPT
- {
- assert( nIndex < capacity() );
- m_arr[nIndex].set( p );
- }
-
- /// Clears (sets to \p nullptr) the guard \p nIndex
- void clear( size_t nIndex ) CDS_NOEXCEPT
- {
- assert( nIndex < capacity() );
- m_arr[nIndex].clear();
- }
-
- /// Clears all guards in the array
- void clearAll() CDS_NOEXCEPT
- {
- for ( size_t i = 0; i < capacity(); ++i )
- clear(i);
- }
- };
-
/// Memory manager (Garbage collector)
class CDS_EXPORT_API GarbageCollector
{
}
/// Allocates guard list for a thread.
- details::guard_data * allocGuardList( size_t nCount )
+ details::guard_data* allocGuardList( size_t nCount )
{
return m_GuardPool.allocList( nCount );
}
*/
class ThreadGC
{
- GarbageCollector& m_gc ; ///< reference to GC singleton
- details::guard_data * m_pList ; ///< Local list of guards owned by the thread
- details::guard_data * m_pFree ; ///< The list of free guard from m_pList
+ GarbageCollector& m_gc; ///< reference to GC singleton
+ details::guard_data * m_pList; ///< Local list of guards owned by the thread
+ details::guard_data * m_pFree; ///< The list of free guard from m_pList
public:
/// Default constructor
}
public:
- /// Initializes guard \p g
- void allocGuard( dhp::details::guard& g )
+ /// Allocates a new guard
+ dhp::details::guard_data* allocGuard()
{
assert( m_pList != nullptr );
- if ( !g.m_pGuard ) {
- if ( m_pFree ) {
- g.m_pGuard = m_pFree;
- m_pFree = m_pFree->pNextFree.load( atomics::memory_order_relaxed );
- }
- else {
- g.m_pGuard = m_gc.allocGuard();
- g.m_pGuard->pThreadNext = m_pList;
- m_pList = g.m_pGuard;
- }
+
+ dhp::details::guard_data* ret;
+ if ( cds_likely( m_pFree )) {
+ ret = m_pFree;
+ m_pFree = m_pFree->pNextFree.load( atomics::memory_order_relaxed );
+ }
+ else {
+ ret = m_gc.allocGuard();
+ ret->pThreadNext = m_pList;
+ m_pList = ret;
}
+ return ret;
}
/// Frees guard \p g
- void freeGuard( dhp::details::guard& g )
+ void freeGuard( dhp::details::guard_data* g )
{
assert( m_pList != nullptr );
- if ( g.m_pGuard ) {
- g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
- g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
- m_pFree = g.m_pGuard;
- g.m_pGuard = nullptr;
+ if ( cds_likely( g )) {
+ g->pPost.store( nullptr, atomics::memory_order_relaxed );
+ g->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
+ m_pFree = g;
}
}
+ /// Guard array
+ template <size_t Count>
+ using guard_array = dhp::details::guard_data* [Count];
+
/// Initializes guard array \p arr
template <size_t Count>
- void allocGuard( GuardArray<Count>& arr )
+ void allocGuard( guard_array<Count>& arr )
{
assert( m_pList != nullptr );
size_t nCount = 0;
while ( m_pFree && nCount < Count ) {
- arr[nCount].set_guard( m_pFree );
+ arr[nCount] = m_pFree;
m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
++nCount;
}
while ( nCount < Count ) {
- details::guard& g = arr[nCount++];
- g.set_guard( m_gc.allocGuard() );
- g.get_guard()->pThreadNext = m_pList;
- m_pList = g.get_guard();
+ dhp::details::guard_data*& g = arr[nCount];
+ g = m_gc.allocGuard();
+ g->pThreadNext = m_pList;
+ m_pList = g;
+ ++nCount;
}
}
/// Frees guard array \p arr
template <size_t Count>
- void freeGuard( GuardArray<Count>& arr )
+ void freeGuard( guard_array<Count>& arr )
{
assert( m_pList != nullptr );
- details::guard_data * pGuard;
- for ( size_t i = 0; i < Count - 1; ++i ) {
- pGuard = arr[i].get_guard();
- pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
- pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
+ details::guard_data* first = nullptr;
+ details::guard_data* last;
+ for ( size_t i = 0; i < Count; ++i ) {
+ details::guard_data* guard = arr[i];
+ if ( cds_likely( guard )) {
+ guard->pPost.store( nullptr, atomics::memory_order_relaxed );
+ if ( first )
+ last->pNextFree.store( guard, atomics::memory_order_relaxed );
+ else
+ first = guard;
+ last = guard;
+ }
+ }
+ if ( first ) {
+ last->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
+ m_pFree = first;
}
- pGuard = arr[Count-1].get_guard();
- pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
- pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
- m_pFree = arr[0].get_guard();
}
/// Places retired pointer \p and its deleter \p pFunc into list of retired pointer for deferred reclamation
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_GC_DETAILS_HP_H
char padding[cds::c_nCacheLineSize];
atomics::atomic<unsigned int> m_nSync; ///< dummy var to introduce synchronizes-with relationship between threads
+ char padding2[cds::c_nCacheLineSize];
/// Ctor
- hp_record( const cds::gc::hp::GarbageCollector& HzpMgr ); // inline
+ hp_record( const cds::gc::hp::GarbageCollector& HzpMgr ); // inline
~hp_record()
{}
//@endcond
};
- /// Not enough required Hazard Pointer count
+ /// Not enough Hazard Pointer
class too_many_hazard_ptr : public std::length_error
{
public:
//@cond
too_many_hazard_ptr()
- : std::length_error( "Not enough required Hazard Pointer count" )
+ : std::length_error( "Not enough Hazard Pointer" )
{}
//@endcond
};
public: // Internals for threads
/// Allocates Hazard Pointer GC record. For internal use only
- details::hp_record * alloc_hp_record();
+ details::hp_record* alloc_hp_record();
/// Free HP record. For internal use only
- void free_hp_record( details::hp_record * pRec );
+ void free_hp_record( details::hp_record* pRec );
/// The main garbage collecting function
/**
*/
class ThreadGC
{
- GarbageCollector& m_HzpManager; ///< Hazard Pointer GC singleton
- details::hp_record * m_pHzpRec; ///< Pointer to thread's HZP record
+ GarbageCollector& m_HzpManager; ///< Hazard Pointer GC singleton
+ details::hp_record* m_pHzpRec; ///< Pointer to thread's HZP record
public:
/// Default constructor
}
/// Checks if thread GC is initialized
- bool isInitialized() const { return m_pHzpRec != nullptr; }
+ bool isInitialized() const { return m_pHzpRec != nullptr; }
/// Initialization. Repeat call is available
void init()
void fini()
{
if ( m_pHzpRec ) {
- details::hp_record * pRec = m_pHzpRec;
+ details::hp_record* pRec = m_pHzpRec;
m_pHzpRec = nullptr;
m_HzpManager.free_hp_record( pRec );
}
}
/// Initializes HP guard \p guard
- details::hp_guard& allocGuard()
+ details::hp_guard* allocGuard()
{
assert( m_pHzpRec );
return m_pHzpRec->m_hzp.alloc();
}
/// Frees HP guard \p guard
- void freeGuard( details::hp_guard& guard )
+ void freeGuard( details::hp_guard* guard )
{
assert( m_pHzpRec );
m_pHzpRec->m_hzp.free( guard );
/// Initializes HP guard array \p arr
template <size_t Count>
- void allocGuard( details::hp_array<Count>& arr )
+ size_t allocGuard( details::hp_array<Count>& arr )
{
assert( m_pHzpRec );
- m_pHzpRec->m_hzp.alloc( arr );
+ return m_pHzpRec->m_hzp.alloc( arr );
}
/// Frees HP guard array \p arr
template <typename T>
void retirePtr( T * p, void (* pFunc)(T *) )
{
- /*
- union {
- T * p;
- hazard_pointer hp;
- } cast_ptr;
- cast_ptr.p = p;
-
- union{
- void( *pFunc )(T *);
- free_retired_ptr_func hpFunc;
- } cast_func;
- cast_func.pFunc = pFunc;
-
- retirePtr( details::retired_ptr( cast_ptr.hp, cast_func.hpFunc ) );
- */
retirePtr( details::retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc )));
}
}
};
- /// Auto hp_guard.
- /**
- This class encapsulates Hazard Pointer guard to protect a pointer against deletion.
- It allocates one HP from thread's HP array in constructor and free the hazard pointer allocated
- in destructor.
- */
- class guard
- {
- details::hp_guard& m_hp ; ///< Hazard pointer guarded
-
- public:
- typedef details::hp_guard::hazard_ptr hazard_ptr ; ///< Hazard pointer type
-
- public:
- /// Allocates HP guard
- guard(); // inline in hp_impl.h
-
- /// Allocates HP guard from \p gc and protects the pointer \p p of type \p T
- template <typename T>
- explicit guard( T * p ); // inline in hp_impl.h
-
- /// Frees HP guard. The pointer guarded may be deleted after this.
- ~guard(); // inline in hp_impl.h
-
- /// Protects the pointer \p p against reclamation (guards the pointer).
- template <typename T>
- T * operator =( T * p )
- {
- return m_hp = p;
- }
-
- //@cond
- std::nullptr_t operator =(std::nullptr_t)
- {
- return m_hp = nullptr;
- }
- //@endcond
-
- /// Get raw guarded pointer
- hazard_ptr get() const
- {
- return m_hp;
- }
- };
-
- /// Auto-managed array of hazard pointers
- /**
- This class is wrapper around cds::gc::hp::details::hp_array class.
- \p Count is the size of HP array
- */
- template <size_t Count>
- class array : public details::hp_array<Count>
- {
- public:
- /// Rebind array for other size \p COUNT2
- template <size_t Count2>
- struct rebind {
- typedef array<Count2> other; ///< rebinding result
- };
-
- public:
- /// Allocates array of HP guard
- array(); // inline in hp_impl.h
-
- /// Frees array of HP guard
- ~array(); //inline in hp_impl.h
- };
-
} // namespace hp
}} // namespace cds::gc
//@endcond
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_GC_DETAILS_HP_ALLOC_H
#include <cds/algo/atomic.h>
#include <cds/details/allocator.h>
#include <cds/gc/details/hp_type.h>
+#include <string.h> // memset
//@cond
namespace cds {
*/
class hp_guard : protected atomics::atomic < hazard_pointer >
{
+ template <class Allocator> friend class hp_allocator;
+
public:
- typedef hazard_pointer hazard_ptr; ///< Hazard pointer type
+ typedef hazard_pointer hazard_ptr;///< Hazard pointer type
+
private:
- typedef atomics::atomic<hazard_ptr> base_class;
+ typedef atomics::atomic<hazard_ptr> atomic_hazard_ptr;
- protected:
- template <class Allocator> friend class hp_allocator;
+ atomic_hazard_ptr m_hp;
+ hp_guard* m_next; // next free guard
public:
hp_guard() CDS_NOEXCEPT
- : base_class( nullptr )
+ : m_hp( nullptr )
+ , m_next( nullptr )
{}
+
~hp_guard() CDS_NOEXCEPT
{}
return nullptr;
}
- /// Returns current value of hazard pointer
- /**
- Loading has acquire semantics
- */
- operator hazard_ptr() const CDS_NOEXCEPT
- {
- return get();
- }
-
/// Returns current value of hazard pointer
/**
Loading has acquire semantics
*/
hazard_ptr get( atomics::memory_order order = atomics::memory_order_acquire ) const CDS_NOEXCEPT
{
- return base_class::load( order );
+ return m_hp.load( order );
}
template <typename T>
void set( T * p, atomics::memory_order order = atomics::memory_order_release ) CDS_NOEXCEPT
{
- base_class::store( reinterpret_cast<hazard_ptr>(p), order );
+ m_hp.store( reinterpret_cast<hazard_ptr>(p), order );
}
/// Clears HP
void clear( atomics::memory_order order = atomics::memory_order_release ) CDS_NOEXCEPT
{
// memory order is not necessary here
- base_class::store( nullptr, order );
+ m_hp.store( nullptr, order );
}
};
Template parameter:
\li Count - capacity of array
-
*/
template <size_t Count>
class hp_array
{
+ template <class Allocator> friend class hp_allocator;
+
public:
- typedef hazard_pointer hazard_ptr_type; ///< Hazard pointer type
- typedef hp_guard atomic_hazard_ptr; ///< Element type of the array
+ typedef hazard_pointer hazard_ptr; ///< Hazard pointer type
static CDS_CONSTEXPR const size_t c_nCapacity = Count ; ///< Capacity of the array
- private:
- atomic_hazard_ptr * m_arr ; ///< Hazard pointer array of size = \p Count
- template <class Allocator> friend class hp_allocator;
-
public:
/// Constructs uninitialized array.
hp_array() CDS_NOEXCEPT
- {}
+ {
+ memset( m_arr, 0, sizeof( m_arr ));
+ }
/// Destructs object
~hp_array() CDS_NOEXCEPT
}
/// Set hazard pointer \p nIndex. 0 <= \p nIndex < \p Count
- void set( size_t nIndex, hazard_ptr_type hzPtr ) CDS_NOEXCEPT
+ void set( size_t nIndex, hazard_ptr hptr ) CDS_NOEXCEPT
{
- assert( nIndex < capacity() );
- m_arr[nIndex] = hzPtr;
+ assert( nIndex < capacity());
+ assert( m_arr[nIndex] != nullptr );
+
+ *m_arr[nIndex] = hptr;
}
- /// Returns reference to hazard pointer of index \p nIndex (0 <= \p nIndex < \p Count)
- atomic_hazard_ptr& operator []( size_t nIndex ) CDS_NOEXCEPT
+ /// Returns pointer to hazard pointer of index \p nIndex (0 <= \p nIndex < \p Count)
+ hp_guard* operator []( size_t nIndex ) CDS_NOEXCEPT
{
- assert( nIndex < capacity() );
+ assert( nIndex < capacity());
return m_arr[nIndex];
}
- /// Returns reference to hazard pointer of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
- atomic_hazard_ptr& operator []( size_t nIndex ) const CDS_NOEXCEPT
+ /// Returns pointer to hazard pointer of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
+ hp_guard* operator []( size_t nIndex ) const CDS_NOEXCEPT
{
- assert( nIndex < capacity() );
+ assert( nIndex < capacity());
return m_arr[nIndex];
}
/// Clears (sets to \p nullptr) hazard pointer \p nIndex
void clear( size_t nIndex ) CDS_NOEXCEPT
+ {
+ assert( nIndex < capacity());
+ assert( m_arr[nIndex] != nullptr );
+
+ m_arr[ nIndex ]->clear();
+ }
+
+ hp_guard* release( size_t nIndex ) CDS_NOEXCEPT
{
assert( nIndex < capacity() );
- m_arr[ nIndex ].clear();
+
+ hp_guard* p = m_arr[ nIndex ];
+ m_arr[ nIndex ] = nullptr;
+ return p;
}
+
+
+ private:
+ hp_guard* m_arr[c_nCapacity]; ///< Hazard pointer array of size = \p Count
};
/// Allocator of hazard pointers for the thread
class hp_allocator
{
public:
- typedef hazard_pointer hazard_ptr_type; ///< type of hazard pointer
- typedef hp_guard atomic_hazard_ptr; ///< Atomic hazard pointer type
- typedef Allocator allocator_type; ///< allocator type
+ typedef hazard_pointer hazard_ptr; ///< type of hazard pointer
+ typedef Allocator allocator_type; ///< allocator type
private:
- typedef cds::details::Allocator< atomic_hazard_ptr, allocator_type > allocator_impl;
+ typedef cds::details::Allocator< hp_guard, allocator_type > allocator_impl;
- atomic_hazard_ptr * m_arrHazardPtr ; ///< Array of hazard pointers
- size_t m_nTop ; ///< The top of stack
- const size_t m_nCapacity ; ///< Array capacity
+ hp_guard* m_arrHazardPtr; ///< Array of hazard pointers
+ hp_guard* m_FreeListHead; ///< List of free hp guards
+ size_t const m_nCapacity; ///< Array capacity
public:
/// Default ctor
explicit hp_allocator(
- size_t nCapacity ///< max count of hazard pointer per thread
- )
- : m_arrHazardPtr( alloc_array( nCapacity ) )
- , m_nCapacity( nCapacity )
+ size_t nCapacity ///< max count of hazard pointer per thread
+ )
+ : m_arrHazardPtr( alloc_array( nCapacity ))
+ , m_FreeListHead( m_arrHazardPtr )
+ , m_nCapacity( nCapacity )
{
- make_free();
+ build_free_list();
}
/// Dtor
}
/// Checks if all items are allocated
- bool isFull() const CDS_NOEXCEPT
+ bool full() const CDS_NOEXCEPT
{
- return m_nTop == 0;
+ return m_FreeListHead == nullptr;
}
/// Allocates hazard pointer
- atomic_hazard_ptr& alloc()
+ hp_guard* alloc()
{
- assert( m_nTop > 0 );
- --m_nTop;
- return m_arrHazardPtr[m_nTop];
+ assert( !full());
+
+ hp_guard* p = m_FreeListHead;
+ m_FreeListHead = m_FreeListHead->m_next;
+ return p;
}
/// Frees previously allocated hazard pointer
- void free( atomic_hazard_ptr& hp ) CDS_NOEXCEPT
+ void free( hp_guard* hp ) CDS_NOEXCEPT
{
- assert( m_nTop < capacity() );
- hp.clear();
- ++m_nTop;
+ if ( hp ) {
+ hp->clear();
+ hp->m_next = m_FreeListHead;
+ m_FreeListHead = hp;
+ }
}
/// Allocates hazard pointers array
/**
Allocates \p Count hazard pointers from array \p m_arrHazardPtr
- Returns initialized object \p arr
+ Initializes \p arr with hazard pointers.
+
+ @return actual size of allocated array.
*/
template <size_t Count>
- void alloc( hp_array<Count>& arr )
+ size_t alloc( hp_array<Count>& arr )
{
- assert( m_nTop >= Count );
- m_nTop -= Count;
- arr.m_arr = m_arrHazardPtr + m_nTop;
+ size_t i;
+ hp_guard* p = m_FreeListHead;
+ for ( i = 0; i < Count && p; ++i ) {
+ arr.m_arr[i] = p;
+ p = p->m_next;
+ }
+ size_t ret = i;
+ for ( ; i < Count; ++i )
+ arr.m_arr[i] = nullptr;
+ m_FreeListHead = p;
+ return ret;
}
/// Frees hazard pointer array
template <size_t Count>
void free( hp_array<Count> const& arr ) CDS_NOEXCEPT
{
- CDS_UNUSED( arr );
-
- assert( m_nTop + Count <= capacity());
- for ( size_t i = m_nTop; i < m_nTop + Count; ++i )
- m_arrHazardPtr[i].clear();
- m_nTop += Count;
+ hp_guard* pList = m_FreeListHead;
+ for ( size_t i = 0; i < Count; ++i ) {
+ hp_guard* p = arr[i];
+ if ( p ) {
+ p->clear();
+ p->m_next = pList;
+ pList = p;
+ }
+ }
+ m_FreeListHead = pList;
}
/// Makes all HP free
void clear() CDS_NOEXCEPT
{
- make_free();
+ for ( size_t i = 0; i < capacity(); ++i )
+ m_arrHazardPtr[i].clear();
}
- /// Returns to i-th hazard pointer
- atomic_hazard_ptr& operator []( size_t i ) CDS_NOEXCEPT
+ /// Returns i-th hazard pointer
+ hp_guard& operator []( size_t i ) CDS_NOEXCEPT
{
assert( i < capacity() );
return m_arrHazardPtr[i];
}
private:
- void make_free() CDS_NOEXCEPT
+ hp_guard* alloc_array( size_t nCapacity )
{
- for ( size_t i = 0; i < capacity(); ++i )
- m_arrHazardPtr[i].clear();
- m_nTop = capacity();
+ return allocator_impl().NewArray( nCapacity );
}
- atomic_hazard_ptr * alloc_array( size_t nCapacity )
+ void build_free_list()
{
- return allocator_impl().NewArray( nCapacity );
+ hp_guard* first = m_arrHazardPtr;
+ hp_guard* last = m_arrHazardPtr + capacity();
+ hp_guard* prev = first;
+ for ( ++first; first < last; ++first ) {
+ prev->m_next = first;
+ prev = first;
+ }
+ prev->m_next = nullptr;
+ m_FreeListHead = m_arrHazardPtr;
}
};
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_GC_DETAILS_HP_TYPE_H
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_GC_HP_H
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_GC_IMPL_DHP_DECL_H
public: // for internal use only!!!
//@cond
- static void alloc_guard( cds::gc::dhp::details::guard& g ); // inline in dhp_impl.h
- static void free_guard( cds::gc::dhp::details::guard& g ); // inline in dhp_impl.h
+ static dhp::details::guard_data* alloc_guard(); // inline in dhp_impl.h
+ static void free_guard( dhp::details::guard_data* g ); // inline in dhp_impl.h
//@endcond
};
A guard is the hazard pointer.
Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer
- A \p %Guard object is not copy- and move-constructible
- and not copy- and move-assignable.
- */
- class Guard: public dhp::Guard
- {
- //@cond
- typedef dhp::Guard base_class;
- //@endcond
+ \p %Guard object is movable but not copyable.
- public: // for internal use only
- //@cond
- typedef cds::gc::dhp::details::guard native_guard;
- //@endcond
+ The guard object can be in two states:
+ - unlinked - the guard is not linked with any internal hazard pointer.
+ In this state no operation except \p link() and move assignment is supported.
+ - linked (default) - the guard allocates an internal hazard pointer and fully operable.
+ Due to performance reason the implementation does not check state of the guard in runtime.
+
+ @warning Move assignment can transfer the guard in unlinked state, use with care.
+ */
+ class Guard
+ {
public:
- // Default ctor
- Guard()
+ /// Default ctor allocates a guard (hazard pointer) from thread-private storage
+ Guard() CDS_NOEXCEPT
+ : m_guard( thread_gc::alloc_guard())
{}
- //@cond
+ /// Initializes an unlinked guard i.e. the guard contains no hazard pointer. Used for move semantics support
+ explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
+ : m_guard( nullptr )
+ {}
+
+ /// Move ctor - \p src guard becomes unlinked (transfer internal guard ownership)
+ Guard( Guard&& src ) CDS_NOEXCEPT
+ : m_guard( src.m_guard )
+ {
+ src.m_guard = nullptr;
+ }
+
+ /// Move assignment: the internal guards are swapped between \p src and \p this
+ /**
+ @warning \p src will become in unlinked state if \p this was unlinked on entry.
+ */
+ Guard& operator=( Guard&& src ) CDS_NOEXCEPT
+ {
+ std::swap( m_guard, src.m_guard );
+ return *this;
+ }
+
+ /// Copy ctor is prohibited - the guard is not copyable
Guard( Guard const& ) = delete;
- Guard( Guard&& s ) = delete;
- Guard& operator=(Guard const&) = delete;
- Guard& operator=(Guard&&) = delete;
- //@endcond
+
+ /// Copy assignment is prohibited
+ Guard& operator=( Guard const& ) = delete;
+
+ ~Guard()
+ {
+ if ( m_guard )
+ thread_gc::free_guard( m_guard );
+ }
+
+ /// Checks if the guard object linked with any internal hazard pointer
+ bool is_linked() const
+ {
+ return m_guard != nullptr;
+ }
+
+ /// Links the guard with internal hazard pointer if the guard is in unlinked state
+ void link()
+ {
+ if ( !m_guard )
+ m_guard = thread_gc::alloc_guard();
+ }
+
+ /// Unlinks the guard from internal hazard pointer; the guard becomes in unlinked state
+ void unlink()
+ {
+ if ( m_guard ) {
+ thread_gc::free_guard( m_guard );
+ m_guard = nullptr;
+ }
+ }
/// Protects a pointer of type <tt> atomic<T*> </tt>
/**
or for already guarded pointer.
*/
template <typename T>
- T * assign( T * p )
+ T* assign( T* p )
{
- return base_class::operator =(p);
+ assert( m_guard != nullptr );
+ m_guard->pPost.store( p, atomics::memory_order_release );
+ return p;
}
//@cond
std::nullptr_t assign( std::nullptr_t )
{
- return base_class::operator =(nullptr);
+ clear();
+ return nullptr;
}
//@endcond
or for already guarded pointer.
*/
template <typename T, int BITMASK>
- T * assign( cds::details::marked_ptr<T, BITMASK> p )
+ T* assign( cds::details::marked_ptr<T, BITMASK> p )
{
- return base_class::operator =( p.ptr() );
+ return assign( p.ptr() );
}
/// Copy from \p src guard to \p this guard
/// Clears value of the guard
void clear()
{
- base_class::clear();
+ assert( m_guard != nullptr );
+ m_guard->pPost.store( nullptr, atomics::memory_order_release );
}
/// Gets the value currently protected (relaxed read)
}
/// Gets native guarded pointer stored
- guarded_pointer get_native() const
+ void* get_native() const
{
- return base_class::get_guard()->pPost.load(atomics::memory_order_relaxed);
+ assert( m_guard != nullptr );
+ return m_guard->pPost.load( atomics::memory_order_acquire );
}
+
+ //@cond
+ dhp::details::guard_data* release()
+ {
+ dhp::details::guard_data* g = m_guard;
+ m_guard = nullptr;
+ return g;
+ }
+ //@endcond
+
+ private:
+ //@cond
+ dhp::details::guard_data* m_guard;
+ //@endcond
};
/// Array of Dynamic Hazard Pointer guards
and not copy- and move-assignable.
*/
template <size_t Count>
- class GuardArray: public dhp::GuardArray<Count>
+ class GuardArray
{
- //@cond
- typedef dhp::GuardArray<Count> base_class;
- //@endcond
public:
/// Rebind array for other size \p OtherCount
template <size_t OtherCount>
typedef GuardArray<OtherCount> other ; ///< rebinding result
};
+ /// Array capacity
+ static CDS_CONSTEXPR const size_t c_nCapacity = Count;
+
public:
- // Default ctor
- GuardArray()
- {}
+ /// Default ctor allocates \p Count hazard pointers
+ GuardArray(); // inline in dhp_impl.h
- //@cond
- GuardArray( GuardArray const& ) = delete;
+ /// Move ctor is prohibited
GuardArray( GuardArray&& ) = delete;
- GuardArray& operator=(GuardArray const&) = delete;
- GuardArray& operator-(GuardArray&&) = delete;
- //@endcond
+
+ /// Move assignment is prohibited
+ GuardArray& operator=( GuardArray&& ) = delete;
+
+ /// Copy ctor is prohibited
+ GuardArray( GuardArray const& ) = delete;
+
+ /// Copy assignment is prohibited
+ GuardArray& operator=( GuardArray const& ) = delete;
+
+ /// Frees allocated hazard pointers
+ ~GuardArray(); // inline in dhp_impl.h
/// Protects a pointer of type \p atomic<T*>
/**
template <typename T>
T * assign( size_t nIndex, T * p )
{
- base_class::set(nIndex, p);
+ assert( nIndex < capacity());
+ assert( m_arr[nIndex] != nullptr );
+
+ m_arr[nIndex]->pPost.store( p, atomics::memory_order_release );
return p;
}
/// Clear value of the slot \p nIndex
void clear( size_t nIndex )
{
- base_class::clear( nIndex );
+ assert( nIndex < capacity() );
+ assert( m_arr[nIndex] != nullptr );
+
+ m_arr[nIndex]->pPost.store( nullptr, atomics::memory_order_release );
}
/// Get current value of slot \p nIndex
/// Get native guarded pointer stored
guarded_pointer get_native( size_t nIndex ) const
{
- return base_class::operator[](nIndex).get_guard()->pPost.load(atomics::memory_order_relaxed);
+ assert( nIndex < capacity() );
+ assert( m_arr[nIndex] != nullptr );
+
+ return m_arr[nIndex]->pPost.load( atomics::memory_order_acquire );
}
+ //@cond
+ dhp::details::guard_data* release( size_t nIndex ) CDS_NOEXCEPT
+ {
+ assert( nIndex < capacity() );
+
+ dhp::details::guard_data* ret = m_arr[ nIndex ];
+ m_arr[nIndex] = nullptr;
+ return ret;
+ }
+ //@endcond
+
/// Capacity of the guard array
static CDS_CONSTEXPR size_t capacity()
{
return Count;
}
+
+ private:
+ //@cond
+ dhp::details::guard_data* m_arr[c_nCapacity];
+ //@endcond
};
/// Guarded pointer
return p;
}
};
+
+ template <typename GT, typename VT, typename C> friend class guarded_ptr;
//@endcond
public:
/// Functor for casting \p guarded_type to \p value_type
typedef typename std::conditional< std::is_same<Cast, void>::value, trivial_cast, Cast >::type value_cast;
- //@cond
- typedef cds::gc::dhp::details::guard native_guard;
- //@endcond
-
- private:
- //@cond
- native_guard m_guard;
- //@endcond
-
public:
/// Creates empty guarded pointer
guarded_ptr() CDS_NOEXCEPT
+ : m_guard( nullptr )
{}
//@cond
+ explicit guarded_ptr( dhp::details::guard_data* g ) CDS_NOEXCEPT
+ : m_guard( g )
+ {}
+
/// Initializes guarded pointer with \p p
explicit guarded_ptr( guarded_type * p ) CDS_NOEXCEPT
{
- alloc_guard();
- assert( m_guard.is_initialized() );
- m_guard.set( p );
+ reset( p );
}
explicit guarded_ptr( std::nullptr_t ) CDS_NOEXCEPT
+ : m_guard( nullptr )
{}
//@endcond
/// Move ctor
guarded_ptr( guarded_ptr&& gp ) CDS_NOEXCEPT
+ : m_guard( gp.m_guard )
{
- m_guard.set_guard( gp.m_guard.release_guard() );
+ gp.m_guard = nullptr;
}
+ /// Move ctor
+ template <typename GT, typename VT, typename C>
+ guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
+ : m_guard( gp.m_guard )
+ {
+ gp.m_guard = nullptr;
+ }
+
+ /// Ctor from \p Guard
+ explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
+ : m_guard( g.release() )
+ {}
+
/// The guarded pointer is not copy-constructible
guarded_ptr( guarded_ptr const& gp ) = delete;
*/
~guarded_ptr() CDS_NOEXCEPT
{
- free_guard();
+ release();
}
/// Move-assignment operator
guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
{
- free_guard();
- m_guard.set_guard( gp.m_guard.release_guard() );
+ std::swap( m_guard, gp.m_guard );
+ return *this;
+ }
+
+ /// Move-assignment from \p Guard
+ guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
+ {
+ std::swap( m_guard, g.m_guard );
return *this;
}
value_type * operator ->() const CDS_NOEXCEPT
{
assert( !empty() );
- return value_cast()( reinterpret_cast<guarded_type *>(m_guard.get()));
+ return value_cast()( reinterpret_cast<guarded_type *>(m_guard->get()));
}
/// Returns a reference to guarded value
value_type& operator *() CDS_NOEXCEPT
{
assert( !empty());
- return *value_cast()(reinterpret_cast<guarded_type *>(m_guard.get()));
+ return *value_cast()(reinterpret_cast<guarded_type *>(m_guard->get()));
}
/// Returns const reference to guarded value
value_type const& operator *() const CDS_NOEXCEPT
{
assert( !empty() );
- return *value_cast()(reinterpret_cast<guarded_type *>(m_guard.get()));
+ return *value_cast()(reinterpret_cast<guarded_type *>(m_guard->get()));
}
/// Checks if the guarded pointer is \p nullptr
bool empty() const CDS_NOEXCEPT
{
- return !m_guard.is_initialized() || m_guard.get( atomics::memory_order_relaxed ) == nullptr;
+ return m_guard == nullptr || m_guard->get( atomics::memory_order_relaxed ) == nullptr;
}
/// \p bool operator returns <tt>!empty()</tt>
//@cond
// For internal use only!!!
- native_guard& guard() CDS_NOEXCEPT
- {
- alloc_guard();
- assert( m_guard.is_initialized() );
- return m_guard;
- }
-
void reset(guarded_type * p) CDS_NOEXCEPT
{
alloc_guard();
- assert( m_guard.is_initialized() );
- m_guard.set(p);
+ assert( m_guard );
+ m_guard->set( p );
}
//@endcond
//@cond
void alloc_guard()
{
- if ( !m_guard.is_initialized() )
- thread_gc::alloc_guard( m_guard );
+ if ( !m_guard )
+ m_guard = thread_gc::alloc_guard();
}
void free_guard()
{
- if ( m_guard.is_initialized() )
+ if ( m_guard ) {
thread_gc::free_guard( m_guard );
+ m_guard = nullptr;
+ }
}
//@endcond
+
+ private:
+ //@cond
+ dhp::details::guard_data* m_guard;
+ //@endcond
};
public:
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_GC_IMPL_DHP_IMPL_H
namespace dhp {
- inline Guard::Guard()
+ static inline ThreadGC& get_thread_gc()
{
- cds::threading::getGC<DHP>().allocGuard( *this );
+ return cds::threading::getGC<DHP>();
}
- inline Guard::~Guard()
- {
- cds::threading::getGC<DHP>().freeGuard( *this );
- }
-
- template <size_t Count>
- inline GuardArray<Count>::GuardArray()
- {
- cds::threading::getGC<DHP>().allocGuard( *this );
- }
-
- template <size_t Count>
- inline GuardArray<Count>::~GuardArray()
- {
- cds::threading::getGC<DHP>().freeGuard( *this );
- }
+ //inline Guard::Guard()
+ //{
+ // cds::threading::getGC<DHP>().allocGuard( *this );
+ //}
+
+ //inline Guard::~Guard()
+ //{
+ // cds::threading::getGC<DHP>().freeGuard( *this );
+ //}
+
+ //template <size_t Count>
+ //inline GuardArray<Count>::GuardArray()
+ //{
+ // cds::threading::getGC<DHP>().allocGuard( *this );
+ //}
+
+ //template <size_t Count>
+ //inline GuardArray<Count>::~GuardArray()
+ //{
+ // cds::threading::getGC<DHP>().freeGuard( *this );
+ //}
} // namespace dhp
cds::threading::Manager::detachThread();
}
- inline /*static*/ void DHP::thread_gc::alloc_guard( cds::gc::dhp::details::guard& g )
+ inline /*static*/ dhp::details::guard_data* DHP::thread_gc::alloc_guard()
+ {
+ return dhp::get_thread_gc().allocGuard();
+ }
+ inline /*static*/ void DHP::thread_gc::free_guard( dhp::details::guard_data* g )
{
- return cds::threading::getGC<DHP>().allocGuard(g);
+ if ( g )
+ dhp::get_thread_gc().freeGuard( g );
}
- inline /*static*/ void DHP::thread_gc::free_guard( cds::gc::dhp::details::guard& g )
+
+ template <size_t Count>
+ inline DHP::GuardArray<Count>::GuardArray()
+ {
+ dhp::get_thread_gc().allocGuard( m_arr );
+ }
+
+ template <size_t Count>
+ inline DHP::GuardArray<Count>::~GuardArray()
{
- cds::threading::getGC<DHP>().freeGuard(g);
+ dhp::get_thread_gc().freeGuard( m_arr );
}
inline void DHP::scan()
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_GC_IMPL_HP_DECL_H
*/
typedef hp::ThreadGC thread_gc_impl;
+ /// Exception "Too many Hazard Pointer"
+ typedef hp::GarbageCollector::too_many_hazard_ptr too_many_hazard_ptr_exception;
+
/// Wrapper for hp::ThreadGC class
/**
@headerfile cds/gc/hp.h
public: // for internal use only!!!
//@cond
- static cds::gc::hp::details::hp_guard& alloc_guard(); // inline in hp_impl.h
- static void free_guard( cds::gc::hp::details::hp_guard& g ); // inline in hp_impl.h
+ static cds::gc::hp::details::hp_guard* alloc_guard(); // inline in hp_impl.h
+ static void free_guard( cds::gc::hp::details::hp_guard* g ); // inline in hp_impl.h
//@endcond
};
/**
@headerfile cds/gc/hp.h
- A guard is the hazard pointer.
- Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer
+ A guard is a hazard pointer.
+ Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer.
+
+ \p %Guard object is movable but not copyable.
+
+ The guard object can be in two states:
+ - unlinked - the guard is not linked with any internal hazard pointer.
+ In this state no operation except \p link() and move assignment is supported.
+ - linked (default) - the guard allocates an internal hazard pointer and fully operable.
+
+ Due to performance reason the implementation does not check state of the guard in runtime.
- A \p %Guard object is not copy- and move-constructible
- and not copy- and move-assignable.
+ @warning Move assignment can transfer the guard in unlinked state, use with care.
*/
- class Guard : public hp::guard
+ class Guard
{
- //@cond
- typedef hp::guard base_class;
- //@endcond
-
public:
- /// Default ctor
- Guard()
+ /// Default ctor allocates a guard (hazard pointer) from thread-private storage
+ /**
+ @warning Can throw \p too_many_hazard_ptr_exception if internal hazard pointer objects are exhausted.
+ */
+ Guard(); // inline in hp_impl.h
+
+ /// Initializes an unlinked guard i.e. the guard contains no hazard pointer. Used for move semantics support
+ explicit Guard( std::nullptr_t ) CDS_NOEXCEPT
+ : m_guard( nullptr )
{}
- //@cond
+ /// Move ctor - \p src guard becomes unlinked (transfer internal guard ownership)
+ Guard( Guard&& src ) CDS_NOEXCEPT
+ : m_guard( src.m_guard )
+ {
+ src.m_guard = nullptr;
+ }
+
+ /// Move assignment: the internal guards are swapped between \p src and \p this
+ /**
+ @warning \p src will become in unlinked state if \p this was unlinked on entry.
+ */
+ Guard& operator=( Guard&& src ) CDS_NOEXCEPT
+ {
+ std::swap( m_guard, src.m_guard );
+ return *this;
+ }
+
+ /// Copy ctor is prohibited - the guard is not copyable
Guard( Guard const& ) = delete;
- Guard( Guard&& s ) = delete;
- Guard& operator=(Guard const&) = delete;
- Guard& operator=(Guard&&) = delete;
- //@endcond
+
+ /// Copy assignment is prohibited
+ Guard& operator=( Guard const& ) = delete;
+
+ /// Frees the internal hazard pointer if the guard is in linked state
+ ~Guard()
+ {
+ unlink();
+ }
+
+ /// Checks if the guard object linked with any internal hazard pointer
+ bool is_linked() const
+ {
+ return m_guard != nullptr;
+ }
+
+ /// Links the guard with internal hazard pointer if the guard is in unlinked state
+ /**
+ @warning Can throw \p too_many_hazard_ptr_exception if internal hazard pointer objects are exhausted.
+ */
+ void link(); // inline in hp_impl.h
+
+ /// Unlinks the guard from internal hazard pointer; the guard becomes in unlinked state
+ void unlink(); // inline in hp_impl.h
/// Protects a pointer of type \p atomic<T*>
/**
The function tries to load \p toGuard and to store it
to the HP slot repeatedly until the guard's value equals \p toGuard
+
+ @warning The guard object should be in linked state, otherwise the result is undefined
*/
template <typename T>
T protect( atomics::atomic<T> const& toGuard )
{
+ assert( m_guard != nullptr );
+
T pCur = toGuard.load(atomics::memory_order_acquire);
T pRet;
do {
value_type * operator()( T * p );
};
\endcode
- Really, the result of <tt> f( toGuard.load() ) </tt> is assigned to the hazard pointer.
+ Actually, the result of <tt> f( toGuard.load() ) </tt> is assigned to the hazard pointer.
+
+ @warning The guard object should be in linked state, otherwise the result is undefined
*/
template <typename T, class Func>
T protect( atomics::atomic<T> const& toGuard, Func f )
{
+ assert( m_guard != nullptr );
+
T pCur = toGuard.load(atomics::memory_order_acquire);
T pRet;
do {
/**
The function equals to a simple assignment the value \p p to guard, no loop is performed.
Can be used for a pointer that cannot be changed concurrently
+
+ @warning The guard object should be in linked state, otherwise the result is undefined
*/
template <typename T>
- T * assign( T * p ); // inline in hp_impl.h
+ T * assign( T* p ); // inline in hp_impl.h
//@cond
std::nullptr_t assign( std::nullptr_t )
{
- return base_class::operator =(nullptr);
+ assert(m_guard != nullptr );
+ return *m_guard = nullptr;
}
//@endcond
- /// Copy from \p src guard to \p this guard
+ /// Copy a value guarded from \p src guard to \p this guard (valid only in linked state)
void copy( Guard const& src )
{
assign( src.get_native() );
/**
The function equals to a simple assignment of <tt>p.ptr()</tt>, no loop is performed.
Can be used for a marked pointer that cannot be changed concurrently.
+
+ @warning The guard object should be in linked state, otherwise the result is undefined
*/
template <typename T, int BITMASK>
T * assign( cds::details::marked_ptr<T, BITMASK> p )
{
- return base_class::operator =( p.ptr() );
+ return assign( p.ptr());
}
- /// Clear value of the guard
+ /// Clear value of the guard (valid only in linked state)
void clear()
{
assign( nullptr );
}
- /// Get the value currently protected
+ /// Get the value currently protected (valid only in linked state)
template <typename T>
T * get() const
{
return reinterpret_cast<T *>( get_native() );
}
- /// Get native hazard pointer stored
+ /// Get native hazard pointer stored (valid only in linked state)
guarded_pointer get_native() const
{
- return base_class::get();
+ assert( m_guard != nullptr );
+ return m_guard->get();
}
+
+ //@cond
+ hp::details::hp_guard* release()
+ {
+ hp::details::hp_guard* g = m_guard;
+ m_guard = nullptr;
+ return g;
+ }
+ //@endcond
+
+ private:
+ //@cond
+ hp::details::hp_guard* m_guard;
+ //@endcond
};
/// Array of Hazard Pointer guards
The class is intended for allocating an array of hazard pointer guards.
Template parameter \p Count defines the size of the array.
- A \p %GuardArray object is not copy- and move-constructible
- and not copy- and move-assignable.
*/
template <size_t Count>
- class GuardArray : public hp::array<Count>
+ class GuardArray
{
- //@cond
- typedef hp::array<Count> base_class;
- //@endcond
public:
/// Rebind array for other size \p Count2
template <size_t Count2>
struct rebind {
- typedef GuardArray<Count2> other ; ///< rebinding result
+ typedef GuardArray<Count2> other; ///< rebinding result
};
+ /// Array capacity
+ static CDS_CONSTEXPR const size_t c_nCapacity = Count;
+
public:
- /// Default ctor
- GuardArray()
- {}
+ /// Default ctor allocates \p Count hazard pointers
+ GuardArray(); // inline in hp_impl.h
- //@cond
- GuardArray( GuardArray const& ) = delete;
+ /// Move ctor is prohibited
GuardArray( GuardArray&& ) = delete;
- GuardArray& operator=(GuardArray const&) = delete;
- GuardArray& operator=(GuardArray&&) = delete;
- //@endcond
+
+ /// Move assignment is prohibited
+ GuardArray& operator=( GuardArray&& ) = delete;
+
+ /// Copy ctor is prohibited
+ GuardArray( GuardArray const& ) = delete;
+
+ /// Copy assignment is prohibited
+ GuardArray& operator=( GuardArray const& ) = delete;
+
+ /// Frees allocated hazard pointers
+ ~GuardArray(); // inline in hp_impl.h
/// Protects a pointer of type \p atomic<T*>
/**
template <typename T>
T protect( size_t nIndex, atomics::atomic<T> const& toGuard )
{
+ assert( nIndex < capacity());
+
T pRet;
do {
pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire) );
template <typename T, class Func>
T protect( size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
{
+ assert( nIndex < capacity() );
+
T pRet;
do {
assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire) ));
/// Clear value of the slot \p nIndex
void clear( size_t nIndex )
{
- base_class::clear( nIndex );
+ m_arr.clear( nIndex );
}
/// Get current value of slot \p nIndex
/// Get native hazard pointer stored
guarded_pointer get_native( size_t nIndex ) const
{
- return base_class::operator[](nIndex).get();
+ assert( nIndex < capacity() );
+ return m_arr[nIndex]->get();
}
+ //@cond
+ hp::details::hp_guard* release( size_t nIndex ) CDS_NOEXCEPT
+ {
+ return m_arr.release( nIndex );
+ }
+ //@endcond
+
/// Capacity of the guard array
static CDS_CONSTEXPR size_t capacity()
{
- return Count;
+ return c_nCapacity;
}
+
+ private:
+ //@cond
+ hp::details::hp_array<Count> m_arr;
+ //@endcond
};
/// Guarded pointer
return p;
}
};
+
+ template <typename GT, typename VT, typename C> friend class guarded_ptr;
//@endcond
public:
/// Functor for casting \p guarded_type to \p value_type
typedef typename std::conditional< std::is_same<Cast, void>::value, trivial_cast, Cast >::type value_cast;
- //@cond
- typedef cds::gc::hp::details::hp_guard native_guard;
- //@endcond
-
- private:
- //@cond
- native_guard * m_pGuard;
- //@endcond
-
public:
/// Creates empty guarded pointer
guarded_ptr() CDS_NOEXCEPT
: m_pGuard(nullptr)
- {
- alloc_guard();
- }
+ {}
//@cond
+ explicit guarded_ptr( hp::details::hp_guard* g ) CDS_NOEXCEPT
+ : m_pGuard( g )
+ {}
+
/// Initializes guarded pointer with \p p
- explicit guarded_ptr( guarded_type * p ) CDS_NOEXCEPT
+ explicit guarded_ptr( guarded_type* p ) CDS_NOEXCEPT
: m_pGuard( nullptr )
{
reset(p);
gp.m_pGuard = nullptr;
}
+ /// Move ctor
+ template <typename GT, typename VT, typename C>
+ guarded_ptr( guarded_ptr<GT, VT, C>&& gp ) CDS_NOEXCEPT
+ : m_pGuard( gp.m_pGuard )
+ {
+ gp.m_pGuard = nullptr;
+ }
+
+ /// Ctor from \p Guard
+ explicit guarded_ptr( Guard&& g ) CDS_NOEXCEPT
+ : m_pGuard( g.release() )
+ {}
+
/// The guarded pointer is not copy-constructible
guarded_ptr( guarded_ptr const& gp ) = delete;
/// Clears the guarded pointer
/**
- \ref release is called if guarded pointer is not \ref empty
+ \ref release() is called if guarded pointer is not \ref empty()
*/
~guarded_ptr() CDS_NOEXCEPT
{
- free_guard();
+ release();
}
/// Move-assignment operator
guarded_ptr& operator=( guarded_ptr&& gp ) CDS_NOEXCEPT
{
- // Hazard Pointer array is organized as a stack
- if ( m_pGuard && m_pGuard > gp.m_pGuard ) {
- m_pGuard->set( gp.m_pGuard->get(atomics::memory_order_relaxed) );
- gp.free_guard();
- }
- else {
- free_guard();
- m_pGuard = gp.m_pGuard;
- gp.m_pGuard = nullptr;
- }
+ std::swap( m_pGuard, gp.m_pGuard );
+ return *this;
+ }
+
+ /// Move-assignment from \p Guard
+ guarded_ptr& operator=( Guard&& g ) CDS_NOEXCEPT
+ {
+ std::swap( m_pGuard, g.m_guard );
return *this;
}
//@cond
// For internal use only!!!
- native_guard& guard() CDS_NOEXCEPT
- {
- alloc_guard();
- assert( m_pGuard );
- return *m_pGuard;
- }
-
void reset(guarded_type * p) CDS_NOEXCEPT
{
alloc_guard();
void alloc_guard()
{
if ( !m_pGuard )
- m_pGuard = &thread_gc::alloc_guard();
+ m_pGuard = thread_gc::alloc_guard();
}
void free_guard()
{
if ( m_pGuard ) {
- thread_gc::free_guard( *m_pGuard );
+ thread_gc::free_guard( m_pGuard );
m_pGuard = nullptr;
}
}
//@endcond
+
+ private:
+ //@cond
+ hp::details::hp_guard* m_pGuard;
+ //@endcond
};
public:
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_GC_IMPL_HP_IMPL_H
namespace cds { namespace gc {
namespace hp {
- inline guard::guard()
- : m_hp( cds::threading::getGC<HP>().allocGuard() )
- {}
-
- template <typename T>
- inline guard::guard( T * p )
- : m_hp( cds::threading::getGC<HP>().allocGuard() )
+ static inline ThreadGC& get_thread_gc()
{
- m_hp = p;
+ return cds::threading::getGC<HP>();
}
-
- inline guard::~guard()
- {
- cds::threading::getGC<HP>().freeGuard( m_hp );
- }
-
- template <size_t Count>
- inline array<Count>::array()
- {
- cds::threading::getGC<HP>().allocGuard( *this );
- }
-
- template <size_t Count>
- inline array<Count>::~array()
- {
- cds::threading::getGC<HP>().freeGuard( *this );
- }
-
-
-
} // namespace hp
inline HP::thread_gc::thread_gc(
cds::threading::Manager::detachThread();
}
- inline /*static*/ cds::gc::hp::details::hp_guard& HP::thread_gc::alloc_guard()
+ inline /*static*/ cds::gc::hp::details::hp_guard* HP::thread_gc::alloc_guard()
{
- return cds::threading::getGC<HP>().allocGuard();
+ return hp::get_thread_gc().allocGuard();
}
- inline /*static*/ void HP::thread_gc::free_guard( cds::gc::hp::details::hp_guard& g )
+ inline /*static*/ void HP::thread_gc::free_guard( cds::gc::hp::details::hp_guard* g )
{
- cds::threading::getGC<HP>().freeGuard( g );
+ hp::get_thread_gc().freeGuard( g );
+ }
+
+ inline HP::Guard::Guard()
+ : m_guard( hp::get_thread_gc().allocGuard())
+ {
+ if ( !m_guard )
+ throw too_many_hazard_ptr_exception();
}
template <typename T>
inline T * HP::Guard::assign( T * p )
{
- T * pp = base_class::operator =(p);
- cds::threading::getGC<HP>().sync();
+ assert( m_guard != nullptr );
+
+ T * pp = ( *m_guard = p );
+ hp::get_thread_gc().sync();
return pp;
}
+ inline void HP::Guard::link()
+ {
+ if ( !m_guard ) {
+ m_guard = hp::get_thread_gc().allocGuard();
+ if ( !m_guard )
+ throw too_many_hazard_ptr_exception();
+ }
+ }
+
+ inline void HP::Guard::unlink()
+ {
+ if ( m_guard ) {
+ hp::get_thread_gc().freeGuard( m_guard );
+ m_guard = nullptr;
+ }
+ }
+
+ template <size_t Count>
+ inline HP::GuardArray<Count>::GuardArray()
+ {
+ if ( hp::get_thread_gc().allocGuard( m_arr ) != Count )
+ throw too_many_hazard_ptr_exception();
+ }
+
+ template <size_t Count>
+ inline HP::GuardArray<Count>::~GuardArray()
+ {
+ hp::get_thread_gc().freeGuard( m_arr );
+ }
+
template <size_t Count>
template <typename T>
- inline T * HP::GuardArray<Count>::assign( size_t nIndex, T * p )
+ inline T * HP::GuardArray<Count>::assign( size_t nIndex, T* p )
{
- base_class::set(nIndex, p);
- cds::threading::getGC<HP>().sync();
+ assert( nIndex < capacity() );
+
+ m_arr.set(nIndex, p);
+ hp::get_thread_gc().sync();
return p;
}
template <class Disposer, typename T>
inline void HP::retire( T * p )
{
- cds::threading::getGC<HP>().retirePtr( p, cds::details::static_functor<Disposer, T>::call );
+ hp::get_thread_gc().retirePtr( p, cds::details::static_functor<Disposer, T>::call );
}
inline void HP::scan()
{
- cds::threading::getGC<HP>().scan();
+ hp::get_thread_gc().scan();
}
}} // namespace cds::gc
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_DETAILS_FELDMAN_HASHSET_BASE_H
//@cond
namespace details {
- template <typename HashType, typename UInt = size_t >
- using hash_splitter = cds::algo::split_bitstring< HashType, UInt >;
+ template <typename HashType >
+ using hash_splitter = cds::algo::split_bitstring< HashType >;
struct metrics {
size_t head_node_size; // power-of-two
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_ELLEN_BINTREE_DHP_H
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_ELLEN_BINTREE_HP_H
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_IMPL_ELLEN_BINTREE_H
*/
guarded_ptr extract_min()
{
- guarded_ptr gp;
- extract_min_( gp.guard() );
- return gp;
+ return extract_min_();
}
/// Extracts an item with maximal key from the tree
*/
guarded_ptr extract_max()
{
- guarded_ptr gp;
- extract_max_( gp.guard());
- return gp;
+ return extract_max_();
}
/// Extracts an item from the tree
template <typename Q>
guarded_ptr extract( Q const& key )
{
- guarded_ptr gp;
- extract_( gp.guard(), key );
- return gp;
+ return extract_( key );
}
/// Extracts an item from the tree using \p pred for searching
template <typename Q, typename Less>
guarded_ptr extract_with( Q const& key, Less pred )
{
- guarded_ptr gp;
- extract_with_( gp.guard(), key, pred );
- return gp;
+ return extract_with_( key, pred );
}
/// Checks whether the set contains \p key
template <typename Q>
guarded_ptr get( Q const& key ) const
{
- guarded_ptr gp;
- get_( gp.guard(), key );
- return gp;
+ return get_( key );
}
/// Finds \p key with predicate \p pred and returns the item found
template <typename Q, typename Less>
guarded_ptr get_with( Q const& key, Less pred ) const
{
- guarded_ptr gp;
- get_with_( gp.guard(), key, pred );
- return gp;
+ return get_with_( key, pred );
}
/// Checks if the tree is empty
return true;
}
+ template <typename Q, typename Compare>
+ guarded_ptr extract_item( Q const& key, Compare cmp )
+ {
+ update_desc * pOp = nullptr;
+ search_result res;
+ back_off bkoff;
+
+ for ( ;; ) {
+ if ( !search( res, key, cmp )) {
+ if ( pOp )
+ retire_update_desc( pOp );
+ m_Stat.onEraseFailed();
+ return guarded_ptr();
+ }
+
+ if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) {
+ if ( !pOp )
+ pOp = alloc_update_desc();
+ if ( check_delete_precondition( res ) ) {
+ typename gc::Guard guard;
+ guard.assign( pOp );
+
+ pOp->dInfo.pGrandParent = res.pGrandParent;
+ pOp->dInfo.pParent = res.pParent;
+ pOp->dInfo.pLeaf = res.pLeaf;
+ pOp->dInfo.pUpdateParent = res.updParent.ptr();
+ pOp->dInfo.bRightParent = res.bRightParent;
+ pOp->dInfo.bRightLeaf = res.bRightLeaf;
+
+ update_ptr updGP( res.updGrandParent.ptr() );
+ if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ),
+ memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
+ if ( help_delete( pOp ))
+ break;
+ pOp = nullptr;
+ }
+ }
+ }
+
+ bkoff();
+ m_Stat.onEraseRetry();
+ }
+
+ --m_ItemCounter;
+ m_Stat.onEraseSuccess();
+ return guarded_ptr( res.guards.release( search_result::Guard_Leaf ));
+ }
+
template <typename Q>
- bool extract_( typename guarded_ptr::native_guard& guard, Q const& key )
+ guarded_ptr extract_( Q const& key )
{
- return erase_( key, node_compare(),
- []( Q const&, leaf_node const& ) -> bool { return true; },
- [&guard]( value_type& found ) { guard.set( &found ); } );
+ return extract_item( key, node_compare());
}
template <typename Q, typename Less>
- bool extract_with_( typename guarded_ptr::native_guard& guard, Q const& key, Less /*pred*/ )
+ guarded_ptr extract_with_( Q const& key, Less /*pred*/ )
{
typedef ellen_bintree::details::compare<
key_type,
node_traits
> compare_functor;
- return erase_( key, compare_functor(),
- []( Q const&, leaf_node const& ) -> bool { return true; },
- [&guard]( value_type& found ) { guard.set( &found ); } );
+ return extract_item( key, compare_functor());
}
- bool extract_max_( typename guarded_ptr::native_guard& gp )
+ guarded_ptr extract_max_()
{
update_desc * pOp = nullptr;
search_result res;
if ( pOp )
retire_update_desc( pOp );
m_Stat.onExtractMaxFailed();
- return false;
+ return guarded_ptr();
}
if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) {
--m_ItemCounter;
m_Stat.onExtractMaxSuccess();
- gp.set( node_traits::to_value_ptr( res.pLeaf ));
- return true;
+ return guarded_ptr( res.guards.release( search_result::Guard_Leaf ));
}
- bool extract_min_( typename guarded_ptr::native_guard& gp )
+ guarded_ptr extract_min_()
{
update_desc * pOp = nullptr;
search_result res;
if ( pOp )
retire_update_desc( pOp );
m_Stat.onExtractMinFailed();
- return false;
+ return guarded_ptr();
}
if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) {
--m_ItemCounter;
m_Stat.onExtractMinSuccess();
- gp.set( node_traits::to_value_ptr( res.pLeaf ));
- return true;
+ return guarded_ptr( res.guards.release( search_result::Guard_Leaf ));
}
template <typename Q, typename Func>
}
template <typename Q>
- bool get_( typename guarded_ptr::native_guard& guard, Q const& val ) const
+ guarded_ptr get_( Q const& val ) const
{
- return find_( val, [&guard]( value_type& found, Q const& ) { guard.set( &found ); } );
+ search_result res;
+ if ( search( res, val, node_compare() ) ) {
+ assert( res.pLeaf );
+ m_Stat.onFindSuccess();
+ return guarded_ptr( res.guards.release( search_result::Guard_Leaf ));
+ }
+
+ m_Stat.onFindFailed();
+ return guarded_ptr();
}
template <typename Q, typename Less>
- bool get_with_( typename guarded_ptr::native_guard& guard, Q const& val, Less pred ) const
+ guarded_ptr get_with_( Q const& val, Less pred ) const
{
- return find_with_( val, pred, [&guard]( value_type& found, Q const& ) { guard.set( &found ); } );
+ typedef ellen_bintree::details::compare<
+ key_type,
+ value_type,
+ opt::details::make_comparator_from_less<Less>,
+ node_traits
+ > compare_functor;
+
+ search_result res;
+ if ( search( res, val, compare_functor() ) ) {
+ assert( res.pLeaf );
+ m_Stat.onFindSuccess();
+ return guarded_ptr( res.guards.release( search_result::Guard_Leaf ));
+ }
+
+ m_Stat.onFindFailed();
+ return guarded_ptr();
+
}
//@endcond
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_IMPL_FELDMAN_HASHSET_H
*/
guarded_ptr extract( hash_type const& hash )
{
- guarded_ptr gp;
- {
- typename gc::Guard guard;
- value_type * p = do_erase( hash, guard, []( value_type const&) -> bool {return true;} );
-
- // p is guarded by HP
- if ( p )
- gp.reset( p );
- }
- return gp;
+ typename gc::Guard guard;
+ if ( do_erase( hash, guard, []( value_type const&) -> bool {return true;} ))
+ return guarded_ptr( std::move( guard ));
+ return guarded_ptr();
}
/// Finds an item by it's \p hash
*/
guarded_ptr get( hash_type const& hash )
{
- guarded_ptr gp;
- {
- typename gc::Guard guard;
- gp.reset( search( hash, guard ));
- }
- return gp;
+ typename gc::Guard guard;
+ if ( search( hash, guard ))
+ return guarded_ptr( std::move( guard ));
+ return guarded_ptr();
}
/// Clears the set (non-atomic)
ord_list theList;
// ...
{
- ord_list::guarded_ptr gp(theList.extract( 5 ));
+ ord_list::guarded_ptr gp( theList.extract( 5 ));
if ( gp ) {
// Deal with gp
// ...
template <typename Q>
guarded_ptr extract( Q const& key )
{
- guarded_ptr gp;
- extract_at( m_pHead, gp.guard(), key, key_comparator());
- return gp;
+ return extract_at( m_pHead, key, key_comparator());
}
/// Extracts the item using compare functor \p pred
guarded_ptr extract_with( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- extract_at( m_pHead, gp.guard(), key, cds::opt::details::make_comparator_from_less<Less>());
- return gp;
+ return extract_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
}
/// Finds \p key in the list
template <typename Q>
guarded_ptr get( Q const& key ) const
{
- guarded_ptr gp;
- get_at( m_pHead, gp.guard(), key, key_comparator());
- return gp;
+ return get_at( m_pHead, key, key_comparator());
}
/// Finds the \p key and return the item found
guarded_ptr get_with( Q const& key, Less pred ) const
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- get_at( m_pHead, gp.guard(), key, cds::opt::details::make_comparator_from_less<Less>());
- return gp;
+ return get_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
}
/// Clears the list (thread safe, not atomic)
}
template <typename Q, typename Compare>
- bool extract_at( atomic_node_ptr& refHead, typename guarded_ptr::native_guard& dest, Q const& val, Compare cmp )
+ guarded_ptr extract_at( atomic_node_ptr& refHead, Q const& val, Compare cmp )
{
position pos;
back_off bkoff;
while ( search( refHead, val, pos, cmp )) {
if ( unlink_node( pos )) {
- dest.set( pos.pFound );
--m_ItemCounter;
m_Stat.onEraseSuccess();
- return true;
+ assert( pos.pFound != nullptr );
+ return guarded_ptr( std::move( pos.guard ));
}
else
bkoff();
}
m_Stat.onEraseFailed();
- return false;
+ return guarded_ptr();
}
template <typename Q, typename Compare>
}
template <typename Q, typename Compare>
- bool get_at( atomic_node_ptr const& refHead, typename guarded_ptr::native_guard& guard, Q const& val, Compare cmp ) const
+ guarded_ptr get_at( atomic_node_ptr const& refHead, Q const& val, Compare cmp ) const
{
position pos;
if ( search( refHead, val, pos, cmp )) {
- guard.set( pos.pFound );
m_Stat.onFindSuccess();
- return true;
+ return guarded_ptr( std::move( pos.guard ));
}
m_Stat.onFindFailed();
- return false;
+ return guarded_ptr();
}
//@endcond
template <typename Q>
guarded_ptr extract( Q const& key )
{
- guarded_ptr gp;
- extract_at( &m_Head, gp.guard(), key, key_comparator());
- return gp;
+ return extract_at( &m_Head, key, key_comparator());
}
/// Extracts the item from the list with comparing functor \p pred
guarded_ptr extract_with( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- extract_at( &m_Head, gp.guard(), key, cds::opt::details::make_comparator_from_less<Less>());
- return gp;
+ return extract_at( &m_Head, key, cds::opt::details::make_comparator_from_less<Less>());
}
/// Finds the key \p key
template <typename Q>
guarded_ptr get( Q const& key )
{
- guarded_ptr gp;
- get_at( &m_Head, gp.guard(), key, key_comparator());
- return gp;
+ return get_at( &m_Head, key, key_comparator());
}
/// Finds \p key and return the item found
guarded_ptr get_with( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- get_at( &m_Head, gp.guard(), key, cds::opt::details::make_comparator_from_less<Less>());
- return gp;
+ return get_at( &m_Head, key, cds::opt::details::make_comparator_from_less<Less>());
}
/// Clears the list
}
template <typename Q, typename Compare>
- bool extract_at( node_type * pHead, typename guarded_ptr::native_guard& gp, const Q& val, Compare cmp )
+ guarded_ptr extract_at( node_type * pHead, const Q& val, Compare cmp )
{
position pos;
- if ( erase_at( pHead, val, cmp, [](value_type const &){}, pos )) {
- gp.set( pos.guards.template get<value_type>(position::guard_current_item));
- return true;
- }
- return false;
+ if ( erase_at( pHead, val, cmp, [](value_type const &){}, pos ))
+ return guarded_ptr( pos.guards.release( position::guard_current_item ));
+ return guarded_ptr();
}
template <typename Q, typename Compare, typename Func>
}
template <typename Q, typename Compare>
- bool get_at( node_type * pHead, typename guarded_ptr::native_guard& gp, Q const& val, Compare cmp )
+ guarded_ptr get_at( node_type * pHead, Q const& val, Compare cmp )
{
position pos;
&& !pos.pCur->is_marked()
&& cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 )
{
- gp.set( pos.guards.template get<value_type>( position::guard_current_item ));
m_Stat.onFindSuccess();
- return true;
+ return guarded_ptr( pos.guards.release( position::guard_current_item ));
}
m_Stat.onFindFailed();
- return false;
+ return guarded_ptr();
}
//@endcond
template <typename Q>
guarded_ptr extract( Q const& key )
{
- guarded_ptr gp;
- extract_at( m_pHead, gp.guard(), key, key_comparator());
- return gp;
+ return extract_at( m_pHead, key, key_comparator());
}
/// Extracts the item using compare functor \p pred
guarded_ptr extract_with( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- extract_at( m_pHead, gp.guard(), key, cds::opt::details::make_comparator_from_less<Less>());
- return gp;
+ return extract_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
}
/// Finds \p key in the list
template <typename Q>
guarded_ptr get( Q const& key )
{
- guarded_ptr gp;
- get_at( m_pHead, gp.guard(), key, key_comparator());
- return gp;
+ return get_at( m_pHead, key, key_comparator());
}
/// Finds the \p key and return the item found
guarded_ptr get_with( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- get_at( m_pHead, gp.guard(), key, cds::opt::details::make_comparator_from_less<Less>());
- return gp;
+ return get_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
}
/// Clears the list
}
template <typename Q, typename Compare>
- bool extract_at( atomic_node_ptr& refHead, typename guarded_ptr::native_guard& dest, Q const& val, Compare cmp )
+ guarded_ptr extract_at( atomic_node_ptr& refHead, Q const& val, Compare cmp )
{
position pos;
back_off bkoff;
while ( search( refHead, val, pos, cmp )) {
if ( unlink_node( pos )) {
- dest.set( pos.guards.template get<value_type>( position::guard_current_item ));
--m_ItemCounter;
m_Stat.onEraseSuccess();
- return true;
+ return guarded_ptr( pos.guards.release( position::guard_current_item ));
}
else
bkoff();
}
m_Stat.onEraseFailed();
- return false;
+ return guarded_ptr();
}
template <typename Q, typename Compare>
}
template <typename Q, typename Compare>
- bool get_at( atomic_node_ptr& refHead, typename guarded_ptr::native_guard& guard, Q const& val, Compare cmp )
+ guarded_ptr get_at( atomic_node_ptr& refHead, Q const& val, Compare cmp )
{
position pos;
if ( search( refHead, val, pos, cmp )) {
- guard.set( pos.guards.template get<value_type>( position::guard_current_item ));
m_Stat.onFindSuccess();
- return true;
+ return guarded_ptr( pos.guards.release( position::guard_current_item ));
}
m_Stat.onFindFailed();
- return false;
+ return guarded_ptr();
}
//@endcond
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_IMPL_SKIP_LIST_H
}
template <typename Q, typename Compare>
- bool get_with_( typename guarded_ptr::native_guard& guard, Q const& val, Compare cmp )
+ guarded_ptr get_with_( Q const& val, Compare cmp )
{
- return find_with_( val, cmp, [&guard](value_type& found, Q const& ) { guard.set(&found); } );
+ guarded_ptr gp;
+ if ( find_with_( val, cmp, [&gp](value_type& found, Q const& ) { gp.reset(&found); } ))
+ return gp;
+ return guarded_ptr();
}
template <typename Q, typename Compare, typename Func>
}
template <typename Q, typename Compare>
- bool extract_( typename guarded_ptr::native_guard& guard, Q const& val, Compare cmp )
+ guarded_ptr extract_( Q const& val, Compare cmp )
{
position pos;
+ guarded_ptr gp;
for (;;) {
if ( !find_position( val, pos, cmp, false ) ) {
m_Stat.onExtractFailed();
- guard.clear();
- return false;
+ return guarded_ptr();
}
node_type * pDel = pos.pCur;
- guard.set( node_traits::to_value_ptr(pDel));
+ gp.reset( node_traits::to_value_ptr( pDel ));
assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 );
unsigned int nHeight = pDel->height();
--m_ItemCounter;
m_Stat.onRemoveNode( nHeight );
m_Stat.onExtractSuccess();
- return true;
+ return gp;
}
m_Stat.onExtractRetry();
}
}
- bool extract_min_( typename guarded_ptr::native_guard& gDel )
+ guarded_ptr extract_min_()
{
position pos;
+ guarded_ptr gp;
for (;;) {
if ( !find_min_position( pos ) ) {
// The list is empty
m_Stat.onExtractMinFailed();
- gDel.clear();
- return false;
+ return guarded_ptr();
}
node_type * pDel = pos.pCur;
unsigned int nHeight = pDel->height();
- gDel.set( node_traits::to_value_ptr(pDel) );
+ gp.reset( node_traits::to_value_ptr(pDel) );
if ( try_remove_at( pDel, pos, [](value_type const&) {} )) {
--m_ItemCounter;
m_Stat.onRemoveNode( nHeight );
m_Stat.onExtractMinSuccess();
- return true;
+ return gp;
}
m_Stat.onExtractMinRetry();
}
}
- bool extract_max_( typename guarded_ptr::native_guard& gDel )
+ guarded_ptr extract_max_()
{
position pos;
+ guarded_ptr gp;
for (;;) {
if ( !find_max_position( pos ) ) {
// The list is empty
m_Stat.onExtractMaxFailed();
- gDel.clear();
- return false;
+ return guarded_ptr();
}
node_type * pDel = pos.pCur;
unsigned int nHeight = pDel->height();
- gDel.set( node_traits::to_value_ptr(pDel) );
+ gp.reset( node_traits::to_value_ptr(pDel) );
if ( try_remove_at( pDel, pos, [](value_type const&) {} )) {
--m_ItemCounter;
m_Stat.onRemoveNode( nHeight );
m_Stat.onExtractMaxSuccess();
- return true;
+ return gp;
}
m_Stat.onExtractMaxRetry();
template <typename Q>
guarded_ptr extract( Q const& key )
{
- guarded_ptr gp;
- extract_( gp.guard(), key, key_comparator() );
- return gp;
+ return extract_( key, key_comparator() );
}
/// Extracts the item from the set with comparing functor \p pred
guarded_ptr extract_with( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- extract_( gp.guard(), key, cds::opt::details::make_comparator_from_less<Less>() );
- return gp;
+ return extract_( key, cds::opt::details::make_comparator_from_less<Less>() );
}
/// Extracts an item with minimal key from the list
*/
guarded_ptr extract_min()
{
- guarded_ptr gp;
- extract_min_( gp.guard() );
- return gp;
+ return extract_min_();
}
/// Extracts an item with maximal key from the list
*/
guarded_ptr extract_max()
{
- guarded_ptr gp;
- extract_max_( gp.guard() );
- return gp;
+ return extract_max_();
}
/// Deletes the item from the set
template <typename Q>
guarded_ptr get( Q const& key )
{
- guarded_ptr gp;
- get_with_( gp.guard(), key, key_comparator() );
- return gp;
+ return get_with_( key, key_comparator() );
}
/// Finds \p key and return the item found
guarded_ptr get_with( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- guarded_ptr gp;
- get_with_( gp.guard(), key, cds::opt::details::make_comparator_from_less<Less>() );
- return gp;
+ return get_with_( key, cds::opt::details::make_comparator_from_less<Less>() );
}
/// Returns item count in the set
*/
void clear()
{
- guarded_ptr gp;
- while ( extract_min_( gp.guard() ));
+ while ( extract_min_());
}
/// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32.
typedef typename ordered_list::value_type value_type ; ///< type of value to be stored in the set
typedef typename ordered_list::key_comparator key_comparator ; ///< key comparing functor
typedef typename ordered_list::disposer disposer ; ///< Node disposer functor
+#ifdef CDS_DOXYGEN_INVOKED
typedef typename ordered_list::stat stat ; ///< Internal statistics
+#endif
/// Hash functor for \p value_type and all its derivatives that you use
typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
>::type internal_bucket_type;
typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator;
+ //@endcond
- hash m_HashFunctor; ///< Hash functor
- size_t const m_nHashBitmask;
- internal_bucket_type* m_Buckets; ///< bucket table
- item_counter m_ItemCounter; ///< Item counter
- typename bucket_stat::stat m_Stat; ///< Internal statistics
+ public:
+ //@cond
+ typedef typename bucket_stat::stat stat;
+ //@endcond
+
+ protected:
+ //@cond
+ hash m_HashFunctor; ///< Hash functor
+ size_t const m_nHashBitmask;
+ internal_bucket_type* m_Buckets; ///< bucket table
+ item_counter m_ItemCounter; ///< Item counter
+ stat m_Stat; ///< Internal statistics
//@endcond
public:
typedef typename ordered_list::value_type value_type; ///< type of value to be stored in the set
typedef typename ordered_list::key_comparator key_comparator; ///< key comparing functor
typedef typename ordered_list::disposer disposer; ///< Node disposer functor
+#ifdef CDS_DOXYGEN_INVOKED
typedef typename ordered_list::stat stat; ///< Internal statistics
+#endif
/// Hash functor for \p value_type and all its derivatives that you use
typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
>::type internal_bucket_type;
typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator;
+ //@endcond
- hash m_HashFunctor; ///< Hash functor
- const size_t m_nHashBitmask;
- internal_bucket_type * m_Buckets; ///< bucket table
- item_counter m_ItemCounter; ///< Item counter
- typename bucket_stat::stat m_Stat; ///< Internal statistics
+ public:
+ //@cond
+ typedef typename bucket_stat::stat stat;
+ //@endcond
+
+ protected:
+ //@cond
+ hash m_HashFunctor; ///< Hash functor
+ const size_t m_nHashBitmask;
+ internal_bucket_type * m_Buckets; ///< bucket table
+ item_counter m_ItemCounter; ///< Item counter
+ stat m_Stat; ///< Internal statistics
//@endcond
protected:
typedef typename ordered_list::value_type value_type; ///< type of value stored in the list
typedef typename ordered_list::key_comparator key_comparator; ///< key comparing functor
typedef typename ordered_list::disposer disposer; ///< Node disposer functor
+#ifdef CDS_DOXYGEN_INVOKED
typedef typename ordered_list::stat stat; ///< Internal statistics
+#endif
/// Hash functor for \ref value_type and all its derivatives that you use
typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
typedef typename internal_bucket_type::exempt_ptr exempt_ptr; ///< pointer to extracted node
typedef typename internal_bucket_type::raw_ptr raw_ptr; ///< Return type of \p get() member function and its derivatives
+ //@cond
+ typedef typename bucket_stat::stat stat;
+ //@endcond
+
private:
//@cond
- hash m_HashFunctor; ///< Hash functor
- size_t const m_nHashBitmask;
- internal_bucket_type* m_Buckets; ///< bucket table
- item_counter m_ItemCounter; ///< Item counter
- typename bucket_stat::stat m_Stat; ///< Internal statistics
+ hash m_HashFunctor; ///< Hash functor
+ size_t const m_nHashBitmask;
+ internal_bucket_type* m_Buckets; ///< bucket table
+ item_counter m_ItemCounter; ///< Item counter
+ stat m_Stat; ///< Internal statistics
//@endcond
public:
}
template <typename Q, typename Compare>
- bool extract_at( dummy_node_type * pHead, typename guarded_ptr::native_guard& guard, split_list::details::search_value_type<Q> const& val, Compare cmp )
+ guarded_ptr extract_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp )
{
assert( pHead != nullptr );
bucket_head_type h(pHead);
- return base_class::extract_at( h, guard, val, cmp );
+ return base_class::extract_at( h, val, cmp );
}
template <typename Q, typename Compare, typename Func>
}
template <typename Q, typename Compare>
- bool get_at( dummy_node_type * pHead, typename guarded_ptr::native_guard& guard, split_list::details::search_value_type<Q> const& val, Compare cmp )
+ guarded_ptr get_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp )
{
assert( pHead != nullptr );
bucket_head_type h(pHead);
- return base_class::get_at( h, guard, val, cmp );
+ return base_class::get_at( h, val, cmp );
}
bool insert_aux_node( dummy_node_type * pNode )
}
template <typename Q, typename Compare>
- bool get_( typename guarded_ptr::native_guard& guard, Q const& val, Compare cmp )
+ guarded_ptr get_( Q const& val, Compare cmp )
{
size_t nHash = hash_value( val );
split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
dummy_node_type * pHead = get_bucket( nHash );
assert( pHead != nullptr );
- return m_Stat.onFind( m_List.get_at( pHead, guard, sv, cmp ));
+ guarded_ptr gp = m_List.get_at( pHead, sv, cmp );
+ m_Stat.onFind( !gp.empty() );
+ return gp;
}
template <typename Q>
- bool get_( typename guarded_ptr::native_guard& guard, Q const& key )
+ guarded_ptr get_( Q const& key )
{
- return get_( guard, key, key_comparator());
+ return get_( key, key_comparator());
}
template <typename Q, typename Less>
- bool get_with_( typename guarded_ptr::native_guard& guard, Q const& key, Less )
+ guarded_ptr get_with_( Q const& key, Less )
{
- return get_( guard, key, typename wrapped_ordered_list::template make_compare_from_less<Less>());
+ return get_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>());
}
template <typename Q, typename Compare, typename Func>
}
template <typename Q, typename Compare>
- bool extract_( typename guarded_ptr::native_guard& guard, Q const& val, Compare cmp )
+ guarded_ptr extract_( Q const& val, Compare cmp )
{
size_t nHash = hash_value( val );
split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
dummy_node_type * pHead = get_bucket( nHash );
assert( pHead != nullptr );
- if ( m_List.extract_at( pHead, guard, sv, cmp )) {
+ guarded_ptr gp = m_List.extract_at( pHead, sv, cmp );
+ if ( gp ) {
--m_ItemCounter;
m_Stat.onExtractSuccess();
- return true;
}
- m_Stat.onExtractFailed();
- return false;
+ else
+ m_Stat.onExtractFailed();
+ return gp;
}
template <typename Q>
- bool extract_( typename guarded_ptr::native_guard& guard, Q const& key )
+ guarded_ptr extract_( Q const& key )
{
- return extract_( guard, key, key_comparator());
+ return extract_( key, key_comparator());
}
template <typename Q, typename Less>
- bool extract_with_( typename guarded_ptr::native_guard& guard, Q const& key, Less )
+ guarded_ptr extract_with_( Q const& key, Less )
{
- return extract_( guard, key, typename wrapped_ordered_list::template make_compare_from_less<Less>());
+ return extract_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>());
}
//@endcond
template <typename Q>
guarded_ptr extract( Q const& key )
{
- guarded_ptr gp;
- extract_( gp.guard(), key );
- return gp;
+ return extract_( key );
}
/// Extracts the item using compare functor \p pred
template <typename Q, typename Less>
guarded_ptr extract_with( Q const& key, Less pred )
{
- guarded_ptr gp;
- extract_with_( gp.guard(), key, pred );
- return gp;
+ return extract_with_( key, pred );
}
/// Finds the key \p key
template <typename Q>
guarded_ptr get( Q const& key )
{
- guarded_ptr gp;
- get_( gp.guard(), key );
- return gp;
+ return get_( key );
}
/// Finds the key \p key and return the item found
template <typename Q, typename Less>
guarded_ptr get_with( Q const& key, Less pred )
{
- guarded_ptr gp;
- get_with_( gp.guard(), key, pred );
- return gp;
+ return get_with_( key, pred );
}
/// Returns item count in the set
..\..\..\test\stress\map\map_type_cuckoo.h = ..\..\..\test\stress\map\map_type_cuckoo.h\r
..\..\..\test\stress\map\map_type_ellen_bintree.h = ..\..\..\test\stress\map\map_type_ellen_bintree.h\r
..\..\..\test\stress\map\map_type_feldman_hashmap.h = ..\..\..\test\stress\map\map_type_feldman_hashmap.h\r
+ ..\..\..\test\stress\map\map_type_iterable_list.h = ..\..\..\test\stress\map\map_type_iterable_list.h\r
..\..\..\test\stress\map\map_type_lazy_list.h = ..\..\..\test\stress\map\map_type_lazy_list.h\r
..\..\..\test\stress\map\map_type_michael.h = ..\..\..\test\stress\map\map_type_michael.h\r
..\..\..\test\stress\map\map_type_michael_list.h = ..\..\..\test\stress\map\map_type_michael_list.h\r
EndProjectSection\r
EndProject\r
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "stress-set-iteration", "stress-set-iteration.vcxproj", "{31952FA8-A303-4A0B-94C4-ABA5A8A6DBCE}"\r
+ ProjectSection(ProjectDependencies) = postProject\r
+ {A34CED07-A442-4FA1-81C4-F8B9CD3C832B} = {A34CED07-A442-4FA1-81C4-F8B9CD3C832B}\r
+ {408FE9BC-44F0-4E6A-89FA-D6F952584239} = {408FE9BC-44F0-4E6A-89FA-D6F952584239}\r
+ EndProjectSection\r
EndProject\r
Global\r
GlobalSection(SolutionConfigurationPlatforms) = preSolution\r
<ClCompile Include="..\..\..\test\unit\map\feldman_hashset_rcu_gpt.cpp" />\r
<ClCompile Include="..\..\..\test\unit\map\feldman_hashset_rcu_shb.cpp" />\r
<ClCompile Include="..\..\..\test\unit\map\feldman_hashset_rcu_sht.cpp" />\r
+ <ClCompile Include="..\..\..\test\unit\map\michael_iterable_dhp.cpp" />\r
+ <ClCompile Include="..\..\..\test\unit\map\michael_iterable_hp.cpp" />\r
<ClCompile Include="..\..\..\test\unit\map\michael_lazy_dhp.cpp" />\r
<ClCompile Include="..\..\..\test\unit\map\michael_lazy_hp.cpp" />\r
<ClCompile Include="..\..\..\test\unit\map\michael_lazy_nogc.cpp" />\r
<ClInclude Include="..\..\..\test\unit\map\test_map_hp.h" />\r
<ClInclude Include="..\..\..\test\unit\map\test_map_nogc.h" />\r
<ClInclude Include="..\..\..\test\unit\map\test_map_rcu.h" />\r
+ <ClInclude Include="..\..\..\test\unit\map\test_michael_iterable.h" />\r
+ <ClInclude Include="..\..\..\test\unit\map\test_michael_iterable_hp.h" />\r
<ClInclude Include="..\..\..\test\unit\map\test_michael_lazy_rcu.h" />\r
<ClInclude Include="..\..\..\test\unit\map\test_michael_michael_rcu.h" />\r
<ClInclude Include="..\..\..\test\unit\map\test_skiplist_hp.h" />\r
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
<ClCompile Include="..\..\..\test\unit\map\feldman_hashset_rcu_sht.cpp">\r
<Filter>Source Files\FeldmanHashMap</Filter>\r
</ClCompile>\r
+ <ClCompile Include="..\..\..\test\unit\map\michael_iterable_hp.cpp">\r
+ <Filter>Source Files\MichaelMap</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\..\test\unit\map\michael_iterable_dhp.cpp">\r
+ <Filter>Source Files\MichaelMap</Filter>\r
+ </ClCompile>\r
</ItemGroup>\r
<ItemGroup>\r
<ClInclude Include="..\..\..\test\unit\map\test_map.h">\r
<ClInclude Include="..\..\..\test\unit\map\test_feldman_hashmap_rcu.h">\r
<Filter>Header Files</Filter>\r
</ClInclude>\r
+ <ClInclude Include="..\..\..\test\unit\map\test_michael_iterable.h">\r
+ <Filter>Header Files</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\..\test\unit\map\test_michael_iterable_hp.h">\r
+ <Filter>Header Files</Filter>\r
+ </ClInclude>\r
</ItemGroup>\r
</Project>
\ No newline at end of file
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
- <PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;CDSUNIT_USE_URCU;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;CDSUNIT_USE_URCU;_SCL_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(SolutionDir)..\..\..\test\stress\set;$(SolutionDir)..\..\..\test\stress\;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>
</ClCompile>
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
- <PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;CDSUNIT_USE_URCU;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;CDSUNIT_USE_URCU;_SCL_SECURE_NO_WARNINGS;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(SolutionDir)..\..\..\test\stress\set;$(SolutionDir)..\..\..\test\stress\;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>
</ClCompile>
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
- <PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;CDSUNIT_USE_URCU;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;CDSUNIT_USE_URCU;_SCL_SECURE_NO_WARNINGS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(SolutionDir)..\..\..\test\stress\set;$(SolutionDir)..\..\..\test\stress\;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>
</ClCompile>
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
- <PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;CDSUNIT_USE_URCU;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;CDSUNIT_USE_URCU;_SCL_SECURE_NO_WARNINGS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(SolutionDir)..\..\..\test\stress\set;$(SolutionDir)..\..\..\test\stress\;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>
</ClCompile>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
- <PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;CDSUNIT_USE_URCU;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;CDSUNIT_USE_URCU;_SCL_SECURE_NO_WARNINGS;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(SolutionDir)..\..\..\test\stress\set;$(SolutionDir)..\..\..\test\stress\;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>
</ClCompile>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
- <PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;CDSUNIT_USE_URCU;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;CDSUNIT_USE_URCU;_SCL_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(SolutionDir)..\..\..\test\stress\set;$(SolutionDir)..\..\..\test\stress\;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>
</ClCompile>
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Dynamic Hazard Pointer memory manager implementation
item_type& refBucket = bucket( node );
if ( refBucket ) {
item_type p = refBucket;
+ item_type prev = nullptr;
do {
- if ( p->m_ptr.m_p == node.m_ptr.m_p ) {
- assert( node.m_pNextFree.load( atomics::memory_order_relaxed ) == nullptr );
-
- node.m_pNextFree.store( p->m_pNextFree.load( atomics::memory_order_relaxed ), atomics::memory_order_relaxed );
- p->m_pNextFree.store( &node, atomics::memory_order_relaxed );
+ if ( p->m_ptr.m_p >= node.m_ptr.m_p ) {
+ node.m_pNext.store( p, atomics::memory_order_relaxed );
+ if ( prev )
+ prev->m_pNext.store( &node, atomics::memory_order_relaxed );
+ else
+ refBucket = &node;
return;
}
+ prev = p;
p = p->m_pNext.load(atomics::memory_order_relaxed);
} while ( p );
- node.m_pNext.store( refBucket, atomics::memory_order_relaxed );
+ assert( prev != nullptr );
+ prev->m_pNext.store( &node, atomics::memory_order_relaxed );
}
- refBucket = &node;
+ else
+ refBucket = &node;
}
- item_type erase( guard_data::guarded_ptr ptr )
+ struct erase_result
+ {
+ item_type head;
+ item_type tail;
+ size_t size;
+
+ erase_result()
+ : head( nullptr )
+ , tail( nullptr )
+ , size(0)
+ {}
+ };
+
+ erase_result erase( guard_data::guarded_ptr ptr )
{
item_type& refBucket = bucket( ptr );
item_type p = refBucket;
item_type pPrev = nullptr;
- while ( p ) {
+ erase_result ret;
+ while ( p && p->m_ptr.m_p <= ptr ) {
if ( p->m_ptr.m_p == ptr ) {
if ( pPrev )
pPrev->m_pNext.store( p->m_pNext.load(atomics::memory_order_relaxed ), atomics::memory_order_relaxed );
else
refBucket = p->m_pNext.load(atomics::memory_order_relaxed);
- p->m_pNext.store( nullptr, atomics::memory_order_relaxed );
- return p;
+
+ if ( ret.head )
+ ret.tail->m_pNext.store( p, atomics::memory_order_relaxed );
+ else
+ ret.head = p;
+ ret.tail = p;
+ ++ret.size;
}
- pPrev = p;
+ else
+ pPrev = p;
p = p->m_pNext.load( atomics::memory_order_relaxed );
}
- return nullptr;
+ if ( ret.tail )
+ ret.tail->m_pNext.store( nullptr, atomics::memory_order_relaxed );
+ return ret;
}
typedef std::pair<item_type, item_type> list_range;
for ( item_type * ppBucket = m_Buckets; ppBucket < pEndBucket; ++ppBucket ) {
item_type pBucket = *ppBucket;
if ( pBucket ) {
- if ( !ret.first )
- ret.first = pBucket;
- else
+ if ( ret.first )
pTail->m_pNextFree.store( pBucket, atomics::memory_order_relaxed );
+ else
+ ret.first = pBucket;
pTail = pBucket;
for (;;) {
pTail->m_ptr.free();
pTail->m_pNext.store( nullptr, atomics::memory_order_relaxed );
+ /*
while ( pTail->m_pNextFree.load( atomics::memory_order_relaxed )) {
pTail = pTail->m_pNextFree.load( atomics::memory_order_relaxed );
pTail->m_ptr.free();
pTail->m_pNext.store( nullptr, atomics::memory_order_relaxed );
}
+ */
if ( pNext ) {
pTail->m_pNextFree.store( pNext, atomics::memory_order_relaxed );
// Liberate cycle
- details::retired_ptr_node * pBusyFirst = nullptr;
- details::retired_ptr_node * pBusyLast = nullptr;
+ details::retired_ptr_node dummy;
+ dummy.m_pNext.store( nullptr, atomics::memory_order_relaxed );
+ details::retired_ptr_node * pBusyLast = &dummy;
size_t nBusyCount = 0;
for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_acquire) )
details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load(atomics::memory_order_acquire);
if ( valGuarded ) {
- details::retired_ptr_node * pRetired = set.erase( valGuarded );
- if ( pRetired ) {
+ auto retired = set.erase( valGuarded );
+ if ( retired.head ) {
// Retired pointer is being guarded
- // pRetired is the head of retired pointers list for which the m_ptr.m_p field is equal
- // List is linked on m_pNextFree field
+ // [retired.head, retired.tail] is the list linked by m_pNext field
- if ( pBusyLast )
- pBusyLast->m_pNext.store( pRetired, atomics::memory_order_relaxed );
- else
- pBusyFirst = pRetired;
- pBusyLast = pRetired;
- ++nBusyCount;
- details::retired_ptr_node * p = pBusyLast->m_pNextFree.load(atomics::memory_order_relaxed);
- while ( p != nullptr ) {
- pBusyLast->m_pNext.store( p, atomics::memory_order_relaxed );
- pBusyLast = p;
- ++nBusyCount;
- }
+ pBusyLast->m_pNext.store( retired.head, atomics::memory_order_relaxed );
+ pBusyLast = retired.tail;
+ nBusyCount += retired.size;
}
}
}
- // Place [pBusyList, pBusyLast] back to m_RetiredBuffer
- if ( pBusyFirst )
- m_RetiredBuffer.push_list( pBusyFirst, pBusyLast, nBusyCount );
+ // Place [dummy.m_pNext, pBusyLast] back to m_RetiredBuffer
+ if ( nBusyCount )
+ m_RetiredBuffer.push_list( dummy.m_pNext.load(atomics::memory_order_relaxed), pBusyLast, nBusyCount );
// Free all retired pointers
details::liberate_set::list_range range = set.free_all();
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSIMPL_HP_CONST_H
File: hp_const.h
Michael's Hazard Pointer reclamation schema global constants
- Gidenstam's reclamation schema global constants
Editions:
2008.03.10 Maxim.Khiszinsky Created
// Hazard Pointers reclamation schema constants
namespace hp {
// Max number of threads expected
- static const size_t c_nMaxThreadCount = 100;
+ static CDS_CONSTEXPR const size_t c_nMaxThreadCount = 100;
// Number of Hazard Pointers per thread
- static const size_t c_nHazardPointerPerThread = 8;
+ static CDS_CONSTEXPR const size_t c_nHazardPointerPerThread = 8;
} // namespace hp
} /* namespace gc */ } /* namespace cds */
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
while ( pNode ) {
for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
pRec->sync();
- void * hptr = pNode->m_hzp[i];
+ void * hptr = pNode->m_hzp[i].get();
if ( hptr )
plist.push_back( hptr );
}
if ( !pNode->m_bFree.load( atomics::memory_order_acquire ) ) {
for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
pRec->sync();
- void * hptr = pNode->m_hzp[i];
+ void * hptr = pNode->m_hzp[i].get();
if ( hptr ) {
dummyRetired.m_p = hptr;
details::retired_vector::iterator it = std::lower_bound( itRetired, itRetiredEnd, dummyRetired, cds::gc::details::retired_ptr::less );
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "map_delodd.h"
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "map_delodd.h"
--- /dev/null
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSUNIT_MAP_TYPE_ITERABLE_LIST_H
+#define CDSUNIT_MAP_TYPE_ITERABLE_LIST_H
+
+#include "map_type.h"
+
+#include <cds/container/iterable_kvlist_hp.h>
+#include <cds/container/iterable_kvlist_dhp.h>
+
+namespace map {
+
+ template <typename Key, typename Value>
+ struct iterable_list_type
+ {
+ typedef typename map_type_base<Key, Value>::key_compare compare;
+ typedef typename map_type_base<Key, Value>::key_less less;
+
+ struct traits_IterableList_cmp_stdAlloc :
+ public cc::iterable_list::make_traits<
+ co::compare< compare >
+ >::type
+ {};
+ typedef cc::IterableKVList< cds::gc::HP, Key, Value, traits_IterableList_cmp_stdAlloc > IterableList_HP_cmp_stdAlloc;
+ typedef cc::IterableKVList< cds::gc::DHP, Key, Value, traits_IterableList_cmp_stdAlloc > IterableList_DHP_cmp_stdAlloc;
+// typedef cc::IterableKVList< rcu_gpi, Key, Value, traits_IterableList_cmp_stdAlloc > IterableList_RCU_GPI_cmp_stdAlloc;
+// typedef cc::IterableKVList< rcu_gpb, Key, Value, traits_IterableList_cmp_stdAlloc > IterableList_RCU_GPB_cmp_stdAlloc;
+// typedef cc::IterableKVList< rcu_gpt, Key, Value, traits_IterableList_cmp_stdAlloc > IterableList_RCU_GPT_cmp_stdAlloc;
+//#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
+// typedef cc::IterableKVList< rcu_shb, Key, Value, traits_IterableList_cmp_stdAlloc > IterableList_RCU_SHB_cmp_stdAlloc;
+// typedef cc::IterableKVList< rcu_sht, Key, Value, traits_IterableList_cmp_stdAlloc > IterableList_RCU_SHT_cmp_stdAlloc;
+//#endif
+
+ struct traits_IterableList_cmp_stdAlloc_stat: public traits_IterableList_cmp_stdAlloc
+ {
+ typedef cc::iterable_list::stat<> stat;
+ };
+ typedef cc::IterableKVList< cds::gc::HP, Key, Value, traits_IterableList_cmp_stdAlloc_stat > IterableList_HP_cmp_stdAlloc_stat;
+ typedef cc::IterableKVList< cds::gc::DHP, Key, Value, traits_IterableList_cmp_stdAlloc_stat > IterableList_DHP_cmp_stdAlloc_stat;
+ // typedef cc::IterableKVList< rcu_gpi, Key, Value, traits_IterableList_cmp_stdAlloc_stat > IterableList_RCU_GPI_cmp_stdAlloc_stat;
+ // typedef cc::IterableKVList< rcu_gpb, Key, Value, traits_IterableList_cmp_stdAlloc_stat > IterableList_RCU_GPB_cmp_stdAlloc_stat;
+ // typedef cc::IterableKVList< rcu_gpt, Key, Value, traits_IterableList_cmp_stdAlloc_stat > IterableList_RCU_GPT_cmp_stdAlloc_stat;
+ //#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
+ // typedef cc::IterableKVList< rcu_shb, Key, Value, traits_IterableList_cmp_stdAlloc_stat > IterableList_RCU_SHB_cmp_stdAlloc_stat;
+ // typedef cc::IterableKVList< rcu_sht, Key, Value, traits_IterableList_cmp_stdAlloc_stat > IterableList_RCU_SHT_cmp_stdAlloc_stat;
+ //#endif
+
+ struct traits_IterableList_cmp_stdAlloc_seqcst :
+ public cc::iterable_list::make_traits<
+ co::compare< compare >
+ ,co::memory_model< co::v::sequential_consistent >
+ >::type
+ {};
+ typedef cc::IterableKVList< cds::gc::HP, Key, Value, traits_IterableList_cmp_stdAlloc_seqcst > IterableList_HP_cmp_stdAlloc_seqcst;
+ typedef cc::IterableKVList< cds::gc::DHP, Key, Value, traits_IterableList_cmp_stdAlloc_seqcst > IterableList_DHP_cmp_stdAlloc_seqcst;
+// typedef cc::IterableKVList< rcu_gpi, Key, Value, traits_IterableList_cmp_stdAlloc_seqcst > IterableList_RCU_GPI_cmp_stdAlloc_seqcst;
+// typedef cc::IterableKVList< rcu_gpb, Key, Value, traits_IterableList_cmp_stdAlloc_seqcst > IterableList_RCU_GPB_cmp_stdAlloc_seqcst;
+// typedef cc::IterableKVList< rcu_gpt, Key, Value, traits_IterableList_cmp_stdAlloc_seqcst > IterableList_RCU_GPT_cmp_stdAlloc_seqcst;
+//#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
+// typedef cc::IterableKVList< rcu_shb, Key, Value, traits_IterableList_cmp_stdAlloc_seqcst > IterableList_RCU_SHB_cmp_stdAlloc_seqcst;
+// typedef cc::IterableKVList< rcu_sht, Key, Value, traits_IterableList_cmp_stdAlloc_seqcst > IterableList_RCU_SHT_cmp_stdAlloc_seqcst;
+//#endif
+
+ struct traits_IterableList_cmp_michaelAlloc :
+ public cc::iterable_list::make_traits<
+ co::compare< compare >,
+ co::allocator< memory::MichaelAllocator<int> >
+ >::type
+ {};
+ typedef cc::IterableKVList< cds::gc::HP, Key, Value, traits_IterableList_cmp_michaelAlloc > IterableList_HP_cmp_michaelAlloc;
+ typedef cc::IterableKVList< cds::gc::DHP, Key, Value, traits_IterableList_cmp_michaelAlloc > IterableList_DHP_cmp_michaelAlloc;
+// typedef cc::IterableKVList< rcu_gpi, Key, Value, traits_IterableList_cmp_michaelAlloc > IterableList_RCU_GPI_cmp_michaelAlloc;
+// typedef cc::IterableKVList< rcu_gpb, Key, Value, traits_IterableList_cmp_michaelAlloc > IterableList_RCU_GPB_cmp_michaelAlloc;
+// typedef cc::IterableKVList< rcu_gpt, Key, Value, traits_IterableList_cmp_michaelAlloc > IterableList_RCU_GPT_cmp_michaelAlloc;
+//#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
+// typedef cc::IterableKVList< rcu_shb, Key, Value, traits_IterableList_cmp_michaelAlloc > IterableList_RCU_SHB_cmp_michaelAlloc;
+// typedef cc::IterableKVList< rcu_sht, Key, Value, traits_IterableList_cmp_michaelAlloc > IterableList_RCU_SHT_cmp_michaelAlloc;
+//#endif
+
+ struct traits_IterableList_less_stdAlloc :
+ public cc::iterable_list::make_traits<
+ co::less< less >
+ >::type
+ {};
+ typedef cc::IterableKVList< cds::gc::HP, Key, Value, traits_IterableList_less_stdAlloc > IterableList_HP_less_stdAlloc;
+ typedef cc::IterableKVList< cds::gc::DHP, Key, Value, traits_IterableList_less_stdAlloc > IterableList_DHP_less_stdAlloc;
+// typedef cc::IterableKVList< rcu_gpi, Key, Value, traits_IterableList_less_stdAlloc > IterableList_RCU_GPI_less_stdAlloc;
+// typedef cc::IterableKVList< rcu_gpb, Key, Value, traits_IterableList_less_stdAlloc > IterableList_RCU_GPB_less_stdAlloc;
+// typedef cc::IterableKVList< rcu_gpt, Key, Value, traits_IterableList_less_stdAlloc > IterableList_RCU_GPT_less_stdAlloc;
+//#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
+// typedef cc::IterableKVList< rcu_shb, Key, Value, traits_IterableList_less_stdAlloc > IterableList_RCU_SHB_less_stdAlloc;
+// typedef cc::IterableKVList< rcu_sht, Key, Value, traits_IterableList_less_stdAlloc > IterableList_RCU_SHT_less_stdAlloc;
+//#endif
+
+ struct traits_IterableList_less_stdAlloc_stat: public traits_IterableList_less_stdAlloc
+ {
+ typedef cc::iterable_list::stat<> stat;
+ };
+ typedef cc::IterableKVList< cds::gc::HP, Key, Value, traits_IterableList_less_stdAlloc_stat > IterableList_HP_less_stdAlloc_stat;
+ typedef cc::IterableKVList< cds::gc::DHP, Key, Value, traits_IterableList_less_stdAlloc_stat > IterableList_DHP_less_stdAlloc_stat;
+// typedef cc::IterableKVList< rcu_gpi, Key, Value, traits_IterableList_less_stdAlloc_stat > IterableList_RCU_GPI_less_stdAlloc_stat;
+// typedef cc::IterableKVList< rcu_gpb, Key, Value, traits_IterableList_less_stdAlloc_stat > IterableList_RCU_GPB_less_stdAlloc_stat;
+// typedef cc::IterableKVList< rcu_gpt, Key, Value, traits_IterableList_less_stdAlloc_stat > IterableList_RCU_GPT_less_stdAlloc_stat;
+//#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
+// typedef cc::IterableKVList< rcu_shb, Key, Value, traits_IterableList_less_stdAlloc_stat > IterableList_RCU_SHB_less_stdAlloc_stat;
+// typedef cc::IterableKVList< rcu_sht, Key, Value, traits_IterableList_less_stdAlloc_stat > IterableList_RCU_SHT_less_stdAlloc_stat;
+//#endif
+
+ struct traits_IterableList_less_stdAlloc_seqcst :
+ public cc::iterable_list::make_traits<
+ co::less< less >
+ ,co::memory_model< co::v::sequential_consistent >
+ >::type
+ {};
+ typedef cc::IterableKVList< cds::gc::HP, Key, Value, traits_IterableList_less_stdAlloc_seqcst > IterableList_HP_less_stdAlloc_seqcst;
+ typedef cc::IterableKVList< cds::gc::DHP, Key, Value, traits_IterableList_less_stdAlloc_seqcst > IterableList_DHP_less_stdAlloc_seqcst;
+// typedef cc::IterableKVList< rcu_gpi, Key, Value, traits_IterableList_less_stdAlloc_seqcst > IterableList_RCU_GPI_less_stdAlloc_seqcst;
+// typedef cc::IterableKVList< rcu_gpb, Key, Value, traits_IterableList_less_stdAlloc_seqcst > IterableList_RCU_GPB_less_stdAlloc_seqcst;
+// typedef cc::IterableKVList< rcu_gpt, Key, Value, traits_IterableList_less_stdAlloc_seqcst > IterableList_RCU_GPT_less_stdAlloc_seqcst;
+//#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
+// typedef cc::IterableKVList< rcu_shb, Key, Value, traits_IterableList_less_stdAlloc_seqcst > IterableList_RCU_SHB_less_stdAlloc_seqcst;
+// typedef cc::IterableKVList< rcu_sht, Key, Value, traits_IterableList_less_stdAlloc_seqcst > IterableList_RCU_SHT_less_stdAlloc_seqcst;
+//#endif
+
+ struct traits_IterableList_less_michaelAlloc :
+ public cc::iterable_list::make_traits<
+ co::less< less >,
+ co::allocator< memory::MichaelAllocator<int> >
+ >::type
+ {};
+ typedef cc::IterableKVList< cds::gc::HP, Key, Value, traits_IterableList_less_michaelAlloc > IterableList_HP_less_michaelAlloc;
+ typedef cc::IterableKVList< cds::gc::DHP, Key, Value, traits_IterableList_less_michaelAlloc > IterableList_DHP_less_michaelAlloc;
+// typedef cc::IterableKVList< rcu_gpi, Key, Value, traits_IterableList_less_michaelAlloc > IterableList_RCU_GPI_less_michaelAlloc;
+// typedef cc::IterableKVList< rcu_gpb, Key, Value, traits_IterableList_less_michaelAlloc > IterableList_RCU_GPB_less_michaelAlloc;
+// typedef cc::IterableKVList< rcu_gpt, Key, Value, traits_IterableList_less_michaelAlloc > IterableList_RCU_GPT_less_michaelAlloc;
+//#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
+// typedef cc::IterableKVList< rcu_shb, Key, Value, traits_IterableList_less_michaelAlloc > IterableList_RCU_SHB_less_michaelAlloc;
+// typedef cc::IterableKVList< rcu_sht, Key, Value, traits_IterableList_less_michaelAlloc > IterableList_RCU_SHT_less_michaelAlloc;
+//#endif
+ };
+
+} // namespace map
+
+#endif // ifndef CDSUNIT_MAP_TYPE_ITERABLE_LIST_H
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSUNIT_MAP_TYPE_LAZY_LIST_H
typedef cc::LazyKVList< rcu_sht, Key, Value, traits_LazyList_cmp_stdAlloc > LazyList_RCU_SHT_cmp_stdAlloc;
#endif
+ struct traits_LazyList_cmp_stdAlloc_stat: public traits_LazyList_cmp_stdAlloc
+ {
+ typedef cc::lazy_list::stat<> stat;
+ };
+ typedef cc::LazyKVList< cds::gc::HP, Key, Value, traits_LazyList_cmp_stdAlloc_stat > LazyList_HP_cmp_stdAlloc_stat;
+ typedef cc::LazyKVList< cds::gc::DHP, Key, Value, traits_LazyList_cmp_stdAlloc_stat > LazyList_DHP_cmp_stdAlloc_stat;
+ typedef cc::LazyKVList< cds::gc::nogc, Key, Value, traits_LazyList_cmp_stdAlloc_stat > LazyList_NOGC_cmp_stdAlloc_stat;
+ typedef cc::LazyKVList< rcu_gpi, Key, Value, traits_LazyList_cmp_stdAlloc_stat > LazyList_RCU_GPI_cmp_stdAlloc_stat;
+ typedef cc::LazyKVList< rcu_gpb, Key, Value, traits_LazyList_cmp_stdAlloc_stat > LazyList_RCU_GPB_cmp_stdAlloc_stat;
+ typedef cc::LazyKVList< rcu_gpt, Key, Value, traits_LazyList_cmp_stdAlloc_stat > LazyList_RCU_GPT_cmp_stdAlloc_stat;
+#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
+ typedef cc::LazyKVList< rcu_shb, Key, Value, traits_LazyList_cmp_stdAlloc_stat > LazyList_RCU_SHB_cmp_stdAlloc_stat;
+ typedef cc::LazyKVList< rcu_sht, Key, Value, traits_LazyList_cmp_stdAlloc_stat > LazyList_RCU_SHT_cmp_stdAlloc_stat;
+#endif
+
struct traits_LazyList_unord_stdAlloc :
public cc::lazy_list::make_traits<
co::equal_to< equal_to >
typedef cc::LazyKVList< rcu_shb, Key, Value, traits_LazyList_cmp_michaelAlloc > LazyList_RCU_SHB_cmp_michaelAlloc;
typedef cc::LazyKVList< rcu_sht, Key, Value, traits_LazyList_cmp_michaelAlloc > LazyList_RCU_SHT_cmp_michaelAlloc;
#endif
+
struct traits_LazyList_less_stdAlloc :
public cc::lazy_list::make_traits<
co::less< less >
typedef cc::LazyKVList< rcu_sht, Key, Value, traits_LazyList_less_stdAlloc > LazyList_RCU_SHT_less_stdAlloc;
#endif
+ struct traits_LazyList_less_stdAlloc_stat: public traits_LazyList_less_stdAlloc
+ {
+ typedef cc::lazy_list::stat<> stat;
+ };
+ typedef cc::LazyKVList< cds::gc::HP, Key, Value, traits_LazyList_less_stdAlloc_stat > LazyList_HP_less_stdAlloc_stat;
+ typedef cc::LazyKVList< cds::gc::DHP, Key, Value, traits_LazyList_less_stdAlloc_stat > LazyList_DHP_less_stdAlloc_stat;
+ typedef cc::LazyKVList< cds::gc::nogc, Key, Value, traits_LazyList_less_stdAlloc_stat > LazyList_NOGC_less_stdAlloc_stat;
+ typedef cc::LazyKVList< rcu_gpi, Key, Value, traits_LazyList_less_stdAlloc_stat > LazyList_RCU_GPI_less_stdAlloc_stat;
+ typedef cc::LazyKVList< rcu_gpb, Key, Value, traits_LazyList_less_stdAlloc_stat > LazyList_RCU_GPB_less_stdAlloc_stat;
+ typedef cc::LazyKVList< rcu_gpt, Key, Value, traits_LazyList_less_stdAlloc_stat > LazyList_RCU_GPT_less_stdAlloc_stat;
+#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
+ typedef cc::LazyKVList< rcu_shb, Key, Value, traits_LazyList_less_stdAlloc_stat > LazyList_RCU_SHB_less_stdAlloc_stat;
+ typedef cc::LazyKVList< rcu_sht, Key, Value, traits_LazyList_less_stdAlloc_stat > LazyList_RCU_SHT_less_stdAlloc_stat;
+#endif
+
struct traits_LazyList_less_stdAlloc_seqcst :
public cc::lazy_list::make_traits<
co::less< less >
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSUNIT_MAP_TYPE_MICHAEL_H
#include "map_type_michael_list.h"
#include "map_type_lazy_list.h"
+#include "map_type_iterable_list.h"
#include <cds/container/michael_map.h>
#include <cds/container/michael_map_rcu.h>
typedef typename base_class::equal_to equal_to;
typedef typename base_class::key_hash hash;
-
- // ***************************************************************************
- // MichaelHashMap based on MichaelKVList
- typedef michael_list_type< Key, Value > ml;
-
struct traits_MichaelMap_hash :
public cc::michael_map::make_traits<
co::hash< hash >
>::type
{};
+
+ // ***************************************************************************
+ // MichaelHashMap based on MichaelKVList
+ typedef michael_list_type< Key, Value > ml;
+
typedef MichaelHashMap< cds::gc::HP, typename ml::MichaelList_HP_cmp_stdAlloc, traits_MichaelMap_hash > MichaelMap_HP_cmp_stdAlloc;
typedef MichaelHashMap< cds::gc::DHP, typename ml::MichaelList_DHP_cmp_stdAlloc, traits_MichaelMap_hash > MichaelMap_DHP_cmp_stdAlloc;
typedef MichaelHashMap< cds::gc::nogc, typename ml::MichaelList_NOGC_cmp_stdAlloc, traits_MichaelMap_hash > MichaelMap_NOGC_cmp_stdAlloc;
typedef MichaelHashMap< rcu_sht, typename ml::MichaelList_RCU_SHT_cmp_stdAlloc, traits_MichaelMap_hash > MichaelMap_RCU_SHT_cmp_stdAlloc;
#endif
+ typedef MichaelHashMap< cds::gc::HP, typename ml::MichaelList_HP_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_HP_cmp_stdAlloc_stat;
+ typedef MichaelHashMap< cds::gc::DHP, typename ml::MichaelList_DHP_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_DHP_cmp_stdAlloc_stat;
+ typedef MichaelHashMap< cds::gc::nogc, typename ml::MichaelList_NOGC_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_NOGC_cmp_stdAlloc_stat;
+ typedef MichaelHashMap< rcu_gpi, typename ml::MichaelList_RCU_GPI_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_RCU_GPI_cmp_stdAlloc_stat;
+ typedef MichaelHashMap< rcu_gpb, typename ml::MichaelList_RCU_GPB_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_RCU_GPB_cmp_stdAlloc_stat;
+ typedef MichaelHashMap< rcu_gpt, typename ml::MichaelList_RCU_GPT_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_RCU_GPT_cmp_stdAlloc_stat;
+#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
+ typedef MichaelHashMap< rcu_shb, typename ml::MichaelList_RCU_SHB_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_RCU_SHB_cmp_stdAlloc_stat;
+ typedef MichaelHashMap< rcu_sht, typename ml::MichaelList_RCU_SHT_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_RCU_SHT_cmp_stdAlloc_stat;
+#endif
+
typedef MichaelHashMap< cds::gc::HP, typename ml::MichaelList_HP_less_stdAlloc, traits_MichaelMap_hash > MichaelMap_HP_less_stdAlloc;
typedef MichaelHashMap< cds::gc::DHP, typename ml::MichaelList_DHP_less_stdAlloc, traits_MichaelMap_hash > MichaelMap_DHP_less_stdAlloc;
typedef MichaelHashMap< cds::gc::nogc, typename ml::MichaelList_NOGC_less_stdAlloc, traits_MichaelMap_hash > MichaelMap_NOGC_less_stdAlloc;
typedef MichaelHashMap< rcu_sht, typename ml::MichaelList_RCU_SHT_less_stdAlloc, traits_MichaelMap_hash > MichaelMap_RCU_SHT_less_stdAlloc;
#endif
+ typedef MichaelHashMap< cds::gc::HP, typename ml::MichaelList_HP_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_HP_less_stdAlloc_stat;
+ typedef MichaelHashMap< cds::gc::DHP, typename ml::MichaelList_DHP_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_DHP_less_stdAlloc_stat;
+ typedef MichaelHashMap< cds::gc::nogc, typename ml::MichaelList_NOGC_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_NOGC_less_stdAlloc_stat;
+ typedef MichaelHashMap< rcu_gpi, typename ml::MichaelList_RCU_GPI_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_RCU_GPI_less_stdAlloc_stat;
+ typedef MichaelHashMap< rcu_gpb, typename ml::MichaelList_RCU_GPB_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_RCU_GPB_less_stdAlloc_stat;
+ typedef MichaelHashMap< rcu_gpt, typename ml::MichaelList_RCU_GPT_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_RCU_GPT_less_stdAlloc_stat;
+#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
+ typedef MichaelHashMap< rcu_shb, typename ml::MichaelList_RCU_SHB_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_RCU_SHB_less_stdAlloc_stat;
+ typedef MichaelHashMap< rcu_sht, typename ml::MichaelList_RCU_SHT_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_RCU_SHT_less_stdAlloc_stat;
+#endif
+
typedef MichaelHashMap< cds::gc::HP, typename ml::MichaelList_HP_cmp_stdAlloc_seqcst, traits_MichaelMap_hash > MichaelMap_HP_cmp_stdAlloc_seqcst;
typedef MichaelHashMap< cds::gc::DHP, typename ml::MichaelList_DHP_cmp_stdAlloc_seqcst, traits_MichaelMap_hash > MichaelMap_DHP_cmp_stdAlloc_seqcst;
typedef MichaelHashMap< cds::gc::nogc, typename ml::MichaelList_NOGC_cmp_stdAlloc_seqcst, traits_MichaelMap_hash > MichaelMap_NOGC_cmp_stdAlloc_seqcst;
typedef MichaelHashMap< rcu_sht, typename ll::LazyList_RCU_SHT_cmp_stdAlloc, traits_MichaelMap_hash > MichaelMap_Lazy_RCU_SHT_cmp_stdAlloc;
#endif
+ typedef MichaelHashMap< cds::gc::HP, typename ll::LazyList_HP_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Lazy_HP_cmp_stdAlloc_stat;
+ typedef MichaelHashMap< cds::gc::DHP, typename ll::LazyList_DHP_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Lazy_DHP_cmp_stdAlloc_stat;
+ typedef MichaelHashMap< cds::gc::nogc, typename ll::LazyList_NOGC_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Lazy_NOGC_cmp_stdAlloc_stat;
+ typedef MichaelHashMap< rcu_gpi, typename ll::LazyList_RCU_GPI_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Lazy_RCU_GPI_cmp_stdAlloc_stat;
+ typedef MichaelHashMap< rcu_gpb, typename ll::LazyList_RCU_GPB_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Lazy_RCU_GPB_cmp_stdAlloc_stat;
+ typedef MichaelHashMap< rcu_gpt, typename ll::LazyList_RCU_GPT_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Lazy_RCU_GPT_cmp_stdAlloc_stat;
+#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
+ typedef MichaelHashMap< rcu_shb, typename ll::LazyList_RCU_SHB_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Lazy_RCU_SHB_cmp_stdAlloc_stat;
+ typedef MichaelHashMap< rcu_sht, typename ll::LazyList_RCU_SHT_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Lazy_RCU_SHT_cmp_stdAlloc_stat;
+#endif
+
typedef MichaelHashMap< cds::gc::nogc, typename ll::LazyList_NOGC_unord_stdAlloc, traits_MichaelMap_hash > MichaelMap_Lazy_NOGC_unord_stdAlloc;
typedef MichaelHashMap< cds::gc::HP, typename ll::LazyList_HP_less_stdAlloc, traits_MichaelMap_hash > MichaelMap_Lazy_HP_less_stdAlloc;
typedef MichaelHashMap< rcu_sht, typename ll::LazyList_RCU_SHT_less_stdAlloc, traits_MichaelMap_hash > MichaelMap_Lazy_RCU_SHT_less_stdAlloc;
#endif
+ typedef MichaelHashMap< cds::gc::HP, typename ll::LazyList_HP_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Lazy_HP_less_stdAlloc_stat;
+ typedef MichaelHashMap< cds::gc::DHP, typename ll::LazyList_DHP_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Lazy_DHP_less_stdAlloc_stat;
+ typedef MichaelHashMap< cds::gc::nogc, typename ll::LazyList_NOGC_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Lazy_NOGC_less_stdAlloc_stat;
+ typedef MichaelHashMap< rcu_gpi, typename ll::LazyList_RCU_GPI_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Lazy_RCU_GPI_less_stdAlloc_stat;
+ typedef MichaelHashMap< rcu_gpb, typename ll::LazyList_RCU_GPB_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Lazy_RCU_GPB_less_stdAlloc_stat;
+ typedef MichaelHashMap< rcu_gpt, typename ll::LazyList_RCU_GPT_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Lazy_RCU_GPT_less_stdAlloc_stat;
+#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
+ typedef MichaelHashMap< rcu_shb, typename ll::LazyList_RCU_SHB_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Lazy_RCU_SHB_less_stdAlloc_stat;
+ typedef MichaelHashMap< rcu_sht, typename ll::LazyList_RCU_SHT_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Lazy_RCU_SHT_less_stdAlloc_stat;
+#endif
+
typedef MichaelHashMap< cds::gc::HP, typename ll::LazyList_HP_cmp_stdAlloc_seqcst, traits_MichaelMap_hash > MichaelMap_Lazy_HP_cmp_stdAlloc_seqcst;
typedef MichaelHashMap< cds::gc::DHP, typename ll::LazyList_DHP_cmp_stdAlloc_seqcst, traits_MichaelMap_hash > MichaelMap_Lazy_DHP_cmp_stdAlloc_seqcst;
typedef MichaelHashMap< cds::gc::nogc, typename ll::LazyList_NOGC_cmp_stdAlloc_seqcst, traits_MichaelMap_hash > MichaelMap_Lazy_NOGC_cmp_stdAlloc_seqcst;
typedef MichaelHashMap< rcu_sht, typename ll::LazyList_RCU_SHT_less_michaelAlloc, traits_MichaelSet_michaelAlloc > MichaelMap_Lazy_RCU_SHT_less_michaelAlloc;
#endif
+ // ***************************************************************************
+ // MichaelHashMap based on IterableKVList
+ typedef iterable_list_type< Key, Value > il;
+
+ typedef MichaelHashMap< cds::gc::HP, typename il::IterableList_HP_cmp_stdAlloc, traits_MichaelMap_hash > MichaelMap_Iterable_HP_cmp_stdAlloc;
+ typedef MichaelHashMap< cds::gc::DHP, typename il::IterableList_DHP_cmp_stdAlloc, traits_MichaelMap_hash > MichaelMap_Iterable_DHP_cmp_stdAlloc;
+// typedef MichaelHashMap< rcu_gpi, typename il::IterableList_RCU_GPI_cmp_stdAlloc, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_GPI_cmp_stdAlloc;
+// typedef MichaelHashMap< rcu_gpb, typename il::IterableList_RCU_GPB_cmp_stdAlloc, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_GPB_cmp_stdAlloc;
+// typedef MichaelHashMap< rcu_gpt, typename il::IterableList_RCU_GPT_cmp_stdAlloc, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_GPT_cmp_stdAlloc;
+//#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
+// typedef MichaelHashMap< rcu_shb, typename il::IterableList_RCU_SHB_cmp_stdAlloc, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_SHB_cmp_stdAlloc;
+// typedef MichaelHashMap< rcu_sht, typename il::IterableList_RCU_SHT_cmp_stdAlloc, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_SHT_cmp_stdAlloc;
+//#endif
+
+ typedef MichaelHashMap< cds::gc::HP, typename il::IterableList_HP_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Iterable_HP_cmp_stdAlloc_stat;
+ typedef MichaelHashMap< cds::gc::DHP, typename il::IterableList_DHP_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Iterable_DHP_cmp_stdAlloc_stat;
+// typedef MichaelHashMap< rcu_gpi, typename il::IterableList_RCU_GPI_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_GPI_cmp_stdAlloc_stat;
+// typedef MichaelHashMap< rcu_gpb, typename il::IterableList_RCU_GPB_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_GPB_cmp_stdAlloc_stat;
+// typedef MichaelHashMap< rcu_gpt, typename il::IterableList_RCU_GPT_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_GPT_cmp_stdAlloc_stat;
+//#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
+// typedef MichaelHashMap< rcu_shb, typename il::IterableList_RCU_SHB_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_SHB_cmp_stdAlloc_stat;
+// typedef MichaelHashMap< rcu_sht, typename il::IterableList_RCU_SHT_cmp_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_SHT_cmp_stdAlloc_stat;
+//#endif
+
+ typedef MichaelHashMap< cds::gc::HP, typename il::IterableList_HP_less_stdAlloc, traits_MichaelMap_hash > MichaelMap_Iterable_HP_less_stdAlloc;
+ typedef MichaelHashMap< cds::gc::DHP, typename il::IterableList_DHP_less_stdAlloc, traits_MichaelMap_hash > MichaelMap_Iterable_DHP_less_stdAlloc;
+// typedef MichaelHashMap< rcu_gpi, typename il::IterableList_RCU_GPI_less_stdAlloc, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_GPI_less_stdAlloc;
+// typedef MichaelHashMap< rcu_gpb, typename il::IterableList_RCU_GPB_less_stdAlloc, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_GPB_less_stdAlloc;
+// typedef MichaelHashMap< rcu_gpt, typename il::IterableList_RCU_GPT_less_stdAlloc, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_GPT_less_stdAlloc;
+//#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
+// typedef MichaelHashMap< rcu_shb, typename il::IterableList_RCU_SHB_less_stdAlloc, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_SHB_less_stdAlloc;
+// typedef MichaelHashMap< rcu_sht, typename il::IterableList_RCU_SHT_less_stdAlloc, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_SHT_less_stdAlloc;
+//#endif
+
+ typedef MichaelHashMap< cds::gc::HP, typename il::IterableList_HP_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Iterable_HP_less_stdAlloc_stat;
+ typedef MichaelHashMap< cds::gc::DHP, typename il::IterableList_DHP_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Iterable_DHP_less_stdAlloc_stat;
+// typedef MichaelHashMap< rcu_gpi, typename il::IterableList_RCU_GPI_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_GPI_less_stdAlloc_stat;
+// typedef MichaelHashMap< rcu_gpb, typename il::IterableList_RCU_GPB_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_GPB_less_stdAlloc_stat;
+// typedef MichaelHashMap< rcu_gpt, typename il::IterableList_RCU_GPT_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_GPT_less_stdAlloc_stat;
+//#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
+// typedef MichaelHashMap< rcu_shb, typename il::IterableList_RCU_SHB_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_SHB_less_stdAlloc_stat;
+// typedef MichaelHashMap< rcu_sht, typename il::IterableList_RCU_SHT_less_stdAlloc_stat, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_SHT_less_stdAlloc_stat;
+//#endif
+
+ typedef MichaelHashMap< cds::gc::HP, typename il::IterableList_HP_cmp_stdAlloc_seqcst, traits_MichaelMap_hash > MichaelMap_Iterable_HP_cmp_stdAlloc_seqcst;
+ typedef MichaelHashMap< cds::gc::DHP, typename il::IterableList_DHP_cmp_stdAlloc_seqcst, traits_MichaelMap_hash > MichaelMap_Iterable_DHP_cmp_stdAlloc_seqcst;
+// typedef MichaelHashMap< rcu_gpi, typename il::IterableList_RCU_GPI_cmp_stdAlloc_seqcst, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_GPI_cmp_stdAlloc_seqcst;
+// typedef MichaelHashMap< rcu_gpb, typename il::IterableList_RCU_GPB_cmp_stdAlloc_seqcst, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_GPB_cmp_stdAlloc_seqcst;
+// typedef MichaelHashMap< rcu_gpt, typename il::IterableList_RCU_GPT_cmp_stdAlloc_seqcst, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_GPT_cmp_stdAlloc_seqcst;
+//#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
+// typedef MichaelHashMap< rcu_shb, typename il::IterableList_RCU_SHB_cmp_stdAlloc_seqcst, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_SHB_cmp_stdAlloc_seqcst;
+// typedef MichaelHashMap< rcu_sht, typename il::IterableList_RCU_SHT_cmp_stdAlloc_seqcst, traits_MichaelMap_hash > MichaelMap_Iterable_RCU_SHT_cmp_stdAlloc_seqcst;
+//#endif
+
};
} // namespace map
#define CDSSTRESS_MichaelMap( fixture, test_case, key_type, value_type ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_HP_cmp_stdAlloc, key_type, value_type, 0 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_DHP_cmp_stdAlloc, key_type, value_type, 1 ) \
+ CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_HP_cmp_stdAlloc_stat, key_type, value_type, 1 ) \
+ CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_DHP_cmp_stdAlloc_stat, key_type, value_type, 0 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_RCU_GPI_cmp_stdAlloc, key_type, value_type, 0 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_RCU_GPB_cmp_stdAlloc, key_type, value_type, 1 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_RCU_GPT_cmp_stdAlloc, key_type, value_type, 0 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_HP_less_stdAlloc, key_type, value_type, 1 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_DHP_less_stdAlloc, key_type, value_type, 0 ) \
+ CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_HP_less_stdAlloc_stat, key_type, value_type, 0 ) \
+ CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_DHP_less_stdAlloc_stat, key_type, value_type, 1 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_RCU_GPI_less_stdAlloc, key_type, value_type, 1 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_RCU_GPB_less_stdAlloc, key_type, value_type, 0 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_RCU_GPT_less_stdAlloc, key_type, value_type, 1 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_RCU_GPI_less_michaelAlloc, key_type, value_type, 1 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_RCU_GPB_less_michaelAlloc, key_type, value_type, 0 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_RCU_GPT_less_michaelAlloc, key_type, value_type, 1 ) \
+ \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Lazy_HP_cmp_stdAlloc, key_type, value_type, 0 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Lazy_DHP_cmp_stdAlloc, key_type, value_type, 1 ) \
+ CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Lazy_HP_cmp_stdAlloc_stat, key_type, value_type, 1 ) \
+ CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Lazy_DHP_cmp_stdAlloc_stat, key_type, value_type, 0 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Lazy_RCU_GPI_cmp_stdAlloc, key_type, value_type, 0 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Lazy_RCU_GPB_cmp_stdAlloc, key_type, value_type, 1 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Lazy_RCU_GPT_cmp_stdAlloc, key_type, value_type, 0 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Lazy_HP_less_stdAlloc, key_type, value_type, 1 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Lazy_DHP_less_stdAlloc, key_type, value_type, 0 ) \
+ CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Lazy_HP_less_stdAlloc_stat, key_type, value_type, 0 ) \
+ CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Lazy_DHP_less_stdAlloc_stat, key_type, value_type, 1 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Lazy_RCU_GPI_less_stdAlloc, key_type, value_type, 1 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Lazy_RCU_GPB_less_stdAlloc, key_type, value_type, 0 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Lazy_RCU_GPT_less_stdAlloc, key_type, value_type, 1 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Lazy_RCU_GPI_less_michaelAlloc, key_type, value_type, 1 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Lazy_RCU_GPB_less_michaelAlloc, key_type, value_type, 0 ) \
CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Lazy_RCU_GPT_less_michaelAlloc, key_type, value_type, 1 ) \
+ \
+ CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Iterable_HP_cmp_stdAlloc, key_type, value_type, 0 ) \
+ CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Iterable_DHP_cmp_stdAlloc, key_type, value_type, 1 ) \
+ CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Iterable_HP_cmp_stdAlloc_stat, key_type, value_type, 1 ) \
+ CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Iterable_DHP_cmp_stdAlloc_stat, key_type, value_type, 0 ) \
+ CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Iterable_HP_less_stdAlloc, key_type, value_type, 1 ) \
+ CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Iterable_DHP_less_stdAlloc, key_type, value_type, 0 ) \
+ CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Iterable_HP_less_stdAlloc_stat, key_type, value_type, 0 ) \
+ CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Iterable_DHP_less_stdAlloc_stat, key_type, value_type, 1 ) \
+ CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Iterable_HP_cmp_stdAlloc_seqcst, key_type, value_type, 2 ) \
+ CDSSTRESS_MichaelMap_case( fixture, test_case, MichaelMap_Iterable_DHP_cmp_stdAlloc_seqcst, key_type, value_type, 2 ) \
CDSSTRESS_MichaelMap_SHRCU( fixture, test_case, key_type, value_type )
#define CDSSTRESS_MichaelMap_nogc( fixture, test_case, key_type, value_type ) \
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSUNIT_MAP_TYPE_MICHAEL_LIST_H
typedef cc::MichaelKVList< rcu_sht, Key, Value, traits_MichaelList_cmp_stdAlloc > MichaelList_RCU_SHT_cmp_stdAlloc;
#endif
+ struct traits_MichaelList_cmp_stdAlloc_stat : public traits_MichaelList_cmp_stdAlloc
+ {
+ typedef cc::michael_list::stat<> stat;
+ };
+ typedef cc::MichaelKVList< cds::gc::HP, Key, Value, traits_MichaelList_cmp_stdAlloc_stat > MichaelList_HP_cmp_stdAlloc_stat;
+ typedef cc::MichaelKVList< cds::gc::DHP, Key, Value, traits_MichaelList_cmp_stdAlloc_stat > MichaelList_DHP_cmp_stdAlloc_stat;
+ typedef cc::MichaelKVList< cds::gc::nogc, Key, Value, traits_MichaelList_cmp_stdAlloc_stat > MichaelList_NOGC_cmp_stdAlloc_stat;
+ typedef cc::MichaelKVList< rcu_gpi, Key, Value, traits_MichaelList_cmp_stdAlloc_stat > MichaelList_RCU_GPI_cmp_stdAlloc_stat;
+ typedef cc::MichaelKVList< rcu_gpb, Key, Value, traits_MichaelList_cmp_stdAlloc_stat > MichaelList_RCU_GPB_cmp_stdAlloc_stat;
+ typedef cc::MichaelKVList< rcu_gpt, Key, Value, traits_MichaelList_cmp_stdAlloc_stat > MichaelList_RCU_GPT_cmp_stdAlloc_stat;
+#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
+ typedef cc::MichaelKVList< rcu_shb, Key, Value, traits_MichaelList_cmp_stdAlloc_stat > MichaelList_RCU_SHB_cmp_stdAlloc_stat;
+ typedef cc::MichaelKVList< rcu_sht, Key, Value, traits_MichaelList_cmp_stdAlloc_stat > MichaelList_RCU_SHT_cmp_stdAlloc_stat;
+#endif
+
struct traits_MichaelList_cmp_stdAlloc_seqcst :
public cc::michael_list::make_traits<
co::compare< compare >
typedef cc::MichaelKVList< rcu_sht, Key, Value, traits_MichaelList_less_stdAlloc > MichaelList_RCU_SHT_less_stdAlloc;
#endif
+ struct traits_MichaelList_less_stdAlloc_stat: public traits_MichaelList_less_stdAlloc
+ {
+ typedef cc::michael_list::stat<> stat;
+ };
+ typedef cc::MichaelKVList< cds::gc::HP, Key, Value, traits_MichaelList_less_stdAlloc_stat > MichaelList_HP_less_stdAlloc_stat;
+ typedef cc::MichaelKVList< cds::gc::DHP, Key, Value, traits_MichaelList_less_stdAlloc_stat > MichaelList_DHP_less_stdAlloc_stat;
+ typedef cc::MichaelKVList< cds::gc::nogc, Key, Value, traits_MichaelList_less_stdAlloc_stat > MichaelList_NOGC_less_stdAlloc_stat;
+ typedef cc::MichaelKVList< rcu_gpi, Key, Value, traits_MichaelList_less_stdAlloc_stat > MichaelList_RCU_GPI_less_stdAlloc_stat;
+ typedef cc::MichaelKVList< rcu_gpb, Key, Value, traits_MichaelList_less_stdAlloc_stat > MichaelList_RCU_GPB_less_stdAlloc_stat;
+ typedef cc::MichaelKVList< rcu_gpt, Key, Value, traits_MichaelList_less_stdAlloc_stat > MichaelList_RCU_GPT_less_stdAlloc_stat;
+#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
+ typedef cc::MichaelKVList< rcu_shb, Key, Value, traits_MichaelList_less_stdAlloc_stat > MichaelList_RCU_SHB_less_stdAlloc_stat;
+ typedef cc::MichaelKVList< rcu_sht, Key, Value, traits_MichaelList_less_stdAlloc_stat > MichaelList_RCU_SHT_less_stdAlloc_stat;
+#endif
+
struct traits_MichaelList_less_stdAlloc_seqcst :
public cc::michael_list::make_traits<
co::less< less >
typename Set::iterator itEnd;
itEnd = rSet.end();
for ( it = rSet.begin(); it != itEnd; ++it ) {
+#if CDS_BUILD_BITS == 64
it->val.hash = CityHash64( it->key.c_str(), it->key.length());
+#else
+ it->val.hash = std::hash<std::string>()( it->key );
+#endif
++m_nVisitCount;
}
}
++m_nPassCount;
typename Set::rcu_lock l;
for ( auto it = rSet.begin(); it != rSet.end(); ++it ) {
+#if CDS_BUILD_BITS == 64
it->val.hash = CityHash64( it->key.c_str(), it->key.length() );
+#else
+ it->val.hash = std::hash<std::string>()(it->key);
+#endif
++m_nVisitCount;
}
}
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelIterableSet_DHP, wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelIterableSet_HP, wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelLazySet_DHP, base_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelLazySet_DHP, member_cmp )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelLazySet_DHP, member_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelLazySet_HP, base_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelLazySet_HP, member_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelLazySet_NoGC, base_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelLazySet_NoGC, member_cmp )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelLazySet_NoGC, member_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelSet_DHP, base_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelSet_DHP, member_cmp )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelSet_DHP, member_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelSet_HP, base_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelSet_HP, member_cmp )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelSet_HP, member_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelSet_NoGC, base_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelSet_NoGC, member_cmp )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelSet_NoGC, member_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( TestFixture::kSize, 2 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TYPED_TEST_P( IntrusiveMichaelLazySet, base_wrapped_stat )
set_type s( TestFixture::kSize, 2 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TYPED_TEST_P( IntrusiveMichaelLazySet, member_cmp )
set_type s( TestFixture::kSize, 2 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TYPED_TEST_P( IntrusiveMichaelLazySet, member_wrapped_stat )
set_type s( TestFixture::kSize, 2 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
// GCC 5: All test names should be written on single line, otherwise a runtime error will be encountered like as
set_type s( TestFixture::kSize, 2 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TYPED_TEST_P( IntrusiveMichaelSet, base_wrapped_stat )
set_type s( TestFixture::kSize, 2 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TYPED_TEST_P( IntrusiveMichaelSet, member_cmp )
set_type s( TestFixture::kSize, 2 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TYPED_TEST_P( IntrusiveMichaelSet, member_wrapped_stat )
set_type s( TestFixture::kSize, 2 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
// GCC 5: All test names should be written on single line, otherwise a runtime error will be encountered like as
EXPECT_EQ( i.s.nUpdateExistsCall, 1 );
}
+ // Apply retired pointer to clean links
+ List::gc::force_dispose();
+
for ( auto& i : arr ) {
EXPECT_EQ( i.s.nUpdateExistsCall, 0 );
std::pair<bool, bool> ret = l.update( i, []( value_type& i, value_type * old ) {
feldman_hashset_rcu_gpt.cpp
feldman_hashset_rcu_shb.cpp
feldman_hashset_rcu_sht.cpp
+ michael_iterable_hp.cpp
+ michael_iterable_dhp.cpp
michael_lazy_hp.cpp
michael_lazy_dhp.cpp
michael_lazy_nogc.cpp
--- /dev/null
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+// Unit tests for cds::container::MichaelHashMap whose buckets are
+// IterableKVList, running under the cds::gc::DHP garbage collector.
+// Each TEST_F instantiates the map with a different traits combination
+// and drives it through the shared test() routine from the fixture.
+#include "test_michael_iterable_hp.h"
+
+#include <cds/container/iterable_kvlist_dhp.h>
+#include <cds/container/michael_map.h>
+
+namespace {
+
+ namespace cc = cds::container;
+ typedef cds::gc::DHP gc_type;
+
+ // Fixture: constructs/destroys the DHP garbage collector around each test
+ // and attaches the current thread to the cds threading manager.
+ class MichaelIterableMap_DHP: public cds_test::michael_iterable_hp
+ {
+ protected:
+ typedef cds_test::michael_iterable_hp base_class;
+
+ void SetUp()
+ {
+ // Default-traits instantiation used only to query the map's
+ // hazard-pointer requirement for GC construction below.
+ typedef cc::IterableKVList< gc_type, key_type, value_type > list_type;
+ typedef cc::MichaelHashMap< gc_type, list_type > map_type;
+
+ cds::gc::dhp::GarbageCollector::Construct( 16, map_type::c_nHazardPtrCount );
+ cds::threading::Manager::attachThread();
+ }
+
+ void TearDown()
+ {
+ cds::threading::Manager::detachThread();
+ cds::gc::dhp::GarbageCollector::Destruct();
+ }
+ };
+
+ // List ordered by an explicit compare functor.
+ TEST_F( MichaelIterableMap_DHP, compare )
+ {
+ typedef cc::IterableKVList< gc_type, key_type, value_type,
+ typename cc::iterable_list::make_traits<
+ cds::opt::compare< cmp >
+ >::type
+ > list_type;
+
+ typedef cc::MichaelHashMap< gc_type, list_type,
+ typename cc::michael_map::make_traits<
+ cds::opt::hash< hash1 >
+ >::type
+ > map_type;
+
+ map_type m( kSize, 2 );
+ test( m );
+ }
+
+ // List ordered by a less predicate instead of a comparator.
+ TEST_F( MichaelIterableMap_DHP, less )
+ {
+ typedef cc::IterableKVList< gc_type, key_type, value_type,
+ typename cc::iterable_list::make_traits<
+ cds::opt::less< less >
+ >::type
+ > list_type;
+
+ typedef cc::MichaelHashMap< gc_type, list_type,
+ typename cc::michael_map::make_traits<
+ cds::opt::hash< hash1 >
+ >::type
+ > map_type;
+
+ map_type m( kSize, 1 );
+ test( m );
+ }
+
+ // Both a less predicate and a compare functor are supplied together.
+ TEST_F( MichaelIterableMap_DHP, cmpmix )
+ {
+ typedef cc::IterableKVList< gc_type, key_type, value_type,
+ typename cc::iterable_list::make_traits<
+ cds::opt::less< less >
+ ,cds::opt::compare< cmp >
+ >::type
+ > list_type;
+
+ typedef cc::MichaelHashMap< gc_type, list_type,
+ typename cc::michael_map::make_traits<
+ cds::opt::hash< hash1 >
+ >::type
+ > map_type;
+
+ map_type m( kSize, 2 );
+ test( m );
+ }
+
+ // Non-default back-off strategy plus an item counter in the map traits.
+ TEST_F( MichaelIterableMap_DHP, backoff )
+ {
+ struct list_traits: public cc::iterable_list::traits
+ {
+ typedef cmp compare;
+ typedef cds::backoff::exponential<cds::backoff::pause, cds::backoff::yield> back_off;
+ };
+ typedef cc::IterableKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type m( kSize, 4 );
+ test( m );
+ }
+
+ // Sequentially-consistent memory model variant of the list traits.
+ TEST_F( MichaelIterableMap_DHP, seq_cst )
+ {
+ struct list_traits: public cc::iterable_list::traits
+ {
+ typedef cmp compare;
+ typedef cds::backoff::yield back_off;
+ typedef cds::opt::v::sequential_consistent memory_model;
+ };
+ typedef cc::IterableKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type s( kSize, 8 );
+ test( s );
+ }
+
+ // Event-counting statistics enabled in the list traits. The EXPECT_GE
+ // below is a smoke check that statistics() is wired up for this traits
+ // combination; the counter comparison against 0 cannot fail.
+ TEST_F( MichaelIterableMap_DHP, stat )
+ {
+ struct list_traits: public cc::iterable_list::traits
+ {
+ typedef cmp compare;
+ typedef cds::backoff::yield back_off;
+ typedef cc::iterable_list::stat<> stat;
+ };
+ typedef cc::IterableKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type m( kSize, 8 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
+ // Same as stat, but using the wrapped_stat statistics wrapper.
+ TEST_F( MichaelIterableMap_DHP, wrapped_stat )
+ {
+ struct list_traits: public cc::iterable_list::traits
+ {
+ typedef cmp compare;
+ typedef cc::iterable_list::wrapped_stat<> stat;
+ };
+ typedef cc::IterableKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type m( kSize, 8 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
+} // namespace
+
--- /dev/null
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+// Unit tests for cds::container::MichaelHashMap whose buckets are
+// IterableKVList, running under the cds::gc::HP garbage collector.
+// Mirrors michael_iterable_dhp.cpp; only the GC type and GC setup differ.
+#include "test_michael_iterable_hp.h"
+
+#include <cds/container/iterable_kvlist_hp.h>
+#include <cds/container/michael_map.h>
+
+namespace {
+
+ namespace cc = cds::container;
+ typedef cds::gc::HP gc_type;
+
+ // Fixture: constructs/destroys the HP garbage collector around each test
+ // and attaches the current thread to the cds threading manager.
+ class MichaelIterableMap_HP: public cds_test::michael_iterable_hp
+ {
+ protected:
+ typedef cds_test::michael_iterable_hp base_class;
+
+ void SetUp()
+ {
+ // Default-traits instantiation used only to query the map's
+ // hazard-pointer requirement for GC construction below.
+ typedef cc::IterableKVList< gc_type, key_type, value_type > list_type;
+ typedef cc::MichaelHashMap< gc_type, list_type > map_type;
+
+ // +3 - for guarded_ptr and iterator
+ cds::gc::hp::GarbageCollector::Construct( map_type::c_nHazardPtrCount + 3, 1, 16 );
+ cds::threading::Manager::attachThread();
+ }
+
+ void TearDown()
+ {
+ cds::threading::Manager::detachThread();
+ cds::gc::hp::GarbageCollector::Destruct( true );
+ }
+ };
+
+ // List ordered by an explicit compare functor.
+ TEST_F( MichaelIterableMap_HP, compare )
+ {
+ typedef cc::IterableKVList< gc_type, key_type, value_type,
+ typename cc::iterable_list::make_traits<
+ cds::opt::compare< cmp >
+ >::type
+ > list_type;
+
+ typedef cc::MichaelHashMap< gc_type, list_type,
+ typename cc::michael_map::make_traits<
+ cds::opt::hash< hash1 >
+ >::type
+ > map_type;
+
+ map_type m( kSize, 2 );
+ test( m );
+ }
+
+ // List ordered by a less predicate instead of a comparator.
+ TEST_F( MichaelIterableMap_HP, less )
+ {
+ typedef cc::IterableKVList< gc_type, key_type, value_type,
+ typename cc::iterable_list::make_traits<
+ cds::opt::less< less >
+ >::type
+ > list_type;
+
+ typedef cc::MichaelHashMap< gc_type, list_type,
+ typename cc::michael_map::make_traits<
+ cds::opt::hash< hash1 >
+ >::type
+ > map_type;
+
+ map_type m( kSize, 1 );
+ test( m );
+ }
+
+ // Both a less predicate and a compare functor are supplied together.
+ TEST_F( MichaelIterableMap_HP, cmpmix )
+ {
+ typedef cc::IterableKVList< gc_type, key_type, value_type,
+ typename cc::iterable_list::make_traits<
+ cds::opt::less< less >
+ ,cds::opt::compare< cmp >
+ >::type
+ > list_type;
+
+ typedef cc::MichaelHashMap< gc_type, list_type,
+ typename cc::michael_map::make_traits<
+ cds::opt::hash< hash1 >
+ >::type
+ > map_type;
+
+ map_type m( kSize, 2 );
+ test( m );
+ }
+
+ // Non-default back-off strategy plus an item counter in the map traits.
+ TEST_F( MichaelIterableMap_HP, backoff )
+ {
+ struct list_traits: public cc::iterable_list::traits
+ {
+ typedef cmp compare;
+ typedef cds::backoff::exponential<cds::backoff::pause, cds::backoff::yield> back_off;
+ };
+ typedef cc::IterableKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type m( kSize, 4 );
+ test( m );
+ }
+
+ // Sequentially-consistent memory model variant of the list traits.
+ TEST_F( MichaelIterableMap_HP, seq_cst )
+ {
+ struct list_traits: public cc::iterable_list::traits
+ {
+ typedef cmp compare;
+ typedef cds::backoff::yield back_off;
+ typedef cds::opt::v::sequential_consistent memory_model;
+ };
+ typedef cc::IterableKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type s( kSize, 8 );
+ test( s );
+ }
+
+ // Event-counting statistics enabled in the list traits. The EXPECT_GE
+ // below is a smoke check that statistics() is wired up for this traits
+ // combination; the counter comparison against 0 cannot fail.
+ TEST_F( MichaelIterableMap_HP, stat )
+ {
+ struct list_traits: public cc::iterable_list::traits
+ {
+ typedef cmp compare;
+ typedef cds::backoff::yield back_off;
+ typedef cc::iterable_list::stat<> stat;
+ };
+ typedef cc::IterableKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type m( kSize, 8 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
+ // Same as stat, but using the wrapped_stat statistics wrapper.
+ TEST_F( MichaelIterableMap_HP, wrapped_stat )
+ {
+ struct list_traits: public cc::iterable_list::traits
+ {
+ typedef cmp compare;
+ typedef cc::iterable_list::wrapped_stat<> stat;
+ };
+ typedef cc::IterableKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type m( kSize, 8 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
+} // namespace
+
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_map_hp.h"
test( m );
}
+ // MichaelHashMap over LazyKVList with event-counting statistics enabled.
+ // The EXPECT_GE is a smoke check that statistics() is reachable for this
+ // traits combination; the comparison against 0 cannot fail.
+ TEST_F( MichaelLazyMap_DHP, stat )
+ {
+ struct list_traits: public cc::lazy_list::traits
+ {
+ typedef cmp compare;
+ typedef cds::backoff::yield back_off;
+ typedef cc::lazy_list::stat<> stat;
+ };
+ typedef cc::LazyKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type m( kSize, 2 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
+ // Same as stat, but using the wrapped_stat statistics wrapper.
+ TEST_F( MichaelLazyMap_DHP, wrapped_stat )
+ {
+ struct list_traits: public cc::lazy_list::traits
+ {
+ typedef cmp compare;
+ typedef cds::backoff::yield back_off;
+ typedef cc::lazy_list::wrapped_stat<> stat;
+ };
+ typedef cc::LazyKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type m( kSize, 2 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_map_hp.h"
test( m );
}
+ // MichaelHashMap over LazyKVList (HP GC) with event-counting statistics.
+ // The EXPECT_GE is a smoke check that statistics() is reachable for this
+ // traits combination; the comparison against 0 cannot fail.
+ TEST_F( MichaelLazyMap_HP, stat )
+ {
+ struct list_traits: public cc::lazy_list::traits
+ {
+ typedef cmp compare;
+ typedef cds::backoff::yield back_off;
+ typedef cc::lazy_list::stat<> stat;
+ };
+ typedef cc::LazyKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type m( kSize, 2 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
+ // Same as stat, but using the wrapped_stat statistics wrapper.
+ TEST_F( MichaelLazyMap_HP, wrapped_stat )
+ {
+ struct list_traits: public cc::lazy_list::traits
+ {
+ typedef cmp compare;
+ typedef cds::backoff::yield back_off;
+ typedef cc::lazy_list::wrapped_stat<> stat;
+ };
+ typedef cc::LazyKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type m( kSize, 2 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_map_nogc.h"
typedef hash1 hash;
typedef cds::atomicity::item_counter item_counter;
};
- typedef cc::MichaelHashMap< gc_type, list_type, map_traits >map_type;
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type m( kSize, 4 );
+ test( m );
+ }
+
+ // MichaelHashMap over LazyKVList (no GC) with event-counting statistics.
+ // Uses less-ordering and pause back-off; the EXPECT_GE is a smoke check
+ // that statistics() is reachable (comparison against 0 cannot fail).
+ TEST_F( MichaelLazyMap_NoGC, stat )
+ {
+ struct list_traits: public cc::lazy_list::traits
+ {
+ typedef base_class::less less;
+ typedef cds::backoff::pause back_off;
+ typedef cc::lazy_list::stat<> stat;
+ };
+ typedef cc::LazyKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type m( kSize, 4 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
+ // Same as the stat case, but using the wrapped_stat statistics wrapper.
+ TEST_F( MichaelLazyMap_NoGC, wrapped_stat )
+ {
+ struct list_traits: public cc::lazy_list::traits
+ {
+ typedef base_class::less less;
+ typedef cds::backoff::pause back_off;
+ typedef cc::lazy_list::wrapped_stat<> stat;
+ };
+ typedef cc::LazyKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
map_type m( kSize, 4 );
test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
}
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_map_hp.h"
test( s );
}
+ // MichaelHashMap over MichaelKVList (DHP GC) with event-counting
+ // statistics. The EXPECT_GE is a smoke check that statistics() is
+ // reachable for this traits combination (comparison vs 0 cannot fail).
+ TEST_F( MichaelMap_DHP, stat )
+ {
+ struct list_traits: public cc::michael_list::traits
+ {
+ typedef cmp compare;
+ typedef cds::backoff::yield back_off;
+ typedef cc::michael_list::stat<> stat;
+ };
+ typedef cc::MichaelKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type m( kSize, 8 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
+ // Same as stat, but using the wrapped_stat statistics wrapper.
+ TEST_F( MichaelMap_DHP, wrapped_stat )
+ {
+ struct list_traits: public cc::michael_list::traits
+ {
+ typedef cmp compare;
+ typedef cc::michael_list::wrapped_stat<> stat;
+ };
+ typedef cc::MichaelKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type m( kSize, 8 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_map_hp.h"
test( s );
}
+ // MichaelHashMap over MichaelKVList (HP GC) with event-counting
+ // statistics. The EXPECT_GE is a smoke check that statistics() is
+ // reachable for this traits combination (comparison vs 0 cannot fail).
+ TEST_F( MichaelMap_HP, stat )
+ {
+ struct list_traits: public cc::michael_list::traits
+ {
+ typedef cmp compare;
+ typedef cds::backoff::yield back_off;
+ typedef cc::michael_list::stat<> stat;
+ };
+ typedef cc::MichaelKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type m( kSize, 8 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
+ // Same as stat, but using the wrapped_stat statistics wrapper.
+ TEST_F( MichaelMap_HP, wrapped_stat )
+ {
+ struct list_traits: public cc::michael_list::traits
+ {
+ typedef cmp compare;
+ typedef cds::backoff::yield back_off;
+ typedef cc::michael_list::wrapped_stat<> stat;
+ };
+ typedef cc::MichaelKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type m( kSize, 8 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_map_nogc.h"
test( m );
}
+ // MichaelHashMap over MichaelKVList (no GC) with event-counting
+ // statistics. Uses less-ordering and pause back-off; the EXPECT_GE is a
+ // smoke check that statistics() is reachable (comparison vs 0 cannot fail).
+ TEST_F( MichaelMap_NoGC, stat )
+ {
+ struct list_traits: public cc::michael_list::traits
+ {
+ typedef base_class::less less;
+ typedef cds::backoff::pause back_off;
+ typedef cc::michael_list::stat<> stat;
+ };
+ typedef cc::MichaelKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type m( kSize, 4 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
+ // Same as stat, but using the wrapped_stat statistics wrapper.
+ TEST_F( MichaelMap_NoGC, wrapped_stat )
+ {
+ struct list_traits: public cc::michael_list::traits
+ {
+ typedef base_class::less less;
+ typedef cds::backoff::pause back_off;
+ typedef cc::michael_list::wrapped_stat<> stat;
+ };
+ typedef cc::MichaelKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type m( kSize, 4 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSUNIT_MAP_TEST_MAP_H
// Precondition: map is empty
// Postcondition: map is empty
- ASSERT_TRUE( m.empty());
- ASSERT_CONTAINER_SIZE( m, 0 );
+ EXPECT_TRUE( m.empty());
+ EXPECT_CONTAINER_SIZE( m, 0 );
typedef typename Map::value_type map_pair;
size_t const kkSize = kSize;
for ( auto const& i : arrKeys ) {
value_type const& val( arrVals.at( i.nKey ));
- ASSERT_FALSE( m.contains( i.nKey ));
- ASSERT_FALSE( m.contains( i ));
- ASSERT_FALSE( m.contains( other_item( i.nKey ), other_less()));
- ASSERT_FALSE( m.find( i, []( map_pair const& ) {
- ASSERT_TRUE( false );
+ EXPECT_FALSE( m.contains( i.nKey ));
+ EXPECT_FALSE( m.contains( i ));
+ EXPECT_FALSE( m.contains( other_item( i.nKey ), other_less()));
+ EXPECT_FALSE( m.find( i, []( map_pair const& ) {
+ EXPECT_TRUE( false );
} ));
- ASSERT_FALSE( m.find( i.nKey, []( map_pair const& ) {
+ EXPECT_FALSE( m.find( i.nKey, []( map_pair const& ) {
EXPECT_TRUE( false );
} ));
- ASSERT_FALSE( m.find_with( other_item( i.nKey ), other_less(), []( map_pair const& ) {
+ EXPECT_FALSE( m.find_with( other_item( i.nKey ), other_less(), []( map_pair const& ) {
EXPECT_TRUE( false );
} ));
switch ( i.nKey % 16 ) {
case 0:
- ASSERT_TRUE( m.insert( i ));
- ASSERT_FALSE( m.insert( i ));
- ASSERT_TRUE( m.find( i.nKey, []( map_pair& v ) {
+ EXPECT_TRUE( m.insert( i ));
+ EXPECT_FALSE( m.insert( i ));
+ EXPECT_TRUE( m.find( i.nKey, []( map_pair& v ) {
v.second.nVal = v.first.nKey;
v.second.strVal = std::to_string( v.first.nKey );
} ));
break;
case 1:
- ASSERT_TRUE( m.insert( i.nKey ));
- ASSERT_FALSE( m.insert( i.nKey ));
- ASSERT_TRUE( m.find( i.nKey, []( map_pair& v ) {
+ EXPECT_TRUE( m.insert( i.nKey ));
+ EXPECT_FALSE( m.insert( i.nKey ));
+ EXPECT_TRUE( m.find( i.nKey, []( map_pair& v ) {
v.second.nVal = v.first.nKey;
v.second.strVal = std::to_string( v.first.nKey );
} ));
break;
case 2:
- ASSERT_TRUE( m.insert( std::to_string( i.nKey )));
- ASSERT_FALSE( m.insert( std::to_string( i.nKey )));
- ASSERT_TRUE( m.find( i.nKey, []( map_pair& v ) {
+ EXPECT_TRUE( m.insert( std::to_string( i.nKey )));
+ EXPECT_FALSE( m.insert( std::to_string( i.nKey )));
+ EXPECT_TRUE( m.find( i.nKey, []( map_pair& v ) {
v.second.nVal = v.first.nKey;
v.second.strVal = std::to_string( v.first.nKey );
} ));
break;
case 3:
- ASSERT_TRUE( m.insert( i, val ));
- ASSERT_FALSE( m.insert( i, val ));
+ EXPECT_TRUE( m.insert( i, val ));
+ EXPECT_FALSE( m.insert( i, val ));
break;
case 4:
- ASSERT_TRUE( m.insert( i.nKey, val.strVal ));
- ASSERT_FALSE( m.insert( i.nKey, val.strVal ));
+ EXPECT_TRUE( m.insert( i.nKey, val.strVal ));
+ EXPECT_FALSE( m.insert( i.nKey, val.strVal ));
break;
case 5:
- ASSERT_TRUE( m.insert( val.strVal, i.nKey ));
- ASSERT_FALSE( m.insert( val.strVal, i.nKey ));
+ EXPECT_TRUE( m.insert( val.strVal, i.nKey ));
+ EXPECT_FALSE( m.insert( val.strVal, i.nKey ));
break;
case 6:
- ASSERT_TRUE( m.insert_with( i, []( map_pair& v ) {
+ EXPECT_TRUE( m.insert_with( i, []( map_pair& v ) {
v.second.nVal = v.first.nKey;
v.second.strVal = std::to_string( v.first.nKey );
} ));
- ASSERT_FALSE( m.insert_with( i, []( map_pair& v ) {
+ EXPECT_FALSE( m.insert_with( i, []( map_pair& v ) {
EXPECT_TRUE( false );
} ));
break;
case 7:
- ASSERT_TRUE( m.insert_with( i.nKey, []( map_pair& v ) {
+ EXPECT_TRUE( m.insert_with( i.nKey, []( map_pair& v ) {
v.second.nVal = v.first.nKey;
v.second.strVal = std::to_string( v.first.nKey );
} ));
- ASSERT_FALSE( m.insert_with( i.nKey, []( map_pair& v ) {
+ EXPECT_FALSE( m.insert_with( i.nKey, []( map_pair& v ) {
EXPECT_TRUE( false );
} ));
break;
case 8:
- ASSERT_TRUE( m.insert_with( val.strVal, []( map_pair& v ) {
+ EXPECT_TRUE( m.insert_with( val.strVal, []( map_pair& v ) {
v.second.nVal = v.first.nKey;
v.second.strVal = std::to_string( v.first.nKey );
} ));
- ASSERT_FALSE( m.insert_with( val.strVal, []( map_pair& v ) {
+ EXPECT_FALSE( m.insert_with( val.strVal, []( map_pair& v ) {
EXPECT_TRUE( false );
} ));
break;
updResult = m.update( i.nKey, []( bool, map_pair& ) {
EXPECT_TRUE( false );
}, false );
- ASSERT_FALSE( updResult.first );
- ASSERT_FALSE( updResult.second );
+ EXPECT_FALSE( updResult.first );
+ EXPECT_FALSE( updResult.second );
updResult = m.update( i.nKey, []( bool bNew, map_pair& v ) {
EXPECT_TRUE( bNew );
v.second.nVal = v.first.nKey;
});
- ASSERT_TRUE( updResult.first );
- ASSERT_TRUE( updResult.second );
+ EXPECT_TRUE( updResult.first );
+ EXPECT_TRUE( updResult.second );
updResult = m.update( i.nKey, []( bool bNew, map_pair& v ) {
EXPECT_FALSE( bNew );
EXPECT_EQ( v.first.nKey, v.second.nVal );
v.second.strVal = std::to_string( v.second.nVal );
} );
- ASSERT_TRUE( updResult.first );
- ASSERT_FALSE( updResult.second );
+ EXPECT_TRUE( updResult.first );
+ EXPECT_FALSE( updResult.second );
break;
case 10:
updResult = m.update( i, []( bool, map_pair& ) {
EXPECT_TRUE( false );
}, false );
- ASSERT_FALSE( updResult.first );
- ASSERT_FALSE( updResult.second );
+ EXPECT_FALSE( updResult.first );
+ EXPECT_FALSE( updResult.second );
updResult = m.update( i, []( bool bNew, map_pair& v ) {
EXPECT_TRUE( bNew );
v.second.nVal = v.first.nKey;
});
- ASSERT_TRUE( updResult.first );
- ASSERT_TRUE( updResult.second );
+ EXPECT_TRUE( updResult.first );
+ EXPECT_TRUE( updResult.second );
updResult = m.update( i, []( bool bNew, map_pair& v ) {
EXPECT_FALSE( bNew );
EXPECT_EQ( v.first.nKey, v.second.nVal );
v.second.strVal = std::to_string( v.second.nVal );
} );
- ASSERT_TRUE( updResult.first );
- ASSERT_FALSE( updResult.second );
+ EXPECT_TRUE( updResult.first );
+ EXPECT_FALSE( updResult.second );
break;
case 11:
updResult = m.update( val.strVal, []( bool, map_pair& ) {
EXPECT_TRUE( false );
}, false );
- ASSERT_FALSE( updResult.first );
- ASSERT_FALSE( updResult.second );
+ EXPECT_FALSE( updResult.first );
+ EXPECT_FALSE( updResult.second );
updResult = m.update( val.strVal, []( bool bNew, map_pair& v ) {
EXPECT_TRUE( bNew );
v.second.nVal = v.first.nKey;
});
- ASSERT_TRUE( updResult.first );
- ASSERT_TRUE( updResult.second );
+ EXPECT_TRUE( updResult.first );
+ EXPECT_TRUE( updResult.second );
updResult = m.update( val.strVal, []( bool bNew, map_pair& v ) {
EXPECT_FALSE( bNew );
EXPECT_EQ( v.first.nKey, v.second.nVal );
v.second.strVal = std::to_string( v.second.nVal );
} );
- ASSERT_TRUE( updResult.first );
- ASSERT_FALSE( updResult.second );
+ EXPECT_TRUE( updResult.first );
+ EXPECT_FALSE( updResult.second );
break;
case 12:
- ASSERT_TRUE( m.emplace( i.nKey ));
- ASSERT_FALSE( m.emplace( i.nKey ));
- ASSERT_TRUE( m.find( i.nKey, []( map_pair& v ) {
+ EXPECT_TRUE( m.emplace( i.nKey ));
+ EXPECT_FALSE( m.emplace( i.nKey ));
+ EXPECT_TRUE( m.find( i.nKey, []( map_pair& v ) {
v.second.nVal = v.first.nKey;
v.second.strVal = std::to_string( v.first.nKey );
} ));
break;
case 13:
- ASSERT_TRUE( m.emplace( i, i.nKey ));
- ASSERT_FALSE( m.emplace( i, i.nKey ));
+ EXPECT_TRUE( m.emplace( i, i.nKey ));
+ EXPECT_FALSE( m.emplace( i, i.nKey ));
break;
case 14:
{
std::string str = val.strVal;
- ASSERT_TRUE( m.emplace( i, std::move( str )));
- ASSERT_TRUE( str.empty());
+ EXPECT_TRUE( m.emplace( i, std::move( str )));
+ EXPECT_TRUE( str.empty());
str = val.strVal;
- ASSERT_FALSE( m.emplace( i, std::move( str )));
- ASSERT_TRUE( str.empty());
+ EXPECT_FALSE( m.emplace( i, std::move( str )));
+ EXPECT_TRUE( str.empty());
}
break;
case 15:
{
std::string str = val.strVal;
- ASSERT_TRUE( m.emplace( i, i.nKey, std::move( str )));
- ASSERT_TRUE( str.empty());
+ EXPECT_TRUE( m.emplace( i, i.nKey, std::move( str )));
+ EXPECT_TRUE( str.empty());
str = val.strVal;
- ASSERT_FALSE( m.emplace( i, i.nKey, std::move( str )));
- ASSERT_TRUE( str.empty());
+ EXPECT_FALSE( m.emplace( i, i.nKey, std::move( str )));
+ EXPECT_TRUE( str.empty());
}
break;
}
- ASSERT_TRUE( m.contains( i.nKey ));
- ASSERT_TRUE( m.contains( i ));
- ASSERT_TRUE( m.contains( other_item( i.nKey ), other_less()));
- ASSERT_TRUE( m.find( i, []( map_pair const& v ) {
+ EXPECT_TRUE( m.contains( i.nKey ));
+ EXPECT_TRUE( m.contains( i ));
+ EXPECT_TRUE( m.contains( other_item( i.nKey ), other_less()));
+ EXPECT_TRUE( m.find( i, []( map_pair const& v ) {
EXPECT_EQ( v.first.nKey, v.second.nVal );
EXPECT_EQ( std::to_string( v.first.nKey ), v.second.strVal );
} ));
- ASSERT_TRUE( m.find( i.nKey, []( map_pair const& v ) {
+ EXPECT_TRUE( m.find( i.nKey, []( map_pair const& v ) {
EXPECT_EQ( v.first.nKey, v.second.nVal );
EXPECT_EQ( std::to_string( v.first.nKey ), v.second.strVal );
} ));
- ASSERT_TRUE( m.find_with( other_item( i.nKey ), other_less(), []( map_pair const& v ) {
+ EXPECT_TRUE( m.find_with( other_item( i.nKey ), other_less(), []( map_pair const& v ) {
EXPECT_EQ( v.first.nKey, v.second.nVal );
EXPECT_EQ( std::to_string( v.first.nKey ), v.second.strVal );
} ));
}
- ASSERT_FALSE( m.empty() );
- ASSERT_CONTAINER_SIZE( m, kkSize );
- ASSERT_FALSE( m.begin() == m.end() );
- ASSERT_FALSE( m.cbegin() == m.cend() );
+ EXPECT_FALSE( m.empty() );
+ EXPECT_CONTAINER_SIZE( m, kkSize );
+ EXPECT_FALSE( m.begin() == m.end() );
+ EXPECT_FALSE( m.cbegin() == m.cend() );
shuffle( arrKeys.begin(), arrKeys.end() );
for ( auto const& i : arrKeys ) {
value_type const& val( arrVals.at( i.nKey ) );
- ASSERT_TRUE( m.contains( i.nKey ));
- ASSERT_TRUE( m.contains( val.strVal ) );
- ASSERT_TRUE( m.contains( i ));
- ASSERT_TRUE( m.contains( other_item( i.nKey ), other_less()));
- ASSERT_TRUE( m.find( i, []( map_pair const& v ) {
+ EXPECT_TRUE( m.contains( i.nKey ));
+ EXPECT_TRUE( m.contains( val.strVal ) );
+ EXPECT_TRUE( m.contains( i ));
+ EXPECT_TRUE( m.contains( other_item( i.nKey ), other_less()));
+ EXPECT_TRUE( m.find( i, []( map_pair const& v ) {
EXPECT_EQ( v.first.nKey, v.second.nVal );
EXPECT_EQ( std::to_string( v.first.nKey ), v.second.strVal );
} ));
- ASSERT_TRUE( m.find( i.nKey, []( map_pair const& v ) {
+ EXPECT_TRUE( m.find( i.nKey, []( map_pair const& v ) {
EXPECT_EQ( v.first.nKey, v.second.nVal );
EXPECT_EQ( std::to_string( v.first.nKey ), v.second.strVal );
} ));
- ASSERT_TRUE( m.find_with( other_item( i.nKey ), other_less(), []( map_pair const& v ) {
+ EXPECT_TRUE( m.find_with( other_item( i.nKey ), other_less(), []( map_pair const& v ) {
EXPECT_EQ( v.first.nKey, v.second.nVal );
EXPECT_EQ( std::to_string( v.first.nKey ), v.second.strVal );
} ));
switch ( i.nKey % 8 ) {
case 0:
- ASSERT_TRUE( m.erase( i ));
- ASSERT_FALSE( m.erase( i ));
+ EXPECT_TRUE( m.erase( i ));
+ EXPECT_FALSE( m.erase( i ));
break;
case 1:
- ASSERT_TRUE( m.erase( i.nKey ));
- ASSERT_FALSE( m.erase( i.nKey ));
+ EXPECT_TRUE( m.erase( i.nKey ));
+ EXPECT_FALSE( m.erase( i.nKey ));
break;
case 2:
- ASSERT_TRUE( m.erase( val.strVal ));
- ASSERT_FALSE( m.erase( val.strVal ));
+ EXPECT_TRUE( m.erase( val.strVal ));
+ EXPECT_FALSE( m.erase( val.strVal ));
break;
case 3:
- ASSERT_TRUE( m.erase_with( other_item( i.nKey ), other_less()));
- ASSERT_FALSE( m.erase_with( other_item( i.nKey ), other_less()));
+ EXPECT_TRUE( m.erase_with( other_item( i.nKey ), other_less()));
+ EXPECT_FALSE( m.erase_with( other_item( i.nKey ), other_less()));
break;
case 4:
- ASSERT_TRUE( m.erase( i, []( map_pair& v ) {
+ EXPECT_TRUE( m.erase( i, []( map_pair& v ) {
EXPECT_EQ( v.first.nKey, v.second.nVal );
EXPECT_EQ( std::to_string( v.first.nKey ), v.second.strVal );
}));
- ASSERT_FALSE( m.erase( i, []( map_pair& ) {
+ EXPECT_FALSE( m.erase( i, []( map_pair& ) {
EXPECT_TRUE( false );
}));
break;
case 5:
- ASSERT_TRUE( m.erase( i.nKey, []( map_pair& v ) {
+ EXPECT_TRUE( m.erase( i.nKey, []( map_pair& v ) {
EXPECT_EQ( v.first.nKey, v.second.nVal );
EXPECT_EQ( std::to_string( v.first.nKey ), v.second.strVal );
}));
- ASSERT_FALSE( m.erase( i.nKey, []( map_pair& ) {
+ EXPECT_FALSE( m.erase( i.nKey, []( map_pair& ) {
EXPECT_TRUE( false );
}));
break;
case 6:
- ASSERT_TRUE( m.erase( val.strVal, []( map_pair& v ) {
+ EXPECT_TRUE( m.erase( val.strVal, []( map_pair& v ) {
EXPECT_EQ( v.first.nKey, v.second.nVal );
EXPECT_EQ( std::to_string( v.first.nKey ), v.second.strVal );
}));
- ASSERT_FALSE( m.erase( val.strVal, []( map_pair& ) {
+ EXPECT_FALSE( m.erase( val.strVal, []( map_pair& ) {
EXPECT_TRUE( false );
}));
break;
case 7:
- ASSERT_TRUE( m.erase_with( other_item( i.nKey ), other_less(), []( map_pair& v ) {
+ EXPECT_TRUE( m.erase_with( other_item( i.nKey ), other_less(), []( map_pair& v ) {
EXPECT_EQ( v.first.nKey, v.second.nVal );
EXPECT_EQ( std::to_string( v.first.nKey ), v.second.strVal );
}));
- ASSERT_FALSE( m.erase_with( other_item( i.nKey ), other_less(), []( map_pair& ) {
+ EXPECT_FALSE( m.erase_with( other_item( i.nKey ), other_less(), []( map_pair& ) {
EXPECT_TRUE( false );
}));
break;
}
- ASSERT_FALSE( m.contains( i.nKey ));
- ASSERT_FALSE( m.contains( i ));
- ASSERT_FALSE( m.contains( val.strVal ));
- ASSERT_FALSE( m.contains( other_item( i.nKey ), other_less()));
- ASSERT_FALSE( m.find( i, []( map_pair const& ) {
- ASSERT_TRUE( false );
+ EXPECT_FALSE( m.contains( i.nKey ));
+ EXPECT_FALSE( m.contains( i ));
+ EXPECT_FALSE( m.contains( val.strVal ));
+ EXPECT_FALSE( m.contains( other_item( i.nKey ), other_less()));
+ EXPECT_FALSE( m.find( i, []( map_pair const& ) {
+ EXPECT_TRUE( false );
} ));
- ASSERT_FALSE( m.find( i.nKey, []( map_pair const& ) {
+ EXPECT_FALSE( m.find( i.nKey, []( map_pair const& ) {
EXPECT_TRUE( false );
} ));
- ASSERT_FALSE( m.find_with( other_item( i.nKey ), other_less(), []( map_pair const& ) {
+ EXPECT_FALSE( m.find_with( other_item( i.nKey ), other_less(), []( map_pair const& ) {
EXPECT_TRUE( false );
} ));
}
- ASSERT_TRUE( m.empty() );
- ASSERT_CONTAINER_SIZE( m, 0 );
+ EXPECT_TRUE( m.empty() );
+ EXPECT_CONTAINER_SIZE( m, 0 );
- ASSERT_TRUE( m.begin() == m.end());
- ASSERT_TRUE( m.cbegin() == m.cend());
+ EXPECT_TRUE( m.begin() == m.end());
+ EXPECT_TRUE( m.cbegin() == m.cend());
// clear
for ( auto const& i : arrKeys )
- ASSERT_TRUE( m.insert( i ));
+ EXPECT_TRUE( m.insert( i ));
- ASSERT_FALSE( m.empty() );
- ASSERT_CONTAINER_SIZE( m, kkSize );
+ EXPECT_FALSE( m.empty() );
+ EXPECT_CONTAINER_SIZE( m, kkSize );
m.clear();
- ASSERT_TRUE( m.empty() );
- ASSERT_CONTAINER_SIZE( m, 0 );
+ EXPECT_TRUE( m.empty() );
+ EXPECT_CONTAINER_SIZE( m, 0 );
}
};
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSUNIT_MAP_TEST_MAP_HP_H
base_class::test( m );
- ASSERT_TRUE( m.empty());
- ASSERT_CONTAINER_SIZE( m, 0 );
+ EXPECT_TRUE( m.empty());
+ EXPECT_CONTAINER_SIZE( m, 0 );
typedef typename Map::value_type map_pair;
size_t const kkSize = base_class::kSize;
}
for ( auto const& i : arrKeys )
- ASSERT_TRUE( m.insert( i ) );
- ASSERT_FALSE( m.empty() );
- ASSERT_CONTAINER_SIZE( m, kkSize );
+ EXPECT_TRUE( m.insert( i ) );
+ EXPECT_FALSE( m.empty() );
+ EXPECT_CONTAINER_SIZE( m, kkSize );
// iterators
size_t nCount = 0;
gp = m.get( i.nKey );
ASSERT_FALSE( !gp );
- ASSERT_EQ( gp->first.nKey, i.nKey );
+ EXPECT_EQ( gp->first.nKey, i.nKey );
gp.release();
gp = m.get( i );
ASSERT_FALSE( !gp );
- ASSERT_EQ( gp->first.nKey, i.nKey );
+ EXPECT_EQ( gp->first.nKey, i.nKey );
gp.release();
gp = m.get_with( other_item( i.nKey ), other_less());
ASSERT_FALSE( !gp );
- ASSERT_EQ( gp->first.nKey, i.nKey );
+ EXPECT_EQ( gp->first.nKey, i.nKey );
switch ( i.nKey % 4 ) {
case 0:
break;
}
ASSERT_FALSE( !gp );
- ASSERT_EQ( gp->first.nKey, i.nKey );
+ EXPECT_EQ( gp->first.nKey, i.nKey );
gp.release();
gp = m.get( i.nKey );
gp = m.get_with( other_item( i.nKey ), other_less() );
ASSERT_TRUE( !gp );
}
- ASSERT_TRUE( m.empty() );
- ASSERT_CONTAINER_SIZE( m, 0 );
+ EXPECT_TRUE( m.empty() );
+ EXPECT_CONTAINER_SIZE( m, 0 );
}
};
--- /dev/null
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSUNIT_MAP_TEST_MICHAEL_ITERABLE_MAP_H
+#define CDSUNIT_MAP_TEST_MICHAEL_ITERABLE_MAP_H
+
+#include "test_map_data.h"
+
+// forward declaration
+namespace cds { namespace container {} }
+
+namespace cds_test {
+
+ // Single-threaded functional test fixture for MichaelHashMap built over an
+ // iterable key-value list. Unlike the plain michael_list fixture, the update()
+ // functor here takes (current_pair, pointer_to_old_pair) and the map exposes
+ // upsert() — exercised in case 16 below.
+ class michael_iterable_map: public map_fixture
+ {
+ public:
+ static size_t const kSize = 1000;
+
+ protected:
+ // Runs the full insert / find / update / erase / clear scenario against an
+ // empty map `m`. The map must be empty on entry and is empty on return.
+ template <class Map>
+ void test( Map& m )
+ {
+ // Precondition: map is empty
+ // Postcondition: map is empty
+
+ EXPECT_TRUE( m.empty());
+ EXPECT_CONTAINER_SIZE( m, 0 );
+
+ typedef typename Map::value_type map_pair;
+ size_t const kkSize = kSize;
+
+ std::vector<key_type> arrKeys;
+ for ( int i = 0; i < static_cast<int>(kkSize); ++i )
+ arrKeys.push_back( key_type( i ));
+ shuffle( arrKeys.begin(), arrKeys.end());
+
+ std::vector< value_type > arrVals;
+ for ( size_t i = 0; i < kkSize; ++i ) {
+ value_type val;
+ val.nVal = static_cast<int>( i );
+ val.strVal = std::to_string( i );
+ arrVals.push_back( val );
+ }
+
+ // insert/find
+ // Each key is inserted through one of 17 API variants (selected by
+ // key % 17); after insertion the item must be reachable via every
+ // lookup flavor (contains / find / find_with).
+ for ( auto const& i : arrKeys ) {
+ value_type const& val( arrVals.at( i.nKey ));
+
+ EXPECT_FALSE( m.contains( i.nKey ));
+ EXPECT_FALSE( m.contains( i ));
+ EXPECT_FALSE( m.contains( other_item( i.nKey ), other_less()));
+ EXPECT_FALSE( m.find( i, []( map_pair const& ) {
+ EXPECT_TRUE( false );
+ } ));
+ EXPECT_FALSE( m.find( i.nKey, []( map_pair const& ) {
+ EXPECT_TRUE( false );
+ } ));
+ EXPECT_FALSE( m.find_with( other_item( i.nKey ), other_less(), []( map_pair const& ) {
+ EXPECT_TRUE( false );
+ } ));
+
+ std::pair< bool, bool > updResult;
+
+ switch ( i.nKey % 17 ) {
+ case 0:
+ EXPECT_TRUE( m.insert( i ));
+ EXPECT_FALSE( m.insert( i ));
+ EXPECT_TRUE( m.find( i.nKey, []( map_pair& v ) {
+ v.second.nVal = v.first.nKey;
+ v.second.strVal = std::to_string( v.first.nKey );
+ } ));
+ break;
+ case 1:
+ EXPECT_TRUE( m.insert( i.nKey ));
+ EXPECT_FALSE( m.insert( i.nKey ));
+ EXPECT_TRUE( m.find( i.nKey, []( map_pair& v ) {
+ v.second.nVal = v.first.nKey;
+ v.second.strVal = std::to_string( v.first.nKey );
+ } ));
+ break;
+ case 2:
+ EXPECT_TRUE( m.insert( std::to_string( i.nKey )));
+ EXPECT_FALSE( m.insert( std::to_string( i.nKey )));
+ EXPECT_TRUE( m.find( i.nKey, []( map_pair& v ) {
+ v.second.nVal = v.first.nKey;
+ v.second.strVal = std::to_string( v.first.nKey );
+ } ));
+ break;
+ case 3:
+ EXPECT_TRUE( m.insert( i, val ));
+ EXPECT_FALSE( m.insert( i, val ));
+ break;
+ case 4:
+ EXPECT_TRUE( m.insert( i.nKey, val.strVal ));
+ EXPECT_FALSE( m.insert( i.nKey, val.strVal ));
+ break;
+ case 5:
+ EXPECT_TRUE( m.insert( val.strVal, i.nKey ));
+ EXPECT_FALSE( m.insert( val.strVal, i.nKey ));
+ break;
+ case 6:
+ EXPECT_TRUE( m.insert_with( i, []( map_pair& v ) {
+ v.second.nVal = v.first.nKey;
+ v.second.strVal = std::to_string( v.first.nKey );
+ } ));
+ EXPECT_FALSE( m.insert_with( i, []( map_pair& v ) {
+ EXPECT_TRUE( false );
+ } ));
+ break;
+ case 7:
+ EXPECT_TRUE( m.insert_with( i.nKey, []( map_pair& v ) {
+ v.second.nVal = v.first.nKey;
+ v.second.strVal = std::to_string( v.first.nKey );
+ } ));
+ EXPECT_FALSE( m.insert_with( i.nKey, []( map_pair& v ) {
+ EXPECT_TRUE( false );
+ } ));
+ break;
+ case 8:
+ EXPECT_TRUE( m.insert_with( val.strVal, []( map_pair& v ) {
+ v.second.nVal = v.first.nKey;
+ v.second.strVal = std::to_string( v.first.nKey );
+ } ));
+ EXPECT_FALSE( m.insert_with( val.strVal, []( map_pair& v ) {
+ EXPECT_TRUE( false );
+ } ));
+ break;
+ // Cases 9-11: update() with the iterable-list functor signature
+ // ( map_pair& current, map_pair* old ); `old` is nullptr when the
+ // item was newly created. Result pair is (linked, inserted).
+ case 9:
+ updResult = m.update( i.nKey, []( map_pair&, map_pair* ) {
+ EXPECT_TRUE( false );
+ }, false );
+ EXPECT_FALSE( updResult.first );
+ EXPECT_FALSE( updResult.second );
+
+ updResult = m.update( i.nKey, []( map_pair& v, map_pair* old ) {
+ EXPECT_TRUE( old == nullptr );
+ v.second.nVal = v.first.nKey;
+ });
+ EXPECT_TRUE( updResult.first );
+ EXPECT_TRUE( updResult.second );
+
+ updResult = m.update( i.nKey, []( map_pair& v, map_pair* old ) {
+ ASSERT_FALSE( old == nullptr );
+ EXPECT_EQ( v.first.nKey, old->second.nVal );
+ v.second.nVal = old->second.nVal;
+ v.second.strVal = std::to_string( old->second.nVal );
+ } );
+ EXPECT_TRUE( updResult.first );
+ EXPECT_FALSE( updResult.second );
+ break;
+ case 10:
+ updResult = m.update( i, []( map_pair&, map_pair* ) {
+ EXPECT_TRUE( false );
+ }, false );
+ EXPECT_FALSE( updResult.first );
+ EXPECT_FALSE( updResult.second );
+
+ updResult = m.update( i, []( map_pair& v, map_pair* old ) {
+ EXPECT_TRUE( old == nullptr );
+ v.second.nVal = v.first.nKey;
+ });
+ EXPECT_TRUE( updResult.first );
+ EXPECT_TRUE( updResult.second );
+
+ updResult = m.update( i, []( map_pair& v, map_pair* old ) {
+ ASSERT_FALSE( old == nullptr );
+ EXPECT_EQ( v.first.nKey, old->second.nVal );
+ v.second.nVal = old->second.nVal;
+ v.second.strVal = std::to_string( v.second.nVal );
+ } );
+ EXPECT_TRUE( updResult.first );
+ EXPECT_FALSE( updResult.second );
+ break;
+ case 11:
+ updResult = m.update( val.strVal, []( map_pair&, map_pair* ) {
+ EXPECT_TRUE( false );
+ }, false );
+ EXPECT_FALSE( updResult.first );
+ EXPECT_FALSE( updResult.second );
+
+ updResult = m.update( val.strVal, []( map_pair& v, map_pair* old ) {
+ EXPECT_TRUE( old == nullptr );
+ v.second.nVal = v.first.nKey;
+ });
+ EXPECT_TRUE( updResult.first );
+ EXPECT_TRUE( updResult.second );
+
+ updResult = m.update( val.strVal, []( map_pair& v, map_pair* old ) {
+ ASSERT_FALSE( old == nullptr );
+ EXPECT_EQ( v.first.nKey, old->second.nVal );
+ v.second.nVal = old->second.nVal;
+ v.second.strVal = std::to_string( v.second.nVal );
+ } );
+ EXPECT_TRUE( updResult.first );
+ EXPECT_FALSE( updResult.second );
+ break;
+ case 12:
+ EXPECT_TRUE( m.emplace( i.nKey ));
+ EXPECT_FALSE( m.emplace( i.nKey ));
+ EXPECT_TRUE( m.find( i.nKey, []( map_pair& v ) {
+ v.second.nVal = v.first.nKey;
+ v.second.strVal = std::to_string( v.first.nKey );
+ } ));
+ break;
+ case 13:
+ EXPECT_TRUE( m.emplace( i, i.nKey ));
+ EXPECT_FALSE( m.emplace( i, i.nKey ));
+ break;
+ case 14:
+ {
+ // The moved-from string must be consumed by emplace even when
+ // insertion fails (the second call).
+ std::string str = val.strVal;
+ EXPECT_TRUE( m.emplace( i, std::move( str )));
+ EXPECT_TRUE( str.empty());
+ str = val.strVal;
+ EXPECT_FALSE( m.emplace( i, std::move( str )));
+ EXPECT_TRUE( str.empty());
+ }
+ break;
+ case 15:
+ {
+ std::string str = val.strVal;
+ EXPECT_TRUE( m.emplace( i, i.nKey, std::move( str )));
+ EXPECT_TRUE( str.empty());
+ str = val.strVal;
+ EXPECT_FALSE( m.emplace( i, i.nKey, std::move( str )));
+ EXPECT_TRUE( str.empty());
+ }
+ break;
+ case 16:
+ {
+ // upsert(key, value, bInsert): with bInsert == false and no
+ // existing item nothing happens; result is (updated, inserted).
+ auto res = m.upsert( i, i.nKey, false );
+ EXPECT_FALSE( res.first );
+ EXPECT_FALSE( res.second );
+
+ res = m.upsert( i, i.nKey );
+ EXPECT_TRUE( res.first );
+ EXPECT_TRUE( res.second );
+
+ std::string str = val.strVal;
+ res = m.upsert( i, std::move( str ));
+ EXPECT_TRUE( res.first );
+ EXPECT_FALSE( res.second );
+ EXPECT_TRUE( str.empty() );
+ }
+ break;
+ }
+
+ EXPECT_TRUE( m.contains( i.nKey ));
+ EXPECT_TRUE( m.contains( i ));
+ EXPECT_TRUE( m.contains( other_item( i.nKey ), other_less()));
+ EXPECT_TRUE( m.find( i, []( map_pair const& v ) {
+ EXPECT_EQ( v.first.nKey, v.second.nVal );
+ EXPECT_EQ( std::to_string( v.first.nKey ), v.second.strVal );
+ } ));
+ EXPECT_TRUE( m.find( i.nKey, []( map_pair const& v ) {
+ EXPECT_EQ( v.first.nKey, v.second.nVal );
+ EXPECT_EQ( std::to_string( v.first.nKey ), v.second.strVal );
+ } ));
+ EXPECT_TRUE( m.find_with( other_item( i.nKey ), other_less(), []( map_pair const& v ) {
+ EXPECT_EQ( v.first.nKey, v.second.nVal );
+ EXPECT_EQ( std::to_string( v.first.nKey ), v.second.strVal );
+ } ));
+ }
+ EXPECT_FALSE( m.empty() );
+ EXPECT_CONTAINER_SIZE( m, kkSize );
+ EXPECT_FALSE( m.begin() == m.end() );
+ EXPECT_FALSE( m.cbegin() == m.cend() );
+
+ shuffle( arrKeys.begin(), arrKeys.end() );
+
+ // erase/find
+ // Each key is removed through one of 8 erase variants (key % 8);
+ // afterwards no lookup flavor may see it.
+ for ( auto const& i : arrKeys ) {
+ value_type const& val( arrVals.at( i.nKey ) );
+
+ EXPECT_TRUE( m.contains( i.nKey ));
+ EXPECT_TRUE( m.contains( val.strVal ) );
+ EXPECT_TRUE( m.contains( i ));
+ EXPECT_TRUE( m.contains( other_item( i.nKey ), other_less()));
+ EXPECT_TRUE( m.find( i, []( map_pair const& v ) {
+ EXPECT_EQ( v.first.nKey, v.second.nVal );
+ EXPECT_EQ( std::to_string( v.first.nKey ), v.second.strVal );
+ } ));
+ EXPECT_TRUE( m.find( i.nKey, []( map_pair const& v ) {
+ EXPECT_EQ( v.first.nKey, v.second.nVal );
+ EXPECT_EQ( std::to_string( v.first.nKey ), v.second.strVal );
+ } ));
+ EXPECT_TRUE( m.find_with( other_item( i.nKey ), other_less(), []( map_pair const& v ) {
+ EXPECT_EQ( v.first.nKey, v.second.nVal );
+ EXPECT_EQ( std::to_string( v.first.nKey ), v.second.strVal );
+ } ));
+
+
+ switch ( i.nKey % 8 ) {
+ case 0:
+ EXPECT_TRUE( m.erase( i ));
+ EXPECT_FALSE( m.erase( i ));
+ break;
+ case 1:
+ EXPECT_TRUE( m.erase( i.nKey ));
+ EXPECT_FALSE( m.erase( i.nKey ));
+ break;
+ case 2:
+ EXPECT_TRUE( m.erase( val.strVal ));
+ EXPECT_FALSE( m.erase( val.strVal ));
+ break;
+ case 3:
+ EXPECT_TRUE( m.erase_with( other_item( i.nKey ), other_less()));
+ EXPECT_FALSE( m.erase_with( other_item( i.nKey ), other_less()));
+ break;
+ case 4:
+ EXPECT_TRUE( m.erase( i, []( map_pair& v ) {
+ EXPECT_EQ( v.first.nKey, v.second.nVal );
+ EXPECT_EQ( std::to_string( v.first.nKey ), v.second.strVal );
+ }));
+ EXPECT_FALSE( m.erase( i, []( map_pair& ) {
+ EXPECT_TRUE( false );
+ }));
+ break;
+ case 5:
+ EXPECT_TRUE( m.erase( i.nKey, []( map_pair& v ) {
+ EXPECT_EQ( v.first.nKey, v.second.nVal );
+ EXPECT_EQ( std::to_string( v.first.nKey ), v.second.strVal );
+ }));
+ EXPECT_FALSE( m.erase( i.nKey, []( map_pair& ) {
+ EXPECT_TRUE( false );
+ }));
+ break;
+ case 6:
+ EXPECT_TRUE( m.erase( val.strVal, []( map_pair& v ) {
+ EXPECT_EQ( v.first.nKey, v.second.nVal );
+ EXPECT_EQ( std::to_string( v.first.nKey ), v.second.strVal );
+ }));
+ EXPECT_FALSE( m.erase( val.strVal, []( map_pair& ) {
+ EXPECT_TRUE( false );
+ }));
+ break;
+ case 7:
+ EXPECT_TRUE( m.erase_with( other_item( i.nKey ), other_less(), []( map_pair& v ) {
+ EXPECT_EQ( v.first.nKey, v.second.nVal );
+ EXPECT_EQ( std::to_string( v.first.nKey ), v.second.strVal );
+ }));
+ EXPECT_FALSE( m.erase_with( other_item( i.nKey ), other_less(), []( map_pair& ) {
+ EXPECT_TRUE( false );
+ }));
+ break;
+ }
+
+ EXPECT_FALSE( m.contains( i.nKey ));
+ EXPECT_FALSE( m.contains( i ));
+ EXPECT_FALSE( m.contains( val.strVal ));
+ EXPECT_FALSE( m.contains( other_item( i.nKey ), other_less()));
+ EXPECT_FALSE( m.find( i, []( map_pair const& ) {
+ EXPECT_TRUE( false );
+ } ));
+ EXPECT_FALSE( m.find( i.nKey, []( map_pair const& ) {
+ EXPECT_TRUE( false );
+ } ));
+ EXPECT_FALSE( m.find_with( other_item( i.nKey ), other_less(), []( map_pair const& ) {
+ EXPECT_TRUE( false );
+ } ));
+ }
+ EXPECT_TRUE( m.empty() );
+ EXPECT_CONTAINER_SIZE( m, 0 );
+
+ EXPECT_TRUE( m.begin() == m.end());
+ EXPECT_TRUE( m.cbegin() == m.cend());
+
+ // clear
+ for ( auto const& i : arrKeys )
+ EXPECT_TRUE( m.insert( i ));
+
+ EXPECT_FALSE( m.empty() );
+ EXPECT_CONTAINER_SIZE( m, kkSize );
+
+ m.clear();
+
+ EXPECT_TRUE( m.empty() );
+ EXPECT_CONTAINER_SIZE( m, 0 );
+ }
+ };
+
+} // namespace cds_test
+
+#endif // #ifndef CDSUNIT_MAP_TEST_MICHAEL_ITERABLE_MAP_H
--- /dev/null
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSUNIT_MAP_TEST_MICHAEL_ITERABLE_MAP_HP_H
+#define CDSUNIT_MAP_TEST_MICHAEL_ITERABLE_MAP_HP_H
+
+#include "test_michael_iterable.h"
+
+namespace cds_test {
+
+ // Extension of michael_iterable_map for garbage-collected (HP-like) maps:
+ // runs the base scenario, then exercises mutable/const iterators and the
+ // guarded-pointer API (get / get_with / extract / extract_with).
+ class michael_iterable_hp: public michael_iterable_map
+ {
+ typedef michael_iterable_map base_class;
+
+ protected:
+ // Runs base_class::test() first, then the GC-specific checks on `m`.
+ // The map must be empty on entry and is empty on return.
+ template <class Map>
+ void test( Map& m )
+ {
+ // Precondition: map is empty
+ // Postcondition: map is empty
+
+ base_class::test( m );
+
+ EXPECT_TRUE( m.empty());
+ EXPECT_CONTAINER_SIZE( m, 0 );
+
+ typedef typename Map::value_type map_pair;
+ size_t const kkSize = base_class::kSize;
+
+ std::vector<key_type> arrKeys;
+ for ( int i = 0; i < static_cast<int>(kkSize); ++i )
+ arrKeys.push_back( key_type( i ));
+ shuffle( arrKeys.begin(), arrKeys.end());
+
+ std::vector< value_type > arrVals;
+ for ( size_t i = 0; i < kkSize; ++i ) {
+ value_type val;
+ val.nVal = static_cast<int>( i );
+ val.strVal = std::to_string( i );
+ arrVals.push_back( val );
+ }
+
+ for ( auto const& i : arrKeys )
+ EXPECT_TRUE( m.insert( i ) );
+ EXPECT_FALSE( m.empty() );
+ EXPECT_CONTAINER_SIZE( m, kkSize );
+
+ // iterators
+ // First pass mutates each mapped value through the iterator; second
+ // (const) pass verifies the mutation is visible and counts items.
+ size_t nCount = 0;
+ for ( auto it = m.begin(); it != m.end(); ++it ) {
+ EXPECT_EQ( it->second.nVal, 0 );
+ it->second.nVal = it->first.nKey * 2;
+ ++nCount;
+ }
+ EXPECT_EQ( nCount, kkSize );
+
+ nCount = 0;
+ for ( auto it = m.cbegin(); it != m.cend(); ++it ) {
+ EXPECT_EQ( it->second.nVal, it->first.nKey * 2 );
+ ++nCount;
+ }
+ EXPECT_EQ( nCount, kkSize );
+
+ // get/extract
+ typedef typename Map::guarded_ptr guarded_ptr;
+ guarded_ptr gp;
+
+ for ( auto const& i : arrKeys ) {
+ value_type const& val = arrVals.at( i.nKey );
+
+ gp = m.get( i.nKey );
+ ASSERT_FALSE( !gp );
+ EXPECT_EQ( gp->first.nKey, i.nKey );
+ gp.release();
+ gp = m.get( i );
+ ASSERT_FALSE( !gp );
+ EXPECT_EQ( gp->first.nKey, i.nKey );
+ gp.release();
+ gp = m.get_with( other_item( i.nKey ), other_less());
+ ASSERT_FALSE( !gp );
+ EXPECT_EQ( gp->first.nKey, i.nKey );
+
+ // Remove the item via one of 4 extract flavors (key % 4); the
+ // guarded pointer must still reference the unlinked item.
+ switch ( i.nKey % 4 ) {
+ case 0:
+ gp = m.extract( i.nKey );
+ break;
+ case 1:
+ gp = m.extract( i );
+ break;
+ case 2:
+ gp = m.extract( val.strVal );
+ break;
+ case 3:
+ gp = m.extract_with( other_item( i.nKey ), other_less());
+ break;
+ }
+ ASSERT_FALSE( !gp );
+ EXPECT_EQ( gp->first.nKey, i.nKey );
+ gp.release();
+
+ // After extraction every get() flavor must come back empty.
+ gp = m.get( i.nKey );
+ ASSERT_TRUE( !gp );
+ gp = m.get( i );
+ ASSERT_TRUE( !gp );
+ gp = m.get_with( other_item( i.nKey ), other_less() );
+ ASSERT_TRUE( !gp );
+ }
+ EXPECT_TRUE( m.empty() );
+ EXPECT_CONTAINER_SIZE( m, 0 );
+ }
+ };
+
+} // namespace cds_test
+
+#endif // #ifndef CDSUNIT_MAP_TEST_MICHAEL_ITERABLE_MAP_HP_H
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSUNIT_MAP_TEST_MICHAEL_LAZY_RCU_H
#define CDSUNIT_MAP_TEST_MICHAEL_LAZY_RCU_H
this->test( m );
}
+ // Runs the shared map test suite against a MichaelHashMap built over a
+ // LazyKVList whose traits enable event-counting statistics
+ // (cc::lazy_list::stat<>), then touches statistics() to confirm the
+ // stat policy is wired through the map facade.
+ TYPED_TEST_P( MichaelLazyMap, stat )
+ {
+ typedef typename TestFixture::rcu_type rcu_type;
+ typedef typename TestFixture::key_type key_type;
+ typedef typename TestFixture::value_type value_type;
+
+ // Bucket-list traits: fixture ordering, pause back-off, and the
+ // statistics policy under test.
+ struct list_traits: public cc::lazy_list::traits
+ {
+ typedef typename TestFixture::less less;
+ typedef cds::backoff::pause back_off;
+ typedef cc::lazy_list::stat<> stat;
+ };
+ typedef cc::LazyKVList< rcu_type, key_type, value_type, list_traits > list_type;
+
+ // Map traits: fixture hash plus an exact item counter so the size
+ // checks in the shared test body are meaningful.
+ struct set_traits: public cc::michael_map::traits
+ {
+ typedef typename TestFixture::hash1 hash;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+ typedef cc::MichaelHashMap< rcu_type, list_type, set_traits >map_type;
+
+ map_type m( TestFixture::kSize, 4 );
+ this->test( m );
+ // NOTE(review): the counter is presumably unsigned, making >= 0
+ // trivially true; the check looks intended only to prove that
+ // statistics() and m_nInsertSuccess compile with this stat policy.
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
+ // Same scenario as the "stat" test above, but exercises the
+ // cc::lazy_list::wrapped_stat<> policy (statistics wrapped around an
+ // external stat object) instead of the plain stat<> policy.
+ TYPED_TEST_P( MichaelLazyMap, wrapped_stat )
+ {
+ typedef typename TestFixture::rcu_type rcu_type;
+ typedef typename TestFixture::key_type key_type;
+ typedef typename TestFixture::value_type value_type;
+
+ // Bucket-list traits with the wrapped statistics policy under test.
+ struct list_traits: public cc::lazy_list::traits
+ {
+ typedef typename TestFixture::less less;
+ typedef cds::backoff::pause back_off;
+ typedef cc::lazy_list::wrapped_stat<> stat;
+ };
+ typedef cc::LazyKVList< rcu_type, key_type, value_type, list_traits > list_type;
+
+ // Map traits: fixture hash plus exact item counting for size checks.
+ struct set_traits: public cc::michael_map::traits
+ {
+ typedef typename TestFixture::hash1 hash;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+ typedef cc::MichaelHashMap< rcu_type, list_type, set_traits >map_type;
+
+ map_type m( TestFixture::kSize, 4 );
+ this->test( m );
+ // NOTE(review): trivially-true comparison for an unsigned counter;
+ // serves as a compile/accessibility smoke check of statistics().
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
// GCC 5: All test names should be written on single line, otherwise a runtime error will be encountered like as
// "No test named <test_name> can be found in this test case"
REGISTER_TYPED_TEST_CASE_P( MichaelLazyMap,
- compare, less, cmpmix, backoff, seq_cst, mutex
+ compare, less, cmpmix, backoff, seq_cst, mutex, stat, wrapped_stat
);
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSUNIT_MAP_TEST_MICHAEL_MICHAEL_RCU_H
#define CDSUNIT_MAP_TEST_MICHAEL_MICHAEL_RCU_H
this->test( m );
}
+ // Runs the shared map test suite against a MichaelHashMap built over a
+ // MichaelKVList whose traits enable event-counting statistics
+ // (cc::michael_list::stat<>), then touches statistics() to confirm the
+ // stat policy is wired through the map facade.
+ TYPED_TEST_P( MichaelMap, stat )
+ {
+ typedef typename TestFixture::rcu_type rcu_type;
+ typedef typename TestFixture::key_type key_type;
+ typedef typename TestFixture::value_type value_type;
+
+ // Bucket-list traits: fixture ordering, pause back-off, and the
+ // statistics policy under test.
+ struct list_traits: public cc::michael_list::traits
+ {
+ typedef typename TestFixture::less less;
+ typedef cds::backoff::pause back_off;
+ typedef cc::michael_list::stat<> stat;
+ };
+ typedef cc::MichaelKVList< rcu_type, key_type, value_type, list_traits > list_type;
+
+ // Map traits: fixture hash plus an exact item counter so the size
+ // checks in the shared test body are meaningful.
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef typename TestFixture::hash1 hash;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+ typedef cc::MichaelHashMap< rcu_type, list_type, map_traits >map_type;
+
+ map_type m( TestFixture::kSize, 4 );
+ this->test( m );
+ // NOTE(review): the counter is presumably unsigned, making >= 0
+ // trivially true; the check looks intended only to prove that
+ // statistics() and m_nInsertSuccess compile with this stat policy.
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
+ // Same scenario as the "stat" test above, but exercises the
+ // cc::michael_list::wrapped_stat<> policy (statistics wrapped around an
+ // external stat object) instead of the plain stat<> policy.
+ TYPED_TEST_P( MichaelMap, wrapped_stat )
+ {
+ typedef typename TestFixture::rcu_type rcu_type;
+ typedef typename TestFixture::key_type key_type;
+ typedef typename TestFixture::value_type value_type;
+
+ // Bucket-list traits with the wrapped statistics policy under test.
+ struct list_traits: public cc::michael_list::traits
+ {
+ typedef typename TestFixture::less less;
+ typedef cds::backoff::pause back_off;
+ typedef cc::michael_list::wrapped_stat<> stat;
+ };
+ typedef cc::MichaelKVList< rcu_type, key_type, value_type, list_traits > list_type;
+
+ // Map traits: fixture hash plus exact item counting for size checks.
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef typename TestFixture::hash1 hash;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+ typedef cc::MichaelHashMap< rcu_type, list_type, map_traits >map_type;
+
+ map_type m( TestFixture::kSize, 4 );
+ this->test( m );
+ // NOTE(review): trivially-true comparison for an unsigned counter;
+ // serves as a compile/accessibility smoke check of statistics().
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
REGISTER_TYPED_TEST_CASE_P( MichaelMap,
- compare, less, cmpmix, backoff, seq_cst
+ compare, less, cmpmix, backoff, seq_cst, stat, wrapped_stat
);
}
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( MichaelIterableSet_DHP, wrapped_stat )
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( MichaelIterableSet_HP, wrapped_stat )
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( MichaelLazySet_DHP, wrapped_stat )
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( MichaelLazySet_HP, wrapped_stat )
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( MichaelLazySet_NoGC, wrapped_stat )
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( MichaelSet_DHP, wrapped_stat )
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( MichaelSet_HP, wrapped_stat )
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( MichaelSet_NoGC, wrapped_stat )
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSUNIT_SET_TEST_FELDMAN_HASHSET_H
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSUNIT_SET_TEST_FELDMAN_HASHSET_HP_H
set_type s( TestFixture::kSize, 4 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TYPED_TEST_P( MichaelLazySet, wrapped_stat )
set_type s( TestFixture::kSize, 4 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
// GCC 5: All test names should be written on single line, otherwise a runtime error will be encountered like as
set_type s( TestFixture::kSize, 4 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TYPED_TEST_P( MichaelSet, wrapped_stat )
set_type s( TestFixture::kSize, 4 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
// GCC 5: All test names should be written on single line, otherwise a runtime error will be encountered like as