Removed trailing spaces
author    khizmax <libcds.dev@gmail.com>
Tue, 3 Jan 2017 07:40:39 +0000 (10:40 +0300)
committer khizmax <libcds.dev@gmail.com>
Tue, 3 Jan 2017 07:40:39 +0000 (10:40 +0300)
14 files changed:
cds/algo/flat_combining/kernel.h
cds/container/basket_queue.h
cds/container/msqueue.h
cds/details/defs.h
cds/intrusive/impl/skip_list.h
cds/intrusive/skip_list_rcu.h
cds/sync/spinlock.h
src/hp_gc.cpp
test/include/cds_test/thread.h
test/stress/map/delodd/map_delodd.cpp
test/stress/map/delodd/map_delodd.h
test/stress/set/delodd/set_delodd.h
test/stress/set/delodd/set_delodd_feldman_hashset.cpp
tools/brush_cds.pl

index 6c5e8f5..5e333e1 100644 (file)
@@ -289,7 +289,7 @@ namespace cds { namespace algo {
             {
                 assert( m_pThreadRec.get() == nullptr );
                 publication_record_type* pRec = cxx11_allocator().New();
-                m_pAllocatedHead = 
+                m_pAllocatedHead =
                     m_pHead = pRec;
                 m_pThreadRec.reset( pRec );
                 m_Stat.onCreatePubRecord();
@@ -706,11 +706,11 @@ namespace cds { namespace algo {
                 publication_record* p = m_pHead;
                 bool bOpDone = false;
                 while ( p ) {
-                    switch ( p->nState.load( memory_model::memory_order_acquire ) ) {
+                    switch ( p->nState.load( memory_model::memory_order_acquire )) {
                     case active:
                         if ( p->op() >= req_Operation ) {
                             p->nAge.store( nCurAge, memory_model::memory_order_relaxed );
-                            owner.fc_apply( static_cast<publication_record_type*>( p ) );
+                            owner.fc_apply( static_cast<publication_record_type*>( p ));
                             operation_done( *p );
                             bOpDone = true;
                         }
@@ -735,12 +735,12 @@ namespace cds { namespace algo {
             void batch_combining( Container& owner )
             {
                 // The thread is a combiner
-                assert( !m_Mutex.try_lock() );
+                assert( !m_Mutex.try_lock());
 
                 unsigned int const nCurAge = m_nCount.fetch_add( 1, memory_model::memory_order_relaxed ) + 1;
 
                 for ( unsigned int nPass = 0; nPass < m_nCombinePassCount; ++nPass )
-                    owner.fc_process( begin(), end() );
+                    owner.fc_process( begin(), end());
 
                 combining_pass( owner, nCurAge );
                 m_Stat.onCombining();
@@ -760,10 +760,10 @@ namespace cds { namespace algo {
                     m_Stat.onPassiveWaitIteration();
 
                     // Wait while operation processing
-                    if ( m_waitStrategy.wait( *this, *pRec ) )
+                    if ( m_waitStrategy.wait( *this, *pRec ))
                         m_Stat.onWakeupByNotifying();
 
-                    if ( m_Mutex.try_lock() ) {
+                    if ( m_Mutex.try_lock()) {
                         if ( pRec->op( memory_model::memory_order_acquire ) == req_Response ) {
                             // Operation is done
                             m_Mutex.unlock();
@@ -790,13 +790,13 @@ namespace cds { namespace algo {
             try_again:
                 publication_record * pPrev = m_pHead;
                 for ( publication_record * p = pPrev->pNext.load( memory_model::memory_order_acquire ); p; ) {
-                    switch ( p->nState.load( memory_model::memory_order_relaxed ) ) {
+                    switch ( p->nState.load( memory_model::memory_order_relaxed )) {
                     case active:
                         if ( p->nAge.load( memory_model::memory_order_relaxed ) + m_nCompactFactor < nCurAge )
                         {
                             publication_record * pNext = p->pNext.load( memory_model::memory_order_relaxed );
                             if ( pPrev->pNext.compare_exchange_strong( p, pNext,
-                                memory_model::memory_order_acquire, atomics::memory_order_relaxed ) )
+                                memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
                             {
                                 p->nState.store( inactive, memory_model::memory_order_release );
                                 p = pNext;
index d94aa57..a698aee 100644 (file)
@@ -397,7 +397,7 @@ namespace cds { namespace container {
         */
         bool dequeue( value_type& dest )
         {
-            return dequeue_with( [&dest]( value_type& src ) { 
+            return dequeue_with( [&dest]( value_type& src ) {
                 // TSan finds a race between this read of \p src and node_type constructor
                 // I think, it is wrong
                 CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
index e3bcb7a..e385851 100644 (file)
@@ -355,7 +355,7 @@ namespace cds { namespace container {
         */
         bool dequeue( value_type& dest )
         {
-            return dequeue_with( [&dest]( value_type& src ) { 
+            return dequeue_with( [&dest]( value_type& src ) {
                 // TSan finds a race between this read of \p src and node_type constructor
                 // I think, it is wrong
                 CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
index d43bedb..65d66e9 100644 (file)
    to \p boost library root directory. The test projects search \p boost libraries in:
    - for 32bit: <tt>\$(BOOST_PATH)/stage/lib</tt>, <tt>\$(BOOST_PATH)/stage32/lib</tt>, and <tt>\$(BOOST_PATH)/bin</tt>.
    - for 64bit: <tt>\$(BOOST_PATH)/stage64/lib</tt> and <tt>\$(BOOST_PATH)/bin</tt>.
-   
+
    All tests are based on googletest framework. The following environment variables specify
    where to find gtest include and library directories:
    - \p GTEST_ROOT - gtest root directory. <tt>\$(GTEST_ROOT)/include</tt> specifies full path to
index 13eddcb..6177243 100644 (file)
@@ -1138,29 +1138,29 @@ namespace cds { namespace intrusive {
 
         static value_type * gc_protect( marked_node_ptr p )
         {
-            return node_traits::to_value_ptr( p.ptr() );
+            return node_traits::to_value_ptr( p.ptr());
         }
 
         static void dispose_node( value_type * pVal )
         {
             assert( pVal != nullptr );
-            typename node_builder::node_disposer()( node_traits::to_node_ptr( pVal ) );
+            typename node_builder::node_disposer()( node_traits::to_node_ptr( pVal ));
             disposer()( pVal );
         }
 
         void help_remove( int nLevel, node_type* pPred, marked_node_ptr pCur )
         {
             if ( pCur->is_upper_level( nLevel )) {
-                marked_node_ptr p( pCur.ptr() );
+                marked_node_ptr p( pCur.ptr());
                 typename gc::Guard hp;
                 marked_node_ptr pSucc = hp.protect( pCur->next( nLevel ), gc_protect );
 
-                if ( pSucc.bits() && 
-                     pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
-                        memory_model::memory_order_acquire, atomics::memory_order_relaxed ) )
+                if ( pSucc.bits() &&
+                     pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()),
+                        memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
                 {
-                    if ( pCur->level_unlinked() ) {
-                        gc::retire( node_traits::to_value_ptr( pCur.ptr() ), dispose_node );
+                    if ( pCur->level_unlinked()) {
+                        gc::retire( node_traits::to_value_ptr( pCur.ptr()), dispose_node );
                         m_Stat.onEraseWhileFind();
                     }
                 }
@@ -1183,10 +1183,10 @@ namespace cds { namespace intrusive {
             int nCmp = 1;
 
             for ( int nLevel = static_cast<int>( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) {
-                pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred ) );
+                pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred ));
                 while ( true ) {
                     pCur = pos.guards.protect( nLevel * 2 + 1, pPred->next( nLevel ), gc_protect );
-                    if ( pCur.bits() ) {
+                    if ( pCur.bits()) {
                         // pCur.bits() means that pPred is logically deleted
                         goto retry;
                     }
@@ -1199,17 +1199,17 @@ namespace cds { namespace intrusive {
                     // pSucc contains deletion mark for pCur
                     pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
 
-                    if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
+                    if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
                         goto retry;
 
-                    if ( pSucc.bits() ) {
+                    if ( pSucc.bits()) {
                         // pCur is marked, i.e. logically deleted
                         // try to help deleting pCur
                         help_remove( nLevel, pPred, pCur );
                         goto retry;
                     }
                     else {
-                        nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
+                        nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
                         if ( nCmp < 0 ) {
                             pPred = pCur.ptr();
                             pos.guards.copy( nLevel * 2, nLevel * 2 + 1 );   // pPrev guard := cur guard
@@ -1248,22 +1248,22 @@ namespace cds { namespace intrusive {
             pPred = m_Head.head();
 
             for ( int nLevel = static_cast<int>( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) {
-                pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred ) );
+                pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred ));
                 pCur = pos.guards.protect( nLevel * 2 + 1, pPred->next( nLevel ), gc_protect );
 
                 // pCur.bits() means that pPred is logically deleted
                 // head cannot be deleted
                 assert( pCur.bits() == 0 );
 
-                if ( pCur.ptr() ) {
+                if ( pCur.ptr()) {
 
                     // pSucc contains deletion mark for pCur
                     pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
 
-                    if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
+                    if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
                         goto retry;
 
-                    if ( pSucc.bits() ) {
+                    if ( pSucc.bits()) {
                         // pCur is marked, i.e. logically deleted.
                         // try to help deleting pCur
                         help_remove( nLevel, pPred, pCur );
@@ -1276,7 +1276,7 @@ namespace cds { namespace intrusive {
                 pos.pSucc[nLevel] = pCur.ptr();
             }
 
-            return ( pos.pCur = pCur.ptr() ) != nullptr;
+            return ( pos.pCur = pCur.ptr()) != nullptr;
         }
 
         bool find_max_position( position& pos )
@@ -1293,10 +1293,10 @@ namespace cds { namespace intrusive {
             pPred = m_Head.head();
 
             for ( int nLevel = static_cast<int>( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) {
-                pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred ) );
+                pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred ));
                 while ( true ) {
                     pCur = pos.guards.protect( nLevel * 2 + 1, pPred->next( nLevel ), gc_protect );
-                    if ( pCur.bits() ) {
+                    if ( pCur.bits()) {
                         // pCur.bits() means that pPred is logically deleted
                         goto retry;
                     }
@@ -1309,21 +1309,21 @@ namespace cds { namespace intrusive {
                     // pSucc contains deletion mark for pCur
                     pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
 
-                    if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
+                    if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
                         goto retry;
 
-                    if ( pSucc.bits() ) {
+                    if ( pSucc.bits()) {
                         // pCur is marked, i.e. logically deleted.
                         // try to help deleting pCur
                         help_remove( nLevel, pPred, pCur );
                         goto retry;
                     }
                     else {
-                        if ( !pSucc.ptr() )
+                        if ( !pSucc.ptr())
                             break;
 
                         pPred = pCur.ptr();
-                        pos.guards.copy( nLevel * 2, nLevel * 2 + 1 ); 
+                        pos.guards.copy( nLevel * 2, nLevel * 2 + 1 );
                     }
                 }
 
@@ -1332,7 +1332,7 @@ namespace cds { namespace intrusive {
                 pos.pSucc[nLevel] = pCur.ptr();
             }
 
-            return ( pos.pCur = pCur.ptr() ) != nullptr;
+            return ( pos.pCur = pCur.ptr()) != nullptr;
         }
 
         bool renew_insert_position( value_type& val, node_type * pNode, position& pos )
@@ -1351,10 +1351,10 @@ namespace cds { namespace intrusive {
             int nCmp = 1;
 
             for ( int nLevel = static_cast<int>( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) {
-                pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred ) );
+                pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred ));
                 while ( true ) {
                     pCur = pos.guards.protect( nLevel * 2 + 1, pPred->next( nLevel ), gc_protect );
-                    if ( pCur.bits() ) {
+                    if ( pCur.bits()) {
                         // pCur.bits() means that pPred is logically deleted
                         goto retry;
                     }
@@ -1367,7 +1367,7 @@ namespace cds { namespace intrusive {
                     // pSucc contains deletion mark for pCur
                     pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
 
-                    if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
+                    if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
                         goto retry;
 
                     if ( pSucc.bits()) {
@@ -1426,7 +1426,7 @@ namespace cds { namespace intrusive {
                     // Set pNode->next
                     // pNode->next can have "logical deleted" flag if another thread is removing pNode right now
                     if ( !pNode->next( nLevel ).compare_exchange_strong( p, pSucc,
-                        memory_model::memory_order_acq_rel, atomics::memory_order_acquire ) )
+                        memory_model::memory_order_acq_rel, atomics::memory_order_acquire ))
                     {
                         // pNode has been marked as removed while we are inserting it
                         // Stop inserting
@@ -1485,7 +1485,7 @@ namespace cds { namespace intrusive {
                 if ( pSucc.bits() == 0 ) {
                     bkoff.reset();
                     while ( !( pDel->next( nLevel ).compare_exchange_weak( pSucc, pSucc | 1,
-                        memory_model::memory_order_release, atomics::memory_order_acquire ) 
+                        memory_model::memory_order_release, atomics::memory_order_acquire )
                         || pSucc.bits() != 0 ))
                     {
                         bkoff();
@@ -1498,7 +1498,7 @@ namespace cds { namespace intrusive {
             while ( true ) {
                 if ( pDel->next( 0 ).compare_exchange_strong( p, p | 1, memory_model::memory_order_release, atomics::memory_order_acquire ))
                 {
-                    f( *node_traits::to_value_ptr( pDel ) );
+                    f( *node_traits::to_value_ptr( pDel ));
 
                     // Physical deletion
                     // try fast erase
@@ -1507,15 +1507,15 @@ namespace cds { namespace intrusive {
                     for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
 
                         pSucc = pDel->next( nLevel ).load( memory_model::memory_order_acquire );
-                        if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
-                            memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ) )
+                        if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()),
+                            memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ))
                         {
                             pDel->level_unlinked();
                         }
                         else {
                             // Make slow erase
 #       ifdef CDS_DEBUG
-                            if ( find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false ) )
+                            if ( find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false ))
                                 assert( pDel != pos.pCur );
 #       else
                             find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false );
@@ -1565,7 +1565,7 @@ namespace cds { namespace intrusive {
                 pCur = guards.protect( 1, pPred->next( nLevel ), gc_protect );
 
                 while ( pCur != pNull ) {
-                    if ( pCur.bits() ) {
+                    if ( pCur.bits()) {
                         // pPred is being removed
                         if ( ++attempt < 4 ) {
                             bkoff();
@@ -1575,7 +1575,7 @@ namespace cds { namespace intrusive {
                         return find_fastpath_abort;
                     }
 
-                    if ( pCur.ptr() ) {
+                    if ( pCur.ptr()) {
                         int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
                         if ( nCmp < 0 ) {
                             guards.copy( 0, 1 );
@@ -1584,7 +1584,7 @@ namespace cds { namespace intrusive {
                         }
                         else if ( nCmp == 0 ) {
                             // found
-                            f( *node_traits::to_value_ptr( pCur.ptr() ), val );
+                            f( *node_traits::to_value_ptr( pCur.ptr()), val );
                             return find_fastpath_found;
                         }
                         else {
@@ -1602,7 +1602,7 @@ namespace cds { namespace intrusive {
         bool find_slowpath( Q& val, Compare cmp, Func f )
         {
             position pos;
-            if ( find_position( val, pos, cmp, true ) ) {
+            if ( find_position( val, pos, cmp, true )) {
                 assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 );
 
                 f( *node_traits::to_value_ptr( pos.pCur ), val );
@@ -1615,7 +1615,7 @@ namespace cds { namespace intrusive {
         template <typename Q, typename Compare, typename Func>
         bool find_with_( Q& val, Compare cmp, Func f )
         {
-            switch ( find_fastpath( val, cmp, f ) ) {
+            switch ( find_fastpath( val, cmp, f )) {
             case find_fastpath_found:
                 m_Stat.onFindFastSuccess();
                 return true;
@@ -1626,7 +1626,7 @@ namespace cds { namespace intrusive {
                 break;
             }
 
-            if ( find_slowpath( val, cmp, f ) ) {
+            if ( find_slowpath( val, cmp, f )) {
                 m_Stat.onFindSlowSuccess();
                 return true;
             }
@@ -1639,7 +1639,7 @@ namespace cds { namespace intrusive {
         guarded_ptr get_with_( Q const& val, Compare cmp )
         {
             guarded_ptr gp;
-            if ( find_with_( val, cmp, [&gp]( value_type& found, Q const& ) { gp.reset( &found ); } ) )
+            if ( find_with_( val, cmp, [&gp]( value_type& found, Q const& ) { gp.reset( &found ); } ))
                 return gp;
             return guarded_ptr();
         }
@@ -1649,18 +1649,18 @@ namespace cds { namespace intrusive {
         {
             position pos;
 
-            if ( !find_position( val, pos, cmp, false ) ) {
+            if ( !find_position( val, pos, cmp, false )) {
                 m_Stat.onEraseFailed();
                 return false;
             }
 
             node_type * pDel = pos.pCur;
             typename gc::Guard gDel;
-            gDel.assign( node_traits::to_value_ptr( pDel ) );
+            gDel.assign( node_traits::to_value_ptr( pDel ));
             assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 );
 
             unsigned int nHeight = pDel->height();
-            if ( try_remove_at( pDel, pos, f ) ) {
+            if ( try_remove_at( pDel, pos, f )) {
                 --m_ItemCounter;
                 m_Stat.onRemoveNode( nHeight );
                 m_Stat.onEraseSuccess();
@@ -1678,17 +1678,17 @@ namespace cds { namespace intrusive {
 
             guarded_ptr gp;
             for (;;) {
-                if ( !find_position( val, pos, cmp, false ) ) {
+                if ( !find_position( val, pos, cmp, false )) {
                     m_Stat.onExtractFailed();
                     return guarded_ptr();
                 }
 
                 node_type * pDel = pos.pCur;
-                gp.reset( node_traits::to_value_ptr( pDel ) );
+                gp.reset( node_traits::to_value_ptr( pDel ));
                 assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 );
 
                 unsigned int nHeight = pDel->height();
-                if ( try_remove_at( pDel, pos, []( value_type const& ) {} ) ) {
+                if ( try_remove_at( pDel, pos, []( value_type const& ) {} )) {
                     --m_ItemCounter;
                     m_Stat.onRemoveNode( nHeight );
                     m_Stat.onExtractSuccess();
@@ -1704,7 +1704,7 @@ namespace cds { namespace intrusive {
 
             guarded_ptr gp;
             for ( ;;) {
-                if ( !find_min_position( pos ) ) {
+                if ( !find_min_position( pos )) {
                     // The list is empty
                     m_Stat.onExtractMinFailed();
                     return guarded_ptr();
@@ -1713,9 +1713,9 @@ namespace cds { namespace intrusive {
                 node_type * pDel = pos.pCur;
 
                 unsigned int nHeight = pDel->height();
-                gp.reset( node_traits::to_value_ptr( pDel ) );
+                gp.reset( node_traits::to_value_ptr( pDel ));
 
-                if ( try_remove_at( pDel, pos, []( value_type const& ) {} ) ) {
+                if ( try_remove_at( pDel, pos, []( value_type const& ) {} )) {
                     --m_ItemCounter;
                     m_Stat.onRemoveNode( nHeight );
                     m_Stat.onExtractMinSuccess();
@@ -1732,7 +1732,7 @@ namespace cds { namespace intrusive {
 
             guarded_ptr gp;
             for ( ;;) {
-                if ( !find_max_position( pos ) ) {
+                if ( !find_max_position( pos )) {
                     // The list is empty
                     m_Stat.onExtractMaxFailed();
                     return guarded_ptr();
@@ -1741,9 +1741,9 @@ namespace cds { namespace intrusive {
                 node_type * pDel = pos.pCur;
 
                 unsigned int nHeight = pDel->height();
-                gp.reset( node_traits::to_value_ptr( pDel ) );
+                gp.reset( node_traits::to_value_ptr( pDel ));
 
-                if ( try_remove_at( pDel, pos, []( value_type const& ) {} ) ) {
+                if ( try_remove_at( pDel, pos, []( value_type const& ) {} )) {
                     --m_ItemCounter;
                     m_Stat.onRemoveNode( nHeight );
                     m_Stat.onExtractMaxSuccess();
index afd0d3f..1d3a977 100644 (file)
@@ -1408,17 +1408,17 @@ namespace cds { namespace intrusive {
 
         void help_remove( int nLevel, node_type* pPred, marked_node_ptr pCur, marked_node_ptr pSucc, position& pos )
         {
-            marked_node_ptr p( pCur.ptr() );
+            marked_node_ptr p( pCur.ptr());
 
             if ( pCur->is_upper_level( nLevel )
                 && pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()),
                     memory_model::memory_order_release, atomics::memory_order_relaxed ))
             {
                 if ( pCur->level_unlinked()) {
-                    if ( !is_extracted( pSucc ) ) {
+                    if ( !is_extracted( pSucc )) {
                         // We cannot free the node at this moment because RCU is locked
                         // Link deleted nodes to a chain to free later
-                        pos.dispose( pCur.ptr() );
+                        pos.dispose( pCur.ptr());
                         m_Stat.onEraseWhileFind();
                     }
                     else
@@ -1430,7 +1430,7 @@ namespace cds { namespace intrusive {
         template <typename Q, typename Compare >
         bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound )
         {
-            assert( gc::is_locked() );
+            assert( gc::is_locked());
 
             node_type * pPred;
             marked_node_ptr pSucc;
@@ -1444,7 +1444,7 @@ namespace cds { namespace intrusive {
 
                 while ( true ) {
                     pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
-                    if ( pCur.bits() ) {
+                    if ( pCur.bits()) {
                         // pCur.bits() means that pPred is logically deleted
                         goto retry;
                     }
@@ -1457,16 +1457,16 @@ namespace cds { namespace intrusive {
                     // pSucc contains deletion mark for pCur
                     pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
 
-                    if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
+                    if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
                         goto retry;
 
-                    if ( pSucc.bits() ) {
+                    if ( pSucc.bits()) {
                         // pCur is marked, i.e. logically deleted.
                         help_remove( nLevel, pPred, pCur, pSucc, pos );
                         goto retry;
                     }
                     else {
-                        nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
+                        nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
                         if ( nCmp < 0 )
                             pPred = pCur.ptr();
                         else if ( nCmp == 0 && bStopIfFound )
@@ -1491,7 +1491,7 @@ namespace cds { namespace intrusive {
 
         bool find_min_position( position& pos )
         {
-            assert( gc::is_locked() );
+            assert( gc::is_locked());
 
             node_type * pPred;
             marked_node_ptr pSucc;
@@ -1507,15 +1507,15 @@ namespace cds { namespace intrusive {
                 // head cannot be deleted
                 assert( pCur.bits() == 0 );
 
-                if ( pCur.ptr() ) {
+                if ( pCur.ptr()) {
 
                     // pSucc contains deletion mark for pCur
                     pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
 
-                    if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
+                    if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
                         goto retry;
 
-                    if ( pSucc.bits() ) {
+                    if ( pSucc.bits()) {
                         // pCur is marked, i.e. logically deleted.
                         help_remove( nLevel, pPred, pCur, pSucc, pos );
                         goto retry;
@@ -1526,12 +1526,12 @@ namespace cds { namespace intrusive {
                 pos.pPrev[nLevel] = pPred;
                 pos.pSucc[nLevel] = pCur.ptr();
             }
-            return ( pos.pCur = pCur.ptr() ) != nullptr;
+            return ( pos.pCur = pCur.ptr()) != nullptr;
         }
 
         bool find_max_position( position& pos )
         {
-            assert( gc::is_locked() );
+            assert( gc::is_locked());
 
             node_type * pPred;
             marked_node_ptr pSucc;
@@ -1544,7 +1544,7 @@ namespace cds { namespace intrusive {
 
                 while ( true ) {
                     pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
-                    if ( pCur.bits() ) {
+                    if ( pCur.bits()) {
                         // pCur.bits() means that pPred is logically deleted
                         goto retry;
                     }
@@ -1557,16 +1557,16 @@ namespace cds { namespace intrusive {
                     // pSucc contains deletion mark for pCur
                     pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
 
-                    if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
+                    if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
                         goto retry;
 
-                    if ( pSucc.bits() ) {
+                    if ( pSucc.bits()) {
                         // pCur is marked, i.e. logically deleted.
                         help_remove( nLevel, pPred, pCur, pSucc, pos );
                         goto retry;
                     }
                     else {
-                        if ( !pSucc.ptr() )
+                        if ( !pSucc.ptr())
                             break;
 
                         pPred = pCur.ptr();
@@ -1578,12 +1578,12 @@ namespace cds { namespace intrusive {
                 pos.pSucc[nLevel] = pCur.ptr();
             }
 
-            return ( pos.pCur = pCur.ptr() ) != nullptr;
+            return ( pos.pCur = pCur.ptr()) != nullptr;
         }
 
         bool renew_insert_position( value_type& val, node_type * pNode, position& pos )
         {
-            assert( gc::is_locked() );
+            assert( gc::is_locked());
 
             node_type * pPred;
             marked_node_ptr pSucc;
@@ -1598,7 +1598,7 @@ namespace cds { namespace intrusive {
 
                 while ( true ) {
                     pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
-                    if ( pCur.bits() ) {
+                    if ( pCur.bits()) {
                         // pCur.bits() means that pPred is logically deleted
                         goto retry;
                     }
@@ -1611,10 +1611,10 @@ namespace cds { namespace intrusive {
                     // pSucc contains deletion mark for pCur
                     pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
 
-                    if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
+                    if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
                         goto retry;
 
-                    if ( pSucc.bits() ) {
+                    if ( pSucc.bits()) {
                         // pCur is marked, i.e. logically deleted.
                         if ( pCur.ptr() == pNode ) {
                             // Node is removing while we are inserting it
@@ -1626,7 +1626,7 @@ namespace cds { namespace intrusive {
                         goto retry;
                     }
                     else {
-                        nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
+                        nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
                         if ( nCmp < 0 )
                             pPred = pCur.ptr();
                         else
@@ -1645,7 +1645,7 @@ namespace cds { namespace intrusive {
         template <typename Func>
         bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f )
         {
-            assert( gc::is_locked() );
+            assert( gc::is_locked());
 
             unsigned int const nHeight = pNode->height();
             pNode->clear_tower();
@@ -1669,7 +1669,7 @@ namespace cds { namespace intrusive {
                     // Set pNode->next
                     // pNode->next must be null but can have a "logical deleted" flag if another thread is removing pNode right now
                     if ( !pNode->next( nLevel ).compare_exchange_strong( p, pSucc,
-                        memory_model::memory_order_acq_rel, atomics::memory_order_acquire ) )
+                        memory_model::memory_order_acq_rel, atomics::memory_order_acquire ))
                     {
                         // pNode has been marked as removed while we are inserting it
                         // Stop inserting
@@ -1701,7 +1701,7 @@ namespace cds { namespace intrusive {
                     if ( !renew_insert_position( val, pNode, pos )) {
                         // The node has been deleted while we are inserting it
                         // Update current height for concurent removing
-                        CDS_VERIFY_FALSE( pNode->level_unlinked( nHeight - nLevel ) );
+                        CDS_VERIFY_FALSE( pNode->level_unlinked( nHeight - nLevel ));
 
                         m_Stat.onRemoveWhileInsert();
 
@@ -1718,7 +1718,7 @@ namespace cds { namespace intrusive {
         bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract )
         {
             assert( pDel != nullptr );
-            assert( gc::is_locked() );
+            assert( gc::is_locked());
 
             marked_node_ptr pSucc;
             back_off bkoff;
@@ -1743,11 +1743,11 @@ namespace cds { namespace intrusive {
                 }
             }
 
-            marked_node_ptr p( pDel->next( 0 ).load( memory_model::memory_order_relaxed ).ptr() );
+            marked_node_ptr p( pDel->next( 0 ).load( memory_model::memory_order_relaxed ).ptr());
             while ( true ) {
                 if ( pDel->next( 0 ).compare_exchange_strong( p, p | nMask, memory_model::memory_order_release, atomics::memory_order_acquire ))
                 {
-                    f( *node_traits::to_value_ptr( pDel ) );
+                    f( *node_traits::to_value_ptr( pDel ));
 
                     // physical deletion
                     // try fast erase
@@ -1755,15 +1755,15 @@ namespace cds { namespace intrusive {
                     for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
 
                         pSucc = pDel->next( nLevel ).load( memory_model::memory_order_acquire );
-                        if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
-                            memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ) )
+                        if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()),
+                            memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ))
                         {
                             pDel->level_unlinked();
                         }
                         else {
                             // Make slow erase
 #       ifdef CDS_DEBUG
-                            if ( find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false ) )
+                            if ( find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false ))
                                 assert( pDel != pos.pCur );
 #       else
                             find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false );
@@ -1788,7 +1788,7 @@ namespace cds { namespace intrusive {
                         m_Stat.onFastExtract();
                     return true;
                 }
-                else if ( p.bits() ) {
+                else if ( p.bits()) {
                     // Another thread is deleting pDel right now
                     m_Stat.onEraseContention();
                     return false;
@@ -1821,7 +1821,7 @@ namespace cds { namespace intrusive {
                 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
 
                 while ( pCur != pNull ) {
-                    if ( pCur.bits() ) {
+                    if ( pCur.bits()) {
                         // pPred is being removed
                         if ( ++attempt < 4 ) {
                             bkoff();
@@ -1831,15 +1831,15 @@ namespace cds { namespace intrusive {
                         return find_fastpath_abort;
                     }
 
-                    if ( pCur.ptr() ) {
-                        int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
+                    if ( pCur.ptr()) {
+                        int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
                         if ( nCmp < 0 ) {
                             pPred = pCur.ptr();
                             pCur = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
                         }
                         else if ( nCmp == 0 ) {
                             // found
-                            f( *node_traits::to_value_ptr( pCur.ptr() ), val );
+                            f( *node_traits::to_value_ptr( pCur.ptr()), val );
                             return find_fastpath_found;
                         }
                         else // pCur > val - go down
@@ -1854,7 +1854,7 @@ namespace cds { namespace intrusive {
         template <typename Q, typename Compare, typename Func>
         bool find_slowpath( Q& val, Compare cmp, Func f, position& pos )
         {
-            if ( find_position( val, pos, cmp, true ) ) {
+            if ( find_position( val, pos, cmp, true )) {
                 assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 );
 
                 f( *node_traits::to_value_ptr( pos.pCur ), val );
@@ -1879,7 +1879,7 @@ namespace cds { namespace intrusive {
             {
                 rcu_lock l;
 
-                switch ( find_fastpath( val, cmp, f ) ) {
+                switch ( find_fastpath( val, cmp, f )) {
                 case find_fastpath_found:
                     m_Stat.onFindFastSuccess();
                     return true;
@@ -1890,7 +1890,7 @@ namespace cds { namespace intrusive {
                     break;
                 }
 
-                if ( find_slowpath( val, cmp, f, pos ) ) {
+                if ( find_slowpath( val, cmp, f, pos )) {
                     m_Stat.onFindSlowSuccess();
                     bRet = true;
                 }
@@ -1913,7 +1913,7 @@ namespace cds { namespace intrusive {
             {
                 rcu_lock rcuLock;
 
-                if ( !find_position( val, pos, cmp, false ) ) {
+                if ( !find_position( val, pos, cmp, false )) {
                     m_Stat.onEraseFailed();
                     bRet = false;
                 }
@@ -1922,7 +1922,7 @@ namespace cds { namespace intrusive {
                     assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 );
 
                     unsigned int nHeight = pDel->height();
-                    if ( try_remove_at( pDel, pos, f, false ) ) {
+                    if ( try_remove_at( pDel, pos, f, false )) {
                         --m_ItemCounter;
                         m_Stat.onRemoveNode( nHeight );
                         m_Stat.onEraseSuccess();
@@ -1942,11 +1942,11 @@ namespace cds { namespace intrusive {
         value_type * do_extract_key( Q const& key, Compare cmp, position& pos )
         {
             // RCU should be locked!!!
-            assert( gc::is_locked() );
+            assert( gc::is_locked());
 
             node_type * pDel;
 
-            if ( !find_position( key, pos, cmp, false ) ) {
+            if ( !find_position( key, pos, cmp, false )) {
                 m_Stat.onExtractFailed();
                 pDel = nullptr;
             }
@@ -1956,7 +1956,7 @@ namespace cds { namespace intrusive {
 
                 unsigned int const nHeight = pDel->height();
 
-                if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
+                if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) {
                     --m_ItemCounter;
                     m_Stat.onRemoveNode( nHeight );
                     m_Stat.onExtractSuccess();
@@ -2001,7 +2001,7 @@ namespace cds { namespace intrusive {
 
         value_type * do_extract_min()
         {
-            assert( !gc::is_locked() );
+            assert( !gc::is_locked());
 
             position pos;
             node_type * pDel;
@@ -2009,7 +2009,7 @@ namespace cds { namespace intrusive {
             {
                 rcu_lock l;
 
-                if ( !find_min_position( pos ) ) {
+                if ( !find_min_position( pos )) {
                     m_Stat.onExtractMinFailed();
                     pDel = nullptr;
                 }
@@ -2017,7 +2017,7 @@ namespace cds { namespace intrusive {
                     pDel = pos.pCur;
                     unsigned int const nHeight = pDel->height();
 
-                    if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
+                    if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) {
                         --m_ItemCounter;
                         m_Stat.onRemoveNode( nHeight );
                         m_Stat.onExtractMinSuccess();
@@ -2034,7 +2034,7 @@ namespace cds { namespace intrusive {
 
         value_type * do_extract_max()
         {
-            assert( !gc::is_locked() );
+            assert( !gc::is_locked());
 
             position pos;
             node_type * pDel;
@@ -2042,7 +2042,7 @@ namespace cds { namespace intrusive {
             {
                 rcu_lock l;
 
-                if ( !find_max_position( pos ) ) {
+                if ( !find_max_position( pos )) {
                     m_Stat.onExtractMaxFailed();
                     pDel = nullptr;
                 }
@@ -2050,7 +2050,7 @@ namespace cds { namespace intrusive {
                     pDel = pos.pCur;
                     unsigned int const nHeight = pDel->height();
 
-                    if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
+                    if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) {
                         --m_ItemCounter;
                         m_Stat.onRemoveNode( nHeight );
                         m_Stat.onExtractMaxSuccess();
@@ -2077,7 +2077,7 @@ namespace cds { namespace intrusive {
             node_type* p = m_Head.head()->next( 0 ).load( atomics::memory_order_relaxed ).ptr();
             while ( p ) {
                 node_type* pNext = p->next( 0 ).load( atomics::memory_order_relaxed ).ptr();
-                dispose_node( node_traits::to_value_ptr( p ) );
+                dispose_node( node_traits::to_value_ptr( p ));
                 p = pNext;
             }
         }
index 4bb8369..0241cd8 100644 (file)
@@ -149,7 +149,7 @@ namespace cds {
             {
                 backoff_strategy backoff;
                 while ( nTryCount-- ) {
-                    if ( try_lock() )
+                    if ( try_lock())
                         return true;
                     backoff();
                 }
@@ -246,7 +246,7 @@ namespace cds {
                 backoff_strategy bkoff;
 
                 while ( nTryCount-- ) {
-                    if ( try_acquire() )
+                    if ( try_acquire())
                         return true;
                     bkoff();
                 }
index 7081cc0..b4ab05d 100644 (file)
@@ -330,8 +330,8 @@ namespace cds { namespace gc {
                 // Several threads may work concurrently so we use atomic technique only.
                 {
                     cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_relaxed);
-                    if ( curOwner == nullThreadId || !cds::OS::is_thread_alive( curOwner ) ) {
-                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
+                    if ( curOwner == nullThreadId || !cds::OS::is_thread_alive( curOwner )) {
+                        if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ))
                             continue;
                     }
                     else
index d787815..fe9e925 100644 (file)
@@ -192,7 +192,7 @@ namespace cds_test {
 
             // Create threads
             std::vector< std::thread > threads;
-            threads.reserve( m_workers.size() );
+            threads.reserve( m_workers.size());
             for ( auto w : m_workers )
                 threads.emplace_back( &thread::run, w );
 
index c97181b..bd9d985 100644 (file)
@@ -46,7 +46,7 @@ namespace map {
 
     size_t Map_DelOdd::s_nFeldmanMap_HeadBits = 10;
     size_t Map_DelOdd::s_nFeldmanMap_ArrayBits = 4;
-    
+
     size_t Map_DelOdd::s_nLoadFactor = 1;
     std::vector<size_t> Map_DelOdd::m_arrElements;
 
@@ -95,7 +95,7 @@ namespace map {
         m_arrElements.resize( s_nMapSize );
         for ( size_t i = 0; i < s_nMapSize; ++i )
             m_arrElements[i] = i;;
-        shuffle( m_arrElements.begin(), m_arrElements.end() );
+        shuffle( m_arrElements.begin(), m_arrElements.end());
     }
 
     void Map_DelOdd::TearDownTestCase()
index 4ff183d..f4db078 100644 (file)
@@ -140,13 +140,13 @@ namespace map {
         template <typename Pred>
         static void prepare_array( std::vector<size_t>& arr, Pred pred )
         {
-            arr.reserve( m_arrElements.size() );
+            arr.reserve( m_arrElements.size());
             for ( auto el : m_arrElements ) {
-                if ( pred( el ) )
+                if ( pred( el ))
                     arr.push_back( el );
             }
-            arr.resize( arr.size() );
-            shuffle( arr.begin(), arr.end() );
+            arr.resize( arr.size());
+            shuffle( arr.begin(), arr.end());
         }
 
     protected:
@@ -190,7 +190,7 @@ namespace map {
             {
                 prepare_array( m_arr, []( size_t ) -> bool { return true; } );
                 for ( size_t i = 0; i < m_arr.size(); ++i ) {
-                    if ( m_Map.insert( key_type( m_arr[i], id() ) ) )
+                    if ( m_Map.insert( key_type( m_arr[i], id())))
                         ++m_nInsertInitSuccess;
                     else
                         ++m_nInsertInitFailed;
@@ -237,7 +237,7 @@ namespace map {
                         // insert pass
                         for ( auto el : m_arr ) {
                             if ( el & 1 ) {
-                                if ( rMap.insert( key_type( el, id() )))
+                                if ( rMap.insert( key_type( el, id())))
                                     ++m_nInsertSuccess;
                                 else
                                     ++m_nInsertFailed;
@@ -250,7 +250,7 @@ namespace map {
                             if ( el & 1 ) {
                                 bool success;
                                 bool inserted;
-                                std::tie( success, inserted ) = rMap.update( key_type( el, id() ), f );
+                                std::tie( success, inserted ) = rMap.update( key_type( el, id()), f );
                                 if ( success && inserted )
                                     ++m_nInsertSuccess;
                                 else
@@ -356,7 +356,7 @@ namespace map {
                     else {
                         for ( size_t k = 0; k < nInsThreadCount; ++k ) {
                             for ( auto el: m_arr ) {
-                                if ( rMap.erase( key_type( el, k ) ) )
+                                if ( rMap.erase( key_type( el, k )))
                                     ++m_nDeleteSuccess;
                                 else
                                     ++m_nDeleteFailed;
@@ -431,7 +431,7 @@ namespace map {
                     else {
                         for ( size_t k = 0; k < nInsThreadCount; ++k ) {
                             for ( auto el: m_arr ) {
-                                gp = rMap.extract( key_type( el, k ) );
+                                gp = rMap.extract( key_type( el, k ));
                                 if ( gp )
                                     ++m_nDeleteSuccess;
                                 else
@@ -497,14 +497,14 @@ namespace map {
                             for ( auto el: m_arr ) {
                                 if ( Map::c_bExtractLockExternal ) {
                                     typename Map::rcu_lock l;
-                                    xp = rMap.extract( key_type( el, k ) );
+                                    xp = rMap.extract( key_type( el, k ));
                                     if ( xp )
                                         ++m_nDeleteSuccess;
                                     else
                                         ++m_nDeleteFailed;
                                 }
                                 else {
-                                    xp = rMap.extract( key_type( el, k ) );
+                                    xp = rMap.extract( key_type( el, k ));
                                     if ( xp )
                                         ++m_nDeleteSuccess;
                                     else
@@ -519,14 +519,14 @@ namespace map {
                             for ( size_t k = 0; k < nInsThreadCount; ++k ) {
                                 if ( Map::c_bExtractLockExternal ) {
                                     typename Map::rcu_lock l;
-                                    xp = rMap.extract( key_type( el, k ) );
+                                    xp = rMap.extract( key_type( el, k ));
                                     if ( xp )
                                         ++m_nDeleteSuccess;
                                     else
                                         ++m_nDeleteFailed;
                                 }
                                 else {
-                                    xp = rMap.extract( key_type( el, k ) );
+                                    xp = rMap.extract( key_type( el, k ));
                                     if ( xp )
                                         ++m_nDeleteSuccess;
                                     else
@@ -642,7 +642,7 @@ namespace map {
 
             for ( size_t i = 0; i < pool.size(); ++i ) {
                 cds_test::thread& thr = pool.get( i );
-                switch ( thr.type() ) {
+                switch ( thr.type()) {
                 case inserter_thread:
                     {
                         insert_thread& inserter = static_cast<insert_thread&>( thr );
index 83809c6..45302cd 100644 (file)
@@ -142,13 +142,13 @@ namespace set {
         template <typename Pred>
         static void prepare_array( std::vector<size_t>& arr, Pred pred )
         {
-            arr.reserve( m_arrData.size() );
+            arr.reserve( m_arrData.size());
             for ( auto el : m_arrData ) {
-                if ( pred( el ) )
+                if ( pred( el ))
                     arr.push_back( el );
             }
-            arr.resize( arr.size() );
-            shuffle( arr.begin(), arr.end() );
+            arr.resize( arr.size());
+            shuffle( arr.begin(), arr.end());
         }
 
     protected:
@@ -186,7 +186,7 @@ namespace set {
             {
                 prepare_array( m_arr, []( size_t ) -> bool { return true; } );
                 for ( size_t i = 0; i < m_arr.size(); ++i ) {
-                    if ( m_Set.insert( key_type( m_arr[i], id() ) ) )
+                    if ( m_Set.insert( key_type( m_arr[i], id())))
                         ++m_nInsertInitSuccess;
                     else
                         ++m_nInsertInitFailed;
@@ -231,7 +231,7 @@ namespace set {
                         // insert pass
                         for ( auto el : m_arr ) {
                             if ( el & 1 ) {
-                                if ( rSet.insert( key_type( el, id() ) ) )
+                                if ( rSet.insert( key_type( el, id())))
                                     ++m_nInsertSuccess;
                                 else
                                     ++m_nInsertFailed;
@@ -244,7 +244,7 @@ namespace set {
                             if ( el & 1 ) {
                                 bool success;
                                 bool inserted;
-                                std::tie( success, inserted ) = rSet.update( key_type( el, id() ), update_functor() );
+                                std::tie( success, inserted ) = rSet.update( key_type( el, id()), update_functor());
                                 if ( success && inserted )
                                     ++m_nInsertSuccess;
                                 else
@@ -395,7 +395,7 @@ namespace set {
                     if ( id() & 1 ) {
                         for ( auto el : m_arr ) {
                             for ( size_t k = 0; k < nInsThreadCount; ++k ) {
-                                if ( rSet.erase( key_type( el, k ) ) )
+                                if ( rSet.erase( key_type( el, k )))
                                     ++m_nDeleteSuccess;
                                 else
                                     ++m_nDeleteFailed;
@@ -405,7 +405,7 @@ namespace set {
                     else {
                         for ( size_t k = 0; k < nInsThreadCount; ++k ) {
                             for ( auto el : m_arr ) {
-                                if ( rSet.erase( key_type( el, k ) ) )
+                                if ( rSet.erase( key_type( el, k )))
                                     ++m_nDeleteSuccess;
                                 else
                                     ++m_nDeleteFailed;
@@ -468,7 +468,7 @@ namespace set {
                     if ( id() & 1 ) {
                         for ( auto el : m_arr ) {
                             for ( size_t k = 0; k < nInsThreadCount; ++k ) {
-                                gp = rSet.extract( key_type( el, k ) );
+                                gp = rSet.extract( key_type( el, k ));
                                 if ( gp )
                                     ++m_nExtractSuccess;
                                 else
@@ -480,7 +480,7 @@ namespace set {
                     else {
                         for ( size_t k = 0; k < nInsThreadCount; ++k ) {
                             for ( auto el : m_arr ) {
-                                gp = rSet.extract( key_type( el, k ) );
+                                gp = rSet.extract( key_type( el, k ));
                                 if ( gp )
                                     ++m_nExtractSuccess;
                                 else
@@ -545,14 +545,14 @@ namespace set {
                             for ( auto el : m_arr ) {
                                 if ( Set::c_bExtractLockExternal ) {
                                     typename Set::rcu_lock l;
-                                    xp = rSet.extract( key_type( el, k ) );
+                                    xp = rSet.extract( key_type( el, k ));
                                     if ( xp )
                                         ++m_nExtractSuccess;
                                     else
                                         ++m_nExtractFailed;
                                 }
                                 else {
-                                    xp = rSet.extract( key_type( el, k ) );
+                                    xp = rSet.extract( key_type( el, k ));
                                     if ( xp )
                                         ++m_nExtractSuccess;
                                     else
@@ -567,14 +567,14 @@ namespace set {
                             for ( size_t k = 0; k < nInsThreadCount; ++k ) {
                                 if ( Set::c_bExtractLockExternal ) {
                                     typename Set::rcu_lock l;
-                                    xp = rSet.extract( key_type( el, k ) );
+                                    xp = rSet.extract( key_type( el, k ));
                                     if ( xp )
                                         ++m_nExtractSuccess;
                                     else
                                         ++m_nExtractFailed;
                                 }
                                 else {
-                                    xp = rSet.extract( key_type( el, k ) );
+                                    xp = rSet.extract( key_type( el, k ));
                                     if ( xp )
                                         ++m_nExtractSuccess;
                                     else
@@ -630,7 +630,7 @@ namespace set {
                     for ( size_t key : arr ) {
                         if ( key & 1 ) {
                             for ( size_t k = 0; k < nInsThreadCount; ++k ) {
-                                if ( set.contains( key_thread( key, k ) ) )
+                                if ( set.contains( key_thread( key, k )))
                                     ++m_nFindOddSuccess;
                                 else
                                     ++m_nFindOddFailed;
@@ -639,7 +639,7 @@ namespace set {
                         else {
                             // even keys MUST be in the map
                             for ( size_t k = 0; k < nInsThreadCount; ++k ) {
-                                if ( set.contains( key_thread( key, k ) ) )
+                                if ( set.contains( key_thread( key, k )))
                                     ++m_nFindEvenSuccess;
                                 else
                                     ++m_nFindEvenFailed;
index 983d148..41a2dd4 100644 (file)
@@ -46,5 +46,5 @@ namespace set {
     }
 
     CDSSTRESS_FeldmanHashSet_fixed( Set_DelOdd, run_feldman, key_thread, size_t )
+
 } // namespace set
index f4b2e87..4e67a4e 100644 (file)
@@ -46,46 +46,12 @@ sub processFile( $ )
                #binmode $fh    ;\r
                my $str = '';\r
                while (<$fh>) {\r
-            if ( /^\/\/\$\$CDS-header\$\$/ ) {\r
-                $str .= \r
-"/*\r
-    This file is a part of libcds - Concurrent Data Structures library\r
-\r
-    (C) Copyright Maxim Khizhinsky (libcds.dev\@gmail.com) 2006-$year\r
-\r
-    Source code repo: http://github.com/khizmax/libcds/\r
-    Download: http://sourceforge.net/projects/libcds/files/\r
-    \r
-    Redistribution and use in source and binary forms, with or without\r
-    modification, are permitted provided that the following conditions are met:\r
-\r
-    * Redistributions of source code must retain the above copyright notice, this\r
-      list of conditions and the following disclaimer.\r
-\r
-    * Redistributions in binary form must reproduce the above copyright notice,\r
-      this list of conditions and the following disclaimer in the documentation\r
-      and/or other materials provided with the distribution.\r
-\r
-    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\r
-    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\r
-    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\r
-    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\r
-    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\r
-    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\r
-    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\r
-    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\r
-    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.     \r
-*/\n" ;\r
-            }\r
-            else {\r
-                $nTabsFound += $_ =~ s/\t/    /g;\r
-                $_ =~ s/\s+$//;\r
-                $_ =~ s/\s+;$/;/;\r
-                $_ =~ s/\)\s+\)/\)\)/g;\r
-                $str .= $_      ;\r
-                $str .= "\n"    ;\r
-            }\r
+            $nTabsFound += $_ =~ s/\t/    /g;\r
+            $_ =~ s/\s+$//;\r
+            $_ =~ s/\s+;$/;/;\r
+            $_ =~ s/\)\s+\)/\)\)/g;\r
+            $str .= $_      ;\r
+            $str .= "\n"    ;\r
                }\r
                close $fh;\r
                \r