Fixed -Wshadow warnings; restructured rebalance/rotation helpers, added node-ordering assertions and TSan happens-before annotations
[libcds.git] / cds / container / impl / bronson_avltree_map_rcu.h
index a7d682fefc6b8d87b0ee9ded2725831806dee58c..458cbdb77009d6b648ba8eceb2e39919806d7d80 100644 (file)
@@ -381,9 +381,9 @@ namespace cds { namespace container {
             return do_remove(
                 key,
                 key_comparator(),
-                [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
+                [&f]( key_type const& k, mapped_type pVal, rcu_disposer& disp ) -> bool {
                     assert( pVal );
-                    f( key, *pVal );
+                    f( k, *pVal );
                     disp.dispose_value(pVal);
                     return true;
                 }
@@ -404,9 +404,9 @@ namespace cds { namespace container {
             return do_remove(
                 key,
                 cds::opt::details::make_comparator_from_less<Less>(),
-                [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
+                [&f]( key_type const& k, mapped_type pVal, rcu_disposer& disp ) -> bool {
                     assert( pVal );
-                    f( key, *pVal );
+                    f( k, *pVal );
                     disp.dispose_value(pVal);
                     return true;
                 }
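The two hunks above rename the lambda parameter from key to k so that it no longer shadows the key parameter of the enclosing member function, which is exactly what GCC/Clang's -Wshadow reports. A minimal standalone illustration of the warning and the fix (hypothetical function, not taken from this file):

    void erase_example( int key )
    {
        auto bad  = []( int key ) { return key > 0; };   // -Wshadow: 'key' shadows the outer parameter
        auto good = []( int k )   { return k > 0; };     // renamed parameter: no shadowing, same behavior
        (void)key; (void)bad; (void)good;
    }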
@@ -494,7 +494,7 @@ namespace cds { namespace container {
 
             @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> maximal key.
             It means that the function finds the rightmost leaf of the tree and tries to unlink it.
-            During unlinking, a concurrent thread may insert an item with key great than leftmost item's key.
+            During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key.
             So the function returns the item with the maximum key at the moment of the tree traversal.
 
             RCU \p synchronize method can be called. RCU should NOT be locked.
@@ -525,7 +525,7 @@ namespace cds { namespace container {
 
             @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> maximal key.
             It means that the function finds the rightmost leaf of the tree and tries to unlink it.
-            During unlinking, a concurrent thread may insert an item with key great than leftmost item's key.
+            During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key.
             So the function returns the item with the maximum key at the moment of the tree traversal.
 
             RCU \p synchronize method can be called. RCU should NOT be locked.
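The two documentation hunks above fix the same copy-paste slip: for extract_max the race is with an insert of a key greater than the rightmost item's key, not the leftmost. The behavioral point stands: the extractor walks to the rightmost leaf and unlinks it optimistically, so the result is only guaranteed maximal at the moment the traversal saw that leaf. A minimal usage sketch, assuming the container's exempt_ptr/extract_max() interface (the include paths and the boolean test on exempt_ptr are assumptions; library and thread initialization are omitted):

    #include <cds/urcu/general_buffered.h>
    #include <cds/container/bronson_avltree_map_rcu.h>

    typedef cds::urcu::gc< cds::urcu::general_buffered<> >        rcu_gc;
    typedef cds::container::BronsonAVLTreeMap< rcu_gc, int, int > map_type;

    void take_greatest( map_type& m )
    {
        // RCU must NOT be locked here (see the note above)
        map_type::exempt_ptr xp = m.extract_max();
        if ( xp ) {
            // *xp is the value of the item that was rightmost during the traversal;
            // a concurrent insert may already have added a greater key by now
        }
        // the extracted value is reclaimed after an RCU grace period once xp is destroyed
    }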
@@ -1349,7 +1349,7 @@ namespace cds { namespace container {
 
                     if ( !pChild ) {
                         // Found min/max
-                        if ( pNode->is_valued( memory_model::memory_order_acquire ) ) {
+                        if ( pNode->is_valued( memory_model::memory_order_acquire )) {
                             int result = try_remove_node( pParent, pNode, nVersion, func, disp );
 
                             if ( result == update_flags::result_removed )
@@ -1410,13 +1410,13 @@ namespace cds { namespace container {
         {
             node_type * pNew;
 
-            auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
-                mapped_type pVal = funcUpdate( pNew );
+            auto fnCreateNode = [&funcUpdate]( node_type * node ) {
+                mapped_type pVal = funcUpdate( node );
                 assert( pVal != nullptr );
-                pNew->m_pValue.store( pVal, memory_model::memory_order_release );
+                node->m_pValue.store( pVal, memory_model::memory_order_release );
             };
 
-            if ( c_bRelaxedInsert ) {
+            static_if ( c_bRelaxedInsert ) {
                 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
                      || child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr )
                 {
@@ -1435,7 +1435,7 @@ namespace cds { namespace container {
                 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
                      || child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr )
                 {
-                    if ( c_bRelaxedInsert ) {
+                    static_if ( c_bRelaxedInsert ) {
                         mapped_type pVal = pNew->m_pValue.load( memory_model::memory_order_relaxed );
                         pNew->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
                         free_value( pVal );
@@ -1447,7 +1447,7 @@ namespace cds { namespace container {
                     return update_flags::retry;
                 }
 
-                if ( !c_bRelaxedInsert )
+                static_if ( !c_bRelaxedInsert )
                     fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
 
                 pNode->child( pNew, nDir, memory_model::memory_order_release );
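c_bRelaxedInsert is a compile-time trait, so both branches here are decided at compile time; static_if looks like a libcds helper that documents this (effectively a plain if over a constant on pre-C++17 compilers). The functional difference the hunks deal with: a relaxed insert allocates the node and stores its value before the locked re-validation, so a failed validation has to free the half-published value (the free_value() path above), whereas the strict path allocates only once the validation has succeeded. A modern-C++ sketch of the same shape (illustration only, not the libcds macro):

    template <bool RelaxedInsert>
    void try_insert_sketch()
    {
        if constexpr ( RelaxedInsert ) {
            // allocate the node and set its value optimistically, before taking the
            // parent's lock; if the locked re-validation fails, free the value and
            // the node again and retry
        }
        else {
            // allocate the node only after the locked re-validation succeeds,
            // so the retry path has nothing extra to clean up
        }
    }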
@@ -1632,6 +1632,11 @@ namespace cds { namespace container {
 
     private: // rotations
         //@cond
+        int check_node_ordering( node_type* pParent, node_type* pChild )
+        {
+            return key_comparator()( pParent->m_key, pChild->m_key );
+        }
+
         int estimate_node_condition( node_type * pNode )
         {
             node_type * pLeft = child( pNode, left_child, memory_model::memory_order_acquire );
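The new check_node_ordering() helper simply forwards to key_comparator(), whose usual three-way contract (negative, zero, positive for less, equal, greater) lets the rotation code below assert that a parent and its re-linked child are still on the correct side of each other. A small sketch of the contract being relied on (illustrative comparator, not the map's actual one):

    struct int_comparator {
        int operator()( int lhs, int rhs ) const
        {
            return lhs < rhs ? -1 : ( rhs < lhs ? 1 : 0 );
        }
    };

    // as used by the assertions added below:
    //   check_node_ordering( parent, left_child )  > 0   // parent key is greater than its left child's
    //   check_node_ordering( parent, right_child ) < 0   // parent key is less than its right child's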
@@ -1750,49 +1755,51 @@ namespace cds { namespace container {
             // pNode->pLeft is too large, we will rotate-right.
             // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
 
-            {
-                assert( pLeft != nullptr );
-                node_scoped_lock l( m_Monitor, *pLeft );
-                if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
-                    return pNode; // retry for pNode
-
-                int hL = height( pLeft, memory_model::memory_order_acquire );
-                if ( hL - hR <= 1 )
-                    return pNode; // retry
-
-                node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
-                int hLR = height_null( pLRight, memory_model::memory_order_acquire );
-                node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
-                int hLL = height_null( pLLeft, memory_model::memory_order_acquire );
-
-                if ( hLL > hLR ) {
-                    // rotate right
-                    return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
-                }
-                else {
-                    if ( pLRight ) {
-                        node_scoped_lock lr( m_Monitor, *pLRight );
-                        if ( pLeft->m_pRight.load( memory_model::memory_order_acquire ) != pLRight )
-                            return pNode; // retry
-
-                        hLR = height( pLRight, memory_model::memory_order_acquire );
-                        if ( hLL > hLR )
-                            return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
-
-                        int hLRL = height_null( child( pLRight, left_child, memory_model::memory_order_relaxed ), memory_model::memory_order_acquire);
-                        int balance = hLL - hLRL;
-                        if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
-                            // nParent.child.left won't be damaged after a double rotation
-                            return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
-                        }
-                    }
-                    else
+            assert( pLeft != nullptr );
+            node_scoped_lock l( m_Monitor, *pLeft );
+            if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
+                return pNode; // retry for pNode
+
+            assert( check_node_ordering( pNode, pLeft ) > 0 );
+
+            int hL = height( pLeft, memory_model::memory_order_acquire );
+            if ( hL - hR <= 1 )
+                return pNode; // retry
+
+            node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
+            int hLR = height_null( pLRight, memory_model::memory_order_acquire );
+            node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
+            int hLL = height_null( pLLeft, memory_model::memory_order_acquire );
+
+            if ( pLRight ) {
+                {
+                    node_scoped_lock lr( m_Monitor, *pLRight );
+                    if ( pLeft->m_pRight.load( memory_model::memory_order_acquire ) != pLRight )
                         return pNode; // retry
 
-                    // focus on pLeft, if necessary pNode will be balanced later
-                    return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
+                    assert( check_node_ordering( pLeft, pLRight ) < 0 );
+
+                    hLR = height( pLRight, memory_model::memory_order_acquire );
+                    if ( hLL > hLR )
+                        return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
+
+                    int hLRL = height_null( child( pLRight, left_child, memory_model::memory_order_relaxed ), memory_model::memory_order_acquire );
+                    int balance = hLL - hLRL;
+                    if ( balance >= -1 && balance <= 1 && !( ( hLL == 0 || hLRL == 0 ) && !pLeft->is_valued( memory_model::memory_order_relaxed ))) {
+                        // nParent.child.left won't be damaged after a double rotation
+                        return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
+                    }
                 }
+
+                // focus on pLeft, if necessary pNode will be balanced later
+                return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
+            }
+            else if ( hLL > hLR ) {
+                // rotate right
+                return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
             }
+
+            return pNode; // retry
         }
 
         node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
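The rebalance_to_right_locked() hunk above flattens the old nested scope, which makes the classic AVL case split easier to read: the left subtree is too tall (hL - hR > 1); if its outer branch dominates (hLL > hLR) a single right rotation of pNode suffices, otherwise the inner branch is taller and pLeft must be rotated left first, i.e. the rotate_right_over_left_locked() double rotation, guarded by the |hLL - hLRL| <= 1 balance check and the routing-node test on pLeft->is_valued(). A worked height example (values assumed for illustration):

    // hR = 1, hLL = 3, hLR = 2  =>  hL = 1 + max(3, 2) = 4, hL - hR = 3 > 1 (left-heavy)
    //     hLL > hLR             =>  single rotate_right_locked() restores balance
    //
    // hR = 1, hLL = 2, hLR = 3, hLRL = 2
    //     hLL <= hLR and |hLL - hLRL| = 0 <= 1
    //                           =>  rotate_right_over_left_locked() (double rotation);
    //                               a single right rotation would leave pLeft unbalanced

The symmetric rebalance_to_left_locked() in the next hunk gets the same restructuring with left and right exchanged.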
@@ -1802,28 +1809,30 @@ namespace cds { namespace container {
                  || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
 
             // pParent and pNode is locked yet
-            {
-                assert( pRight != nullptr );
-                node_scoped_lock l( m_Monitor, *pRight );
-                if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
-                    return pNode; // retry for pNode
-
-                int hR = height( pRight, memory_model::memory_order_acquire );
-                if ( hL - hR >= -1 )
-                    return pNode; // retry
-
-                node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
-                int hRL = height_null( pRLeft, memory_model::memory_order_acquire );
-                node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
-                int hRR = height_null( pRRight, memory_model::memory_order_acquire );
-                if ( hRR > hRL )
-                    return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
-
-                if ( pRLeft ) {
+            assert( pRight != nullptr );
+            node_scoped_lock l( m_Monitor, *pRight );
+            if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
+                return pNode; // retry for pNode
+
+            assert( check_node_ordering( pNode, pRight ) < 0 );
+
+            int hR = height( pRight, memory_model::memory_order_acquire );
+            if ( hL - hR >= -1 )
+                return pNode; // retry
+
+            node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
+            int hRL = height_null( pRLeft, memory_model::memory_order_acquire );
+            node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
+            int hRR = height_null( pRRight, memory_model::memory_order_acquire );
+
+            if ( pRLeft ) {
+                {
                     node_scoped_lock lrl( m_Monitor, *pRLeft );
                     if ( pRight->m_pLeft.load( memory_model::memory_order_acquire ) != pRLeft )
                         return pNode; // retry
 
+                    assert( check_node_ordering( pRight, pRLeft ) > 0 );
+
                     hRL = height( pRLeft, memory_model::memory_order_acquire );
                     if ( hRR >= hRL )
                         return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
@@ -1831,20 +1840,23 @@ namespace cds { namespace container {
                     node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
                     int hRLR = height_null( pRLRight, memory_model::memory_order_acquire );
                     int balance = hRR - hRLR;
-                    if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
-                         return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
+                    if ( balance >= -1 && balance <= 1 && !( ( hRR == 0 || hRLR == 0 ) && !pRight->is_valued( memory_model::memory_order_relaxed )))
+                        return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
                 }
-                else
-                    return pNode; // retry
+
                 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
             }
+            else if ( hRR > hRL )
+                return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
+
+            return pNode;   // retry
         }
 
         static void begin_change( node_type * pNode, version_type version )
         {
             assert(pNode->version(memory_model::memory_order_acquire) == version );
             assert( (version & node_type::shrinking) == 0 );
-            pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
+            pNode->exchange_version( version | node_type::shrinking, memory_model::memory_order_acquire );
         }
         static void end_change( node_type * pNode, version_type version )
         {
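begin_change() marks the node's version word with the shrinking bit before the links are rewritten (now via exchange_version() rather than a plain store), and end_change() presumably publishes the post-rotation version with the bit cleared. Optimistic readers sample the version before descending and re-check it afterwards, retrying when a shrink happened in between; that is the hand-over-hand validation this map is built on. A self-contained conceptual sketch of the reader side (plain std::atomic, not the file's code):

    #include <atomic>
    #include <cstdint>

    struct node_sketch {
        static constexpr std::uint64_t shrinking = 1;       // low bit marks an in-flight rotation
        std::atomic<std::uint64_t>     version{ 2 };
        std::atomic<node_sketch*>      right{ nullptr };
    };

    // returns false if the caller must retry from pNode
    bool descend_right_once( node_sketch* pNode, node_sketch*& pChild )
    {
        std::uint64_t ver = pNode->version.load( std::memory_order_acquire );
        if ( ver & node_sketch::shrinking )
            return false;                                    // a rotation below pNode is in progress
        pChild = pNode->right.load( std::memory_order_acquire );
        // ... read through pChild ...
        return pNode->version.load( std::memory_order_acquire ) == ver;   // revalidate
    }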
@@ -1860,19 +1872,40 @@ namespace cds { namespace container {
             begin_change( pNode, nodeVersion );
 
             pNode->m_pLeft.store( pLRight, memory_model::memory_order_release );
-            if ( pLRight != nullptr )
-                pLRight->parent( pNode, memory_model::memory_order_release  );
 
-            pLeft->m_pRight.store( pNode, memory_model::memory_order_release );
-            pNode->parent( pLeft, memory_model::memory_order_release );
+            if ( pLRight != nullptr ) {
+                atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+                CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRight->m_pParent );
+                pLRight->parent( pNode, memory_model::memory_order_relaxed );
+                assert( check_node_ordering( pNode, pLRight ) > 0 );
+            }
 
-            if ( pParentLeft == pNode )
-                pParent->m_pLeft.store( pLeft, memory_model::memory_order_release );
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+            CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLeft->m_pRight );
+            pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
+
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+
+            CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_pParent );
+            pNode->parent( pLeft, memory_model::memory_order_relaxed );
+            assert( check_node_ordering( pLeft, pNode ) < 0 );
+
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+            if ( pParentLeft == pNode ) {
+                CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pLeft );
+                pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
+            }
             else {
                 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
-                pParent->m_pRight.store( pLeft, memory_model::memory_order_release );
+                CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pRight );
+                pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
             }
-            pLeft->parent( pParent, memory_model::memory_order_release );
+
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+            CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLeft->m_pParent );
+            pLeft->parent( pParent, memory_model::memory_order_relaxed );
+
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
 
             // fix up heights links
             int hNode = 1 + std::max( hLR, hR );
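From here on, each link update in the rotation helpers is rewritten as an explicit acq_rel thread fence, a CDS_TSAN_ANNOTATE_HAPPENS_BEFORE annotation (so ThreadSanitizer sees the ordering the fence provides), and a relaxed store. Under the C++11 memory model a release fence (the acq_rel fence includes one) followed by a relaxed atomic store still publishes all earlier writes to any thread that reads that store with acquire semantics, so the visible ordering matches the former release stores. A standalone std::atomic sketch of the two equivalent publication patterns (illustration only, unrelated to the libcds types):

    #include <atomic>

    std::atomic<int*> g_ptr{ nullptr };
    int               g_payload = 0;

    void publish( int* p )
    {
        g_payload = 42;     // data that must be visible before the pointer is seen

        // (a) the old pattern: a release store
        //     g_ptr.store( p, std::memory_order_release );

        // (b) the new pattern: fence + relaxed store
        std::atomic_thread_fence( std::memory_order_release );
        g_ptr.store( p, std::memory_order_relaxed );
    }

    int* consume()
    {
        int* p = g_ptr.load( std::memory_order_acquire );
        // if p is non-null, g_payload == 42 is guaranteed to be visible here
        return p;
    }

The same fence/annotation/relaxed-store sequence recurs in rotate_left_locked(), rotate_right_over_left_locked() and rotate_left_over_right_locked() below.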
@@ -1930,19 +1963,37 @@ namespace cds { namespace container {
 
             // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
             pNode->m_pRight.store( pRLeft, memory_model::memory_order_release );
-            if ( pRLeft != nullptr )
-                pRLeft->parent( pNode, memory_model::memory_order_release );
+            if ( pRLeft != nullptr ) {
+                atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+                CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLeft->m_pParent );
+                pRLeft->parent( pNode, memory_model::memory_order_relaxed );
+            }
 
-            pRight->m_pLeft.store( pNode, memory_model::memory_order_release );
-            pNode->parent( pRight, memory_model::memory_order_release );
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+            CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRight->m_pLeft );
+            pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
 
-            if ( pParentLeft == pNode )
-                pParent->m_pLeft.store( pRight, memory_model::memory_order_release );
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+            CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_pParent );
+            pNode->parent( pRight, memory_model::memory_order_relaxed );
+            assert( check_node_ordering( pRight, pNode ) > 0 );
+
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+            if ( pParentLeft == pNode ) {
+                CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pLeft );
+                pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
+            }
             else {
                 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
-                pParent->m_pRight.store( pRight, memory_model::memory_order_release );
+                CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pRight );
+                pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
             }
-            pRight->parent( pParent, memory_model::memory_order_release );
+
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+            CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRight->m_pParent );
+            pRight->parent( pParent, memory_model::memory_order_relaxed );
+
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
 
             // fix up heights
             int hNode = 1 + std::max( hL, hRL );
@@ -1992,26 +2043,56 @@ namespace cds { namespace container {
 
             // fix up pNode links, careful about the order!
             pNode->m_pLeft.store( pLRR, memory_model::memory_order_release );
-            if ( pLRR != nullptr )
-                pLRR->parent( pNode, memory_model::memory_order_release );
+            if ( pLRR != nullptr ) {
+                atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+                CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRR->m_pParent );
+                pLRR->parent( pNode, memory_model::memory_order_relaxed );
+            }
 
-            pLeft->m_pRight.store( pLRL, memory_model::memory_order_release );
-            if ( pLRL != nullptr )
-                pLRL->parent( pLeft, memory_model::memory_order_release );
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+            CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLeft->m_pRight );
+            pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
 
-            pLRight->m_pLeft.store( pLeft, memory_model::memory_order_release );
-            pLeft->parent( pLRight, memory_model::memory_order_release );
+            if ( pLRL != nullptr ) {
+                atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+                CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRL->m_pParent );
+                pLRL->parent( pLeft, memory_model::memory_order_relaxed );
+            }
+
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+            CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRight->m_pLeft );
+            pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
+
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+            CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLeft->m_pParent );
+            pLeft->parent( pLRight, memory_model::memory_order_relaxed );
+            assert( check_node_ordering( pLRight, pLeft ) > 0 );
+
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+            CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRight->m_pRight );
+            pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
 
-            pLRight->m_pRight.store( pNode, memory_model::memory_order_release );
-            pNode->parent( pLRight, memory_model::memory_order_release );
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+            CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_pParent );
+            pNode->parent( pLRight, memory_model::memory_order_relaxed );
+            assert( check_node_ordering( pLRight, pNode ) < 0 );
 
-            if ( pPL == pNode )
-                pParent->m_pLeft.store( pLRight, memory_model::memory_order_release );
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+            if ( pPL == pNode ) {
+                CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pLeft );
+                pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
+            }
             else {
                 assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
-                pParent->m_pRight.store( pLRight, memory_model::memory_order_release );
+                CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pRight );
+                pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
             }
-            pLRight->parent( pParent, memory_model::memory_order_release );
+
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+            CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRight->m_pParent );
+            pLRight->parent( pParent, memory_model::memory_order_relaxed );
+
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
 
             // fix up heights
             int hNode = 1 + std::max( hLRR, hR );
@@ -2076,26 +2157,56 @@ namespace cds { namespace container {
 
             // fix up pNode links, careful about the order!
             pNode->m_pRight.store( pRLL, memory_model::memory_order_release );
-            if ( pRLL != nullptr )
-                pRLL->parent( pNode, memory_model::memory_order_release );
+            if ( pRLL != nullptr ) {
+                atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+                CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLL->m_pParent );
+                pRLL->parent( pNode, memory_model::memory_order_relaxed );
+            }
 
-            pRight->m_pLeft.store( pRLR, memory_model::memory_order_release );
-            if ( pRLR != nullptr )
-                pRLR->parent( pRight, memory_model::memory_order_release );
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+            CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRight->m_pLeft );
+            pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
 
-            pRLeft->m_pRight.store( pRight, memory_model::memory_order_release );
-            pRight->parent( pRLeft, memory_model::memory_order_release );
+            if ( pRLR != nullptr ) {
+                atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+                CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLR->m_pParent );
+                pRLR->parent( pRight, memory_model::memory_order_relaxed );
+            }
+
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+            CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLeft->m_pRight );
+            pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
+
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+            CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRight->m_pParent );
+            pRight->parent( pRLeft, memory_model::memory_order_relaxed );
+            assert( check_node_ordering( pRLeft, pRight ) < 0 );
+
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+            CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLeft->m_pLeft );
+            pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
 
-            pRLeft->m_pLeft.store( pNode, memory_model::memory_order_release );
-            pNode->parent( pRLeft, memory_model::memory_order_release );
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+            CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_pParent );
+            pNode->parent( pRLeft, memory_model::memory_order_relaxed );
+            assert( check_node_ordering( pRLeft, pNode ) > 0 );
 
-            if ( pPL == pNode )
-                pParent->m_pLeft.store( pRLeft, memory_model::memory_order_release );
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+            if ( pPL == pNode ) {
+                CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pLeft );
+                pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
+            }
             else {
                 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
-                pParent->m_pRight.store( pRLeft, memory_model::memory_order_release );
+                CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pRight );
+                pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
             }
-            pRLeft->parent( pParent, memory_model::memory_order_release );
+
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+            CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLeft->m_pParent );
+            pRLeft->parent( pParent, memory_model::memory_order_relaxed );
+
+            atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
 
             // fix up heights
             int hNode = 1 + std::max( hL, hRLL );