//$$CDS-header$$

#ifndef __CDS_INTRUSIVE_LAZY_LIST_HRC_H
#define __CDS_INTRUSIVE_LAZY_LIST_HRC_H

#include <cds/intrusive/impl/lazy_list.h>
#include <cds/gc/hrc.h>
#include <cds/details/allocator.h>

namespace cds { namespace intrusive { namespace lazy_list {
    //@cond
    // Specialization for HRC GC
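    //
    // The node keeps a marked pointer to its successor: bit 0 of m_pNext is
    // the logical deletion mark, while m_Lock serializes physical changes to
    // the node as the lazy list algorithm requires.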
    template <typename Lock, typename Tag>
    struct node< gc::HRC, Lock, Tag>: public gc::HRC::container_node
    {
        typedef gc::HRC gc          ;   ///< Garbage collector
        typedef Lock    lock_type   ;   ///< Lock type
        typedef Tag     tag         ;   ///< tag

        typedef cds::details::marked_ptr<node, 1>   marked_ptr          ;   ///< marked pointer
        typedef typename gc::template atomic_marked_ptr< marked_ptr>    atomic_marked_ptr   ;   ///< atomic marked pointer specific for GC

        atomic_marked_ptr   m_pNext ; ///< pointer to the next node in the list + logical deletion mark
        mutable lock_type   m_Lock  ; ///< Node lock

        /// Checks if node is marked
        bool is_marked() const
        {
            return m_pNext.load(atomics::memory_order_relaxed).bits() != 0;
        }

        node()
            : m_pNext( nullptr )
        {}

    protected:
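        // cleanUp is called by the HRC garbage collector: it swings m_pNext
        // past any successor that is already marked as deleted, so the
        // deleted node loses this incoming reference and can be reclaimed.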
        virtual void cleanUp( cds::gc::hrc::ThreadGC * pGC )
        {
            assert( pGC != nullptr );
            typename gc::GuardArray<2> aGuards( *pGC );

            while ( true ) {
                // Guard the current successor before dereferencing it
                marked_ptr pNextMarked( aGuards.protect( 0, m_pNext ));
                node * pNext = pNextMarked.ptr();
                if ( pNext != nullptr && pNext->m_bDeleted.load( atomics::memory_order_acquire ) ) {
                    // The successor is deleted - try to link this node
                    // directly to the successor's successor
                    marked_ptr p = aGuards.protect( 1, pNext->m_pNext );
                    m_pNext.compare_exchange_weak( pNextMarked, p, atomics::memory_order_acquire, atomics::memory_order_relaxed );
                    continue;
                }
                else {
                    break;
                }
            }
        }

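        // terminate is the HRC hook that clears the node's links: with a CAS
        // loop when other threads may still access the node concurrently,
        // with a plain store otherwise.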
        virtual void terminate( cds::gc::hrc::ThreadGC * pGC, bool bConcurrent )
        {
            if ( bConcurrent ) {
                marked_ptr pNext( m_pNext.load(atomics::memory_order_relaxed));
                do {} while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), atomics::memory_order_release, atomics::memory_order_relaxed ) );
            }
            else {
                m_pNext.store( marked_ptr(), atomics::memory_order_relaxed );
            }
        }
    };
    //@endcond

    //@cond
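    // Head/tail sentinel nodes for HRC: they are heap-allocated and, on
    // destruction, handed to the garbage collector (retire) instead of being
    // deleted directly, since concurrent threads may still hold guarded
    // references to them.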
    template <typename NodeType, typename Alloc >
    class boundary_nodes< gc::HRC, NodeType, Alloc >
    {
        typedef NodeType node_type;
        typedef cds::details::Allocator< node_type, Alloc> cxx_allocator   ;   ///< allocator for the head and tail nodes

        struct boundary_disposer
        {
            void operator()( node_type * p )
            {
                cxx_allocator().Delete( p );
            }
        };

        node_type *  m_pHead;
        node_type *  m_pTail;

    public:
        boundary_nodes()
        {
            m_pHead = cxx_allocator().New();
            m_pTail = cxx_allocator().New();
        }

        ~boundary_nodes()
        {
            cds::gc::HRC::template retire<boundary_disposer>( m_pHead );
            cds::gc::HRC::template retire<boundary_disposer>( m_pTail );
        }

    public:
        node_type * head()
        {
            return m_pHead;
        }
        node_type const * head() const
        {
            return m_pHead;
        }
        node_type * tail()
        {
            return m_pTail;
        }
        node_type const * tail() const
        {
            return m_pTail;
        }
    };
    //@endcond

    //@cond
    /*
    template <typename Node, typename MemoryModel>
    struct node_cleaner< gc::HRC, Node, MemoryModel> {
        void operator()( Node * p )
        {
            typedef typename Node::marked_ptr marked_ptr;
            p->m_pNext.store( marked_ptr(), MemoryModel::memory_order_release );
            //p->clean( MemoryModel::memory_order_release );
        }
    };
    */
    //@endcond

    //@cond
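    // For gc::HRC, link checking cannot be disabled: both opt::never_check_link
    // and opt::debug_check_link are mapped to the full link_checker below, so
    // a node is always verified to be unlinked before insertion.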
    template <typename NODE>
    struct link_checker_selector< gc::HRC, NODE, opt::never_check_link >
    {
        typedef link_checker<NODE>  type;
    };

    template <typename NODE>
    struct link_checker_selector< gc::HRC, NODE, opt::debug_check_link >
    {
        typedef link_checker<NODE>  type;
    };
    //@endcond

}}}   // namespace cds::intrusive::lazy_list
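// A minimal usage sketch (an assumption based on the usual libcds intrusive
// LazyList interface; my_item and my_less are hypothetical names):
//
//      #include <cds/intrusive/lazy_list_hrc.h>
//
//      // Item type derived from the HRC-specialized node
//      struct my_item: public cds::intrusive::lazy_list::node< cds::gc::HRC >
//      {
//          int nKey;
//      };
//
//      struct my_less {
//          bool operator()( my_item const& a, my_item const& b ) const
//          { return a.nKey < b.nKey; }
//      };
//
//      typedef cds::intrusive::LazyList< cds::gc::HRC, my_item,
//          typename cds::intrusive::lazy_list::make_traits<
//              cds::intrusive::opt::hook<
//                  cds::intrusive::lazy_list::base_hook< cds::opt::gc< cds::gc::HRC > >
//              >
//              ,cds::opt::less< my_less >
//          >::type
//      > my_list;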

#endif // #ifndef __CDS_INTRUSIVE_LAZY_LIST_HRC_H