/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef CDSLIB_INTRUSIVE_FREE_LIST_TAGGED_H
#define CDSLIB_INTRUSIVE_FREE_LIST_TAGGED_H

#include <cds/algo/atomic.h>

namespace cds { namespace intrusive {

    /// Lock-free free list based on tagged pointers (requires double-width CAS)
    /** @ingroup cds_intrusive_freelist

        This variant of \p FreeList is intended for processor architectures that support double-width CAS.
        It uses the <a href="https://en.wikipedia.org/wiki/Tagged_pointer">tagged pointer</a> technique to solve the ABA problem.
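        The head of the free list couples the node pointer with an integer tag, and every successful
        \p get() or \p put() increments the tag. A thread that read a stale head therefore fails its CAS
        even if the node address it observed has since been popped and pushed back.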

        \b How to use
        \code
        #include <cds/intrusive/free_list_tagged.h>

        // Your struct should be derived from TaggedFreeList::node
        struct Foo: public cds::intrusive::TaggedFreeList::node
        {
            // Foo fields
        };

        // Simplified Foo allocator
        class FooAllocator
        {
        public:
            // free-list clear() must be explicitly called before destroying the free-list object
            ~FooAllocator()
            {
                m_FreeList.clear( []( freelist_node * p ) { delete static_cast<Foo *>( p ); });
            }

            Foo * alloc()
            {
                freelist_node * p = m_FreeList.get();
                if ( p )
                    return static_cast<Foo *>( p );
                return new Foo;
            }

            void dealloc( Foo * p )
            {
                m_FreeList.put( static_cast<freelist_node *>( p ));
            }

        private:
            typedef cds::intrusive::TaggedFreeList::node freelist_node;
            cds::intrusive::TaggedFreeList m_FreeList;
        };
        \endcode
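
        \note Double-width CAS must be lock-free on the target platform (for example, \p CMPXCHG16B
        on x86-64). Depending on the compiler and build flags (such as \p -mcx16 with GCC/Clang),
        \p atomics::atomic may otherwise fall back to a lock-based implementation; the
        \p is_lock_free() assertion in the constructor is intended to catch that case.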
    */
    class TaggedFreeList
    {
    public:
        struct node {
            //@cond
            atomics::atomic<node *> m_freeListNext;

            node()
            {
                m_freeListNext.store( nullptr, atomics::memory_order_release );
            }
            //@endcond
        };

    private:
        //@cond
        struct tagged_ptr
        {
            node *    ptr;
            uintptr_t tag;

            tagged_ptr()
                : ptr( nullptr )
                , tag( 0 )
            {}

            tagged_ptr( node* p )
                : ptr( p )
                , tag( 0 )
            {}
        };

        static_assert(sizeof( tagged_ptr ) == sizeof( void * ) * 2, "sizeof( tagged_ptr ) violation");
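        // The assertion above checks that the pointer/tag pair occupies exactly two machine
        // words, so atomics::atomic<tagged_ptr> can map to a double-width CAS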
        //@endcond

    public:
        /// Creates an empty free list
        TaggedFreeList()
            : m_Head( tagged_ptr())
        {
            // Your platform must support double-width CAS
            assert( m_Head.is_lock_free());
        }

        /// Destroys the free list. The free list must be empty
        /**
            @warning The destructor does not free the elements of the list.
            To free the elements you should manually call \p clear() with an appropriate disposer.
        */
        ~TaggedFreeList()
        {
            assert( empty());
        }

        /// Puts \p pNode onto the free list
        void put( node * pNode )
        {
            assert( m_Head.is_lock_free());

            tagged_ptr currentHead = m_Head.load( atomics::memory_order_relaxed );
            tagged_ptr newHead = { pNode };
            do {
                // Bump the tag on every attempt so that a concurrent get()/put()
                // holding a stale head cannot succeed (ABA prevention)
                newHead.tag = currentHead.tag + 1;
                pNode->m_freeListNext.store( currentHead.ptr, atomics::memory_order_relaxed );
                CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_freeListNext );
            } while ( cds_unlikely( !m_Head.compare_exchange_weak( currentHead, newHead, atomics::memory_order_release, atomics::memory_order_acquire )));
        }

        /// Gets a node from the free list. If the list is empty, returns \p nullptr
        node * get()
        {
            tagged_ptr currentHead = m_Head.load( atomics::memory_order_acquire );
            tagged_ptr newHead;
            while ( currentHead.ptr != nullptr ) {
                CDS_TSAN_ANNOTATE_HAPPENS_AFTER( &currentHead.ptr->m_freeListNext );
                // Try to advance the head to the next node, again incrementing the tag
                newHead.ptr = currentHead.ptr->m_freeListNext.load( atomics::memory_order_relaxed );
                newHead.tag = currentHead.tag + 1;
                if ( cds_likely( m_Head.compare_exchange_weak( currentHead, newHead, atomics::memory_order_release, atomics::memory_order_acquire )))
                    break;
            }
            return currentHead.ptr;
        }

        /// Checks whether the free list is empty
        bool empty() const
        {
            return m_Head.load( atomics::memory_order_relaxed ).ptr == nullptr;
        }

        /// Clears the free list (not atomic)
        /**
            For each element the \p disp disposer is called to free memory.
            The \p Disposer interface:
            \code
            struct disposer
            {
                void operator()( TaggedFreeList::node * node );
            };
            \endcode

            This method must be explicitly called before the free list destructor.
        */
        template <typename Disposer>
        void clear( Disposer disp )
        {
            node * head = m_Head.load( atomics::memory_order_relaxed ).ptr;
            m_Head.store( { nullptr }, atomics::memory_order_relaxed );
            while ( head ) {
                node * next = head->m_freeListNext.load( atomics::memory_order_relaxed );
                disp( head );
                head = next;
            }
        }

    private:
        //@cond
        atomics::atomic<tagged_ptr> m_Head;
        //@endcond
    };

}} // namespace cds::intrusive

#endif // CDSLIB_INTRUSIVE_FREE_LIST_TAGGED_H