/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef CDSLIB_URCU_DETAILS_SH_H
#define CDSLIB_URCU_DETAILS_SH_H

#include <cstring>  // std::memset
#include <cds/urcu/details/sh_decl.h>

#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
#include <cds/threading/model.h>

//@cond
namespace cds { namespace urcu { namespace details {

    // Inlines
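
    // Note: in the signal-handling (sh) URCU flavor, a reader manipulates only
    // its per-thread control word and uses compiler barriers. The writer gains
    // the memory ordering it needs by signalling reader threads, whose signal
    // handler (declared in sh_decl.h) is responsible for the hardware barrier.
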
    // sh_thread_gc
    template <typename RCUtag>
    inline sh_thread_gc<RCUtag>::sh_thread_gc()
    {
        if ( !cds::threading::Manager::isThreadAttached())
            cds::threading::Manager::attachThread();
    }

    template <typename RCUtag>
    inline sh_thread_gc<RCUtag>::~sh_thread_gc()
    {
        cds::threading::Manager::detachThread();
    }

    template <typename RCUtag>
    inline typename sh_thread_gc<RCUtag>::thread_record * sh_thread_gc<RCUtag>::get_thread_record()
    {
        return cds::threading::getRCU<RCUtag>();
    }

    template <typename RCUtag>
    inline void sh_thread_gc<RCUtag>::access_lock()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr );

        uint32_t tmp = pRec->m_nAccessControl.load( atomics::memory_order_relaxed );

        if ( (tmp & rcu_class::c_nNestMask) == 0 ) {
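            // Outermost lock: copy the writer's global control word into the
            // thread-local word. As in classic userspace-RCU, the global word
            // is expected to carry a nest count of one, so this single store
            // both enters the critical section and records the current
            // grace-period phase. Only a compiler barrier follows; the
            // hardware barrier is forced later by force_membar_all_threads().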
            pRec->m_nAccessControl.store( sh_singleton<RCUtag>::instance()->global_control_word(atomics::memory_order_relaxed),
                atomics::memory_order_relaxed );

            CDS_COMPILER_RW_BARRIER;
        }
        else {
            // nested lock
            pRec->m_nAccessControl.store( tmp + 1, atomics::memory_order_relaxed );
        }
    }

    template <typename RCUtag>
    inline void sh_thread_gc<RCUtag>::access_unlock()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr );

        uint32_t tmp = pRec->m_nAccessControl.load( atomics::memory_order_relaxed );
        assert( ( tmp & rcu_class::c_nNestMask ) > 0 );

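        // Compiler barrier only: it keeps critical-section accesses from
        // sinking below the decrement; the matching hardware barrier is again
        // supplied via the reader's signal handler when the writer requests it.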
        CDS_COMPILER_RW_BARRIER;
        pRec->m_nAccessControl.store( tmp - 1, atomics::memory_order_relaxed );
    }

    template <typename RCUtag>
    inline bool sh_thread_gc<RCUtag>::is_locked()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr );

        return (pRec->m_nAccessControl.load( atomics::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0;
    }


    // sh_singleton
    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::set_signal_handler()
    {
        //TODO: OS-specific code must be moved to cds::OS namespace
        struct sigaction sigact;
        std::memset( &sigact, 0, sizeof(sigact));
        sigact.sa_sigaction = signal_handler;
        sigact.sa_flags = SA_SIGINFO;
        sigemptyset( &sigact.sa_mask );
        sigaction( m_nSigNo, &sigact, nullptr );

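        // Unblock the grace-period signal for the calling thread (threads
        // created later inherit the mask), so readers can run the handler.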
        sigaddset( &sigact.sa_mask, m_nSigNo );
        pthread_sigmask( SIG_UNBLOCK, &sigact.sa_mask, nullptr );
    }

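    // clear_signal_handler is a no-op: the installed handler is left in place.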
    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::clear_signal_handler()
    {}

    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::raise_signal( cds::OS::ThreadId tid )
    {
        pthread_kill( tid, m_nSigNo );
    }

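    // Writer-side substitute for reader memory barriers: flag and signal every
    // attached reader thread, then wait until each has acknowledged by clearing
    // its m_bNeedMemBar flag in the signal handler.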
    template <typename RCUtag>
    template <class Backoff>
    inline void sh_singleton<RCUtag>::force_membar_all_threads( Backoff& bkOff )
    {
        OS::ThreadId const nullThreadId = OS::c_NullThreadId;

        // Send the "need membar" signal to all RCU threads
        for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
            OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire );
            if ( tid != nullThreadId ) {
                pRec->m_bNeedMemBar.store( true, atomics::memory_order_release );
                raise_signal( tid );
            }
        }

        // Wait until all RCU threads have processed the signal
        for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
            OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire );
            if ( tid != nullThreadId ) {
                bkOff.reset();
                while ( (tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire )) != nullThreadId
                     && pRec->m_bNeedMemBar.load( atomics::memory_order_acquire ))
                {
                    // Some OS versions can lose signals, so resend the signal
                    raise_signal( tid );
                    bkOff();
                }
            }
        }
    }

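    // A reader still blocks the grace period if it is inside a critical section
    // (nest bits set) and its control word was captured under a grace-period
    // phase that differs from the current global control word.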
    template <typename RCUtag>
    bool sh_singleton<RCUtag>::check_grace_period( thread_record * pRec ) const
    {
        uint32_t const v = pRec->m_nAccessControl.load( atomics::memory_order_acquire );
        return (v & signal_handling_rcu::c_nNestMask)
            && ((( v ^ m_nGlobalControl.load( atomics::memory_order_relaxed )) & ~signal_handling_rcu::c_nNestMask ));
    }

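    // Spin with backoff until every attached reader thread has left the
    // critical section it entered before the current grace period began.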
    template <typename RCUtag>
    template <class Backoff>
    void sh_singleton<RCUtag>::wait_for_quiescent_state( Backoff& bkOff )
    {
        OS::ThreadId const nullThreadId = OS::c_NullThreadId;

        for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
            while ( pRec->m_list.m_idOwner.load( atomics::memory_order_acquire ) != nullThreadId && check_grace_period( pRec ))
                bkOff();
        }
    }

}}} // namespace cds::urcu::details
//@endcond

#endif // #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
#endif // #ifndef CDSLIB_URCU_DETAILS_SH_H