/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef CDSLIB_URCU_DETAILS_SH_H
#define CDSLIB_URCU_DETAILS_SH_H

#include <cstring>  // std::memset
#include <cds/urcu/details/sh_decl.h>

#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
#include <signal.h>  // sigaction(), pthread_kill(), pthread_sigmask()
#include <cds/threading/model.h>

//@cond
namespace cds { namespace urcu { namespace details {

    // Inlines

    // sh_thread_gc
    template <typename RCUtag>
    inline sh_thread_gc<RCUtag>::sh_thread_gc()
    {
        if ( !threading::Manager::isThreadAttached())
            cds::threading::Manager::attachThread();
    }

    template <typename RCUtag>
    inline sh_thread_gc<RCUtag>::~sh_thread_gc()
    {
        cds::threading::Manager::detachThread();
    }

    template <typename RCUtag>
    inline typename sh_thread_gc<RCUtag>::thread_record * sh_thread_gc<RCUtag>::get_thread_record()
    {
        return cds::threading::getRCU<RCUtag>();
    }

    template <typename RCUtag>
    inline void sh_thread_gc<RCUtag>::access_lock()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr );

        uint32_t tmp = pRec->m_nAccessControl.load( atomics::memory_order_relaxed );
        if ( (tmp & rcu_class::c_nNestMask) == 0 ) {
            // outermost lock: snapshot the writer's global control word
            pRec->m_nAccessControl.store( sh_singleton<RCUtag>::instance()->global_control_word(atomics::memory_order_relaxed),
                atomics::memory_order_relaxed );

            // acquire barrier
            pRec->m_nAccessControl.load( atomics::memory_order_acquire );
        }
        else {
            // nested lock
            pRec->m_nAccessControl.store( tmp + 1, atomics::memory_order_relaxed );
        }
    }
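
    // Note on access_lock(): the low bits of m_nAccessControl
    // (rcu_class::c_nNestMask) hold the read-side nesting depth, while the
    // remaining high control bit snapshots the writer's grace-period phase.
    // In the classic userspace-RCU layout this follows, the global control
    // word itself carries a nest count of one, so the single relaxed store on
    // the outermost lock both enters the critical section and records the
    // current phase; the acquire re-load then orders it before the protected
    // reads.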

    template <typename RCUtag>
    inline void sh_thread_gc<RCUtag>::access_unlock()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr );

        uint32_t tmp = pRec->m_nAccessControl.load( atomics::memory_order_relaxed );
        assert( ( tmp & rcu_class::c_nNestMask ) > 0 );

        pRec->m_nAccessControl.store( tmp - 1, atomics::memory_order_release );
    }
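
    // Reader-side usage, as an illustrative sketch only: application code
    // normally goes through the public wrapper, e.g.
    // cds::urcu::gc< cds::urcu::signal_buffered<>> and the RAII reader lock
    // it provides, rather than calling these internals directly. In terms of
    // this class the pattern is:
    //
    //     access_lock();      // enter read-side critical section (nestable)
    //     // ... read shared data; no protected pointer may outlive the lock
    //     access_unlock();    // leave; a grace period may then complete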

    template <typename RCUtag>
    inline bool sh_thread_gc<RCUtag>::is_locked()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr );

        return (pRec->m_nAccessControl.load( atomics::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0;
    }

    // sh_singleton
    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::set_signal_handler()
    {
        //TODO: OS-specific code must be moved to cds::OS namespace
        struct sigaction sigact;
        std::memset( &sigact, 0, sizeof(sigact));
        sigact.sa_sigaction = signal_handler;
        sigact.sa_flags = SA_SIGINFO;
        sigemptyset( &sigact.sa_mask );
        sigaction( m_nSigNo, &sigact, nullptr );

        sigaddset( &sigact.sa_mask, m_nSigNo );
        pthread_sigmask( SIG_UNBLOCK, &sigact.sa_mask, nullptr );
    }
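
    // The handler installed here is sh_singleton<RCUtag>::signal_handler
    // (declared in sh_decl.h). On delivery it is expected to execute the
    // required memory barrier and clear the thread's m_bNeedMemBar flag;
    // that flag is the acknowledgement force_membar_all_threads() polls for
    // below. Note that m_nSigNo is also explicitly unblocked for the calling
    // thread (and, by signal-mask inheritance, for threads it spawns).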

    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::clear_signal_handler()
    {}

    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::raise_signal( cds::OS::ThreadId tid )
    {
        pthread_kill( tid, m_nSigNo );
    }

    template <typename RCUtag>
    template <class Backoff>
    inline void sh_singleton<RCUtag>::force_membar_all_threads( Backoff& bkOff )
    {
        OS::ThreadId const nullThreadId = OS::c_NullThreadId;

        // Send the "need membar" signal to all live RCU threads
        for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
            OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire );
            if ( tid != nullThreadId ) {
                pRec->m_bNeedMemBar.store( true, atomics::memory_order_release );
                raise_signal( tid );
            }
        }

        // Wait until every live RCU thread has processed the signal
        for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
            OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire );
            if ( tid != nullThreadId ) {
                bkOff.reset();
                while ( (tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire )) != nullThreadId
                     && pRec->m_bNeedMemBar.load( atomics::memory_order_acquire ))
                {
                    // Some OSes can lose or coalesce pending signals,
                    // so resend the signal while waiting
                    raise_signal( tid );
                    bkOff();
                }
            }
        }
    }
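
    // The two passes above implement a signal-based memory-barrier broadcast
    // (the same idea the Linux sys_membarrier() syscall provides): pass one
    // marks every attached, live thread and signals it; pass two spins until
    // each live thread's handler has cleared m_bNeedMemBar. Because standard
    // POSIX signals may be coalesced while pending, the wait loop re-raises
    // the signal on every backoff iteration. On return, every reader thread
    // has executed a barrier, so the caller's earlier stores are ordered
    // before any subsequent read-side critical section.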

    template <typename RCUtag>
    bool sh_singleton<RCUtag>::check_grace_period( thread_record * pRec ) const
    {
        uint32_t const v = pRec->m_nAccessControl.load( atomics::memory_order_acquire );
        return (v & signal_handling_rcu::c_nNestMask)
            && ((( v ^ m_nGlobalControl.load( atomics::memory_order_relaxed )) & ~signal_handling_rcu::c_nNestMask ));
    }
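
    // check_grace_period() is true while pRec may still be reading data from
    // before the current grace period: its nest count (low bits) is non-zero
    // AND its phase snapshot differs from the writer's current
    // m_nGlobalControl (the XOR masked by ~c_nNestMask). Worked example,
    // assuming the top bit is the phase bit: reader word 0x00000002
    // (nest = 2, old phase 0) against global 0x80000001 (phase 1) gives
    // (0x00000002 & c_nNestMask) != 0 and
    // ((0x00000002 ^ 0x80000001) & ~c_nNestMask) == 0x80000000 != 0,
    // so the writer keeps waiting for this reader.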

    template <typename RCUtag>
    template <class Backoff>
    void sh_singleton<RCUtag>::wait_for_quiescent_state( Backoff& bkOff )
    {
        OS::ThreadId const nullThreadId = OS::c_NullThreadId;

        for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
            while ( pRec->m_list.m_idOwner.load( atomics::memory_order_acquire ) != nullThreadId && check_grace_period( pRec ))
                bkOff();
        }
    }
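
    // The writer-side synchronize() for this RCU flavor is assembled from the
    // pieces above. A hedged sketch of the classic sequence (the real code
    // lives in sh_decl.h and the buffered/threaded singletons built on it;
    // the names below mirror check_grace_period() and may differ in detail):
    //
    //     // begin a new grace period: flip the global phase bit, then wait
    //     // until no reader still holds a snapshot of the old phase
    //     m_nGlobalControl.fetch_xor( signal_handling_rcu::c_nControlBit,
    //         atomics::memory_order_seq_cst );
    //     wait_for_quiescent_state( bkOff );
    //
    // bracketed by force_membar_all_threads() so that reader-side fences are
    // supplied by the signal handler instead of on every access_lock().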

}}} // namespace cds::urcu::details
//@endcond

#endif // #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
#endif // #ifndef CDSLIB_URCU_DETAILS_SH_H