/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef CDSLIB_URCU_DETAILS_SH_H
#define CDSLIB_URCU_DETAILS_SH_H

#include <cstring>  // std::memset
#include <cds/urcu/details/sh_decl.h>

#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
#include <cds/threading/model.h>

//@cond
namespace cds { namespace urcu { namespace details {

    // Inlines

    // sh_thread_gc
    template <typename RCUtag>
    inline sh_thread_gc<RCUtag>::sh_thread_gc()
    {
        if ( !threading::Manager::isThreadAttached())
            cds::threading::Manager::attachThread();
    }

    template <typename RCUtag>
    inline sh_thread_gc<RCUtag>::~sh_thread_gc()
    {
        cds::threading::Manager::detachThread();
    }

    template <typename RCUtag>
    inline typename sh_thread_gc<RCUtag>::thread_record * sh_thread_gc<RCUtag>::get_thread_record()
    {
        return cds::threading::getRCU<RCUtag>();
    }

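    // Read-side critical section bookkeeping.
    //
    // The per-thread word m_nAccessControl packs two fields: the low bits
    // (rcu_class::c_nNestMask) hold the critical-section nesting depth, while
    // the remaining high bits carry a snapshot of the writer's grace-period
    // phase. On the outermost access_lock() the thread copies the current
    // global control word (the code relies on that word already encoding a
    // nest count of one, so no separate increment is needed); nested calls
    // merely bump the counter. CDS_COMPILER_RW_BARRIER keeps the compiler from
    // moving protected accesses across the lock/unlock boundary; the expensive
    // store-load fence is replaced by the writer-driven signal in
    // force_membar_all_threads() below.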
    template <typename RCUtag>
    inline void sh_thread_gc<RCUtag>::access_lock()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr );

        uint32_t tmp = pRec->m_nAccessControl.load( atomics::memory_order_relaxed );
        if ( (tmp & rcu_class::c_nNestMask) == 0 ) {
            pRec->m_nAccessControl.store(
                sh_singleton<RCUtag>::instance()->global_control_word(atomics::memory_order_acquire),
                atomics::memory_order_release
            );
        }
        else {
            pRec->m_nAccessControl.fetch_add( 1, atomics::memory_order_release );
        }
        CDS_COMPILER_RW_BARRIER;
    }

    template <typename RCUtag>
    inline void sh_thread_gc<RCUtag>::access_unlock()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr );

        CDS_COMPILER_RW_BARRIER;
        pRec->m_nAccessControl.fetch_sub( 1, atomics::memory_order_release );
    }

    template <typename RCUtag>
    inline bool sh_thread_gc<RCUtag>::is_locked()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr );

        return (pRec->m_nAccessControl.load( atomics::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0;
    }


    // sh_singleton
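    // Writer-side singleton.
    //
    // set_signal_handler() installs a SA_SIGINFO handler for the configured
    // signal (m_nSigNo) and unblocks that signal for the calling thread.
    // The writer later raises this signal at every attached reader to obtain
    // the effect of a full memory barrier on the reader side without putting
    // a fence on the reader fast path.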
    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::set_signal_handler()
    {
        // TODO: OS-specific code should be moved to the cds::OS namespace
        struct sigaction sigact;
        std::memset( &sigact, 0, sizeof(sigact));
        sigact.sa_sigaction = signal_handler;
        sigact.sa_flags = SA_SIGINFO;
        sigemptyset( &sigact.sa_mask );
        //sigaddset( &sigact.sa_mask, m_nSigNo );
        sigaction( m_nSigNo, &sigact, nullptr );

        sigaddset( &sigact.sa_mask, m_nSigNo );
        pthread_sigmask( SIG_UNBLOCK, &sigact.sa_mask, nullptr );
    }

    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::clear_signal_handler()
    {}

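    // Runs in the context of the reader thread that received the signal.
    // The handler must stay async-signal-safe, so it only clears the
    // m_bNeedMemBar flag; the surrounding atomic_signal_fence calls order the
    // flag update against the interrupted code, while the kernel's signal
    // delivery is relied upon to act as the cross-thread memory barrier the
    // writer is waiting for.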
    template <typename RCUtag>
    void sh_singleton<RCUtag>::signal_handler( int /*signo*/, siginfo_t * /*sigInfo*/, void * /*context*/ )
    {
        thread_record * pRec = cds::threading::getRCU<RCUtag>();
        if ( pRec ) {
            atomics::atomic_signal_fence( atomics::memory_order_acquire );
            pRec->m_bNeedMemBar.store( false, atomics::memory_order_relaxed );
            atomics::atomic_signal_fence( atomics::memory_order_release );
        }
    }

    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::raise_signal( cds::OS::ThreadId tid )
    {
        pthread_kill( tid, m_nSigNo );
    }

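    // Writer-side replacement for a full memory barrier on every reader.
    // Phase 1 sets m_bNeedMemBar and signals each attached thread; phase 2
    // spins (with back-off) until every signalled thread has either run the
    // handler, clearing the flag, or detached from RCU. After both phases the
    // writer knows each reader has passed through a memory barrier since the
    // flag was set.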
    template <typename RCUtag>
    template <class Backoff>
    inline void sh_singleton<RCUtag>::force_membar_all_threads( Backoff& bkOff )
    {
        OS::ThreadId const nullThreadId = OS::c_NullThreadId;

        // Send "need membar" signal to all RCU threads
        for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
            OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire );
            if ( tid != nullThreadId ) {
                pRec->m_bNeedMemBar.store( true, atomics::memory_order_release );
                raise_signal( tid );
            }
        }

        // Wait until all RCU threads have processed the signal
        for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
            OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire );
            if ( tid != nullThreadId ) {
                bkOff.reset();
                while ( (tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire )) != nullThreadId
                     && pRec->m_bNeedMemBar.load( atomics::memory_order_acquire ))
                {
                    // Some OSes can lose signals, so resend the signal to the thread
                    raise_signal( tid );
                    bkOff();
                }
            }
        }
    }

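    // A reader delays the current grace period only if it is inside a
    // read-side critical section (non-zero nest count) AND its snapshot of the
    // control-word phase bits differs from the writer's current value, i.e. it
    // entered the critical section before the writer flipped the phase.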
    template <typename RCUtag>
    bool sh_singleton<RCUtag>::check_grace_period( thread_record * pRec ) const
    {
        uint32_t const v = pRec->m_nAccessControl.load( atomics::memory_order_acquire );
        return (v & signal_handling_rcu::c_nNestMask)
            && ((( v ^ m_nGlobalControl.load( atomics::memory_order_relaxed )) & ~signal_handling_rcu::c_nNestMask ));
    }

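    // Spins over the thread list (with back-off) until every attached reader
    // has either left its pre-flip critical section or detached from RCU.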
    template <typename RCUtag>
    template <class Backoff>
    void sh_singleton<RCUtag>::wait_for_quiescent_state( Backoff& bkOff )
    {
        OS::ThreadId const nullThreadId = OS::c_NullThreadId;

        for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
            while ( pRec->m_list.m_idOwner.load( atomics::memory_order_acquire ) != nullThreadId && check_grace_period( pRec ))
                bkOff();
        }
    }

}}} // namespace cds::urcu::details
//@endcond
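/*
    Usage sketch (illustrative, not part of the library interface): the inlines
    above are normally reached through the public RCU wrappers rather than used
    directly. Assuming the usual typedef for the buffered signal-handling RCU,
    a reader-side critical section looks roughly like this:

        #include <cds/urcu/signal_buffered.h>

        typedef cds::urcu::gc< cds::urcu::signal_buffered<> > rcu_type;

        void reader()
        {
            rcu_type::scoped_lock guard;   // ends up in sh_thread_gc::access_lock()
            // ... read RCU-protected data ...
        }                                  // sh_thread_gc::access_unlock() on scope exit
*/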

#endif // #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
#endif // #ifndef CDSLIB_URCU_DETAILS_SH_H