/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef CDSLIB_URCU_DETAILS_SH_H
#define CDSLIB_URCU_DETAILS_SH_H

#include <cstring>  // std::memset
#include <cds/urcu/details/sh_decl.h>

#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
#include <cds/threading/model.h>

//@cond
namespace cds { namespace urcu { namespace details {

    // Inlines

    // sh_thread_gc
    template <typename RCUtag>
    inline sh_thread_gc<RCUtag>::sh_thread_gc()
    {
        if ( !cds::threading::Manager::isThreadAttached())
            cds::threading::Manager::attachThread();
    }

    template <typename RCUtag>
    inline sh_thread_gc<RCUtag>::~sh_thread_gc()
    {
        cds::threading::Manager::detachThread();
    }

    template <typename RCUtag>
    inline typename sh_thread_gc<RCUtag>::thread_record * sh_thread_gc<RCUtag>::get_thread_record()
    {
        return cds::threading::getRCU<RCUtag>();
    }

    template <typename RCUtag>
    inline void sh_thread_gc<RCUtag>::access_lock()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr );

        uint32_t tmp = pRec->m_nAccessControl.load( atomics::memory_order_relaxed );
        if ( (tmp & rcu_class::c_nNestMask) == 0 ) {
            // Outermost lock: snapshot the global control word so that a writer
            // can tell whether this critical section began before or after the
            // current grace period started
            pRec->m_nAccessControl.store(
                sh_singleton<RCUtag>::instance()->global_control_word( atomics::memory_order_acquire ),
                atomics::memory_order_release
            );
        }
        else {
            // Nested lock: only the nesting counter is incremented
            pRec->m_nAccessControl.fetch_add( 1, atomics::memory_order_release );
        }
        CDS_COMPILER_RW_BARRIER;
    }
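
    // Illustrative sketch, not library code: how m_nAccessControl evolves across
    // nested read-side sections, assuming the low bits (c_nNestMask) count nesting
    // depth and the global control word carries a nesting count of one:
    //
    //   gc.access_lock();    // outermost: snapshot global word, nest depth == 1
    //   gc.access_lock();    // nested:    nest depth == 2, epoch bits unchanged
    //   gc.access_unlock();  // nest depth == 1, still inside the critical section
    //   gc.access_unlock();  // nest depth == 0, thread is quiescent again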

    template <typename RCUtag>
    inline void sh_thread_gc<RCUtag>::access_unlock()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr );

        CDS_COMPILER_RW_BARRIER;
        // Decrement the nesting counter; the outermost unlock makes the thread
        // quiescent again
        pRec->m_nAccessControl.fetch_sub( 1, atomics::memory_order_release );
    }

    template <typename RCUtag>
    inline bool sh_thread_gc<RCUtag>::is_locked()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr );

        return (pRec->m_nAccessControl.load( atomics::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0;
    }


    // sh_singleton
    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::set_signal_handler()
    {
        //TODO: OS-specific code must be moved to cds::OS namespace
        struct sigaction sigact;
        std::memset( &sigact, 0, sizeof(sigact));
        sigact.sa_sigaction = signal_handler;
        sigact.sa_flags = SA_SIGINFO;
        sigemptyset( &sigact.sa_mask );
        sigaction( m_nSigNo, &sigact, nullptr );

        // Make sure the RCU signal is not blocked in the calling thread
        sigaddset( &sigact.sa_mask, m_nSigNo );
        pthread_sigmask( SIG_UNBLOCK, &sigact.sa_mask, nullptr );
    }
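
    // For reference: signal_handler, installed above with SA_SIGINFO, is declared
    // elsewhere in the library (see sh_decl.h). A minimal sketch of what such a
    // handler has to do - execute a full memory barrier in the context of the
    // signaled thread and acknowledge the writer's request - might look like the
    // following (illustrative only; the real handler may differ):
    //
    //   static void signal_handler( int /*signo*/, siginfo_t* /*sigInfo*/, void* /*context*/ )
    //   {
    //       thread_record* pRec = cds::threading::getRCU<RCUtag>();
    //       if ( pRec ) {
    //           atomics::atomic_thread_fence( atomics::memory_order_seq_cst );
    //           pRec->m_bNeedMemBar.store( false, atomics::memory_order_release );
    //       }
    //   }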

    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::clear_signal_handler()
    {}

    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::raise_signal( cds::OS::ThreadId tid )
    {
        pthread_kill( tid, m_nSigNo );
    }

    template <typename RCUtag>
    template <class Backoff>
    inline void sh_singleton<RCUtag>::force_membar_all_threads( Backoff& bkOff )
    {
        OS::ThreadId const nullThreadId = OS::c_NullThreadId;

        // Send the "need membar" signal to all attached RCU threads
        for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
            OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire );
            if ( tid != nullThreadId ) {
                pRec->m_bNeedMemBar.store( true, atomics::memory_order_release );
                raise_signal( tid );
            }
        }

        // Wait until all RCU threads have processed the signal
        for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
            OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire );
            if ( tid != nullThreadId ) {
                bkOff.reset();
                while ( (tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire )) != nullThreadId
                     && pRec->m_bNeedMemBar.load( atomics::memory_order_acquire ))
                {
                    // Some OSes can lose signals, so resend the signal until
                    // the thread acknowledges it (or detaches)
                    raise_signal( tid );
                    bkOff();
                }
            }
        }
    }
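
    // How the pieces fit together (illustrative; the actual grace-period driver is
    // declared in sh_decl.h and may differ): a writer waiting for a grace period
    // would do roughly the following, where flip_global_epoch() is a hypothetical
    // name for toggling the epoch bit of the global control word:
    //
    //   force_membar_all_threads( bkOff );   // flush readers' pending accesses
    //   flip_global_epoch();                 // start a new grace period
    //   wait_for_quiescent_state( bkOff );   // wait out pre-flip readers
    //   force_membar_all_threads( bkOff );   // order readers' exits before reclamation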

    // Returns true if the thread is inside a read-side critical section that began
    // before the current grace period, i.e. its snapshot of the control word still
    // differs from the global one outside the nesting bits
    template <typename RCUtag>
    bool sh_singleton<RCUtag>::check_grace_period( thread_record * pRec ) const
    {
        uint32_t const v = pRec->m_nAccessControl.load( atomics::memory_order_acquire );
        return (v & signal_handling_rcu::c_nNestMask)
            && ((( v ^ m_nGlobalControl.load( atomics::memory_order_relaxed )) & ~signal_handling_rcu::c_nNestMask ));
    }
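
    // Worked example (illustrative, assuming the top bit of the control word is the
    // epoch bit and c_nNestMask covers the remaining low bits): if the global word
    // flips from 0x00000001 to 0x80000001 after a reader snapshots it, the reader
    // holds v == 0x00000001, so (v & c_nNestMask) != 0 (still reading) and
    // ((v ^ global) & ~c_nNestMask) == 0x80000000 != 0 (snapshot predates the
    // flip): the writer must keep waiting for this thread.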

    // Spin until every attached reader thread has left the read-side critical
    // section it held when the grace period began
    template <typename RCUtag>
    template <class Backoff>
    void sh_singleton<RCUtag>::wait_for_quiescent_state( Backoff& bkOff )
    {
        OS::ThreadId const nullThreadId = OS::c_NullThreadId;

        for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
            while ( pRec->m_list.m_idOwner.load( atomics::memory_order_acquire ) != nullThreadId && check_grace_period( pRec ))
                bkOff();
        }
    }
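
    // Typical reader-side usage at the public API level (sketch; the exact typedef
    // depends on which signal-based flavor is chosen, e.g. cds::urcu::signal_buffered
    // from <cds/urcu/signal_buffered.h>):
    //
    //   typedef cds::urcu::gc< cds::urcu::signal_buffered<> > rcu_type;
    //   {
    //       rcu_type::scoped_lock guard;   // access_lock() on this thread's record
    //       // ... read shared data structures ...
    //   }                                  // access_unlock() on scope exit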

}}} // namespace cds::urcu::details
//@endcond

#endif // #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
#endif // #ifndef CDSLIB_URCU_DETAILS_SH_H