2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_URCU_DETAILS_SH_H
32 #define CDSLIB_URCU_DETAILS_SH_H
34 #include <memory.h> //memset
35 #include <cds/urcu/details/sh_decl.h>
37 #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
38 #include <cds/threading/model.h>
41 namespace cds { namespace urcu { namespace details {
46 template <typename RCUtag>
47 inline sh_thread_gc<RCUtag>::sh_thread_gc()
49 if ( !threading::Manager::isThreadAttached())
50 cds::threading::Manager::attachThread();
53 template <typename RCUtag>
54 inline sh_thread_gc<RCUtag>::~sh_thread_gc()
56 cds::threading::Manager::detachThread();
59 template <typename RCUtag>
60 inline typename sh_thread_gc<RCUtag>::thread_record * sh_thread_gc<RCUtag>::get_thread_record()
62 return cds::threading::getRCU<RCUtag>();
65 template <typename RCUtag>
66 inline void sh_thread_gc<RCUtag>::access_lock()
68 thread_record * pRec = get_thread_record();
69 assert( pRec != nullptr );
71 uint32_t tmp = pRec->m_nAccessControl.load( atomics::memory_order_relaxed );
72 if ( (tmp & rcu_class::c_nNestMask) == 0 ) {
73 pRec->m_nAccessControl.store(
74 sh_singleton<RCUtag>::instance()->global_control_word(atomics::memory_order_acquire),
75 atomics::memory_order_release
79 pRec->m_nAccessControl.fetch_add( 1, atomics::memory_order_release );
81 CDS_COMPILER_RW_BARRIER;
84 template <typename RCUtag>
85 inline void sh_thread_gc<RCUtag>::access_unlock()
87 thread_record * pRec = get_thread_record();
88 assert( pRec != nullptr);
90 CDS_COMPILER_RW_BARRIER;
91 pRec->m_nAccessControl.fetch_sub( 1, atomics::memory_order_release );
94 template <typename RCUtag>
95 inline bool sh_thread_gc<RCUtag>::is_locked()
97 thread_record * pRec = get_thread_record();
98 assert( pRec != nullptr);
100 return (pRec->m_nAccessControl.load( atomics::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0;
105 template <typename RCUtag>
106 inline void sh_singleton<RCUtag>::set_signal_handler()
108 //TODO: OS-specific code must be moved to cds::OS namespace
109 struct sigaction sigact;
110 memset( &sigact, 0, sizeof(sigact));
111 sigact.sa_sigaction = signal_handler;
112 sigact.sa_flags = SA_SIGINFO;
113 sigemptyset( &sigact.sa_mask );
114 sigaction( m_nSigNo, &sigact, nullptr );
116 sigaddset( &sigact.sa_mask, m_nSigNo );
117 pthread_sigmask( SIG_UNBLOCK, &sigact.sa_mask, nullptr );
120 template <typename RCUtag>
121 inline void sh_singleton<RCUtag>::clear_signal_handler()
124 template <typename RCUtag>
125 inline void sh_singleton<RCUtag>::raise_signal( cds::OS::ThreadId tid )
127 pthread_kill( tid, m_nSigNo );
130 template <typename RCUtag>
131 template <class Backoff>
132 inline void sh_singleton<RCUtag>::force_membar_all_threads( Backoff& bkOff )
134 OS::ThreadId const nullThreadId = OS::c_NullThreadId;
136 // Send "need membar" signal to all RCU threads
137 for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
138 OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire);
139 if ( tid != nullThreadId ) {
140 pRec->m_bNeedMemBar.store( true, atomics::memory_order_release );
145 // Wait while all RCU threads process the signal
146 for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
147 OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire);
148 if ( tid != nullThreadId ) {
150 while ( (tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire )) != nullThreadId
151 && pRec->m_bNeedMemBar.load( atomics::memory_order_acquire ))
153 // Some versions of OSes can lose signals
154 // So, we resend the signal
162 template <typename RCUtag>
163 bool sh_singleton<RCUtag>::check_grace_period( thread_record * pRec ) const
165 uint32_t const v = pRec->m_nAccessControl.load( atomics::memory_order_acquire );
166 return (v & signal_handling_rcu::c_nNestMask)
167 && ((( v ^ m_nGlobalControl.load( atomics::memory_order_relaxed )) & ~signal_handling_rcu::c_nNestMask ));
170 template <typename RCUtag>
171 template <class Backoff>
172 void sh_singleton<RCUtag>::wait_for_quiescent_state( Backoff& bkOff )
174 OS::ThreadId const nullThreadId = OS::c_NullThreadId;
176 for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
177 while ( pRec->m_list.m_idOwner.load( atomics::memory_order_acquire) != nullThreadId && check_grace_period( pRec ))
}}} // namespace cds::urcu::details
185 #endif // #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
186 #endif // #ifndef CDSLIB_URCU_DETAILS_SH_H