#ifndef _CDS_URCU_DETAILS_SH_H
#define _CDS_URCU_DETAILS_SH_H

#include <cds/urcu/details/sh_decl.h>

#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
#include <cds/threading/model.h>

#include <signal.h>     // sigaction, sigemptyset, sigaddset, pthread_kill, pthread_sigmask
#include <cstring>      // memset

namespace cds { namespace urcu { namespace details {
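
    //---------------------------------------------------------------
    // sh_thread_gc
    //
    // Reader-side API of the signal-handling URCU flavor. The read
    // "lock" is just a store/increment of a per-thread control word:
    // the low bits (c_nNestMask) hold the lock nesting depth, the
    // remaining bits hold the grace-period epoch copied from the
    // global control word, so a writer can detect readers that are
    // still inside a critical section opened in an old epoch.
    //---------------------------------------------------------------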
    template <typename RCUtag>
    inline sh_thread_gc<RCUtag>::sh_thread_gc()
    {
        // Attach the current thread to the threading manager on first use
        if ( !threading::Manager::isThreadAttached() )
            cds::threading::Manager::attachThread();
    }

    template <typename RCUtag>
    inline sh_thread_gc<RCUtag>::~sh_thread_gc()
    {
        cds::threading::Manager::detachThread();
    }

    template <typename RCUtag>
    inline typename sh_thread_gc<RCUtag>::thread_record * sh_thread_gc<RCUtag>::get_thread_record()
    {
        return cds::threading::getRCU<RCUtag>();
    }

    template <typename RCUtag>
    inline void sh_thread_gc<RCUtag>::access_lock()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr );

        uint32_t tmp = pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed );
        if ( (tmp & rcu_class::c_nNestMask) == 0 ) {
            // Outermost read-side section: copy the global control word
            // (current epoch plus initial nest count) into the thread-local word
            pRec->m_nAccessControl.store(
                sh_singleton<RCUtag>::instance()->global_control_word( CDS_ATOMIC::memory_order_acquire ),
                CDS_ATOMIC::memory_order_release
            );
        }
        else {
            // Nested read-side section: just bump the nest counter
            pRec->m_nAccessControl.fetch_add( 1, CDS_ATOMIC::memory_order_release );
        }
        CDS_COMPILER_RW_BARRIER;
    }

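    // access_unlock: leave the read-side critical section by decrementing
    // the nest counter; the compiler barrier plus the release ordering keep
    // the critical-section accesses from sinking below the unlock.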
    template <typename RCUtag>
    inline void sh_thread_gc<RCUtag>::access_unlock()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr );

        CDS_COMPILER_RW_BARRIER;
        pRec->m_nAccessControl.fetch_sub( 1, CDS_ATOMIC::memory_order_release );
    }

    template <typename RCUtag>
    inline bool sh_thread_gc<RCUtag>::is_locked()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr );

        return ( pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed ) & rcu_class::c_nNestMask ) != 0;
    }

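    //---------------------------------------------------------------
    // sh_singleton
    //
    // Writer-side machinery. Instead of issuing a full memory barrier
    // in every reader, the writer sends a signal (m_nSigNo) to each
    // attached thread; the signal handler acts as the reader's memory
    // barrier and clears the per-thread m_bNeedMemBar flag as the
    // acknowledgment the writer waits for.
    //---------------------------------------------------------------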
    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::set_signal_handler()
    {
        //TODO: OS-specific code must be moved to cds::OS namespace
        struct sigaction sigact;
        memset( &sigact, 0, sizeof( sigact ));
        sigact.sa_sigaction = signal_handler;
        sigact.sa_flags = SA_SIGINFO;
        sigemptyset( &sigact.sa_mask );
        sigaction( m_nSigNo, &sigact, nullptr );

        // Make sure the signal is not blocked in the calling thread
        sigaddset( &sigact.sa_mask, m_nSigNo );
        pthread_sigmask( SIG_UNBLOCK, &sigact.sa_mask, nullptr );
    }

    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::clear_signal_handler()
    {}

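    // signal_handler: runs in the context of a reader thread when the
    // writer calls force_membar_all_threads(). The pair of signal fences
    // orders the reader's memory accesses around clearing m_bNeedMemBar,
    // which tells the writer this thread has executed its "barrier".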
    template <typename RCUtag>
    void sh_singleton<RCUtag>::signal_handler( int signo, siginfo_t * sigInfo, void * context )
    {
        thread_record * pRec = cds::threading::getRCU<RCUtag>();
        if ( pRec ) {
            CDS_ATOMIC::atomic_signal_fence( CDS_ATOMIC::memory_order_acquire );
            pRec->m_bNeedMemBar.store( false, CDS_ATOMIC::memory_order_relaxed );
            CDS_ATOMIC::atomic_signal_fence( CDS_ATOMIC::memory_order_release );
        }
    }

    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::raise_signal( cds::OS::ThreadId tid )
    {
        // pthread_kill() needs a native thread id (cds::OS::ThreadId is
        // pthread_t on POSIX), not a std::thread::id
        pthread_kill( tid, m_nSigNo );
    }
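
    // force_membar_all_threads: writer-side replacement for a heavy memory
    // barrier in every reader. Phase 1 marks each live reader thread with
    // m_bNeedMemBar and signals it; phase 2 spins (with backoff) until each
    // thread's handler has cleared the flag, resending the signal because
    // some systems may drop it.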
    template <typename RCUtag>
    template <class Backoff>
    inline void sh_singleton<RCUtag>::force_membar_all_threads( Backoff& bkOff )
    {
        cds::OS::ThreadId const nullThreadId = cds::OS::c_NullThreadId;

        // Send "need membar" signal to all RCU threads
        for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
            cds::OS::ThreadId tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire );
            if ( tid != nullThreadId ) {
                pRec->m_bNeedMemBar.store( true, CDS_ATOMIC::memory_order_release );
                raise_signal( tid );
            }
        }

        // Wait while all RCU threads process the signal
        for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
            cds::OS::ThreadId tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire );
            if ( tid != nullThreadId ) {
                bkOff.reset();
                while ( (tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire )) != nullThreadId
                     && pRec->m_bNeedMemBar.load( CDS_ATOMIC::memory_order_acquire ))
                {
                    // Some versions of OSes can lose signals, so resend
                    raise_signal( tid );
                    bkOff();
                }
            }
        }
    }

    template <typename RCUtag>
    bool sh_singleton<RCUtag>::check_grace_period( thread_record * pRec ) const
    {
        // True if the thread is inside a read-side section (nest count != 0)
        // whose epoch differs from the current global epoch
        uint32_t const v = pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_acquire );
        return (v & signal_handling_rcu::c_nNestMask) != 0
            && (( v ^ m_nGlobalControl.load( CDS_ATOMIC::memory_order_relaxed )) & ~signal_handling_rcu::c_nNestMask ) != 0;
    }

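    // wait_for_quiescent_state: after the global epoch has been advanced,
    // spin on each attached thread until it is either detached or has left
    // the read-side critical section it began in the previous epoch.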
    template <typename RCUtag>
    template <class Backoff>
    void sh_singleton<RCUtag>::wait_for_quiescent_state( Backoff& bkOff )
    {
        cds::OS::ThreadId const nullThreadId = cds::OS::c_NullThreadId;

        for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
            while ( pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire ) != nullThreadId && check_grace_period( pRec ))
                bkOff();
        }
    }

}}} // namespace cds::urcu::details

#endif // #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
#endif // #ifndef _CDS_URCU_DETAILS_SH_H