25e5642e0ba37bbaa207dc73ed1ab0309ffd402a
[libcds.git] / cds / urcu / details / sh.h
1 //$$CDS-header$$
2
3 #ifndef _CDS_URCU_DETAILS_SH_H
4 #define _CDS_URCU_DETAILS_SH_H
5
6 #include <cds/urcu/details/sh_decl.h>
7
8 #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
9 #include <cds/threading/model.h>
10
11 //@cond
12 namespace cds { namespace urcu { namespace details {
13
14     // Inlines
15
16     // sh_thread_gc
17     template <typename RCUtag>
18     inline sh_thread_gc<RCUtag>::sh_thread_gc()
19     {
20         if ( !threading::Manager::isThreadAttached() )
21             cds::threading::Manager::attachThread();
22     }
23
    // Detaches the current thread from the cds threading infrastructure.
    // NOTE(review): detach is unconditional even though the constructor
    // attaches only when not already attached — presumably
    // threading::Manager ref-counts attachments; confirm against Manager.
    template <typename RCUtag>
    inline sh_thread_gc<RCUtag>::~sh_thread_gc()
    {
        cds::threading::Manager::detachThread();
    }
29
    // Returns the calling thread's RCU control record, maintained per-thread
    // by the cds threading manager. Callers in this file assert the result
    // is non-null (i.e. the thread is expected to be attached).
    template <typename RCUtag>
    inline typename sh_thread_gc<RCUtag>::thread_record * sh_thread_gc<RCUtag>::get_thread_record()
    {
        return cds::threading::getRCU<RCUtag>();
    }
35
    // Enters an RCU read-side critical section for the current thread.
    //
    // The per-thread control word m_nAccessControl packs a nesting counter
    // (the low bits selected by rcu_class::c_nNestMask) together with
    // grace-period generation bits (the remaining bits).
    template <typename RCUtag>
    inline void sh_thread_gc<RCUtag>::access_lock()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr );

        uint32_t tmp = pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed );
        if ( (tmp & rcu_class::c_nNestMask) == 0 ) {
            // Outermost lock: snapshot the singleton's global control word,
            // which carries the current grace-period generation (and
            // presumably an initial nesting count — see
            // sh_singleton::global_control_word, declared elsewhere).
            pRec->m_nAccessControl.store(
                sh_singleton<RCUtag>::instance()->global_control_word(CDS_ATOMIC::memory_order_acquire),
                CDS_ATOMIC::memory_order_release
            );
        }
        else {
            // Nested lock: just bump the nesting counter.
            pRec->m_nAccessControl.fetch_add( 1, CDS_ATOMIC::memory_order_release );
        }
        // Compiler-only barrier keeps protected reads from hoisting above the
        // lock; the cross-CPU barrier is supplied by the writer's membar
        // signal (see sh_singleton::force_membar_all_threads).
        CDS_COMPILER_RW_BARRIER;
    }
54
    // Leaves an RCU read-side critical section: decrements the nesting
    // counter held in the low bits of the per-thread control word.
    template <typename RCUtag>
    inline void sh_thread_gc<RCUtag>::access_unlock()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr);

        // Compiler-only barrier keeps protected reads from sinking below the
        // unlock; heavy cross-thread ordering is done by the writer's membar
        // signal (see sh_singleton::force_membar_all_threads).
        CDS_COMPILER_RW_BARRIER;
        pRec->m_nAccessControl.fetch_sub( 1, CDS_ATOMIC::memory_order_release );
    }
64
65     template <typename RCUtag>
66     inline bool sh_thread_gc<RCUtag>::is_locked()
67     {
68         thread_record * pRec = get_thread_record();
69         assert( pRec != nullptr);
70
71         return (pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0;
72     }
73
74
75     // sh_singleton
    // Installs signal_handler() as the handler for m_nSigNo and unblocks
    // that signal in the calling thread so membar requests can be delivered.
    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::set_signal_handler()
    {
        //TODO: OS-specific code must be moved to cds::OS namespace
        struct sigaction sigact;
        memset( &sigact, 0, sizeof(sigact));
        sigact.sa_sigaction = signal_handler;
        sigact.sa_flags = SA_SIGINFO;       // use the three-argument (siginfo_t) handler form
        sigemptyset( &sigact.sa_mask );     // block no extra signals while the handler runs
        //sigaddset( &sigact.sa_mask, m_nSigNo );
        sigaction( m_nSigNo, &sigact, nullptr );

        // Reuse sa_mask as a scratch set to unblock m_nSigNo in this thread.
        // NOTE(review): threads created later inherit the creator's mask;
        // presumably m_nSigNo is not blocked there by default — confirm.
        sigaddset( &sigact.sa_mask, m_nSigNo );
        pthread_sigmask( SIG_UNBLOCK, &sigact.sa_mask, NULL );
    }
91
    // Intentionally a no-op: the handler installed by set_signal_handler()
    // is left in place; the previous signal disposition is not restored.
    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::clear_signal_handler()
    {}
95
    // Async-signal handler for the RCU membar signal. Runs on the reader
    // thread and acknowledges the writer's barrier request by clearing
    // m_bNeedMemBar. The surrounding atomic_signal_fence calls order the
    // store against the interrupted code at compiler level; the cross-CPU
    // barrier is presumably provided by kernel signal delivery itself
    // (the classic signal-based urcu technique) — confirm per platform.
    // The sa_sigaction parameters (signo, sigInfo, context) are unused.
    template <typename RCUtag>
    void sh_singleton<RCUtag>::signal_handler( int signo, siginfo_t * sigInfo, void * context )
    {
        thread_record * pRec = cds::threading::getRCU<RCUtag>();
        if ( pRec ) {
            CDS_ATOMIC::atomic_signal_fence( CDS_ATOMIC::memory_order_acquire );
            pRec->m_bNeedMemBar.store( false, CDS_ATOMIC::memory_order_relaxed );
            CDS_ATOMIC::atomic_signal_fence( CDS_ATOMIC::memory_order_release );
        }
    }
106
    // Sends the membar signal (m_nSigNo) to the given thread.
    // NOTE(review): the pthread_kill return code is ignored; a vanished
    // thread is presumably benign because the waiting loop in
    // force_membar_all_threads re-checks m_idOwner — confirm.
    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::raise_signal( cds::OS::ThreadId tid )
    {
        pthread_kill( tid, m_nSigNo );
    }
112
    // Forces a memory barrier on every attached RCU thread.
    //
    // Pass 1 marks each live thread record (m_bNeedMemBar = true) and sends
    // it the membar signal. Pass 2 spins with back-off until each thread
    // either acknowledges by clearing the flag in signal_handler(), or
    // detaches (m_idOwner becomes the null thread id). The signal is
    // re-sent on every back-off round because some OSes can lose signals.
    template <typename RCUtag>
    template <class Backoff>
    inline void sh_singleton<RCUtag>::force_membar_all_threads( Backoff& bkOff )
    {
        OS::ThreadId const nullThreadId = OS::c_NullThreadId;

        // Send "need membar" signal to all RCU threads
        for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
            OS::ThreadId tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire);
            if ( tid != nullThreadId ) {
                pRec->m_bNeedMemBar.store( true, CDS_ATOMIC::memory_order_release );
                raise_signal( tid );
            }
        }

        // Wait while all RCU threads process the signal
        for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
            OS::ThreadId tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire);
            if ( tid != nullThreadId ) {
                bkOff.reset();
                // Exit when the thread acknowledged (flag cleared) or died
                // (owner id became null).
                while ( (tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire )) != nullThreadId
                     && pRec->m_bNeedMemBar.load( CDS_ATOMIC::memory_order_acquire ))
                {
                    // Some versions of OSes can lose signals
                    // So, we resend the signal
                    raise_signal( tid );
                    bkOff();
                }
            }
        }
    }
144
    // Returns true while pRec still blocks the current grace period:
    // the thread is inside a read-side section (nesting bits, c_nNestMask,
    // non-zero) AND the generation bits of its control word differ from
    // the current global control word — i.e. the section was presumably
    // entered before the writer advanced the global generation.
    template <typename RCUtag>
    bool sh_singleton<RCUtag>::check_grace_period( thread_record * pRec ) const
    {
        uint32_t const v = pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_acquire );
        return (v & signal_handling_rcu::c_nNestMask)
            && ((( v ^ m_nGlobalControl.load( CDS_ATOMIC::memory_order_relaxed )) & ~signal_handling_rcu::c_nNestMask ));
    }
152
    // Writer-side wait: for every record in the thread list, spins with
    // back-off until the owning thread either detaches (m_idOwner becomes
    // the null id) or leaves the grace period being waited for
    // (check_grace_period() returns false).
    template <typename RCUtag>
    template <class Backoff>
    void sh_singleton<RCUtag>::wait_for_quiescent_state( Backoff& bkOff )
    {
        OS::ThreadId const nullThreadId = OS::c_NullThreadId;

        for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
            while ( pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire) != nullThreadId && check_grace_period( pRec ))
                bkOff();
        }
    }
164
165 }}} // namespace cds::urcu::details
166 //@endcond
167
168 #endif // #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
169 #endif // #ifndef _CDS_URCU_DETAILS_SH_H