Replace CDS_ATOMIC with namespace atomics
cds/urcu/details/sh.h
//$$CDS-header$$

#ifndef _CDS_URCU_DETAILS_SH_H
#define _CDS_URCU_DETAILS_SH_H

#include <cstring>  // std::memset
#include <cds/urcu/details/sh_decl.h>

#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
#include <cds/threading/model.h>

//@cond
namespace cds { namespace urcu { namespace details {

    // Inlines

    // sh_thread_gc
    template <typename RCUtag>
    inline sh_thread_gc<RCUtag>::sh_thread_gc()
    {
        // Attach the current thread to the libcds threading manager so that
        // its per-thread RCU data is allocated
        if ( !threading::Manager::isThreadAttached() )
            cds::threading::Manager::attachThread();
    }

    template <typename RCUtag>
    inline sh_thread_gc<RCUtag>::~sh_thread_gc()
    {
        cds::threading::Manager::detachThread();
    }

    template <typename RCUtag>
    inline typename sh_thread_gc<RCUtag>::thread_record * sh_thread_gc<RCUtag>::get_thread_record()
    {
        return cds::threading::getRCU<RCUtag>();
    }

    template <typename RCUtag>
    inline void sh_thread_gc<RCUtag>::access_lock()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr );

        uint32_t tmp = pRec->m_nAccessControl.load( atomics::memory_order_relaxed );
        if ( (tmp & rcu_class::c_nNestMask) == 0 ) {
            // Outermost lock: copy the global control word, which carries the
            // current grace-period phase and an initial nest count
            pRec->m_nAccessControl.store(
                sh_singleton<RCUtag>::instance()->global_control_word( atomics::memory_order_acquire ),
                atomics::memory_order_release
            );
        }
        else {
            // Nested lock: just bump the nest counter
            pRec->m_nAccessControl.fetch_add( 1, atomics::memory_order_release );
        }
        CDS_COMPILER_RW_BARRIER;
    }
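
    // Illustrative reader-side usage (sketch only; my_rcu_tag stands for
    // whatever RCU tag a concrete signal-handling RCU class supplies):
    //
    //      cds::urcu::details::sh_thread_gc<my_rcu_tag> gc;
    //      gc.access_lock();     // enter read-side critical section
    //      // ... safely dereference RCU-protected pointers ...
    //      gc.access_unlock();   // leave read-side critical section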

    template <typename RCUtag>
    inline void sh_thread_gc<RCUtag>::access_unlock()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr );

        CDS_COMPILER_RW_BARRIER;
        // Decrement the nest counter; zero means the thread has left its
        // read-side critical section
        pRec->m_nAccessControl.fetch_sub( 1, atomics::memory_order_release );
    }

    template <typename RCUtag>
    inline bool sh_thread_gc<RCUtag>::is_locked()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != nullptr );

        return (pRec->m_nAccessControl.load( atomics::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0;
    }

    // sh_singleton
    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::set_signal_handler()
    {
        //TODO: OS-specific code must be moved to cds::OS namespace
        struct sigaction sigact;
        std::memset( &sigact, 0, sizeof(sigact));
        sigact.sa_sigaction = signal_handler;
        sigact.sa_flags = SA_SIGINFO;
        sigemptyset( &sigact.sa_mask );
        //sigaddset( &sigact.sa_mask, m_nSigNo );
        sigaction( m_nSigNo, &sigact, nullptr );

        // Make sure the signal is not blocked in the calling thread
        sigaddset( &sigact.sa_mask, m_nSigNo );
        pthread_sigmask( SIG_UNBLOCK, &sigact.sa_mask, nullptr );
    }
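
    // Note: pthread_sigmask() changes the signal mask of the calling thread
    // only; threads created afterwards inherit that mask. A thread that keeps
    // m_nSigNo blocked cannot serve "need membar" requests promptly.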

    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::clear_signal_handler()
    {}

    template <typename RCUtag>
    void sh_singleton<RCUtag>::signal_handler( int signo, siginfo_t * sigInfo, void * context )
    {
        thread_record * pRec = cds::threading::getRCU<RCUtag>();
        if ( pRec ) {
            // Acknowledge the "need membar" request from force_membar_all_threads()
            atomics::atomic_signal_fence( atomics::memory_order_acquire );
            pRec->m_bNeedMemBar.store( false, atomics::memory_order_relaxed );
            atomics::atomic_signal_fence( atomics::memory_order_release );
        }
    }
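
    // The signal fences above are compiler-only barriers. The design
    // assumption, as in the userspace-rcu "signal" flavor, is that delivering
    // a signal to a thread implies a full memory barrier in that thread,
    // which is what lets access_lock()/access_unlock() avoid heavyweight
    // hardware fences on the reader fast path.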

    template <typename RCUtag>
    inline void sh_singleton<RCUtag>::raise_signal( cds::OS::ThreadId tid )
    {
        pthread_kill( tid, m_nSigNo );
    }

    template <typename RCUtag>
    template <class Backoff>
    inline void sh_singleton<RCUtag>::force_membar_all_threads( Backoff& bkOff )
    {
        OS::ThreadId const nullThreadId = OS::c_NullThreadId;

        // Send "need membar" signal to all RCU threads
        for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
            OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire );
            if ( tid != nullThreadId ) {
                pRec->m_bNeedMemBar.store( true, atomics::memory_order_release );
                raise_signal( tid );
            }
        }

        // Wait until every live RCU thread has processed the signal
        for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
            OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire );
            if ( tid != nullThreadId ) {
                bkOff.reset();
                while ( (tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire )) != nullThreadId
                     && pRec->m_bNeedMemBar.load( atomics::memory_order_acquire ))
                {
                    // Some OSes can lose signals, so resend until the thread acknowledges
                    raise_signal( tid );
                    bkOff();
                }
            }
        }
    }
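
    // Note: the two passes above are deliberate: broadcasting every signal
    // first and only then polling lets all threads handle their signals in
    // parallel instead of being waited on one at a time.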

    template <typename RCUtag>
    bool sh_singleton<RCUtag>::check_grace_period( thread_record * pRec ) const
    {
        // The thread still blocks the grace period iff it is inside a read-side
        // section (nonzero nest count) that began in an earlier grace-period
        // phase (its phase bits differ from those of the global control word)
        uint32_t const v = pRec->m_nAccessControl.load( atomics::memory_order_acquire );
        return (v & signal_handling_rcu::c_nNestMask)
            && ((( v ^ m_nGlobalControl.load( atomics::memory_order_relaxed )) & ~signal_handling_rcu::c_nNestMask ));
    }
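
    // Worked example (bit layout is illustrative, not the library's actual
    // constants): with c_nNestMask == 0x7fffffff, a reader word 0x80000002
    // means "nest depth 2, phase 1". If the global word is 0x00000001
    // (phase 0), the XOR above leaves the phase bit set, so the function
    // returns true: the reader entered before the current phase flip.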

    template <typename RCUtag>
    template <class Backoff>
    void sh_singleton<RCUtag>::wait_for_quiescent_state( Backoff& bkOff )
    {
        OS::ThreadId const nullThreadId = OS::c_NullThreadId;

        // Spin until each live thread either exits or leaves the read-side
        // section it entered before the grace-period flip
        for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
            while ( pRec->m_list.m_idOwner.load( atomics::memory_order_acquire ) != nullThreadId && check_grace_period( pRec ))
                bkOff();
        }
    }
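
    // Writer-side sketch of one grace period (illustrative only; the real
    // synchronize() is declared elsewhere in the library and, like
    // userspace-rcu, may flip the phase twice):
    //
    //      force_membar_all_threads( bkOff );  // readers' prior accesses become visible
    //      // flip the phase bits of m_nGlobalControl, then:
    //      wait_for_quiescent_state( bkOff );  // wait out pre-flip readers
    //      force_membar_all_threads( bkOff );  // order readers' later accesses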

}}} // namespace cds::urcu::details
//@endcond

#endif // #ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
#endif // #ifndef _CDS_URCU_DETAILS_SH_H