3 #ifndef CDSLIB_URCU_DETAILS_GP_H
4 #define CDSLIB_URCU_DETAILS_GP_H
6 #include <cds/urcu/details/gp_decl.h>
7 #include <cds/threading/model.h>
10 namespace cds { namespace urcu { namespace details {
15 template <typename RCUtag>
16 inline gp_thread_gc<RCUtag>::gp_thread_gc()
18 if ( !threading::Manager::isThreadAttached() )
19 cds::threading::Manager::attachThread();
22 template <typename RCUtag>
23 inline gp_thread_gc<RCUtag>::~gp_thread_gc()
25 cds::threading::Manager::detachThread();
28 template <typename RCUtag>
29 inline typename gp_thread_gc<RCUtag>::thread_record * gp_thread_gc<RCUtag>::get_thread_record()
31 return cds::threading::getRCU<RCUtag>();
template <typename RCUtag>
inline void gp_thread_gc<RCUtag>::access_lock()
{
    // Enters (or nests) a read-side critical section for the calling thread.
    thread_record * pRec = get_thread_record();
    assert( pRec != nullptr );

    uint32_t tmp = pRec->m_nAccessControl.load( atomics::memory_order_relaxed );
    if ( (tmp & rcu_class::c_nNestMask) == 0 ) {
        // Outermost lock: capture the current global control word into the
        // thread's control word (presumably the global word carries only the
        // epoch/control bit, leaving the nest bits zero — see gp_decl.h).
        pRec->m_nAccessControl.store( gp_singleton<RCUtag>::instance()->global_control_word(atomics::memory_order_relaxed),
            atomics::memory_order_relaxed );
        // Acquire fence: reads inside the critical section may not be
        // hoisted above the epoch capture ...
        atomics::atomic_thread_fence( atomics::memory_order_acquire );
        CDS_COMPILER_RW_BARRIER;   // ... and the compiler may not reorder across this point either
    }
    // Bump the nesting counter; relaxed is enough, only this thread writes it.
    pRec->m_nAccessControl.fetch_add( 1, atomics::memory_order_relaxed );
}
template <typename RCUtag>
inline void gp_thread_gc<RCUtag>::access_unlock()
{
    // Leaves one nesting level of the read-side critical section.
    thread_record * pRec = get_thread_record();
    assert( pRec != nullptr );

    CDS_COMPILER_RW_BARRIER;   // keep the compiler from sinking critical-section code below the unlock
    // Release ordering publishes all accesses made inside the critical
    // section before the nest count is decremented, so the writer's
    // grace-period check cannot observe the unlock too early.
    pRec->m_nAccessControl.fetch_sub( 1, atomics::memory_order_release );
}
62 template <typename RCUtag>
63 inline bool gp_thread_gc<RCUtag>::is_locked()
65 thread_record * pRec = get_thread_record();
66 assert( pRec != nullptr );
68 return (pRec->m_nAccessControl.load( atomics::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0;
73 template <typename RCUtag>
74 inline bool gp_singleton<RCUtag>::check_grace_period( typename gp_singleton<RCUtag>::thread_record * pRec ) const
76 uint32_t const v = pRec->m_nAccessControl.load( atomics::memory_order_relaxed );
77 return (v & general_purpose_rcu::c_nNestMask)
78 && ((( v ^ m_nGlobalControl.load( atomics::memory_order_relaxed )) & ~general_purpose_rcu::c_nNestMask ));
template <typename RCUtag>
template <class Backoff>
inline void gp_singleton<RCUtag>::flip_and_wait( Backoff& bkoff )
{
    // Starts a new grace period and blocks until every reader that entered
    // its critical section under the OLD epoch has left it.
    //
    // @param bkoff  back-off policy for spinning on a lagging reader.
    //   NOTE(review): bkoff is never invoked in the visible code — a bkoff()
    //   call presumably belongs in the spin loop below and may have been lost;
    //   confirm against the upstream sources.
    OS::ThreadId const nullThreadId = OS::c_NullThreadId;

    // Flip the global epoch bit; seq_cst makes the flip totally ordered
    // against the readers' control-word operations.
    m_nGlobalControl.fetch_xor( general_purpose_rcu::c_nControlBit, atomics::memory_order_seq_cst );

    // Walk every registered thread record; spin while the record belongs to a
    // live thread (owner id != null) that is still inside the old epoch.
    for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
        while ( pRec->m_list.m_idOwner.load( atomics::memory_order_acquire) != nullThreadId && check_grace_period( pRec ) ) {
            CDS_COMPILER_RW_BARRIER;   // force a fresh re-read of the control word each iteration
        }
    }
}
}}} // namespace cds::urcu::details
101 #endif // #ifndef CDSLIB_URCU_DETAILS_GP_H