//$$CDS-header$$

#ifndef _CDS_URCU_DETAILS_GP_H
#define _CDS_URCU_DETAILS_GP_H

#include <cds/urcu/details/gp_decl.h>
#include <cds/threading/model.h>

//@cond
namespace cds { namespace urcu { namespace details {

    // Inlines

    // gp_thread_gc
    template <typename RCUtag>
    inline gp_thread_gc<RCUtag>::gp_thread_gc()
    {
        // Attach the current thread to the libcds threading manager
        // on first use so that it gets a thread_record
        if ( !threading::Manager::isThreadAttached() )
            cds::threading::Manager::attachThread();
    }

    template <typename RCUtag>
    inline gp_thread_gc<RCUtag>::~gp_thread_gc()
    {
        cds::threading::Manager::detachThread();
    }

    template <typename RCUtag>
    inline typename gp_thread_gc<RCUtag>::thread_record * gp_thread_gc<RCUtag>::get_thread_record()
    {
        return cds::threading::getRCU<RCUtag>();
    }

    template <typename RCUtag>
    inline void gp_thread_gc<RCUtag>::access_lock()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != null_ptr<thread_record *>());

        uint32_t tmp = pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed );
        if ( (tmp & rcu_class::c_nNestMask) == 0 ) {
            // Outermost critical section: copy the global control word
            // (the current grace-period phase bit plus a nest count of 1)
            // into the thread-local control word
            pRec->m_nAccessControl.store( gp_singleton<RCUtag>::instance()->global_control_word(CDS_ATOMIC::memory_order_relaxed),
                CDS_ATOMIC::memory_order_relaxed );
            CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire );
            //CDS_COMPILER_RW_BARRIER;
        }
        else {
            // Nested critical section: just bump the nesting counter
            pRec->m_nAccessControl.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
        }
    }

    template <typename RCUtag>
    inline void gp_thread_gc<RCUtag>::access_unlock()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != null_ptr<thread_record *>());

        //CDS_COMPILER_RW_BARRIER;
        // Release ordering keeps the accesses made inside the critical
        // section from being reordered past the decrement
        pRec->m_nAccessControl.fetch_sub( 1, CDS_ATOMIC::memory_order_release );
    }

    template <typename RCUtag>
    inline bool gp_thread_gc<RCUtag>::is_locked()
    {
        thread_record * pRec = get_thread_record();
        assert( pRec != null_ptr<thread_record *>());

        // A non-zero nesting count means the thread is inside a read-side critical section
        return (pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0;
    }

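    // Read-side usage sketch (illustrative only; not part of this header).
    // Real callers go through the public cds::urcu::gc<> wrapper, but at this
    // level a read-side critical section boils down to:
    //
    //      gp_thread_gc<RCUtag> threadGC;  // attaches the current thread if needed
    //      threadGC.access_lock();         // begin read-side critical section
    //      // ... read RCU-protected data; it cannot be reclaimed while locked ...
    //      threadGC.access_unlock();       // end read-side critical section
    //
    // access_lock()/access_unlock() may nest; only the outermost lock copies
    // the global grace-period phase bit into the thread-local control word.
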
    // gp_singleton
    template <typename RCUtag>
    inline bool gp_singleton<RCUtag>::check_grace_period( typename gp_singleton<RCUtag>::thread_record * pRec ) const
    {
        // Keep waiting for pRec while it is inside a read-side critical
        // section (non-zero nest count) that began in the previous
        // grace-period phase (its phase bit differs from the global one)
        uint32_t const v = pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed );
        return (v & general_purpose_rcu::c_nNestMask)
            && ((( v ^ m_nGlobalControl.load( CDS_ATOMIC::memory_order_relaxed )) & ~general_purpose_rcu::c_nNestMask ));
    }

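    // Worked example (illustrative, assuming the layout in gp_decl.h where
    // c_nControlBit is the high bit and c_nNestMask covers the low 31 bits):
    // a reader word v == 0x80000002 means "nest count 2, started in phase 1".
    // If the global word is 0x00000001 (phase 0), then (v & c_nNestMask) != 0
    // and ((v ^ global) & ~c_nNestMask) != 0, so check_grace_period() returns
    // true and the writer keeps waiting for this reader.
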
    template <typename RCUtag>
    template <class Backoff>
    inline void gp_singleton<RCUtag>::flip_and_wait( Backoff& bkoff )
    {
        OS::ThreadId const nullThreadId = OS::nullThreadId();

        // Toggle the global grace-period phase bit
        m_nGlobalControl.fetch_xor( general_purpose_rcu::c_nControlBit, CDS_ATOMIC::memory_order_seq_cst );

        // Wait until every attached thread has left the read-side critical
        // section it started in the previous phase
        for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
            while ( pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire) != nullThreadId && check_grace_period( pRec ) ) {
                bkoff();
                CDS_COMPILER_RW_BARRIER;
            }
            bkoff.reset();
        }
    }
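
    // Writer-side sketch (illustrative only; not part of this header).
    // A grace period in this general-purpose URCU scheme needs two phase
    // flips, so a synchronize() built on this primitive looks roughly like:
    //
    //      template <typename RCUtag, class Backoff>
    //      void synchronize( gp_singleton<RCUtag> * pRCU )
    //      {
    //          Backoff bkoff;                 // any back-off policy, e.g. cds::backoff::Default
    //          pRCU->flip_and_wait( bkoff );  // waits out readers in the old phase
    //          pRCU->flip_and_wait( bkoff );  // catches readers that raced the first flip
    //      }
    //
    // The real synchronize() in the public RCU classes also serializes
    // writers with a mutex around the two flips.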

}}} // namespace cds::urcu::details
//@endcond

#endif // #ifndef _CDS_URCU_DETAILS_GP_H