Revert "Replace cds::OS::ThreadId with std::thread::id, cds::OS::nullThreadId() with...
author    khizmax <libcds.dev@gmail.com>
Fri, 19 Sep 2014 21:05:08 +0000 (01:05 +0400)
committer    khizmax <libcds.dev@gmail.com>
Fri, 19 Sep 2014 21:05:08 +0000 (01:05 +0400)
This reverts commit 696ef88ac5b9503da2300246865ba1671d87aae3.

17 files changed:
cds/gc/hrc/hrc.h
cds/gc/hzp/hzp.h
cds/intrusive/cuckoo_set.h
cds/intrusive/mspriority_queue.h
cds/intrusive/striped_set/striping_policy.h
cds/lock/spinlock.h
cds/os/posix/thread.h
cds/os/win/thread.h
cds/urcu/details/base.h
cds/urcu/details/gp.h
cds/urcu/details/sh.h
cds/urcu/details/sh_decl.h
src/dllmain.cpp
src/hrc_gc.cpp
src/hzp_gc.cpp
tests/unit/map2/map_insdel_func.cpp
tests/unit/set2/set_insdel_func.h

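Taken together, the diffs below switch the thread-identity plumbing back from C++11 std::thread::id to the library's own cds::OS::ThreadId handle (pthread_t on POSIX, DWORD on Win32) with a dedicated null constant. A minimal sketch of the two styles being swapped, assuming the aggregate <cds/os/thread.h> header; the helper functions are illustrative only, not part of the commit:

    #include <thread>
    #include <cds/os/thread.h>   // assumed aggregate header for the cds::OS layer

    // Restored style: the id is a raw OS handle with a reserved null value.
    bool owns_record_os( cds::OS::ThreadId owner )
    {
        return owner != cds::OS::c_NullThreadId
            && owner == cds::OS::getCurrentThreadId();
    }

    // Reverted style: opaque std::thread::id, where a default-constructed
    // value plays the role of "no thread".
    bool owns_record_std( std::thread::id owner )
    {
        return owner != std::thread::id()
            && owner == std::this_thread::get_id();
    }
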
diff --git a/cds/gc/hrc/hrc.h b/cds/gc/hrc/hrc.h
index 743248e574bd960335a55899d77f84a88b504a2e..ff5239314e7e416f34e08dc7e06998035b2eb59d 100644
@@ -294,7 +294,7 @@ namespace cds { namespace gc {
             {
                 thread_list_node *  m_pNext     ; ///< next list record
                 ThreadGC *          m_pOwner    ; ///< Owner of record
-                CDS_ATOMIC::atomic<std::thread::id>   m_idOwner   ; ///< Id of thread owned; 0 - record is free
+                CDS_ATOMIC::atomic<cds::OS::ThreadId>   m_idOwner   ; ///< Id of thread owned; 0 - record is free
                 bool                m_bFree        ; ///< Node is help-scanned
 
                 //@cond
@@ -302,14 +302,14 @@ namespace cds { namespace gc {
                     : thread_descriptor( HzpMgr ),
                     m_pNext( nullptr ),
                     m_pOwner( nullptr ),
-                    m_idOwner( std::thread::id() ),
+                    m_idOwner(cds::OS::c_NullThreadId),
                     m_bFree( false )
                 {}
 
                 ~thread_list_node()
                 {
                     assert( m_pOwner == nullptr );
-                    assert( m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == std::thread::id() );
+                    assert( m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == cds::OS::c_NullThreadId );
                 }
                 //@endcond
             };
diff --git a/cds/gc/hzp/hzp.h b/cds/gc/hzp/hzp.h
index 568c4835d8747dbc3cc0e3e7d8c371eaa0aaa294..fc3dd20cdf4b4ccc4c9c261aa17885000e05d826 100644
@@ -208,20 +208,20 @@ namespace cds {
             struct hplist_node: public details::HPRec
             {
                 hplist_node *                       m_pNextNode ; ///< next hazard ptr record in list
-                CDS_ATOMIC::atomic<std::thread::id>    m_idOwner; ///< Owner thread id; 0 - the record is free (not owned)
+                CDS_ATOMIC::atomic<OS::ThreadId>    m_idOwner   ; ///< Owner thread id; 0 - the record is free (not owned)
                 CDS_ATOMIC::atomic<bool>            m_bFree     ; ///< true if record if free (not owned)
 
                 //@cond
                 hplist_node( const GarbageCollector& HzpMgr )
                     : HPRec( HzpMgr ),
                     m_pNextNode(NULL),
-                    m_idOwner( std::thread::id() ),
+                    m_idOwner( OS::c_NullThreadId ),
                     m_bFree( true )
                 {}
 
                 ~hplist_node()
                 {
-                    assert( m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == std::thread::id() );
+                    assert( m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == OS::c_NullThreadId );
                     assert( m_bFree.load(CDS_ATOMIC::memory_order_relaxed) );
                 }
                 //@endcond
diff --git a/cds/intrusive/cuckoo_set.h b/cds/intrusive/cuckoo_set.h
index 42db2d66c39acffd102cd032b5eb30f965769d07..dd07159d3afe90e6eb3a1c709de0cbd76a3dc1b5 100644
@@ -648,7 +648,7 @@ namespace cds { namespace intrusive {
             typedef cds::details::Allocator< lock_array_type, allocator_type >  lock_array_allocator;
 
             typedef unsigned long long  owner_t;
-            typedef std::thread::id     threadId_t;
+            typedef cds::OS::ThreadId   threadId_t;
 
             typedef cds::lock::Spin     spinlock_type;
             typedef cds::lock::scoped_lock< spinlock_type > scoped_spinlock;
@@ -681,7 +681,7 @@ namespace cds { namespace intrusive {
 
             void acquire( size_t const * arrHash, lock_array_ptr * pLockArr, lock_type ** parrLock )
             {
-                owner_t me = (owner_t)std::this_thread::get_id();
+                owner_t me = (owner_t) cds::OS::getCurrentThreadId();
                 owner_t who;
 
                 back_off bkoff;
@@ -760,7 +760,7 @@ namespace cds { namespace intrusive {
 
             void acquire_all()
             {
-                owner_t me = (owner_t)std::this_thread::get_id();
+                owner_t me = (owner_t) cds::OS::getCurrentThreadId();
 
                 back_off bkoff;
                 while ( true ) {
@@ -784,7 +784,7 @@ namespace cds { namespace intrusive {
 
             void acquire_resize( lock_array_ptr * pOldLocks )
             {
-                owner_t me = (owner_t)std::this_thread::get_id();
+                owner_t me = (owner_t) cds::OS::getCurrentThreadId();
 
                 while ( true ) {
                     {
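
The refinable lock above records its owner by casting the thread id into an unsigned long long owner word. That cast only works for a native handle such as cds::OS::ThreadId (DWORD on Win32, pthread_t on the POSIX builds the library targets); std::thread::id is an opaque class with no conversion to an integer, so the casts in the removed lines would not compile as written. A standalone illustration of the difference (not library code; pack_handle's parameter type is just a stand-in for a native handle):

    #include <thread>
    #include <functional>

    typedef unsigned long long owner_t;

    // Restored form: a native handle converts to the owner word directly.
    owner_t pack_handle( unsigned long nativeId )
    {
        return (owner_t) nativeId;
    }

    // std::thread::id cannot be cast to an integer; the closest portable
    // workaround is hashing it, which is no longer the raw identifier.
    owner_t pack_std_id( std::thread::id id )
    {
        return std::hash<std::thread::id>()( id );
    }
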
diff --git a/cds/intrusive/mspriority_queue.h b/cds/intrusive/mspriority_queue.h
index b502fd2d79e200712192054c0daf9cbc315b14d2..ce277758c739a303949af6e8cd1c9cb74db19790 100644
@@ -169,7 +169,7 @@ namespace cds { namespace intrusive {
 
     protected:
         //@cond
-        typedef std::thread::id tag_type;
+        typedef cds::OS::ThreadId   tag_type;
 
         enum tag_value {
             Available   = -1,
@@ -255,7 +255,7 @@ namespace cds { namespace intrusive {
         */
         bool push( value_type& val )
         {
-            tag_type const curId = std::this_thread::get_id();
+            tag_type const curId = cds::OS::getCurrentThreadId();
 
             // Insert new item at bottom of the heap
             m_Lock.lock();
diff --git a/cds/intrusive/striped_set/striping_policy.h b/cds/intrusive/striped_set/striping_policy.h
index 9a593c9afd3833de726e96a57d12dda78e8c7dd7..1e3ec4c39b4630d3118fe25838be7ddbfbdef8af 100644
@@ -142,7 +142,7 @@ namespace cds { namespace intrusive { namespace striped_set {
         typedef cds::details::Allocator< lock_array_type, allocator_type >  lock_array_allocator;
 
         typedef unsigned long long  owner_t;
-        typedef std::thread::id     threadId_t;
+        typedef cds::OS::ThreadId   threadId_t;
 
         typedef cds::lock::Spin     spinlock_type;
         typedef cds::lock::scoped_lock< spinlock_type > scoped_spinlock;
@@ -175,7 +175,7 @@ namespace cds { namespace intrusive { namespace striped_set {
 
         lock_type& acquire( size_t nHash )
         {
-            owner_t me = (owner_t) std::this_thread::get_id();
+            owner_t me = (owner_t) cds::OS::getCurrentThreadId();
             owner_t who;
 
             back_off bkoff;
@@ -206,7 +206,7 @@ namespace cds { namespace intrusive { namespace striped_set {
 
         lock_array_ptr acquire_all()
         {
-            owner_t me = (owner_t)std::this_thread::get_id();
+            owner_t me = (owner_t) cds::OS::getCurrentThreadId();
             owner_t who;
 
             back_off bkoff;
@@ -242,7 +242,7 @@ namespace cds { namespace intrusive { namespace striped_set {
 
         bool acquire_resize()
         {
-            owner_t me = (owner_t)std::this_thread::get_id();
+            owner_t me = (owner_t) cds::OS::getCurrentThreadId();
 
             back_off bkoff;
             for (unsigned int nAttempts = 0; nAttempts < 32; ++nAttempts ) {
diff --git a/cds/lock/spinlock.h b/cds/lock/spinlock.h
index 76625d0c536fc0679b96891175df979fed71cf0e..daf9e9e893c6e4b65715b62d6924c43b7e24a673 100644
@@ -50,18 +50,18 @@ namespace cds {
         class Spinlock
         {
         public:
-            typedef Backoff     backoff_strategy;   ///< back-off strategy type
+            typedef        Backoff      backoff_strategy    ;        ///< back-off strategy type
         private:
-            CDS_ATOMIC::atomic<bool>    m_spin;     ///< Spin
+            CDS_ATOMIC::atomic<bool>    m_spin  ;       ///< Spin
 #    ifdef CDS_DEBUG
-            typename std::thread::id    m_dbgOwnerId; ///< Owner thread id (only for debug mode)
+            typename OS::ThreadId       m_dbgOwnerId        ;       ///< Owner thread id (only for debug mode)
 #    endif
 
         public:
             /// Construct free (unlocked) spin-lock
             Spinlock() CDS_NOEXCEPT
 #    ifdef CDS_DEBUG
-                :m_dbgOwnerId( std::thread::id() )
+                :m_dbgOwnerId( OS::c_NullThreadId )
 #    endif
             {
                 m_spin.store( false, CDS_ATOMIC::memory_order_relaxed );
@@ -73,7 +73,7 @@ namespace cds {
             */
             Spinlock( bool bLocked ) CDS_NOEXCEPT
 #    ifdef CDS_DEBUG
-                :m_dbgOwnerId( bLocked ? std::this_thread::get_id() : std::thread::id() )
+                :m_dbgOwnerId( bLocked ? OS::getCurrentThreadId() : OS::c_NullThreadId )
 #    endif
             {
                 m_spin.store( bLocked, CDS_ATOMIC::memory_order_relaxed );
@@ -88,7 +88,7 @@ namespace cds {
             Spinlock(const Spinlock<Backoff>& ) CDS_NOEXCEPT
                 : m_spin( false )
 #   ifdef CDS_DEBUG
-                , m_dbgOwnerId( std::thread::id() )
+                , m_dbgOwnerId( OS::c_NullThreadId )
 #   endif
             {}
 
@@ -124,7 +124,7 @@ namespace cds {
 
                 CDS_DEBUG_DO(
                     if ( !bCurrent ) {
-                        m_dbgOwnerId = std::this_thread::get_id();
+                        m_dbgOwnerId = OS::getCurrentThreadId();
                     }
                 )
                 return !bCurrent;
@@ -158,7 +158,7 @@ namespace cds {
                 Backoff backoff;
 
                 // Deadlock detected
-                assert( m_dbgOwnerId != std::this_thread::get_id() );
+                assert( m_dbgOwnerId != OS::getCurrentThreadId() );
 
                 // TATAS algorithm
                 while ( !tryLock() ) {
@@ -166,7 +166,7 @@ namespace cds {
                         backoff();
                     }
                 }
-                assert( m_dbgOwnerId == std::this_thread::get_id() );
+                assert( m_dbgOwnerId == OS::getCurrentThreadId() );
             }
 
             /// Unlock the spin-lock. Debug version: deadlock may be detected
@@ -174,8 +174,8 @@ namespace cds {
             {
                 assert( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) );
 
-                assert( m_dbgOwnerId == std::this_thread::get_id() );
-                CDS_DEBUG_DO( m_dbgOwnerId = std::thread::id() );)
+                assert( m_dbgOwnerId == OS::getCurrentThreadId() );
+                CDS_DEBUG_DO( m_dbgOwnerId = OS::c_NullThreadId; )
 
                 m_spin.store( false, CDS_ATOMIC::memory_order_release );
             }
@@ -195,7 +195,7 @@ namespace cds {
         template <typename Integral, class Backoff>
         class ReentrantSpinT
         {
-            typedef std::thread::id thread_id;        ///< The type of thread id
+            typedef OS::ThreadId    thread_id    ;        ///< The type of thread id
 
         public:
             typedef Integral        integral_type       ; ///< The integral type
@@ -203,7 +203,7 @@ namespace cds {
 
         private:
             CDS_ATOMIC::atomic<integral_type>   m_spin      ; ///< spin-lock atomic
-            thread_id                           m_OwnerId   ; ///< Owner thread id. If spin-lock is not locked it usually equals to std::thread::id()
+            thread_id                           m_OwnerId   ; ///< Owner thread id. If spin-lock is not locked it usually equals to OS::c_NullThreadId
 
         private:
             //@cond
@@ -214,7 +214,7 @@ namespace cds {
 
             void free() CDS_NOEXCEPT
             {
-                m_OwnerId = std::thread::id();
+                m_OwnerId = OS::c_NullThreadId;
             }
 
             bool isOwned( thread_id tid ) const CDS_NOEXCEPT
@@ -264,7 +264,7 @@ namespace cds {
             /// Default constructor initializes spin to free (unlocked) state
             ReentrantSpinT() CDS_NOEXCEPT
                 : m_spin(0)
-                , m_OwnerId( std::thread::id() )
+                , m_OwnerId( OS::c_NullThreadId )
             {}
 
             /// Dummy copy constructor
@@ -275,13 +275,13 @@ namespace cds {
             */
             ReentrantSpinT(const ReentrantSpinT<Integral, Backoff>& ) CDS_NOEXCEPT
                 : m_spin(0)
-                , m_OwnerId( std::thread::id() )
+                , m_OwnerId( OS::c_NullThreadId )
             {}
 
             /// Construct object for specified state
             ReentrantSpinT(bool bLocked) CDS_NOEXCEPT
-                : m_spin(0),
-                m_OwnerId( std::thread::id() )
+                : m_spin(0)
+                , m_OwnerId( OS::c_NullThreadId )
             {
                 if ( bLocked )
                     lock();
@@ -294,13 +294,13 @@ namespace cds {
             */
             bool is_locked() const CDS_NOEXCEPT
             {
-                return !(m_spin.load( CDS_ATOMIC::memory_order_relaxed ) == 0 || isOwned( std::this_thread::get_id() ));
+                return !( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) == 0 || isOwned( cds::OS::getCurrentThreadId() ));
             }
 
             /// Try to lock the spin-lock (synonym for \ref try_lock)
             bool tryLock() CDS_NOEXCEPT
             {
-                thread_id tid = std::this_thread::get_id();
+                thread_id tid = OS::getCurrentThreadId();
                 if ( tryLockOwned( tid ) )
                     return true;
                 if ( tryAcquireLock()) {
@@ -324,7 +324,7 @@ namespace cds {
                 CDS_NOEXCEPT_( noexcept( tryAcquireLock(nTryCount) ))
 #       endif
             {
-                thread_id tid = std::this_thread::get_id();
+                thread_id tid = OS::getCurrentThreadId();
                 if ( tryLockOwned( tid ) )
                     return true;
                 if ( tryAcquireLock( nTryCount )) {
@@ -353,7 +353,7 @@ namespace cds {
             /// Lock the object waits if it is busy
             void lock() CDS_NOEXCEPT
             {
-                thread_id tid = std::this_thread::get_id();
+                thread_id tid = OS::getCurrentThreadId();
                 if ( !tryLockOwned( tid ) ) {
                     acquireLock();
                     beOwner( tid );
@@ -363,7 +363,7 @@ namespace cds {
             /// Unlock the spin-lock. Return @p true if the current thread is owner of spin-lock @p false otherwise
             bool unlock() CDS_NOEXCEPT
             {
-                if ( isOwned( std::this_thread::get_id() ) ) {
+                if ( isOwned( OS::getCurrentThreadId() ) ) {
                     integral_type n = m_spin.load( CDS_ATOMIC::memory_order_relaxed );
                     if ( n > 1 )
                         m_spin.store( n - 1, CDS_ATOMIC::memory_order_relaxed );
@@ -377,10 +377,10 @@ namespace cds {
             }
 
             /// Change the owner of locked spin-lock. May be called by thread that is owner of the spin-lock
-            bool changeOwner( std::thread::id newOwnerId ) CDS_NOEXCEPT
+            bool changeOwner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT
             {
-                if ( isOwned( std::this_thread::get_id() ) ) {
-                    assert( newOwnerId != std::thread::id() );
+                if ( isOwned( OS::getCurrentThreadId() ) ) {
+                    assert( newOwnerId != OS::c_NullThreadId );
                     m_OwnerId = newOwnerId;
                     return true;
                 }
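
The spin-lock hunks above only restore OS::ThreadId for the debug-only owner field; the locking itself is the test-and-test-and-set loop referenced by the "// TATAS algorithm" comment. A minimal standalone sketch of that pattern with std::atomic and a plain yield in place of the library's Backoff template:

    #include <atomic>
    #include <thread>

    class tatas_spinlock {
        std::atomic<bool> m_spin{ false };
    public:
        bool try_lock()
        {
            // One exchange: returns true if we flipped false -> true.
            return !m_spin.exchange( true, std::memory_order_acquire );
        }
        void lock()
        {
            while ( !try_lock() ) {
                // Spin on a plain load so the cache line stays shared
                // until the lock looks free, then retry the exchange.
                while ( m_spin.load( std::memory_order_relaxed ))
                    std::this_thread::yield();
            }
        }
        void unlock()
        {
            m_spin.store( false, std::memory_order_release );
        }
    };
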
diff --git a/cds/os/posix/thread.h b/cds/os/posix/thread.h
index da77ea6593d0bd401b5b00457096eb17bd448ab7..76592ca882cf8d9b552c908e6ec5ec6c16684138 100644
@@ -8,8 +8,15 @@
 namespace cds { namespace OS {
     /// posix-related wrappers
     namespace posix {
+
+        /// Posix thread id type
+        typedef std::thread::native_handle_type ThreadId;
+
+        /// Get current thread id
+        static inline ThreadId getCurrentThreadId()    { return pthread_self(); }
+
         /// Checks if thread \p id is alive
-        static inline bool isThreadAlive( std::thread::id id )
+        static inline bool isThreadAlive( ThreadId id )
         {
             // if sig is zero, error checking is performed but no signal is actually sent.
             // ESRCH - No thread could be found corresponding to that specified by the given thread ID
@@ -25,6 +32,10 @@ namespace cds { namespace OS {
 
     }    // namespace posix
 
+    using posix::ThreadId;
+    CDS_CONSTEXPR const ThreadId c_NullThreadId = 0;
+
+    using posix::getCurrentThreadId;
     using posix::isThreadAlive;
     using posix::backoff;
 
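
The restored POSIX layer supplies the three primitives the rest of the diff leans on: getCurrentThreadId() (pthread_self() here), the c_NullThreadId constant meaning "no owner", and isThreadAlive(), which probes a handle with pthread_kill( id, 0 ). A small usage sketch, again assuming the aggregate <cds/os/thread.h> header:

    #include <cassert>
    #include <cds/os/thread.h>   // assumed path; pulls in the platform-specific header

    void thread_id_basics()
    {
        cds::OS::ThreadId self = cds::OS::getCurrentThreadId();   // pthread_self() on POSIX
        assert( self != cds::OS::c_NullThreadId );                 // 0 is reserved for "no thread"

        // Unlike std::thread::id, the raw handle can be probed later
        // (for example by HelpScan) to see whether its thread still exists.
        bool alive = cds::OS::isThreadAlive( self );
        assert( alive );
    }
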
diff --git a/cds/os/win/thread.h b/cds/os/win/thread.h
index e7feb557f4b1b59191d24df78eb8d34f9f523ce7..85877573eb106a9ec42369ebfbee9cd1733bf09a 100644
@@ -9,8 +9,17 @@ namespace cds { namespace OS {
     /// Windows-specific functions
     namespace Win32 {
 
+        /// OS-specific type of thread identifier
+        typedef DWORD           ThreadId;
+
+        /// Get current thread id
+        static inline ThreadId getCurrentThreadId()
+        {
+            return ::GetCurrentThreadId();
+        }
+
         /// Tests whether the thread is alive
-        static inline bool isThreadAlive( std::thread::id id )
+        static inline bool isThreadAlive( ThreadId id )
         {
             HANDLE h = ::OpenThread( SYNCHRONIZE, FALSE, id );
             if ( h == NULL )
@@ -26,6 +35,10 @@ namespace cds { namespace OS {
         }
     }    // namespace Win32
 
+    using Win32::ThreadId;
+    CDS_CONSTEXPR const ThreadId c_NullThreadId = 0;
+
+    using Win32::getCurrentThreadId;
     using Win32::isThreadAlive;
     using Win32::backoff;
 
diff --git a/cds/urcu/details/base.h b/cds/urcu/details/base.h
index dcbfe2e4b314d283903e5db7edf3eeb476b6e012..0ba1359c7282323ba68c2aa525c639086818b253 100644
@@ -316,11 +316,11 @@ namespace cds {
             template <typename ThreadData>
             struct thread_list_record {
                 ThreadData *    m_pNext ;  ///< Next item in thread list
-                CDS_ATOMIC::atomic<std::thread::id>    m_idOwner; ///< Owner thread id; 0 - the record is free (not owned)
+                CDS_ATOMIC::atomic<OS::ThreadId>    m_idOwner   ; ///< Owner thread id; 0 - the record is free (not owned)
 
                 thread_list_record()
                     : m_pNext( nullptr )
-                    , m_idOwner( std::thread::id() )
+                    , m_idOwner( cds::OS::c_NullThreadId )
                 {}
 
                 ~thread_list_record()
@@ -351,12 +351,12 @@ namespace cds {
                 thread_record * alloc()
                 {
                     thread_record * pRec;
-                    std::thread::id const nullThreadId = std::thread::id();
-                    std::thread::id const curThreadId = std::this_thread::get_id();
+                    cds::OS::ThreadId const nullThreadId = cds::OS::c_NullThreadId;
+                    cds::OS::ThreadId const curThreadId  = cds::OS::getCurrentThreadId();
 
                     // First try to reuse a retired (non-active) HP record
                     for ( pRec = m_pHead.load( CDS_ATOMIC::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
-                        std::thread::id thId = nullThreadId;
+                        cds::OS::ThreadId thId = nullThreadId;
                         if ( !pRec->m_list.m_idOwner.compare_exchange_strong( thId, curThreadId, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ) )
                             continue;
                         return pRec;
@@ -380,13 +380,13 @@ namespace cds {
                 void retire( thread_record * pRec )
                 {
                     assert( pRec != nullptr );
-                    pRec->m_list.m_idOwner.store( std::thread::id(), CDS_ATOMIC::memory_order_release );
+                    pRec->m_list.m_idOwner.store( cds::OS::c_NullThreadId, CDS_ATOMIC::memory_order_release );
                 }
 
                 void detach_all()
                 {
                     thread_record * pNext = nullptr;
-                    std::thread::id const nullThreadId = std::thread::id();
+                    cds::OS::ThreadId const nullThreadId = cds::OS::c_NullThreadId;
 
                     for ( thread_record * pRec = m_pHead.load(CDS_ATOMIC::memory_order_acquire); pRec; pRec = pNext ) {
                         pNext = pRec->m_list.m_pNext;
@@ -405,8 +405,8 @@ namespace cds {
                 void destroy()
                 {
                     allocator_type al;
-                    CDS_DEBUG_DO( std::thread::id const nullThreadId = std::thread::id(); )
-                    CDS_DEBUG_DO( std::thread::id const mainThreadId = std::this_thread::get_id(); )
+                    CDS_DEBUG_DO( cds::OS::ThreadId const nullThreadId = cds::OS::c_NullThreadId; )
+                    CDS_DEBUG_DO( cds::OS::ThreadId const mainThreadId = cds::OS::getCurrentThreadId() ;)
 
                     thread_record * p = m_pHead.exchange( nullptr, CDS_ATOMIC::memory_order_seq_cst );
                     while ( p ) {
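
In alloc() above, a retired thread_record is re-owned by CAS-ing m_idOwner from the null id to the caller's id, and retire() releases it by storing the null id back. A condensed sketch of that reuse loop, with the member layout simplified and std::atomic in place of the CDS_ATOMIC macro:

    #include <atomic>
    #include <cds/os/thread.h>   // assumed header for cds::OS::ThreadId and friends

    struct thread_record {
        thread_record *                  m_pNext;
        std::atomic<cds::OS::ThreadId>   m_idOwner;   // c_NullThreadId means "record is free"
    };

    thread_record * reuse_retired( thread_record * pHead )
    {
        cds::OS::ThreadId const curThreadId = cds::OS::getCurrentThreadId();
        for ( thread_record * pRec = pHead; pRec; pRec = pRec->m_pNext ) {
            // "expected" must be re-initialized on every iteration: a failed
            // CAS overwrites it with the record's current owner.
            cds::OS::ThreadId expected = cds::OS::c_NullThreadId;
            if ( pRec->m_idOwner.compare_exchange_strong( expected, curThreadId ))
                return pRec;      // claimed a free record
        }
        return nullptr;           // nothing free - the caller allocates a new record
    }
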
diff --git a/cds/urcu/details/gp.h b/cds/urcu/details/gp.h
index d04c22ff871becb4d0804f39151545d470f995b3..181e22ac2d2b7a432e6ad31e6660e57e24a2cc77 100644
@@ -82,7 +82,7 @@ namespace cds { namespace urcu { namespace details {
     template <class Backoff>
     inline void gp_singleton<RCUtag>::flip_and_wait( Backoff& bkoff )
     {
-        std::thread::id const nullThreadId = std::thread::id();
+        OS::ThreadId const nullThreadId = OS::c_NullThreadId;
         m_nGlobalControl.fetch_xor( general_purpose_rcu::c_nControlBit, CDS_ATOMIC::memory_order_seq_cst );
 
         for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
diff --git a/cds/urcu/details/sh.h b/cds/urcu/details/sh.h
index b39612b7fe71d8fddd916decbc3b730abfb333b2..25e5642e0ba37bbaa207dc73ed1ab0309ffd402a 100644
@@ -105,7 +105,7 @@ namespace cds { namespace urcu { namespace details {
     }
 
     template <typename RCUtag>
-    inline void sh_singleton<RCUtag>::raise_signal( std::thread::id tid )
+    inline void sh_singleton<RCUtag>::raise_signal( cds::OS::ThreadId tid )
     {
         pthread_kill( tid, m_nSigNo );
     }
@@ -114,11 +114,11 @@ namespace cds { namespace urcu { namespace details {
     template <class Backoff>
     inline void sh_singleton<RCUtag>::force_membar_all_threads( Backoff& bkOff )
     {
-        std::thread::id const nullThreadId = std::thread::id();
+        OS::ThreadId const nullThreadId = OS::c_NullThreadId;
 
         // Send "need membar" signal to all RCU threads
         for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
-            std::thread::id tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire);
+            OS::ThreadId tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire);
             if ( tid != nullThreadId ) {
                 pRec->m_bNeedMemBar.store( true, CDS_ATOMIC::memory_order_release );
                 raise_signal( tid );
@@ -127,7 +127,7 @@ namespace cds { namespace urcu { namespace details {
 
         // Wait while all RCU threads process the signal
         for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
-            std::thread::id tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire);
+            OS::ThreadId tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire);
             if ( tid != nullThreadId ) {
                 bkOff.reset();
                 while ( (tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire )) != nullThreadId
@@ -154,7 +154,7 @@ namespace cds { namespace urcu { namespace details {
     template <class Backoff>
     void sh_singleton<RCUtag>::wait_for_quiescent_state( Backoff& bkOff )
     {
-        std::thread::id const nullThreadId = std::thread::id();
+        OS::ThreadId const nullThreadId = OS::c_NullThreadId;
 
         for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
             while ( pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire) != nullThreadId && check_grace_period( pRec ))
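
The signal-handled RCU flavour shows most directly why the handle type matters: raise_signal() passes the stored owner id straight to pthread_kill(), which needs a real pthread_t, and an opaque std::thread::id cannot provide one. A trimmed sketch of the membar broadcast above, simplified and using std::atomic instead of CDS_ATOMIC:

    #include <atomic>
    #include <signal.h>
    #include <cds/os/thread.h>   // assumed header for cds::OS::ThreadId

    struct rcu_thread_record {
        rcu_thread_record *              m_pNext;
        std::atomic<cds::OS::ThreadId>   m_idOwner;
        std::atomic<bool>                m_bNeedMemBar;
    };

    // Ask every attached RCU thread to run a memory barrier in its signal
    // handler; nSigNo is whatever signal the singleton registered.
    void send_membar_signal( rcu_thread_record * pHead, int nSigNo )
    {
        for ( rcu_thread_record * pRec = pHead; pRec; pRec = pRec->m_pNext ) {
            cds::OS::ThreadId tid = pRec->m_idOwner.load( std::memory_order_acquire );
            if ( tid != cds::OS::c_NullThreadId ) {
                pRec->m_bNeedMemBar.store( true, std::memory_order_release );
                pthread_kill( tid, nSigNo );   // requires a pthread_t, hence cds::OS::ThreadId
            }
        }
    }
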
diff --git a/cds/urcu/details/sh_decl.h b/cds/urcu/details/sh_decl.h
index 4789a87c7745a1501f2330e2a1fa7cfe0dc97b95..1386e6061aea904e11b1d389e7e4a918b16072b0 100644
@@ -159,7 +159,7 @@ namespace cds { namespace urcu { namespace details {
         void set_signal_handler();
         void clear_signal_handler();
         static void signal_handler( int signo, siginfo_t * sigInfo, void * context );
-        void raise_signal( std::thread::id tid );
+        void raise_signal( cds::OS::ThreadId tid );
 
         template <class Backoff>
         void force_membar_all_threads( Backoff& bkOff );
diff --git a/src/dllmain.cpp b/src/dllmain.cpp
index 113665ea07f80f78588d06ee77c860519875f68f..1fc9b85cedcdf0aa93fc3bb47415862ba0fba03b 100644
@@ -13,8 +13,8 @@
 #   endif
 #endif
 
-static std::thread::id  s_MainThreadId = 0;
-static HINSTANCE        s_DllInstance = NULL;
+static cds::OS::ThreadId    s_MainThreadId = 0;
+static HINSTANCE            s_DllInstance = NULL;
 
 #if _WIN32_WINNT < 0x0601
 // For Windows below Windows 7
@@ -191,7 +191,7 @@ BOOL WINAPI DllMain(
     switch ( fdwReason ) {
         case DLL_PROCESS_ATTACH:
             s_DllInstance = hinstDLL;
-            s_MainThreadId = std::this_thread::get_id();
+            s_MainThreadId = cds::OS::getCurrentThreadId();
 #if _WIN32_WINNT < 0x0601
             discover_topology();
 #endif
diff --git a/src/hrc_gc.cpp b/src/hrc_gc.cpp
index 9033fdc66e6b25f6da9985b65bb18f328ce3f2f1..12a37df6844419cf62982da870e78355a4a73da3 100644
@@ -38,7 +38,7 @@ namespace cds { namespace gc {
         {
             thread_list_node * pNode = m_pListHead.load( CDS_ATOMIC::memory_order_relaxed );
             while ( pNode ) {
-                assert( pNode->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == std::thread::id() );
+                assert( pNode->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == cds::OS::c_NullThreadId );
                 clearHRCThreadDesc( pNode );
                 thread_list_node * pNext = pNode->m_pNext;
                 deleteHRCThreadDesc( pNode );
@@ -115,7 +115,7 @@ namespace cds { namespace gc {
         GarbageCollector::thread_list_node *  GarbageCollector::getHRCThreadDescForCurrentThread() const
         {
             thread_list_node * hprec;
-            const std::thread::id curThreadId = std::this_thread::get_id();
+            const cds::OS::ThreadId curThreadId  = cds::OS::getCurrentThreadId();
 
             for ( hprec = m_pListHead.load( CDS_ATOMIC::memory_order_acquire ); hprec; hprec = hprec->m_pNext ) {
                 if ( hprec->m_idOwner.load( CDS_ATOMIC::memory_order_acquire ) == curThreadId ) {
@@ -131,12 +131,12 @@ namespace cds { namespace gc {
             CDS_HRC_STATISTIC( ++m_Stat.m_AllocHRCThreadDesc );
 
             thread_list_node * hprec;
-            const std::thread::id nullThreadId = std::thread::id();
-            const std::thread::id curThreadId = std::this_thread::get_id();
+            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
+            const cds::OS::ThreadId curThreadId  = cds::OS::getCurrentThreadId();
 
             // First try to reuse a retired (non-active) HP record
             for ( hprec = m_pListHead.load( CDS_ATOMIC::memory_order_acquire ); hprec; hprec = hprec->m_pNext ) {
-                std::thread::id expectedThreadId = nullThreadId;
+                cds::OS::ThreadId expectedThreadId = nullThreadId;
                 if ( !hprec->m_idOwner.compare_exchange_strong( expectedThreadId, curThreadId, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed ) )
                     continue;
                 hprec->m_pOwner = pThreadGC;
@@ -172,13 +172,13 @@ namespace cds { namespace gc {
             assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
             /*
                 It is possible that
-                    pNode->m_idOwner.value() != std::this_thread::get_id()
+                    pNode->m_idOwner.value() != cds::OS::getCurrentThreadId()
                 if the destruction of thread object is called by the destructor
                 after thread termination
             */
-            assert( pNode->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) != std::thread::id() );
+            assert( pNode->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) != cds::OS::c_NullThreadId );
             pNode->m_pOwner = nullptr;
-            pNode->m_idOwner.store( std::thread::id(), CDS_ATOMIC::memory_order_release );
+            pNode->m_idOwner.store( cds::OS::c_NullThreadId, CDS_ATOMIC::memory_order_release );
             assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
         }
 
@@ -189,7 +189,7 @@ namespace cds { namespace gc {
             typedef std::vector< ContainerNode * > hazard_ptr_list;
 
             details::thread_descriptor * pRec = pThreadGC->m_pDesc;
-            assert( static_cast< thread_list_node *>(pRec)->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == std::this_thread::get_id() );
+            assert( static_cast< thread_list_node *>( pRec )->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) == cds::OS::getCurrentThreadId() );
 
             // Step 1: mark all pRec->m_arrRetired items as "traced"
             {
@@ -277,13 +277,13 @@ namespace cds { namespace gc {
 
             CDS_HRC_STATISTIC( ++m_Stat.m_HelpScanCalls );
 
-            const std::thread::id nullThreadId = std::thread::id();
-            const std::thread::id curThreadId = std::this_thread::get_id();
+            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
+            const cds::OS::ThreadId curThreadId  = cds::OS::getCurrentThreadId();
 
             for ( thread_list_node * pRec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_pNext )
             {
                 // If threadDesc is free then own its
-                std::thread::id expectedThreadId = nullThreadId;
+                cds::OS::ThreadId expectedThreadId = nullThreadId;
                 if ( !pRec->m_idOwner.compare_exchange_strong(expectedThreadId, curThreadId, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed) )
                 {
                     continue;
@@ -329,7 +329,7 @@ namespace cds { namespace gc {
         {
             CDS_HRC_STATISTIC( ++m_Stat.m_CleanUpAllCalls );
 
-            //const std::thread::id nullThreadId = std::thread::id();
+            //const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
             thread_list_node * pThread = m_pListHead.load(CDS_ATOMIC::memory_order_acquire);
             while ( pThread ) {
                 for ( size_t i = 0; i < pThread->m_arrRetired.capacity(); ++i ) {
diff --git a/src/hzp_gc.cpp b/src/hzp_gc.cpp
index 46d7315338997fd93d60da8790f0870559fe781c..5c0be168f7884016e0ef3fc3425ed49a9a060177 100644
@@ -58,8 +58,8 @@ namespace cds { namespace gc {
 
         GarbageCollector::~GarbageCollector()
         {
-            CDS_DEBUG_DO( const std::thread::id nullThreadId = std::thread::id(); )
-            CDS_DEBUG_DO( const std::thread::id mainThreadId = std::this_thread::get_id(); )
+            CDS_DEBUG_DO( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
+            CDS_DEBUG_DO( const cds::OS::ThreadId mainThreadId = cds::OS::getCurrentThreadId() ;)
 
             hplist_node * pHead = m_pListHead.load( CDS_ATOMIC::memory_order_relaxed );
             m_pListHead.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
@@ -108,12 +108,12 @@ namespace cds { namespace gc {
             CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec );
 
             hplist_node * hprec;
-            const std::thread::id nullThreadId = std::thread::id();
-            const std::thread::id curThreadId = std::this_thread::get_id();
+            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
+            const cds::OS::ThreadId curThreadId  = cds::OS::getCurrentThreadId();
 
             // First try to reuse a retired (non-active) HP record
             for ( hprec = m_pListHead.load( CDS_ATOMIC::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
-                std::thread::id thId = nullThreadId;
+                cds::OS::ThreadId thId = nullThreadId;
                 if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ) )
                     continue;
                 hprec->m_bFree.store( false, CDS_ATOMIC::memory_order_release );
@@ -144,13 +144,13 @@ namespace cds { namespace gc {
             pRec->clear();
             Scan( pRec );
             hplist_node * pNode = static_cast<hplist_node *>( pRec );
-            pNode->m_idOwner.store( std::thread::id(), CDS_ATOMIC::memory_order_release );
+            pNode->m_idOwner.store( cds::OS::c_NullThreadId, CDS_ATOMIC::memory_order_release );
         }
 
         void GarbageCollector::detachAllThread()
         {
             hplist_node * pNext = NULL;
-            const std::thread::id nullThreadId = std::thread::id();
+            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
             for ( hplist_node * hprec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); hprec; hprec = pNext ) {
                 pNext = hprec->m_pNextNode;
                 if ( hprec->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) != nullThreadId ) {
@@ -269,10 +269,10 @@ namespace cds { namespace gc {
         {
             CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount );
 
-            assert( static_cast<hplist_node *>(pThis)->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == std::this_thread::get_id() );
+            assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) == cds::OS::getCurrentThreadId() );
 
-            const std::thread::id nullThreadId = std::thread::id();
-            const std::thread::id curThreadId = std::this_thread::get_id();
+            const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
+            const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
             for ( hplist_node * hprec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
 
                 // If m_bFree == true then hprec->m_arrRetired is empty - we don't need to see it
@@ -282,7 +282,7 @@ namespace cds { namespace gc {
                 // Owns hprec if it is empty.
                 // Several threads may work concurrently so we use atomic technique only.
                 {
-                    std::thread::id curOwner = hprec->m_idOwner.load( CDS_ATOMIC::memory_order_acquire );
+                    cds::OS::ThreadId curOwner = hprec->m_idOwner.load(CDS_ATOMIC::memory_order_acquire);
                     if ( curOwner == nullThreadId || !cds::OS::isThreadAlive( curOwner )) {
                         if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
                             continue;
diff --git a/tests/unit/map2/map_insdel_func.cpp b/tests/unit/map2/map_insdel_func.cpp
index 4efef53d7898f06d6d6eaf7f70d49c6fc96143e3..d47a6bea456e6704b3ae611ed4c31a96239f7ff0 100644
@@ -33,7 +33,7 @@ namespace map2 {
             size_t      nData;
             CDS_ATOMIC::atomic<size_t> nEnsureCall;
             CDS_ATOMIC::atomic<bool>   bInitialized;
-            std::thread::id            threadId;   // insert thread id
+            cds::OS::ThreadId          threadId     ;   // insert thread id
 
             typedef cds::lock::Spinlock< cds::backoff::pause >   lock_type;
             mutable lock_type   m_access;
@@ -43,7 +43,7 @@ namespace map2 {
                 , nData(0)
                 , nEnsureCall(0)
                 , bInitialized( false )
-                , threadId( std::this_thread::get_id() )
+                , threadId( cds::OS::getCurrentThreadId() )
             {}
 
             value_type( value_type const& s )
@@ -51,7 +51,7 @@ namespace map2 {
                 , nData(s.nData)
                 , nEnsureCall(s.nEnsureCall.load(CDS_ATOMIC::memory_order_relaxed))
                 , bInitialized( s.bInitialized.load(CDS_ATOMIC::memory_order_relaxed) )
-                , threadId( std::this_thread::get_id() )
+                , threadId( cds::OS::getCurrentThreadId() )
             {}
 
             // boost::container::flat_map requires operator =
diff --git a/tests/unit/set2/set_insdel_func.h b/tests/unit/set2/set_insdel_func.h
index c3a6df65adab4bead1aa6d8bef89060ace95bf25..1f0bdf7d0c7c62663173f5dd45c1005d406e27d8 100644
@@ -28,9 +28,9 @@ namespace set2 {
         struct value_type {
             size_t      nKey;
             size_t      nData;
-            CDS_ATOMIC::atomic<size_t>  nEnsureCall;
-            bool volatile               bInitialized;
-            std::thread::id             threadId;   // insert thread id
+            CDS_ATOMIC::atomic<size_t> nEnsureCall;
+            bool volatile   bInitialized;
+            cds::OS::ThreadId          threadId     ;   // insert thread id
 
             typedef cds::lock::Spinlock< cds::backoff::pause >   lock_type;
             mutable lock_type   m_access;
@@ -40,7 +40,7 @@ namespace set2 {
                 , nData(0)
                 , nEnsureCall(0)
                 , bInitialized( false )
-                , threadId( std::this_thread::get_id() )
+                , threadId( cds::OS::getCurrentThreadId() )
             {}
 
             value_type( value_type const& s )
@@ -48,7 +48,7 @@ namespace set2 {
                 , nData(s.nData)
                 , nEnsureCall(s.nEnsureCall.load(CDS_ATOMIC::memory_order_relaxed))
                 , bInitialized( s.bInitialized )
-                , threadId( std::this_thread::get_id() )
+                , threadId( cds::OS::getCurrentThreadId() )
             {}
 
             // boost::container::flat_map requires operator =