From: khizmax Date: Mon, 13 Jun 2016 20:24:09 +0000 (+0300) Subject: Added wait strategies to flat combining technique X-Git-Tag: v2.2.0~217 X-Git-Url: http://plrg.eecs.uci.edu/git/?p=libcds.git;a=commitdiff_plain;h=ac52ccdcde087416882f4303c220167f3a898e89 Added wait strategies to flat combining technique --- diff --git a/cds/algo/flat_combining.h b/cds/algo/flat_combining.h index c033d6c0..4c3ac202 100644 --- a/cds/algo/flat_combining.h +++ b/cds/algo/flat_combining.h @@ -25,823 +25,12 @@ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef CDSLIB_ALGO_FLAT_COMBINING_H #define CDSLIB_ALGO_FLAT_COMBINING_H -#include -#include -#include -#include -#include -#include -#include -#include // thread_specific_ptr - -namespace cds { namespace algo { - - /// @defgroup cds_flat_combining_intrusive Intrusive flat combining containers - /// @defgroup cds_flat_combining_container Non-intrusive flat combining containers - - /// Flat combining - /** - @anchor cds_flat_combining_description - Flat combining (FC) technique is invented by Hendler, Incze, Shavit and Tzafrir in their paper - [2010] "Flat Combining and the Synchronization-Parallelism Tradeoff". - The technique converts a sequential data structure to its concurrent implementation. - A few structures are added to the sequential implementation: a global lock, - a count of the number of combining passes, and a pointer to the head - of a publication list. The publication list is a list of thread-local records - of a size proportional to the number of threads that are concurrently accessing the shared object. - - Each thread \p t accessing the structure to perform an invocation of some method \p m - on the shared object executes the following sequence of steps: -
  1. Write the invocation opcode and parameters (if any) of the method \p m to be applied
     sequentially to the shared object in the request field of your thread-local publication
     record (there is no need to use a load-store memory barrier). The request field will later
     be used to receive the response. If your thread-local publication record is marked as active,
     continue to step 2; otherwise continue to step 5.
  2. Check if the global lock is taken. If so (another thread is an active combiner), spin on the request
     field waiting for a response to the invocation (one can add a yield at this point to allow other threads
     on the same core to run). Once in a while, while spinning, check that the lock is still taken and that your
     record is active. If your record is inactive, proceed to step 5. Once the response is available,
     reset the request field to null and return the response.
  3. If the lock is not taken, attempt to acquire it and become a combiner. If you fail,
     return to spinning in step 2.
  4. Otherwise, you hold the lock and are a combiner:
     - Increment the combining pass count by one.
     - Execute a \p fc_apply() by traversing the publication list from the head,
       combining all non-null method call invocations, setting the age of each of these records
       to the current count, applying the combined method calls to the structure D, and returning
       responses to all the invocations. This traversal is guaranteed to be wait-free.
     - If the count is such that a cleanup needs to be performed, traverse the publication
       list from the head. Starting from the second item (we always leave the item pointed to
       by the head in the list), remove from the publication list all records whose age is
       much smaller than the current count. This is done by removing the node and marking it
       as inactive.
     - Release the lock.
  5. If you have no thread-local publication record, allocate one marked as active. If you already
     have one marked as inactive, mark it as active. Execute a store-load memory barrier. Proceed to insert
     the record into the list with a successful CAS to the head. Then proceed to step 1.
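For illustration only (not part of this patch): a deliberately minimal C++ sketch of the protocol above, applied to a plain counter. It replaces the dynamic publication list with a fixed slot array indexed by a caller-supplied thread id and omits record aging, list compaction and the active/removed states, so it only shows how publishing a request, spinning for a response, and combining under a single lock fit together. All names in it are invented for the example.

#include <atomic>
#include <mutex>
#include <thread>

// Per-thread publication slot: the "request field" of the algorithm above.
struct Request {
    std::atomic<int> op{ 0 };   // 0 - empty, 1 - pending "add", 2 - response ready
    int              arg = 0;   // operation argument
};

class FcCounter {
    static const int c_nMaxThreads = 8;
    Request    m_slots[ c_nMaxThreads ]; // publication "list" (fixed array for simplicity)
    std::mutex m_lock;                   // global combiner lock
    long       m_value = 0;              // the sequential structure being protected

public:
    // tid must be unique per thread and less than c_nMaxThreads (an assumption of this sketch)
    void add( int tid, int delta )
    {
        Request& r = m_slots[ tid ];
        r.arg = delta;
        r.op.store( 1, std::memory_order_release );              // step 1: publish the invocation

        while ( r.op.load( std::memory_order_acquire ) != 2 ) {  // step 2: wait for the response
            if ( m_lock.try_lock()) {                            // step 3: try to become the combiner
                // step 4: one combining pass over all published requests
                for ( Request& s : m_slots ) {
                    if ( s.op.load( std::memory_order_acquire ) == 1 ) {
                        m_value += s.arg;                             // apply the combined call
                        s.op.store( 2, std::memory_order_release );  // return the response
                    }
                }
                m_lock.unlock();
            }
            else
                std::this_thread::yield();                       // let the current combiner run
        }
        r.op.store( 0, std::memory_order_relaxed );              // reset the request field
    }

    long value()
    {
        std::lock_guard<std::mutex> l( m_lock );                 // exclusive access, no combining
        return m_value;
    }
};

A real implementation (as in the kernel below) also ages and compacts the publication list and lets the waiting step be customized; this sketch only spins with a yield.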
- - As the test results show, the flat combining technique is suitable for non-intrusive containers - like stack, queue, deque. For intrusive concurrent containers the flat combining demonstrates - less impressive results. - - \ref cds_flat_combining_container "List of FC-based containers" in libcds. - - \ref cds_flat_combining_intrusive "List of intrusive FC-based containers" in libcds. - */ - namespace flat_combining { - - /// Special values of publication_record::nRequest - enum request_value - { - req_EmptyRecord, ///< Publication record is empty - req_Response, ///< Operation is done - - req_Operation ///< First operation id for derived classes - }; - - /// publication_record state - enum record_state { - inactive, ///< Record is inactive - active, ///< Record is active - removed ///< Record should be removed - }; - - /// Record of publication list - /** - Each data structure based on flat combining contains a class derived from \p %publication_record - */ - struct publication_record { - atomics::atomic nRequest; ///< Request field (depends on data structure) - atomics::atomic nState; ///< Record state: inactive, active, removed - atomics::atomic nAge; ///< Age of the record - atomics::atomic pNext; ///< Next record in publication list - void * pOwner; ///< [internal data] Pointer to \ref kernel object that manages the publication list - - /// Initializes publication record - publication_record() - : nRequest( req_EmptyRecord ) - , nState( inactive ) - , nAge(0) - , pNext( nullptr ) - , pOwner( nullptr ) - {} - - /// Returns the value of \p nRequest field - unsigned int op() const - { - return nRequest.load( atomics::memory_order_relaxed ); - } - - /// Checks if the operation is done - bool is_done() const - { - return nRequest.load( atomics::memory_order_relaxed ) == req_Response; - } - }; - - /// Flat combining internal statistics - template - struct stat - { - typedef Counter counter_type; ///< Event counter type - - counter_type m_nOperationCount ; ///< How many operations have been performed - counter_type m_nCombiningCount ; ///< Combining call count - counter_type m_nCompactPublicationList; ///< Count of publication list compacting - counter_type m_nDeactivatePubRecord; ///< How many publication records were deactivated during compacting - counter_type m_nActivatePubRecord; ///< Count of publication record activating - counter_type m_nPubRecordCreated ; ///< Count of created publication records - counter_type m_nPubRecordDeteted ; ///< Count of deleted publication records - counter_type m_nAcquirePubRecCount; ///< Count of acquiring publication record - counter_type m_nReleasePubRecCount; ///< Count on releasing publication record - - /// Returns current combining factor - /** - Combining factor is how many operations perform in one combine pass: - combining_factor := m_nOperationCount / m_nCombiningCount - */ - double combining_factor() const - { - return m_nCombiningCount.get() ? 
double( m_nOperationCount.get()) / m_nCombiningCount.get() : 0.0; - } - - //@cond - void onOperation() { ++m_nOperationCount; } - void onCombining() { ++m_nCombiningCount; } - void onCompactPublicationList() { ++m_nCompactPublicationList; } - void onDeactivatePubRecord() { ++m_nDeactivatePubRecord; } - void onActivatePubRecord() { ++m_nActivatePubRecord; } - void onCreatePubRecord() { ++m_nPubRecordCreated; } - void onDeletePubRecord() { ++m_nPubRecordDeteted; } - void onAcquirePubRecord() { ++m_nAcquirePubRecCount; } - void onReleasePubRecord() { ++m_nReleasePubRecCount; } - //@endcond - }; - - /// Flat combining dummy internal statistics - struct empty_stat - { - //@cond - void onOperation() {} - void onCombining() {} - void onCompactPublicationList() {} - void onDeactivatePubRecord() {} - void onActivatePubRecord() {} - void onCreatePubRecord() {} - void onDeletePubRecord() {} - void onAcquirePubRecord() {} - void onReleasePubRecord() {} - //@endcond - }; - - /// Type traits of \ref kernel class - /** - You can define different type traits for \ref kernel - by specifying your struct based on \p %traits - or by using \ref make_traits metafunction. - */ - struct traits - { - typedef cds::sync::spin lock_type; ///< Lock type - typedef cds::backoff::delay_of<2> back_off; ///< Back-off strategy - typedef CDS_DEFAULT_ALLOCATOR allocator; ///< Allocator used for TLS data (allocating publication_record derivatives) - typedef empty_stat stat; ///< Internal statistics - typedef opt::v::relaxed_ordering memory_model; ///< /// C++ memory ordering model - }; - - /// Metafunction converting option list to traits - /** - \p Options are: - - \p opt::lock_type - mutex type, default is \p cds::sync::spin - - \p opt::back_off - back-off strategy, defalt is \p cds::backoff::delay_of<2> - - \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR - - \p opt::stat - internal statistics, possible type: \ref stat, \ref empty_stat (the default) - - \p opt::memory_model - C++ memory ordering model. - List of all available memory ordering see \p opt::memory_model. - Default is \p cds::opt::v::relaxed_ordering - */ - template - struct make_traits { -# ifdef CDS_DOXYGEN_INVOKED - typedef implementation_defined type ; ///< Metafunction result -# else - typedef typename cds::opt::make_options< - typename cds::opt::find_type_traits< traits, Options... >::type - ,Options... - >::type type; -# endif - }; - - /// The kernel of flat combining - /** - Template parameters: - - \p PublicationRecord - a type derived from \ref publication_record - - \p Traits - a type traits of flat combining, default is \p flat_combining::traits. - \ref make_traits metafunction can be used to create type traits - - The kernel object should be a member of a container class. The container cooperates with flat combining - kernel object. There are two ways to interact with the kernel: - - One-by-one processing the active records of the publication list. This mode provides by \p combine() function: - the container acquires its publication record by \p acquire_record(), fills its fields and calls - \p combine() function of its kernel object. If the current thread becomes a combiner, the kernel - calls \p fc_apply() function of the container for each active non-empty record. Then, the container - should release its publication record by \p release_record(). Only one pass through the publication - list is possible. - - Batch processing - \p batch_combine() function. 
It this mode the container obtains access - to entire publication list. This mode allows the container to perform an elimination, for example, - the stack can collide \p push() and \p pop() requests. The sequence of invocations is the following: - the container acquires its publication record by \p acquire_record(), fills its field and call - \p batch_combine() function of its kernel object. If the current thread becomes a combiner, - the kernel calls \p fc_process() function of the container passing two iterators pointing to - the begin and the end of publication list (see \ref iterator class). The iterators allow - multiple pass through active records of publication list. For each processed record the container - should call \p operation_done() function. On the end, the container should release - its record by \p release_record(). - */ - template < - typename PublicationRecord - ,typename Traits = traits - > - class kernel - { - public: - typedef PublicationRecord publication_record_type; ///< publication record type - typedef Traits traits; ///< Type traits - typedef typename traits::lock_type global_lock_type; ///< Global lock type - typedef typename traits::back_off back_off; ///< back-off strategy type - typedef typename traits::allocator allocator; ///< Allocator type (used for allocating publication_record_type data) - typedef typename traits::stat stat; ///< Internal statistics - typedef typename traits::memory_model memory_model; ///< C++ memory model - - protected: - //@cond - typedef cds::details::Allocator< publication_record_type, allocator > cxx11_allocator; ///< internal helper cds::details::Allocator - typedef std::lock_guard lock_guard; - //@endcond - - protected: - atomics::atomic m_nCount; ///< Total count of combining passes. Used as an age. 
- publication_record_type * m_pHead; ///< Head of publication list - boost::thread_specific_ptr< publication_record_type > m_pThreadRec; ///< Thread-local publication record - mutable global_lock_type m_Mutex; ///< Global mutex - mutable stat m_Stat; ///< Internal statistics - unsigned int const m_nCompactFactor; ///< Publication list compacting factor (the list will be compacted through \p %m_nCompactFactor combining passes) - unsigned int const m_nCombinePassCount; ///< Number of combining passes - - public: - /// Initializes the object - /** - Compact factor = 64 - - Combiner pass count = 8 - */ - kernel() - : m_nCount(0) - , m_pHead( nullptr ) - , m_pThreadRec( tls_cleanup ) - , m_nCompactFactor( 64 - 1 ) // binary mask - , m_nCombinePassCount( 8 ) - { - init(); - } - - /// Initializes the object - kernel( - unsigned int nCompactFactor ///< Publication list compacting factor (the list will be compacted through \p nCompactFactor combining passes) - ,unsigned int nCombinePassCount ///< Number of combining passes for combiner thread - ) - : m_nCount(0) - , m_pHead( nullptr ) - , m_pThreadRec( tls_cleanup ) - , m_nCompactFactor( (unsigned int)( cds::beans::ceil2( nCompactFactor ) - 1 )) // binary mask - , m_nCombinePassCount( nCombinePassCount ) - { - init(); - } - - /// Destroys the objects and mark all publication records as inactive - ~kernel() - { - // mark all publication record as detached - for ( publication_record * p = m_pHead; p; ) { - p->pOwner = nullptr; - - publication_record * pRec = p; - p = p->pNext.load( memory_model::memory_order_relaxed ); - if ( pRec->nState.load( memory_model::memory_order_acquire ) == removed ) - free_publication_record( static_cast( pRec )); - } - } - - /// Gets publication list record for the current thread - /** - If there is no publication record for the current thread - the function allocates it. - */ - publication_record_type * acquire_record() - { - publication_record_type * pRec = m_pThreadRec.get(); - if ( !pRec ) { - // Allocate new publication record - pRec = cxx11_allocator().New(); - pRec->pOwner = reinterpret_cast( this ); - m_pThreadRec.reset( pRec ); - m_Stat.onCreatePubRecord(); - } - - if ( pRec->nState.load( memory_model::memory_order_acquire ) != active ) - publish( pRec ); - - assert( pRec->nRequest.load( memory_model::memory_order_relaxed ) == req_EmptyRecord ); - - m_Stat.onAcquirePubRecord(); - return pRec; - } - - /// Marks publication record for the current thread as empty - void release_record( publication_record_type * pRec ) - { - assert( pRec->is_done() ); - pRec->nRequest.store( req_EmptyRecord, memory_model::memory_order_release ); - m_Stat.onReleasePubRecord(); - } - - /// Trying to execute operation \p nOpId - /** - \p pRec is the publication record acquiring by \ref acquire_record earlier. - \p owner is a container that is owner of flat combining kernel object. - As a result the current thread can become a combiner or can wait for - another combiner performs \p pRec operation. - - If the thread becomes a combiner, the kernel calls \p owner.fc_apply - for each active non-empty publication record. 
- */ - template - void combine( unsigned int nOpId, publication_record_type * pRec, Container& owner ) - { - assert( nOpId >= req_Operation ); - assert( pRec ); - //assert( pRec->nState.load( memory_model::memory_order_relaxed ) == active ); - pRec->nRequest.store( nOpId, memory_model::memory_order_release ); - - m_Stat.onOperation(); - - try_combining( owner, pRec ); - } - - /// Trying to execute operation \p nOpId in batch-combine mode - /** - \p pRec is the publication record acquiring by \ref acquire_record earlier. - \p owner is a container that owns flat combining kernel object. - As a result the current thread can become a combiner or can wait for - another combiner performs \p pRec operation. - - If the thread becomes a combiner, the kernel calls \p owner.fc_process - giving the container the full access over publication list. This function - is useful for an elimination technique if the container supports any kind of - that. The container can perform multiple pass through publication list. - - \p owner.fc_process has two arguments - forward iterators on begin and end of - publication list, see \ref iterator class. For each processed record the container - should call \ref operation_done function to mark the record as processed. - - On the end of \p %batch_combine the \ref combine function is called - to process rest of publication records. - */ - template - void batch_combine( unsigned int nOpId, publication_record_type * pRec, Container& owner ) - { - assert( nOpId >= req_Operation ); - assert( pRec ); - //assert( pRec->nState.load( memory_model::memory_order_relaxed ) == active ); - pRec->nRequest.store( nOpId, memory_model::memory_order_release ); - - m_Stat.onOperation(); - - try_batch_combining( owner, pRec ); - } - - /// Waits for end of combining - void wait_while_combining() const - { - lock_guard l( m_Mutex ); - } - - /// Marks \p rec as executed - /** - This function should be called by container if batch_combine mode is used. - For usual combining (see \ref combine) this function is excess. - */ - void operation_done( publication_record& rec ) - { - rec.nRequest.store( req_Response, memory_model::memory_order_release ); - } - - /// Internal statistics - stat const& statistics() const - { - return m_Stat; - } - - //@cond - // For container classes based on flat combining - stat& internal_statistics() const - { - return m_Stat; - } - //@endcond - - /// Returns the compact factor - unsigned int compact_factor() const - { - return m_nCompactFactor + 1; - } - - /// Returns number of combining passes for combiner thread - unsigned int combine_pass_count() const - { - return m_nCombinePassCount; - } - - public: - /// Publication list iterator - /** - Iterators are intended for batch processing by container's - \p fc_process function. - The iterator allows iterate through active publication list. 
- */ - class iterator - { - //@cond - friend class kernel; - publication_record_type * m_pRec; - //@endcond - - protected: - //@cond - iterator( publication_record_type * pRec ) - : m_pRec( pRec ) - { - skip_inactive(); - } - - void skip_inactive() - { - while ( m_pRec && (m_pRec->nState.load( memory_model::memory_order_acquire ) != active - || m_pRec->nRequest.load( memory_model::memory_order_relaxed) < req_Operation )) - { - m_pRec = static_cast(m_pRec->pNext.load( memory_model::memory_order_acquire )); - } - } - //@endcond - - public: - /// Initializes an empty iterator object - iterator() - : m_pRec( nullptr ) - {} - - /// Copy ctor - iterator( iterator const& src ) - : m_pRec( src.m_pRec ) - {} - - /// Pre-increment - iterator& operator++() - { - assert( m_pRec ); - m_pRec = static_cast( m_pRec->pNext.load( memory_model::memory_order_acquire )); - skip_inactive(); - return *this; - } - - /// Post-increment - iterator operator++(int) - { - assert( m_pRec ); - iterator it(*this); - ++(*this); - return it; - } - - /// Dereference operator, can return \p nullptr - publication_record_type * operator ->() - { - return m_pRec; - } - - /// Dereference operator, the iterator should not be an end iterator - publication_record_type& operator*() - { - assert( m_pRec ); - return *m_pRec; - } - - /// Iterator equality - friend bool operator==( iterator it1, iterator it2 ) - { - return it1.m_pRec == it2.m_pRec; - } - - /// Iterator inequality - friend bool operator!=( iterator it1, iterator it2 ) - { - return !( it1 == it2 ); - } - }; - - /// Returns an iterator to the first active publication record - iterator begin() { return iterator(m_pHead); } - - /// Returns an iterator to the end of publication list. Should not be dereferenced. - iterator end() { return iterator(); } - - private: - //@cond - static void tls_cleanup( publication_record_type * pRec ) - { - // Thread done - // pRec that is TLS data should be excluded from publication list - if ( pRec ) { - if ( pRec->pOwner ) { - // kernel is alive - pRec->nState.store( removed, memory_model::memory_order_release ); - } - else { - // kernel already deleted - free_publication_record( pRec ); - } - } - } - - static void free_publication_record( publication_record_type * pRec ) - { - cxx11_allocator().Delete( pRec ); - } - - void init() - { - assert( m_pThreadRec.get() == nullptr ); - publication_record_type * pRec = cxx11_allocator().New(); - m_pHead = pRec; - pRec->pOwner = this; - m_pThreadRec.reset( pRec ); - m_Stat.onCreatePubRecord(); - } - - void publish( publication_record_type * pRec ) - { - assert( pRec->nState.load( memory_model::memory_order_relaxed ) == inactive ); - - pRec->nAge.store( m_nCount.load(memory_model::memory_order_acquire), memory_model::memory_order_release ); - pRec->nState.store( active, memory_model::memory_order_release ); - - // Insert record to publication list - if ( m_pHead != static_cast(pRec) ) { - publication_record * p = m_pHead->pNext.load(memory_model::memory_order_relaxed); - if ( p != static_cast( pRec )) { - do { - pRec->pNext = p; - // Failed CAS changes p - } while ( !m_pHead->pNext.compare_exchange_weak( p, static_cast(pRec), - memory_model::memory_order_release, atomics::memory_order_relaxed )); - m_Stat.onActivatePubRecord(); - } - } - } - - void republish( publication_record_type * pRec ) - { - if ( pRec->nState.load( memory_model::memory_order_relaxed ) != active ) { - // The record has been excluded from publication list. 
Reinsert it - publish( pRec ); - } - } - - template - void try_combining( Container& owner, publication_record_type * pRec ) - { - if ( m_Mutex.try_lock() ) { - // The thread becomes a combiner - lock_guard l( m_Mutex, std::adopt_lock_t() ); - - // The record pRec can be excluded from publication list. Re-publish it - republish( pRec ); - - combining( owner ); - assert( pRec->nRequest.load( memory_model::memory_order_relaxed ) == req_Response ); - } - else { - // There is another combiner, wait while it executes our request - if ( !wait_for_combining( pRec ) ) { - // The thread becomes a combiner - lock_guard l( m_Mutex, std::adopt_lock_t() ); - - // The record pRec can be excluded from publication list. Re-publish it - republish( pRec ); - - combining( owner ); - assert( pRec->nRequest.load( memory_model::memory_order_relaxed ) == req_Response ); - } - } - } - - template - void try_batch_combining( Container& owner, publication_record_type * pRec ) - { - if ( m_Mutex.try_lock() ) { - // The thread becomes a combiner - lock_guard l( m_Mutex, std::adopt_lock_t() ); - - // The record pRec can be excluded from publication list. Re-publish it - republish( pRec ); - - batch_combining( owner ); - assert( pRec->nRequest.load( memory_model::memory_order_relaxed ) == req_Response ); - } - else { - // There is another combiner, wait while it executes our request - if ( !wait_for_combining( pRec ) ) { - // The thread becomes a combiner - lock_guard l( m_Mutex, std::adopt_lock_t() ); - - // The record pRec can be excluded from publication list. Re-publish it - republish( pRec ); - - batch_combining( owner ); - assert( pRec->nRequest.load( memory_model::memory_order_relaxed ) == req_Response ); - } - } - } - - template - void combining( Container& owner ) - { - // The thread is a combiner - assert( !m_Mutex.try_lock() ); - - unsigned int const nCurAge = m_nCount.fetch_add( 1, memory_model::memory_order_release ) + 1; - - for ( unsigned int nPass = 0; nPass < m_nCombinePassCount; ++nPass ) - if ( !combining_pass( owner, nCurAge )) - break; - - m_Stat.onCombining(); - if ( (nCurAge & m_nCompactFactor) == 0 ) - compact_list( nCurAge ); - } - - template - bool combining_pass( Container& owner, unsigned int nCurAge ) - { - publication_record * pPrev = nullptr; - publication_record * p = m_pHead; - bool bOpDone = false; - while ( p ) { - switch ( p->nState.load( memory_model::memory_order_acquire )) { - case active: - if ( p->op() >= req_Operation ) { - p->nAge.store( nCurAge, memory_model::memory_order_release ); - owner.fc_apply( static_cast(p) ); - operation_done( *p ); - bOpDone = true; - } - break; - case inactive: - // Only m_pHead can be inactive in the publication list - assert( p == m_pHead ); - break; - case removed: - // The record should be removed - p = unlink_and_delete_record( pPrev, p ); - continue; - default: - /// ??? 
That is impossible - assert(false); - } - pPrev = p; - p = p->pNext.load( memory_model::memory_order_acquire ); - } - return bOpDone; - } - - template - void batch_combining( Container& owner ) - { - // The thread is a combiner - assert( !m_Mutex.try_lock() ); - - unsigned int const nCurAge = m_nCount.fetch_add( 1, memory_model::memory_order_release ) + 1; - - for ( unsigned int nPass = 0; nPass < m_nCombinePassCount; ++nPass ) - owner.fc_process( begin(), end() ); - - combining_pass( owner, nCurAge ); - m_Stat.onCombining(); - if ( (nCurAge & m_nCompactFactor) == 0 ) - compact_list( nCurAge ); - } - - bool wait_for_combining( publication_record_type * pRec ) - { - back_off bkoff; - while ( pRec->nRequest.load( memory_model::memory_order_acquire ) != req_Response ) { - - // The record can be excluded from publication list. Reinsert it - republish( pRec ); - - bkoff(); - - if ( m_Mutex.try_lock() ) { - if ( pRec->nRequest.load( memory_model::memory_order_acquire ) == req_Response ) { - m_Mutex.unlock(); - break; - } - // The thread becomes a combiner - return false; - } - } - return true; - } - - void compact_list( unsigned int const nCurAge ) - { - // Thinning publication list - publication_record * pPrev = nullptr; - for ( publication_record * p = m_pHead; p; ) { - if ( p->nState.load( memory_model::memory_order_acquire ) == active - && p->nAge.load( memory_model::memory_order_acquire ) + m_nCompactFactor < nCurAge ) - { - if ( pPrev ) { - publication_record * pNext = p->pNext.load( memory_model::memory_order_acquire ); - if ( pPrev->pNext.compare_exchange_strong( p, pNext, - memory_model::memory_order_release, atomics::memory_order_relaxed )) - { - p->nState.store( inactive, memory_model::memory_order_release ); - p = pNext; - m_Stat.onDeactivatePubRecord(); - continue; - } - } - } - pPrev = p; - p = p->pNext.load( memory_model::memory_order_acquire ); - } - - m_Stat.onCompactPublicationList(); - } - - publication_record * unlink_and_delete_record( publication_record * pPrev, publication_record * p ) - { - if ( pPrev ) { - publication_record * pNext = p->pNext.load( memory_model::memory_order_acquire ); - if ( pPrev->pNext.compare_exchange_strong( p, pNext, - memory_model::memory_order_release, atomics::memory_order_relaxed )) - { - free_publication_record( static_cast( p )); - m_Stat.onDeletePubRecord(); - } - return pNext; - } - else { - m_pHead = static_cast( p->pNext.load( memory_model::memory_order_acquire )); - free_publication_record( static_cast( p )); - m_Stat.onDeletePubRecord(); - return m_pHead; - } - } - //@endcond - }; - - //@cond - class container - { - public: - template - void fc_apply( PubRecord * ) - { - assert( false ); - } - - template - void fc_process( Iterator, Iterator ) - { - assert( false ); - } - }; - //@endcond - - } // namespace flat_combining -}} // namespace cds::algo +#include #endif // #ifndef CDSLIB_ALGO_FLAT_COMBINING_H diff --git a/cds/algo/flat_combining/defs.h b/cds/algo/flat_combining/defs.h new file mode 100644 index 00000000..d29702d1 --- /dev/null +++ b/cds/algo/flat_combining/defs.h @@ -0,0 +1,91 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain 
the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_ALGO_FLAT_COMBINING_DEFS_H +#define CDSLIB_ALGO_FLAT_COMBINING_DEFS_H + +#include + + +namespace cds { namespace algo { namespace flat_combining { + + /// Special values of \p publication_record::nRequest + enum request_value + { + req_EmptyRecord, ///< Publication record is empty + req_Response, ///< Operation is done + + req_Operation ///< First operation id for derived classes + }; + + /// \p publication_record state + enum record_state { + inactive, ///< Record is inactive + active, ///< Record is active + removed ///< Record should be removed + }; + + /// Record of publication list + /** + Each data structure based on flat combining contains a class derived from \p %publication_record + */ + struct publication_record { + atomics::atomic nRequest; ///< Request field (depends on data structure) + atomics::atomic nState; ///< Record state: inactive, active, removed + atomics::atomic nAge; ///< Age of the record + atomics::atomic pNext; ///< Next record in publication list + void * pOwner; ///< [internal data] Pointer to \ref kernel object that manages the publication list + + /// Initializes publication record + publication_record() + : nRequest( req_EmptyRecord ) + , nState( inactive ) + , nAge( 0 ) + , pNext( nullptr ) + , pOwner( nullptr ) + {} + + /// Returns the value of \p nRequest field + unsigned int op( atomics::memory_order mo = atomics::memory_order_relaxed ) const + { + return nRequest.load( mo ); + } + + /// Checks if the operation is done + bool is_done() const + { + return nRequest.load( atomics::memory_order_relaxed ) == req_Response; + } + }; + + +}}} // namespace cds::algo::flat_combining + +#endif // CDSLIB_ALGO_FLAT_COMBINING_DEFS_H diff --git a/cds/algo/flat_combining/kernel.h b/cds/algo/flat_combining/kernel.h new file mode 100644 index 00000000..79bf5e7b --- /dev/null +++ b/cds/algo/flat_combining/kernel.h @@ -0,0 +1,858 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_ALGO_FLAT_COMBINING_KERNEL_H +#define CDSLIB_ALGO_FLAT_COMBINING_KERNEL_H + +#include +#include + +#include +#include +#include +#include + +namespace cds { namespace algo { + + /// @defgroup cds_flat_combining_intrusive Intrusive flat combining containers + /// @defgroup cds_flat_combining_container Non-intrusive flat combining containers + + /// Flat combining + /** + @anchor cds_flat_combining_description + Flat combining (FC) technique is invented by Hendler, Incze, Shavit and Tzafrir in their paper + [2010] "Flat Combining and the Synchronization-Parallelism Tradeoff". + The technique converts a sequential data structure to its concurrent implementation. + A few structures are added to the sequential implementation: a global lock, + a count of the number of combining passes, and a pointer to the head + of a publication list. The publication list is a list of thread-local records + of a size proportional to the number of threads that are concurrently accessing the shared object. + + Each thread \p t accessing the structure to perform an invocation of some method \p m + on the shared object executes the following sequence of steps: +
  1. Write the invocation opcode and parameters (if any) of the method \p m to be applied
     sequentially to the shared object in the request field of your thread-local publication
     record (there is no need to use a load-store memory barrier). The request field will later
     be used to receive the response. If your thread-local publication record is marked as active,
     continue to step 2; otherwise continue to step 5.
  2. Check if the global lock is taken. If so (another thread is an active combiner), spin on the request
     field waiting for a response to the invocation (one can add a yield at this point to allow other threads
     on the same core to run). Once in a while, while spinning, check that the lock is still taken and that your
     record is active. If your record is inactive, proceed to step 5. Once the response is available,
     reset the request field to null and return the response.
  3. If the lock is not taken, attempt to acquire it and become a combiner. If you fail,
     return to spinning in step 2.
  4. Otherwise, you hold the lock and are a combiner:
     - Increment the combining pass count by one.
     - Execute a \p fc_apply() by traversing the publication list from the head,
       combining all non-null method call invocations, setting the age of each of these records
       to the current count, applying the combined method calls to the structure D, and returning
       responses to all the invocations. This traversal is guaranteed to be wait-free.
     - If the count is such that a cleanup needs to be performed, traverse the publication
       list from the head. Starting from the second item (we always leave the item pointed to
       by the head in the list), remove from the publication list all records whose age is
       much smaller than the current count. This is done by removing the node and marking it
       as inactive.
     - Release the lock.
  5. If you have no thread-local publication record, allocate one marked as active. If you already
     have one marked as inactive, mark it as active. Execute a store-load memory barrier. Proceed to insert
     the record into the list with a successful CAS to the head. Then proceed to step 1.
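To make the kernel interface documented further below concrete, here is a hedged sketch of a toy container built on \p flat_combining::kernel as it appears in this patch: it uses acquire_record() / combine() / release_record(), provides the fc_apply() callback the combiner invokes, and reads state through the new invoke_exclusive(). The container, its record fields and operation ids are invented for the example; it is not a libcds container and skips the traits tuning a real one would use.

#include <cds/algo/flat_combining/kernel.h>

// Toy flat-combining counter: every mutation is funneled through the FC kernel.
class fc_counter {
public:
    /// Publication record: the per-thread request/response area
    struct fc_record: public cds::algo::flat_combining::publication_record
    {
        int  nArg;     ///< argument of the operation
        long nResult;  ///< response filled in by the combiner
    };

    /// Operation ids must start from req_Operation
    enum fc_operation {
        op_add = cds::algo::flat_combining::req_Operation
    };

    /// Adds \p delta and returns the new value
    long add( int delta )
    {
        auto * pRec = m_FlatCombining.acquire_record();
        pRec->nArg = delta;

        // The current thread either becomes the combiner or waits, using the
        // configured wait strategy, until another combiner executes this request.
        m_FlatCombining.combine( op_add, pRec, *this );

        long nResult = pRec->nResult;
        m_FlatCombining.release_record( pRec );
        return nResult;
    }

    /// Reads the value in exclusive mode (no request is published)
    long value()
    {
        long v;
        m_FlatCombining.invoke_exclusive( [&v, this]() { v = m_nValue; } );
        return v;
    }

    /// Called by the kernel for each active record while this thread is the combiner
    void fc_apply( fc_record * pRec )
    {
        switch ( pRec->op()) {
            case op_add:
                m_nValue += pRec->nArg;
                pRec->nResult = m_nValue;
                break;
        }
    }

private:
    cds::algo::flat_combining::kernel< fc_record > m_FlatCombining;
    long m_nValue = 0;
};

With the default traits this kernel picks the back-off wait strategy added by this commit; passing cds::algo::flat_combining::make_traits< cds::opt::wait_strategy< ... > >::type as the second template argument selects another strategy without touching the container code.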
+ + As the test results show, the flat combining technique is suitable for non-intrusive containers + like stack, queue, deque. For intrusive concurrent containers the flat combining demonstrates + less impressive results. + + \ref cds_flat_combining_container "List of FC-based containers" in libcds. + + \ref cds_flat_combining_intrusive "List of intrusive FC-based containers" in libcds. + */ + namespace flat_combining { + + /// Flat combining internal statistics + template + struct stat + { + typedef Counter counter_type; ///< Event counter type + + counter_type m_nOperationCount ; ///< How many operations have been performed + counter_type m_nCombiningCount ; ///< Combining call count + counter_type m_nCompactPublicationList; ///< Count of publication list compacting + counter_type m_nDeactivatePubRecord; ///< How many publication records were deactivated during compacting + counter_type m_nActivatePubRecord; ///< Count of publication record activating + counter_type m_nPubRecordCreated ; ///< Count of created publication records + counter_type m_nPubRecordDeteted ; ///< Count of deleted publication records + counter_type m_nPassiveWaitCall; ///< Count of passive waiting call (\p kernel::wait_for_combining()) + counter_type m_nPassiveWaitIteration;///< Count of iteration inside passive waiting + counter_type m_nPassiveWaitWakeup; ///< Count of forcing wake-up of passive wait cycle + counter_type m_nInvokeExclusive; ///< Count of call \p kernel::invoke_exclusive() + counter_type m_nWakeupByNotifying; ///< How many times the passive thread be waked up by a notification + counter_type m_nPassiveToCombiner; ///< How many times the passive thread becomes the combiner + + /// Returns current combining factor + /** + Combining factor is how many operations perform in one combine pass: + combining_factor := m_nOperationCount / m_nCombiningCount + */ + double combining_factor() const + { + return m_nCombiningCount.get() ? double( m_nOperationCount.get()) / m_nCombiningCount.get() : 0.0; + } + + //@cond + void onOperation() { ++m_nOperationCount; } + void onCombining() { ++m_nCombiningCount; } + void onCompactPublicationList() { ++m_nCompactPublicationList; } + void onDeactivatePubRecord() { ++m_nDeactivatePubRecord; } + void onActivatePubRecord() { ++m_nActivatePubRecord; } + void onCreatePubRecord() { ++m_nPubRecordCreated; } + void onDeletePubRecord() { ++m_nPubRecordDeteted; } + void onPassiveWait() { ++m_nPassiveWaitCall; } + void onPassiveWaitIteration() { ++m_nPassiveWaitIteration; } + void onPassiveWaitWakeup() { ++m_nPassiveWaitWakeup; } + void onInvokeExclusive() { ++m_nInvokeExclusive; } + void onWakeupByNotifying() { ++m_nWakeupByNotifying; } + void onPassiveToCombiner() { ++m_nPassiveToCombiner; } + + //@endcond + }; + + /// Flat combining dummy internal statistics + struct empty_stat + { + //@cond + void onOperation() const {} + void onCombining() const {} + void onCompactPublicationList() const {} + void onDeactivatePubRecord() const {} + void onActivatePubRecord() const {} + void onCreatePubRecord() const {} + void onDeletePubRecord() const {} + void onPassiveWait() const {} + void onPassiveWaitIteration() const {} + void onPassiveWaitWakeup() const {} + void onInvokeExclusive() const {} + void onWakeupByNotifying() const {} + void onPassiveToCombiner() const {} + //@endcond + }; + + /// Type traits of \ref kernel class + /** + You can define different type traits for \ref kernel + by specifying your struct based on \p %traits + or by using \ref make_traits metafunction. 
+ */ + struct traits + { + typedef cds::sync::spin lock_type; ///< Lock type + typedef cds::algo::flat_combining::wait_strategy::backoff< cds::backoff::delay_of<2>> wait_strategy; ///< Wait strategy + typedef CDS_DEFAULT_ALLOCATOR allocator; ///< Allocator used for TLS data (allocating \p publication_record derivatives) + typedef empty_stat stat; ///< Internal statistics + typedef opt::v::relaxed_ordering memory_model; ///< /// C++ memory ordering model + }; + + /// Metafunction converting option list to traits + /** + \p Options are: + - \p opt::lock_type - mutex type, default is \p cds::sync::spin + - \p opt::wait_strategy - wait strategy, see \p wait_strategy namespace, default is \p wait_strategy::backoff. + - \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR + - \p opt::stat - internal statistics, possible type: \ref stat, \ref empty_stat (the default) + - \p opt::memory_model - C++ memory ordering model. + List of all available memory ordering see \p opt::memory_model. + Default is \p cds::opt::v::relaxed_ordering + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... + >::type type; +# endif + }; + + /// The kernel of flat combining + /** + Template parameters: + - \p PublicationRecord - a type derived from \ref publication_record + - \p Traits - a type traits of flat combining, default is \p flat_combining::traits. + \ref make_traits metafunction can be used to create type traits + + The kernel object should be a member of a container class. The container cooperates with flat combining + kernel object. There are two ways to interact with the kernel: + - One-by-one processing the active records of the publication list. This mode provides by \p combine() function: + the container acquires its publication record by \p acquire_record(), fills its fields and calls + \p combine() function of its kernel object. If the current thread becomes a combiner, the kernel + calls \p fc_apply() function of the container for each active non-empty record. Then, the container + should release its publication record by \p release_record(). Only one pass through the publication + list is possible. + - Batch processing - \p batch_combine() function. It this mode the container obtains access + to entire publication list. This mode allows the container to perform an elimination, for example, + the stack can collide \p push() and \p pop() requests. The sequence of invocations is the following: + the container acquires its publication record by \p acquire_record(), fills its field and call + \p batch_combine() function of its kernel object. If the current thread becomes a combiner, + the kernel calls \p fc_process() function of the container passing two iterators pointing to + the begin and the end of publication list (see \ref iterator class). The iterators allow + multiple pass through active records of publication list. For each processed record the container + should call \p operation_done() function. On the end, the container should release + its record by \p release_record(). 
+ */ + template < + typename PublicationRecord + ,typename Traits = traits + > + class kernel + { + public: + typedef Traits traits; ///< Type traits + typedef typename traits::lock_type global_lock_type; ///< Global lock type + typedef typename traits::wait_strategy wait_strategy; ///< Wait strategy type + typedef typename traits::allocator allocator; ///< Allocator type (used for allocating publication_record_type data) + typedef typename traits::stat stat; ///< Internal statistics + typedef typename traits::memory_model memory_model; ///< C++ memory model + + typedef typename wait_strategy::template make_publication_record::type publication_record_type; ///< Publication record type + + protected: + //@cond + typedef cds::details::Allocator< publication_record_type, allocator > cxx11_allocator; ///< internal helper cds::details::Allocator + typedef std::lock_guard lock_guard; + //@endcond + + protected: + atomics::atomic m_nCount; ///< Total count of combining passes. Used as an age. + publication_record_type * m_pHead; ///< Head of publication list + boost::thread_specific_ptr< publication_record_type > m_pThreadRec; ///< Thread-local publication record + mutable global_lock_type m_Mutex; ///< Global mutex + mutable stat m_Stat; ///< Internal statistics + unsigned int const m_nCompactFactor; ///< Publication list compacting factor (the list will be compacted through \p %m_nCompactFactor combining passes) + unsigned int const m_nCombinePassCount; ///< Number of combining passes + wait_strategy m_waitStrategy; ///< Wait strategy + + public: + /// Initializes the object + /** + Compact factor = 64 + + Combiner pass count = 8 + */ + kernel() + : kernel( 64, 8 ) + {} + + /// Initializes the object + kernel( + unsigned int nCompactFactor ///< Publication list compacting factor (the list will be compacted through \p nCompactFactor combining passes) + ,unsigned int nCombinePassCount ///< Number of combining passes for combiner thread + ) + : m_nCount(0) + , m_pHead( nullptr ) + , m_pThreadRec( tls_cleanup ) + , m_nCompactFactor( (unsigned int)( cds::beans::ceil2( nCompactFactor ) - 1 )) // binary mask + , m_nCombinePassCount( nCombinePassCount ) + { + init(); + } + + /// Destroys the objects and mark all publication records as inactive + ~kernel() + { + // mark all publication record as detached + for ( publication_record* p = m_pHead; p; ) { + p->pOwner = nullptr; + + publication_record * pRec = p; + p = p->pNext.load( memory_model::memory_order_relaxed ); + if ( pRec->nState.load( memory_model::memory_order_acquire ) == removed ) + free_publication_record( static_cast( pRec )); + } + } + + /// Gets publication list record for the current thread + /** + If there is no publication record for the current thread + the function allocates it. 
+ */ + publication_record_type * acquire_record() + { + publication_record_type * pRec = m_pThreadRec.get(); + if ( !pRec ) { + // Allocate new publication record + pRec = cxx11_allocator().New(); + pRec->pOwner = reinterpret_cast( this ); + m_pThreadRec.reset( pRec ); + m_Stat.onCreatePubRecord(); + } + + if ( pRec->nState.load( memory_model::memory_order_acquire ) != active ) + publish( pRec ); + + assert( pRec->op() == req_EmptyRecord ); + + return pRec; + } + + /// Marks publication record for the current thread as empty + void release_record( publication_record_type * pRec ) + { + assert( pRec->is_done() ); + pRec->nRequest.store( req_EmptyRecord, memory_model::memory_order_release ); + } + + /// Trying to execute operation \p nOpId + /** + \p pRec is the publication record acquiring by \ref acquire_record earlier. + \p owner is a container that is owner of flat combining kernel object. + As a result the current thread can become a combiner or can wait for + another combiner performs \p pRec operation. + + If the thread becomes a combiner, the kernel calls \p owner.fc_apply + for each active non-empty publication record. + */ + template + void combine( unsigned int nOpId, publication_record_type * pRec, Container& owner ) + { + assert( nOpId >= req_Operation ); + assert( pRec ); + + pRec->nRequest.store( nOpId, memory_model::memory_order_release ); + m_Stat.onOperation(); + + try_combining( owner, pRec ); + } + + /// Trying to execute operation \p nOpId in batch-combine mode + /** + \p pRec is the publication record acquiring by \p acquire_record() earlier. + \p owner is a container that owns flat combining kernel object. + As a result the current thread can become a combiner or can wait for + another combiner performs \p pRec operation. + + If the thread becomes a combiner, the kernel calls \p owner.fc_process() + giving the container the full access over publication list. This function + is useful for an elimination technique if the container supports any kind of + that. The container can perform multiple pass through publication list. + + \p owner.fc_process() has two arguments - forward iterators on begin and end of + publication list, see \ref iterator class. For each processed record the container + should call \p operation_done() function to mark the record as processed. + + On the end of \p %batch_combine the \p combine() function is called + to process rest of publication records. + */ + template + void batch_combine( unsigned int nOpId, publication_record_type* pRec, Container& owner ) + { + assert( nOpId >= req_Operation ); + assert( pRec ); + + pRec->nRequest.store( nOpId, memory_model::memory_order_release ); + m_Stat.onOperation(); + + try_batch_combining( owner, pRec ); + } + + /// Invokes \p Func in exclusive mode + /** + Some operation in flat combining containers should be called in exclusive mode + i.e the current thread should become the combiner to process the operation. + The typical example is \p empty() function. + + \p %invoke_exclusive() allows do that: the current thread becomes the combiner, + invokes \p f exclusively but unlike a typical usage the thread does not process any pending request. + Instead, after end of \p f call the current thread wakes up a pending thread if any. + */ + template + void invoke_exclusive( Func f ) + { + { + lock_guard l( m_Mutex ); + f(); + } + m_waitStrategy.wakeup( *this ); + m_Stat.onInvokeExclusive(); + } + + /// Marks \p rec as executed + /** + This function should be called by container if \p batch_combine mode is used. 
+ For usual combining (see \p combine() ) this function is excess. + */ + void operation_done( publication_record& rec ) + { + rec.nRequest.store( req_Response, memory_model::memory_order_release ); + m_waitStrategy.notify( *this, static_cast( rec )); + } + + /// Internal statistics + stat const& statistics() const + { + return m_Stat; + } + + //@cond + // For container classes based on flat combining + stat& internal_statistics() const + { + return m_Stat; + } + //@endcond + + /// Returns the compact factor + unsigned int compact_factor() const + { + return m_nCompactFactor + 1; + } + + /// Returns number of combining passes for combiner thread + unsigned int combine_pass_count() const + { + return m_nCombinePassCount; + } + + public: + /// Publication list iterator + /** + Iterators are intended for batch processing by container's + \p fc_process function. + The iterator allows iterate through active publication list. + */ + class iterator + { + //@cond + friend class kernel; + publication_record_type * m_pRec; + //@endcond + + protected: + //@cond + iterator( publication_record_type * pRec ) + : m_pRec( pRec ) + { + skip_inactive(); + } + + void skip_inactive() + { + while ( m_pRec && (m_pRec->nState.load( memory_model::memory_order_acquire ) != active + || m_pRec->op( memory_model::memory_order_relaxed) < req_Operation )) + { + m_pRec = static_cast(m_pRec->pNext.load( memory_model::memory_order_acquire )); + } + } + //@endcond + + public: + /// Initializes an empty iterator object + iterator() + : m_pRec( nullptr ) + {} + + /// Copy ctor + iterator( iterator const& src ) + : m_pRec( src.m_pRec ) + {} + + /// Pre-increment + iterator& operator++() + { + assert( m_pRec ); + m_pRec = static_cast( m_pRec->pNext.load( memory_model::memory_order_acquire )); + skip_inactive(); + return *this; + } + + /// Post-increment + iterator operator++(int) + { + assert( m_pRec ); + iterator it(*this); + ++(*this); + return it; + } + + /// Dereference operator, can return \p nullptr + publication_record_type* operator ->() + { + return m_pRec; + } + + /// Dereference operator, the iterator should not be an end iterator + publication_record_type& operator*() + { + assert( m_pRec ); + return *m_pRec; + } + + /// Iterator equality + friend bool operator==( iterator it1, iterator it2 ) + { + return it1.m_pRec == it2.m_pRec; + } + + /// Iterator inequality + friend bool operator!=( iterator it1, iterator it2 ) + { + return !( it1 == it2 ); + } + }; + + /// Returns an iterator to the first active publication record + iterator begin() { return iterator(m_pHead); } + + /// Returns an iterator to the end of publication list. Should not be dereferenced. 
+ iterator end() { return iterator(); } + + public: + /// Gets current value of \p rec.nRequest + /** + This function is intended for invoking from a wait strategy + */ + int get_operation( publication_record& rec ) + { + return rec.op( memory_model::memory_order_acquire ); + } + + /// Wakes up any waiting thread + /** + This function is intended for invoking from a wait strategy + */ + void wakeup_any() + { + publication_record* pRec = m_pHead; + while ( pRec ) { + if ( pRec->nState.load( memory_model::memory_order_acquire ) == active + && pRec->op( memory_model::memory_order_acquire ) >= req_Operation ) + { + m_waitStrategy.notify( *this, static_cast( *pRec )); + break; + } + pRec = pRec->pNext.load( memory_model::memory_order_acquire ); + } + } + + private: + //@cond + static void tls_cleanup( publication_record_type* pRec ) + { + // Thread done + // pRec that is TLS data should be excluded from publication list + if ( pRec ) { + if ( pRec->pOwner ) { + // kernel is alive + pRec->nState.store( removed, memory_model::memory_order_release ); + } + else { + // kernel already deleted + free_publication_record( pRec ); + } + } + } + + static void free_publication_record( publication_record_type* pRec ) + { + cxx11_allocator().Delete( pRec ); + } + + void init() + { + assert( m_pThreadRec.get() == nullptr ); + publication_record_type* pRec = cxx11_allocator().New(); + m_pHead = pRec; + pRec->pOwner = this; + m_pThreadRec.reset( pRec ); + m_Stat.onCreatePubRecord(); + } + + void publish( publication_record_type* pRec ) + { + assert( pRec->nState.load( memory_model::memory_order_relaxed ) == inactive ); + + pRec->nAge.store( m_nCount.load(memory_model::memory_order_acquire), memory_model::memory_order_release ); + pRec->nState.store( active, memory_model::memory_order_release ); + + // Insert record to publication list + if ( m_pHead != static_cast(pRec) ) { + publication_record * p = m_pHead->pNext.load(memory_model::memory_order_relaxed); + if ( p != static_cast( pRec )) { + do { + pRec->pNext = p; + // Failed CAS changes p + } while ( !m_pHead->pNext.compare_exchange_weak( p, static_cast(pRec), + memory_model::memory_order_release, atomics::memory_order_relaxed )); + m_Stat.onActivatePubRecord(); + } + } + } + + void republish( publication_record_type* pRec ) + { + if ( pRec->nState.load( memory_model::memory_order_relaxed ) != active ) { + // The record has been excluded from publication list. Reinsert it + publish( pRec ); + } + } + + template + void try_combining( Container& owner, publication_record_type* pRec ) + { + if ( m_Mutex.try_lock() ) { + // The thread becomes a combiner + lock_guard l( m_Mutex, std::adopt_lock_t() ); + + // The record pRec can be excluded from publication list. Re-publish it + republish( pRec ); + + combining( owner ); + assert( pRec->op( memory_model::memory_order_relaxed ) == req_Response ); + } + else { + // There is another combiner, wait while it executes our request + if ( !wait_for_combining( pRec ) ) { + // The thread becomes a combiner + lock_guard l( m_Mutex, std::adopt_lock_t() ); + + // The record pRec can be excluded from publication list. 
Re-publish it + republish( pRec ); + + combining( owner ); + assert( pRec->op( memory_model::memory_order_relaxed ) == req_Response ); + } + } + } + + template + void try_batch_combining( Container& owner, publication_record_type * pRec ) + { + if ( m_Mutex.try_lock() ) { + // The thread becomes a combiner + lock_guard l( m_Mutex, std::adopt_lock_t() ); + + // The record pRec can be excluded from publication list. Re-publish it + republish( pRec ); + + batch_combining( owner ); + assert( pRec->op( memory_model::memory_order_relaxed ) == req_Response ); + } + else { + // There is another combiner, wait while it executes our request + if ( !wait_for_combining( pRec ) ) { + // The thread becomes a combiner + lock_guard l( m_Mutex, std::adopt_lock_t() ); + + // The record pRec can be excluded from publication list. Re-publish it + republish( pRec ); + + batch_combining( owner ); + assert( pRec->op( memory_model::memory_order_relaxed ) == req_Response ); + } + } + } + + template + void combining( Container& owner ) + { + // The thread is a combiner + assert( !m_Mutex.try_lock() ); + + unsigned int const nCurAge = m_nCount.fetch_add( 1, memory_model::memory_order_release ) + 1; + + for ( unsigned int nPass = 0; nPass < m_nCombinePassCount; ++nPass ) + if ( !combining_pass( owner, nCurAge )) + break; + + m_Stat.onCombining(); + if ( (nCurAge & m_nCompactFactor) == 0 ) + compact_list( nCurAge ); + } + + template + bool combining_pass( Container& owner, unsigned int nCurAge ) + { + publication_record* pPrev = nullptr; + publication_record* p = m_pHead; + bool bOpDone = false; + while ( p ) { + switch ( p->nState.load( memory_model::memory_order_acquire )) { + case active: + if ( p->op() >= req_Operation ) { + p->nAge.store( nCurAge, memory_model::memory_order_release ); + owner.fc_apply( static_cast(p) ); + operation_done( *p ); + bOpDone = true; + } + break; + case inactive: + // Only m_pHead can be inactive in the publication list + assert( p == m_pHead ); + break; + case removed: + // The record should be removed + p = unlink_and_delete_record( pPrev, p ); + continue; + default: + /// ??? That is impossible + assert(false); + } + pPrev = p; + p = p->pNext.load( memory_model::memory_order_acquire ); + } + return bOpDone; + } + + template + void batch_combining( Container& owner ) + { + // The thread is a combiner + assert( !m_Mutex.try_lock() ); + + unsigned int const nCurAge = m_nCount.fetch_add( 1, memory_model::memory_order_release ) + 1; + + for ( unsigned int nPass = 0; nPass < m_nCombinePassCount; ++nPass ) + owner.fc_process( begin(), end() ); + + combining_pass( owner, nCurAge ); + m_Stat.onCombining(); + if ( (nCurAge & m_nCompactFactor) == 0 ) + compact_list( nCurAge ); + } + + bool wait_for_combining( publication_record_type * pRec ) + { + m_waitStrategy.prepare( *pRec ); + m_Stat.onPassiveWait(); + + while ( pRec->op( memory_model::memory_order_acquire ) != req_Response ) { + // The record can be excluded from publication list. 
Reinsert it + republish( pRec ); + + m_Stat.onPassiveWaitIteration(); + + // Wait while operation processing + if ( m_waitStrategy.wait( *this, *pRec )) + m_Stat.onWakeupByNotifying(); + + if ( m_Mutex.try_lock() ) { + if ( pRec->op( memory_model::memory_order_acquire ) == req_Response ) { + // Operation is done + m_Mutex.unlock(); + + // Wake up a pending threads + m_waitStrategy.wakeup( *this ); + m_Stat.onPassiveWaitWakeup(); + + break; + } + // The thread becomes a combiner + m_Stat.onPassiveToCombiner(); + return false; + } + } + return true; + } + + void compact_list( unsigned int const nCurAge ) + { + // Thinning publication list + publication_record * pPrev = nullptr; + for ( publication_record * p = m_pHead; p; ) { + if ( p->nState.load( memory_model::memory_order_acquire ) == active + && p->nAge.load( memory_model::memory_order_acquire ) + m_nCompactFactor < nCurAge ) + { + if ( pPrev ) { + publication_record * pNext = p->pNext.load( memory_model::memory_order_acquire ); + if ( pPrev->pNext.compare_exchange_strong( p, pNext, + memory_model::memory_order_release, atomics::memory_order_relaxed )) + { + p->nState.store( inactive, memory_model::memory_order_release ); + p = pNext; + m_Stat.onDeactivatePubRecord(); + continue; + } + } + } + pPrev = p; + p = p->pNext.load( memory_model::memory_order_acquire ); + } + + m_Stat.onCompactPublicationList(); + } + + publication_record * unlink_and_delete_record( publication_record * pPrev, publication_record * p ) + { + if ( pPrev ) { + publication_record * pNext = p->pNext.load( memory_model::memory_order_acquire ); + if ( pPrev->pNext.compare_exchange_strong( p, pNext, + memory_model::memory_order_release, atomics::memory_order_relaxed )) + { + free_publication_record( static_cast( p )); + m_Stat.onDeletePubRecord(); + } + return pNext; + } + else { + m_pHead = static_cast( p->pNext.load( memory_model::memory_order_acquire )); + free_publication_record( static_cast( p )); + m_Stat.onDeletePubRecord(); + return m_pHead; + } + } + //@endcond + }; + + //@cond + class container + { + public: + template + void fc_apply( PubRecord * ) + { + assert( false ); + } + + template + void fc_process( Iterator, Iterator ) + { + assert( false ); + } + }; + //@endcond + + } // namespace flat_combining +}} // namespace cds::algo + +#endif // #ifndef CDSLIB_ALGO_FLAT_COMBINING_KERNEL_H diff --git a/cds/algo/flat_combining/wait_strategy.h b/cds/algo/flat_combining/wait_strategy.h new file mode 100644 index 00000000..c8ec6f83 --- /dev/null +++ b/cds/algo/flat_combining/wait_strategy.h @@ -0,0 +1,297 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_ALGO_FLAT_COMBINING_WAIT_STRATEGY_H +#define CDSLIB_ALGO_FLAT_COMBINING_WAIT_STRATEGY_H + +#include +#include +#include +#include +#include // thread_specific_ptr + + +namespace cds { namespace opt { + + /// Wait strategy option for \p flat_combining::kernel + template + struct wait_strategy { + //@cond + template struct pack: public Base + { + typedef Strategy wait_strategy; + }; + //@endcond + }; + +}} // namespace cds::opt + +namespace cds { namespace algo { namespace flat_combining { + + /// Wait strategies for \p flat_combining technique + namespace wait_strategy { + + /// Empty wait strategy + struct empty + { + //@cond + template + struct make_publication_record { + typedef PublicationRecord type; + }; + + template + void prepare( PublicationRecord& /*rec*/ ) + {} + + template + bool wait( FCKernel& /*fc*/, PublicationRecord& /*rec*/ ) + { + return false; + } + + template + void notify( FCKernel& /*fc*/, PublicationRecord& /*rec*/ ) + {} + + template + void wakeup( FCKernel& /*fc*/ ) + {} + //@endcond + }; + + /// Back-off wait strategy + template > + struct backoff + { + //@cond + typedef BackOff back_off; + + template + struct make_publication_record { + struct type: public PublicationRecord + { + back_off bkoff; + }; + }; + + template + void prepare( PublicationRecord& rec ) + { + rec.bkoff.reset(); + } + + template + bool wait( FCKernel& /*fc*/, PublicationRecord& rec ) + { + rec.bkoff(); + return false; + } + + template + void notify( FCKernel& /*fc*/, PublicationRecord& /*rec*/ ) + {} + + template + void wakeup( FCKernel& ) + {} + //@endcond + }; + + /// Wait strategy based on the single mutex and the condition variable + /** + The strategy shares the mutex and conditional variable for all thread. + + Template parameter \p Milliseconds specifies waiting duration; + the minimal value is 1. + */ + template + class single_mutex_single_condvar + { + //@cond + std::mutex m_mutex; + std::condition_variable m_condvar; + + typedef std::unique_lock< std::mutex > unique_lock; + + public: + enum { + c_nWaitMilliseconds = Milliseconds < 1 ? 
1 : Milliseconds + }; + + template + struct make_publication_record { + typedef PublicationRecord type; + }; + + template + void prepare( PublicationRecord& /*rec*/ ) + {} + + template + bool wait( FCKernel& fc, PublicationRecord& rec ) + { + if ( fc.get_operation( rec ) >= req_Operation ) { + unique_lock lock( m_mutex ); + if ( fc.get_operation( rec ) >= req_Operation ) + return m_condvar.wait_for( lock, std::chrono::milliseconds( c_nWaitMilliseconds )) == std::cv_status::no_timeout; + } + return false; + } + + template + void notify( FCKernel& fc, PublicationRecord& rec ) + { + m_condvar.notify_all(); + } + + template + void wakeup( FCKernel& /*fc*/ ) + { + m_condvar.notify_all(); + } + //@endcond + }; + + /// Wait strategy based on the single mutex and thread-local condition variables + /** + The strategy shares the mutex, but each thread has its own conditional variable + + Template parameter \p Milliseconds specifies waiting duration; + the minimal value is 1. + */ + template + class single_mutex_multi_condvar + { + //@cond + std::mutex m_mutex; + + typedef std::unique_lock< std::mutex > unique_lock; + + public: + enum { + c_nWaitMilliseconds = Milliseconds < 1 ? 1 : Milliseconds + }; + + template + struct make_publication_record { + struct type: public PublicationRecord + { + std::condition_variable m_condvar; + }; + }; + + template + void prepare( PublicationRecord& /*rec*/ ) + {} + + template + bool wait( FCKernel& fc, PublicationRecord& rec ) + { + if ( fc.get_operation( rec ) >= req_Operation ) { + unique_lock lock( m_mutex ); + if ( fc.get_operation( rec ) >= req_Operation ) + return rec.m_condvar.wait_for( lock, std::chrono::milliseconds( c_nWaitMilliseconds )) == std::cv_status::no_timeout; + } + return false; + } + + template + void notify( FCKernel& fc, PublicationRecord& rec ) + { + rec.m_condvar.notify_one(); + } + + template + void wakeup( FCKernel& fc ) + { + fc.wakeup_any(); + } + //@endcond + }; + + /// Wait strategy where each thread has a mutex and a condition variable + /** + Template parameter \p Milliseconds specifies waiting duration; + the minimal value is 1. + */ + template + class multi_mutex_multi_condvar + { + //@cond + typedef std::unique_lock< std::mutex > unique_lock; + + public: + enum { + c_nWaitMilliseconds = Milliseconds < 1 ? 
1 : Milliseconds + }; + + template + struct make_publication_record { + struct type: public PublicationRecord + { + std::mutex m_mutex; + std::condition_variable m_condvar; + }; + }; + + template + void prepare( PublicationRecord& /*rec*/ ) + {} + + template + bool wait( FCKernel& fc, PublicationRecord& rec ) + { + if ( fc.get_operation( rec ) >= req_Operation ) { + unique_lock lock( rec.m_mutex ); + if ( fc.get_operation( rec ) >= req_Operation ) + return rec.m_condvar.wait_for( lock, std::chrono::milliseconds( c_nWaitMilliseconds )) == std::cv_status::no_timeout; + } + return false; + } + + template + void notify( FCKernel& /*fc*/, PublicationRecord& rec ) + { + rec.m_condvar.notify_one(); + } + + template + void wakeup( FCKernel& fc ) + { + fc.wakeup_any(); + } + //@endcond + }; + + } // namespace wait_strategy +}}} // namespace cds::algo::flat_combining + +#endif //CDSLIB_ALGO_FLAT_COMBINING_WAIT_STRATEGY_H diff --git a/cds/algo/wait_strategy.h b/cds/algo/wait_strategy.h deleted file mode 100644 index 422e12e8..00000000 --- a/cds/algo/wait_strategy.h +++ /dev/null @@ -1,267 +0,0 @@ -/* - This file is a part of libcds - Concurrent Data Structures library - - (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016 - - Source code repo: http://github.com/khizmax/libcds/ - Download: http://sourceforge.net/projects/libcds/files/ - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE - FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, - OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ - -#ifndef CDSLIB_ALGO_FLAT_COMBINING_WAIT_STRATEGY_H -#define CDSLIB_ALGO_FLAT_COMBINING_WAIT_STRATEGY_H - -#include - -#include -#include - -namespace cds { namespace algo { namespace flat_combining { - /// Waiting strategy for flat combining - - // Special values of publication_record::nRequest - enum request_value - { - req_EmptyRecord, ///< Publication record is empty - req_Response, ///< Operation is done - - req_Operation ///< First operation id for derived classes - }; - - /// publication_record state - enum record_state { - inactive, ///< Record is inactive - active, ///< Record is active - removed ///< Record should be removed - }; - - /// do-nothing strategy - template - struct BareWaitStartegy - { - struct ExtendedPublicationRecord: public UserPublicationRecord - { - }; - - void wait(ExtendedPublicationRecord * pRec){} - void notify(ExtendedPublicationRecord* pRec){ - pRec->nRequest.store( req_Response, Traits::memory_model::memory_order_release); - } - }; - - /// Wait/notify strategy based on thread-local mutex and thread-local condition variable - template - struct WaitStartegyMultMutexMultCondVar - { - struct ExtendedPublicationRecord: public UserPublicationRecord - { - boost::mutex _waitMutex; - boost::condition_variable _condVar; - }; - - void wait(ExtendedPublicationRecord * pRec){ - boost::unique_lock lock(pRec->_waitMutex); - if (pRec->nRequest.load( Traits::memory_model::memory_order_acquire ) >= req_Operation) - pRec->_condVar.wait(lock); - } - - void notify(ExtendedPublicationRecord* pRec){ - boost::unique_lock lock(pRec->_waitMutex); - pRec->nRequest.store( req_Response, Traits::memory_model::memory_order_release); - pRec->_condVar.notify_one(); - } - }; - - /// Back-off strategy - /** - * When N threads compete for the critical resource that can be accessed with the help of CAS-operations, - * only one of them gets an access. Other N–1 threads interrupt each other and consume process time in vain. - */ - template - struct WaitBakkOffStrategy - { - struct ExtendedPublicationRecord: public UserPublicationRecord - { - }; - void wait(ExtendedPublicationRecord * pRec){ - cds::backoff::delay_of<2> back_off; - back_off(); - } - void notify(ExtendedPublicationRecord* pRec){ - pRec->nRequest.store( req_Response, Traits::memory_model::memory_order_release); - } - }; - - /// Wait/notify strategy based on the global mutex and the condition variable - /** - * The strategy is based on the usage of synchronization primitives of - * the FC core which are shared by all threads. 
- */ - template - struct WaitOneMutexOneCondVarStrategy - { - boost::mutex _globalMutex; - boost::condition_variable _globalCondVar; - - struct ExtendedPublicationRecord: public UserPublicationRecord - { - }; - void wait(ExtendedPublicationRecord * pRec){ - boost::unique_lock lock(_globalMutex); - if (pRec->nRequest.load( Traits::memory_model::memory_order_acquire ) >= req_Operation) - //_globalCondVar.timed_wait(lock, static_cast(1)); - _globalCondVar.wait(lock); - } - - void notify(ExtendedPublicationRecord* pRec){ - boost::unique_lock lock(_globalMutex); - pRec->nRequest.store( req_Response, Traits::memory_model::memory_order_release); - _globalCondVar.notify_all(); - } - }; - - /// Wait/notify strategy based on the global mutex and the thread-local condition variable - /** - * It uses one extra mutex aggregated in the FC core and a condition variable for every - * thread aggregated in thread's publication record in the publication list - */ - template - struct WaitStratygyBaseOnSingleMutexMutlLocalCondVars - { - boost::mutex _globalMutex; - - struct ExtendedPublicationRecord : public UserPublicationRecord - { - boost::condition_variable _globalCondVar; - }; - void wait(ExtendedPublicationRecord * pRec){ - boost::unique_lock lock(_globalMutex); - if (pRec->nRequest.load(Traits::memory_model::memory_order_acquire) >= req_Operation) - //_globalCondVar.timed_wait(lock, static_cast(1)); - pRec->_globalCondVar.wait(lock); - } - - void notify(ExtendedPublicationRecord* pRec){ - boost::unique_lock lock(_globalMutex); - pRec->nRequest.store(req_Response, Traits::memory_model::memory_order_release); - pRec->_globalCondVar.notify_one(); - } - }; - - //=================================================================== - template - struct TimedWaitMultMutexMultCondVar - { - struct ExtendedPublicationRecord: public UserPublicationRecord - { - boost::mutex _waitMutex; - boost::condition_variable _condVar; - }; - - void wait(ExtendedPublicationRecord * pRec){ - boost::unique_lock lock(pRec->_waitMutex); - if (pRec->nRequest.load( Traits::memory_model::memory_order_acquire ) >= req_Operation) - pRec->_condVar.timed_wait(lock, boost::posix_time::millisec(2)); - } - - void notify(ExtendedPublicationRecord* pRec){ - pRec->nRequest.store(req_Response, Traits::memory_model::memory_order_release); - pRec->_condVar.notify_one(); - } - }; - //=================================================================== - template - struct TimedWaitGlobalMutexAndCondVar - { - boost::mutex _globalMutex; - boost::condition_variable _globalCondVar; - - struct ExtendedPublicationRecord: public UserPublicationRecord - { - }; - void wait(ExtendedPublicationRecord * pRec){ - boost::unique_lock lock(_globalMutex); - if (pRec->nRequest.load( Traits::memory_model::memory_order_acquire ) >= req_Operation) - _globalCondVar.timed_wait(lock, boost::posix_time::millisec(2)); - } - - void notify(ExtendedPublicationRecord* pRec){ - pRec->nRequest.store( req_Response, Traits::memory_model::memory_order_release); - _globalCondVar.notify_all(); - } - }; - //=================================================================== - template - struct Int2Type{ - enum {value = v}; - }; - - ///Adaptive strategy - /** - * It works like “back-off”-strategy with “light” elements with small size - * and like Wait/notify strategy based on thread-local mutex and thread-local condition variable - * with “heavy” elements. 
- */ - template - struct AutoWaitStrategy - { - struct ExtendedPublicationRecord: public UserPublicationRecord - { - boost::mutex _waitMutex; - boost::condition_variable _condVar; - }; - - void wait(ExtendedPublicationRecord * pRec){ - doWait(pRec, Int2Type()); - } - - void notify(ExtendedPublicationRecord* pRec){ - doNotify(pRec, Int2Type()); - } - - private: - //The container consists a small data - void doWait(ExtendedPublicationRecord * pRec, Int2Type){ - cds::backoff::delay_of<2> back_off; - back_off(); - } - - void doNotify(ExtendedPublicationRecord * pRec, Int2Type){ - pRec->nRequest.store( req_Response, Traits::memory_model::memory_order_release); - } - - //The container consists a big data - void doWait(ExtendedPublicationRecord * pRec, Int2Type){ - boost::unique_lock lock(pRec->_waitMutex); - if (pRec->nRequest.load( Traits::memory_model::memory_order_acquire ) >= req_Operation) - pRec->_condVar.timed_wait(lock, boost::posix_time::millisec(2)); - } - - void doNotify(ExtendedPublicationRecord * pRec, Int2Type){ - pRec->nRequest.store(req_Response, Traits::memory_model::memory_order_release); - pRec->_condVar.notify_one(); - } - };//class AutoWaitStrategy -}}}//end namespace cds::algo::flat_combining - -#endif //CDSLIB_ALGO_FLAT_COMBINING_WAIT_STRATEGY_H diff --git a/cds/container/fcdeque.h b/cds/container/fcdeque.h index 7dccc157..fc28a24e 100644 --- a/cds/container/fcdeque.h +++ b/cds/container/fcdeque.h @@ -94,13 +94,8 @@ namespace cds { namespace container { /// Metafunction converting option list to traits /** \p Options are: - - \p opt::lock_type - mutex type, default is \p cds::sync::spin - - \p opt::back_off - back-off strategy, defalt is \p cds::backoff::delay_of<2> - - \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR + - any \p cds::algo::flat_combining::make_traits options - \p opt::stat - internal statistics, possible type: \ref stat, \ref empty_stat (the default) - - \p opt::memory_model - C++ memory ordering model. - List of all available memory ordering see opt::memory_model. - Default if cds::opt::v:relaxed_ordering - \p opt::enable_elimination - enable/disable operation \ref cds_elimination_description "elimination" By default, the elimination is disabled. For queue, the elimination is possible if the queue is empty. 
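Since the per-container option lists above now defer to the common cds::algo::flat_combining::make_traits options, a wait strategy is selected like any other trait. The sketch below shows one way a client might plug the new single_mutex_multi_condvar strategy into FCDeque; the container, option and strategy names are the ones introduced by this patch, while the element type, the pushed values and the main() wrapper are illustrative only (library setup via cds::Initialize()/cds::Terminate() is omitted for brevity).

#include <deque>
#include <cds/container/fcdeque.h>
#include <cds/algo/flat_combining/wait_strategy.h>

int main()
{
    // Traits: threads waiting for the combiner block on a per-thread
    // condition variable guarded by one shared mutex instead of spinning;
    // internal statistics are enabled as well.
    typedef cds::container::FCDeque< int, std::deque<int>,
        cds::container::fcdeque::make_traits<
            cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_multi_condvar<>>
            , cds::opt::stat< cds::container::fcdeque::stat<>>
        >::type
    > deque_type;

    deque_type dq;
    dq.push_back( 1 );
    dq.push_front( 2 );

    int v;
    dq.pop_back( v );              // v == 1
    return dq.empty() ? 1 : 0;     // one element (2) is still in the deque
}

Swapping single_mutex_multi_condvar<> for wait_strategy::backoff<> or wait_strategy::empty keeps the container interface unchanged and only alters how a thread waits for the combiner to execute its published request.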
@@ -161,8 +156,7 @@ namespace cds { namespace container { op_push_back_move, ///< Push back (move semantics) op_pop_front, ///< Pop front op_pop_back, ///< Pop back - op_clear, ///< Clear - op_empty ///< Empty + op_clear ///< Clear }; /// Flat combining publication list record @@ -181,8 +175,8 @@ namespace cds { namespace container { protected: //@cond - fc_kernel m_FlatCombining; - deque_type m_Deque; + mutable fc_kernel m_FlatCombining; + deque_type m_Deque; //@endcond public: @@ -206,7 +200,7 @@ namespace cds { namespace container { value_type const& val ///< Value to be copied to inserted element ) { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); pRec->pValPush = &val; if ( c_bEliminationEnabled ) @@ -228,7 +222,7 @@ namespace cds { namespace container { value_type&& val ///< Value to be moved to inserted element ) { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); pRec->pValPush = &val; if ( c_bEliminationEnabled ) @@ -250,7 +244,7 @@ namespace cds { namespace container { value_type const& val ///< Value to be copied to inserted element ) { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); pRec->pValPush = &val; if ( c_bEliminationEnabled ) @@ -272,7 +266,7 @@ namespace cds { namespace container { value_type&& val ///< Value to be moved to inserted element ) { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); pRec->pValPush = &val; if ( c_bEliminationEnabled ) @@ -295,7 +289,7 @@ namespace cds { namespace container { value_type& val ///< Target to be received the copy of removed element ) { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); pRec->pValPop = &val; if ( c_bEliminationEnabled ) @@ -318,7 +312,7 @@ namespace cds { namespace container { value_type& val ///< Target to be received the copy of removed element ) { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); pRec->pValPop = &val; if ( c_bEliminationEnabled ) @@ -335,7 +329,7 @@ namespace cds { namespace container { /// Clears the deque void clear() { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_clear, pRec, *this ); @@ -361,18 +355,12 @@ namespace cds { namespace container { /** If the combining is in process the function waits while combining done. 
*/ - bool empty() + bool empty() const { - fc_record * pRec = m_FlatCombining.acquire_record(); - - if ( c_bEliminationEnabled ) - m_FlatCombining.batch_combine( op_empty, pRec, *this ); - else - m_FlatCombining.combine( op_empty, pRec, *this ); - - assert( pRec->is_done() ); - m_FlatCombining.release_record( pRec ); - return pRec->bEmpty; + bool bRet = false; + auto const& deq = m_Deque; + m_FlatCombining.invoke_exclusive( [&deq, &bRet]() { bRet = deq.empty(); } ); + return bRet; } /// Internal statistics @@ -433,9 +421,6 @@ namespace cds { namespace container { while ( !m_Deque.empty() ) m_Deque.pop_front(); break; - case op_empty: - pRec->bEmpty = m_Deque.empty(); - break; default: assert(false); break; diff --git a/cds/container/fcpriority_queue.h b/cds/container/fcpriority_queue.h index 779849b5..c06a9ce9 100644 --- a/cds/container/fcpriority_queue.h +++ b/cds/container/fcpriority_queue.h @@ -80,13 +80,8 @@ namespace cds { namespace container { /// Metafunction converting option list to traits /** \p Options are: - - \p opt::lock_type - mutex type, default is \p cds::sync::spin - - \p opt::back_off - back-off strategy, defalt is \p cds::backoff::delay_of<2> - - \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR + - any \p cds::algo::flat_combining::make_traits options - \p opt::stat - internal statistics, possible type: \p fcpqueue::stat, \p fcpqueue::empty_stat (the default) - - \p opt::memory_model - C++ memory ordering model. - List of all available memory ordering see \p opt::memory_model. - Default is \p cds::opt::v:relaxed_ordering */ template struct make_traits { @@ -139,8 +134,7 @@ namespace cds { namespace container { op_push = cds::algo::flat_combining::req_Operation, op_push_move, op_pop, - op_clear, - op_empty + op_clear }; // Flat combining publication list record @@ -159,8 +153,8 @@ namespace cds { namespace container { protected: //@cond - fc_kernel m_FlatCombining; - priority_queue_type m_PQueue; + mutable fc_kernel m_FlatCombining; + priority_queue_type m_PQueue; //@endcond public: @@ -184,7 +178,7 @@ namespace cds { namespace container { value_type const& val ///< Value to be copied to inserted element ) { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); pRec->pValPush = &val; m_FlatCombining.combine( op_push, pRec, *this ); @@ -203,7 +197,7 @@ namespace cds { namespace container { value_type&& val ///< Value to be moved to inserted element ) { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); pRec->pValPush = &val; m_FlatCombining.combine( op_push_move, pRec, *this ); @@ -223,7 +217,7 @@ namespace cds { namespace container { value_type& val ///< Target to be received the copy of top element ) { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); pRec->pValPop = &val; m_FlatCombining.combine( op_pop, pRec, *this ); @@ -237,7 +231,7 @@ namespace cds { namespace container { /// Clears the priority queue void clear() { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); m_FlatCombining.combine( op_clear, pRec, *this ); @@ -262,12 +256,10 @@ namespace cds { namespace container { */ bool empty() { - fc_record * pRec = m_FlatCombining.acquire_record(); - - m_FlatCombining.combine( op_empty, pRec, *this ); - assert( pRec->is_done() ); - m_FlatCombining.release_record( pRec ); - return pRec->bEmpty; + bool bRet = false; + auto const& pq = 
m_PQueue; + m_FlatCombining.invoke_exclusive( [&pq, &bRet]() { bRet = pq.empty(); } ); + return bRet; } /// Internal statistics @@ -311,9 +303,6 @@ namespace cds { namespace container { while ( !m_PQueue.empty() ) m_PQueue.pop(); break; - case op_empty: - pRec->bEmpty = m_PQueue.empty(); - break; default: assert(false); break; diff --git a/cds/container/fcqueue.h b/cds/container/fcqueue.h index dff95ede..1f7c7346 100644 --- a/cds/container/fcqueue.h +++ b/cds/container/fcqueue.h @@ -84,13 +84,8 @@ namespace cds { namespace container { /// Metafunction converting option list to traits /** \p Options are: - - \p opt::lock_type - mutex type, default is \p cds::sync::spin - - \p opt::back_off - back-off strategy, defalt is \p cds::backoff::delay_of<2> - - \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR + - any \p cds::algo::flat_combining::make_traits options - \p opt::stat - internal statistics, possible type: \p fcqueue::stat, \p fcqueue::empty_stat (the default) - - \p opt::memory_model - C++ memory ordering model. - List of all available memory ordering see \p opt::memory_model. - Default is \p cds::opt::v:relaxed_ordering - \p opt::enable_elimination - enable/disable operation \ref cds_elimination_description "elimination" By default, the elimination is disabled. For queue, the elimination is possible if the queue is empty. @@ -147,8 +142,7 @@ namespace cds { namespace container { op_enq = cds::algo::flat_combining::req_Operation, ///< Enqueue op_enq_move, ///< Enqueue (move semantics) op_deq, ///< Dequeue - op_clear, ///< Clear - op_empty ///< Empty + op_clear ///< Clear }; /// Flat combining publication list record @@ -167,8 +161,8 @@ namespace cds { namespace container { protected: //@cond - fc_kernel m_FlatCombining; - queue_type m_Queue; + mutable fc_kernel m_FlatCombining; + queue_type m_Queue; //@endcond public: @@ -192,7 +186,7 @@ namespace cds { namespace container { */ bool enqueue( value_type const& val ) { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); pRec->pValEnq = &val; if ( c_bEliminationEnabled ) @@ -218,7 +212,7 @@ namespace cds { namespace container { */ bool enqueue( value_type&& val ) { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); pRec->pValEnq = &val; if ( c_bEliminationEnabled ) @@ -245,7 +239,7 @@ namespace cds { namespace container { */ bool dequeue( value_type& val ) { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); pRec->pValDeq = &val; if ( c_bEliminationEnabled ) @@ -269,7 +263,7 @@ namespace cds { namespace container { /// Clears the queue void clear() { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_clear, pRec, *this ); @@ -295,18 +289,12 @@ namespace cds { namespace container { /** If the combining is in process the function waits while combining done. 
*/ - bool empty() + bool empty() const { - fc_record * pRec = m_FlatCombining.acquire_record(); - - if ( c_bEliminationEnabled ) - m_FlatCombining.batch_combine( op_empty, pRec, *this ); - else - m_FlatCombining.combine( op_empty, pRec, *this ); - - assert( pRec->is_done() ); - m_FlatCombining.release_record( pRec ); - return pRec->bEmpty; + bool bRet = false; + auto const& queue = m_Queue; + m_FlatCombining.invoke_exclusive( [&queue, &bRet]() { bRet = queue.empty(); } ); + return bRet; } /// Internal statistics @@ -351,9 +339,6 @@ namespace cds { namespace container { while ( !m_Queue.empty() ) m_Queue.pop(); break; - case op_empty: - pRec->bEmpty = m_Queue.empty(); - break; default: assert(false); break; diff --git a/cds/container/fcstack.h b/cds/container/fcstack.h index eb7c8de5..ba95875b 100644 --- a/cds/container/fcstack.h +++ b/cds/container/fcstack.h @@ -84,13 +84,8 @@ namespace cds { namespace container { /// Metafunction converting option list to traits /** \p Options are: - - \p opt::lock_type - mutex type, default is \p cds::sync::spin - - \p opt::back_off - back-off strategy, defalt is \p cds::backoff::Default - - \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR + - any \p cds::algo::flat_combining::make_traits options - \p opt::stat - internal statistics, possible type: \p fcstack::stat, \p fcstack::empty_stat (the default) - - \p opt::memory_model - C++ memory ordering model. - List of all available memory ordering see \p opt::memory_model. - Default is \p cds::opt::v:relaxed_ordering - \p opt::enable_elimination - enable/disable operation \ref cds_elimination_description "elimination" By default, the elimination is disabled. */ @@ -165,8 +160,8 @@ namespace cds { namespace container { protected: //@cond - fc_kernel m_FlatCombining; - stack_type m_Stack; + mutable fc_kernel m_FlatCombining; + stack_type m_Stack; //@endcond public: @@ -188,7 +183,7 @@ namespace cds { namespace container { */ bool push( value_type const& val ) { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); pRec->pValPush = &val; if ( c_bEliminationEnabled ) @@ -208,7 +203,7 @@ namespace cds { namespace container { */ bool push( value_type&& val ) { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); pRec->pValPush = &val; if ( c_bEliminationEnabled ) @@ -229,7 +224,7 @@ namespace cds { namespace container { */ bool pop( value_type& val ) { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); pRec->pValPop = &val; if ( c_bEliminationEnabled ) @@ -247,7 +242,7 @@ namespace cds { namespace container { /// Clears the stack void clear() { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_clear, pRec, *this ); @@ -275,7 +270,7 @@ namespace cds { namespace container { */ bool empty() { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( op_empty, pRec, *this ); diff --git a/cds/intrusive/fcqueue.h b/cds/intrusive/fcqueue.h index dae81665..412bf57f 100644 --- a/cds/intrusive/fcqueue.h +++ b/cds/intrusive/fcqueue.h @@ -81,15 +81,10 @@ namespace cds { namespace intrusive { /// Metafunction converting option list to traits /** \p Options are: - - \p opt::lock_type - mutex type, default is \p cds::sync::spin - - \p 
opt::back_off - back-off strategy, defalt is \p cds::backoff::Default + - any \p cds::algo::flat_combining::make_traits options - \p opt::disposer - the functor used to dispose removed items. Default is \p opt::intrusive::v::empty_disposer. This option is used only in \p FCQueue::clear() function. - - \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR - \p opt::stat - internal statistics, possible type: \p fcqueue::stat, \p fcqueue::empty_stat (the default) - - \p opt::memory_model - C++ memory ordering model. - List of all available memory ordering see \p opt::memory_model. - Default is \p cds::opt::v:relaxed_ordering - \p opt::enable_elimination - enable/disable operation \ref cds_elimination_description "elimination" By default, the elimination is disabled (\p false) */ @@ -161,8 +156,8 @@ namespace cds { namespace intrusive { protected: //@cond - fc_kernel m_FlatCombining; - container_type m_Queue; + mutable fc_kernel m_FlatCombining; + container_type m_Queue; //@endcond public: @@ -184,7 +179,7 @@ namespace cds { namespace intrusive { */ bool enqueue( value_type& val ) { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); pRec->pVal = &val; if ( c_bEliminationEnabled ) @@ -210,7 +205,7 @@ namespace cds { namespace intrusive { */ value_type * dequeue() { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); pRec->pVal = nullptr; if ( c_bEliminationEnabled ) @@ -238,7 +233,7 @@ namespace cds { namespace intrusive { */ void clear( bool bDispose = false ) { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( bDispose ? op_clear_and_dispose : op_clear, pRec, *this ); @@ -266,8 +261,10 @@ namespace cds { namespace intrusive { */ bool empty() const { - m_FlatCombining.wait_while_combining(); - return m_Queue.empty(); + bool bRet = false; + auto const& queue = m_Queue; + m_FlatCombining.invoke_exclusive([&queue, &bRet]() { bRet = queue.empty(); }); + return bRet; } /// Internal statistics diff --git a/cds/intrusive/fcstack.h b/cds/intrusive/fcstack.h index cfb1626e..553ac903 100644 --- a/cds/intrusive/fcstack.h +++ b/cds/intrusive/fcstack.h @@ -81,15 +81,10 @@ namespace cds { namespace intrusive { /// Metafunction converting option list to traits /** \p Options are: - - \p opt::lock_type - mutex type, default is \p cds::sync::spin - - \p opt::back_off - back-off strategy, defalt is \p cds::backoff::Default + - any \p cds::algo::flat_combining::make_traits options - \p opt::disposer - the functor used for dispose removed items. Default is \p opt::intrusive::v::empty_disposer. This option is used only in \p FCStack::clear() function. - - \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR - \p opt::stat - internal statistics, possible type: \p fcstack::stat, \p fcstack::empty_stat (the default) - - \p opt::memory_model - C++ memory ordering model. - List of all available memory ordering see \p opt::memory_model. - Default is \p cds::opt::v:relaxed_ordering - \p opt::enable_elimination - enable/disable operation \ref cds_elimination_description "elimination" By default, the elimination is disabled. 
*/ @@ -162,8 +157,8 @@ namespace cds { namespace intrusive { protected: //@cond - fc_kernel m_FlatCombining; - container_type m_Stack; + mutable fc_kernel m_FlatCombining; + container_type m_Stack; //@endcond public: @@ -185,7 +180,7 @@ namespace cds { namespace intrusive { */ bool push( value_type& val ) { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); pRec->pVal = &val; if ( c_bEliminationEnabled ) @@ -202,7 +197,7 @@ namespace cds { namespace intrusive { /// Removes the element on top of the stack value_type * pop() { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); pRec->pVal = nullptr; if ( c_bEliminationEnabled ) @@ -224,7 +219,7 @@ namespace cds { namespace intrusive { */ void clear( bool bDispose = false ) { - fc_record * pRec = m_FlatCombining.acquire_record(); + auto pRec = m_FlatCombining.acquire_record(); if ( c_bEliminationEnabled ) m_FlatCombining.batch_combine( bDispose ? op_clear_and_dispose : op_clear, pRec, *this ); @@ -252,8 +247,10 @@ namespace cds { namespace intrusive { */ bool empty() const { - m_FlatCombining.wait_while_combining(); - return m_Stack.empty(); + bool bRet = false; + auto const& stack = m_Stack; + m_FlatCombining.invoke_exclusive( [&stack, &bRet]() { bRet = stack.empty(); } ); + return bRet; } /// Internal statistics @@ -271,7 +268,7 @@ namespace cds { namespace intrusive { object if the current thread becomes a combiner. Invocation of the function means that the stack should perform an action recorded in \p pRec. */ - void fc_apply( fc_record * pRec ) + void fc_apply( fc_record* pRec ) { assert( pRec ); diff --git a/projects/Win/vc14/cds.sln b/projects/Win/vc14/cds.sln index c15b6f4a..4864a992 100644 --- a/projects/Win/vc14/cds.sln +++ b/projects/Win/vc14/cds.sln @@ -23,6 +23,7 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "cds_test", "cds_test", "{3A ..\..\..\test\include\cds_test\stat_cuckoo_out.h = ..\..\..\test\include\cds_test\stat_cuckoo_out.h ..\..\..\test\include\cds_test\stat_ellenbintree_out.h = ..\..\..\test\include\cds_test\stat_ellenbintree_out.h ..\..\..\test\include\cds_test\stat_feldman_hashset_out.h = ..\..\..\test\include\cds_test\stat_feldman_hashset_out.h + ..\..\..\test\include\cds_test\stat_flat_combining_out.h = ..\..\..\test\include\cds_test\stat_flat_combining_out.h ..\..\..\test\include\cds_test\stat_skiplist_out.h = ..\..\..\test\include\cds_test\stat_skiplist_out.h ..\..\..\test\include\cds_test\stat_splitlist_out.h = ..\..\..\test\include\cds_test\stat_splitlist_out.h ..\..\..\test\include\cds_test\stat_sync_monitor_out.h = ..\..\..\test\include\cds_test\stat_sync_monitor_out.h diff --git a/projects/Win/vc14/cds.vcxproj b/projects/Win/vc14/cds.vcxproj index 2a78696d..9694c1e0 100644 --- a/projects/Win/vc14/cds.vcxproj +++ b/projects/Win/vc14/cds.vcxproj @@ -383,6 +383,9 @@ + + + diff --git a/projects/Win/vc14/cds.vcxproj.filters b/projects/Win/vc14/cds.vcxproj.filters index 3e4963d9..b9a9ef05 100644 --- a/projects/Win/vc14/cds.vcxproj.filters +++ b/projects/Win/vc14/cds.vcxproj.filters @@ -154,6 +154,9 @@ {03d212fb-73f8-4f0e-9aff-f22b0783fee8} + + {fe703227-44ad-4ad6-bae4-b6c9f5c65355} + @@ -1232,5 +1235,14 @@ Header Files\cds\intrusive + + Header Files\cds\algo\flat_combining + + + Header Files\cds\algo\flat_combining + + + Header Files\cds\algo\flat_combining + \ No newline at end of file diff --git a/projects/Win/vc14/stress-queue.vcxproj b/projects/Win/vc14/stress-queue.vcxproj index 5dc03005..e2184cd8 
100644 --- a/projects/Win/vc14/stress-queue.vcxproj +++ b/projects/Win/vc14/stress-queue.vcxproj @@ -30,8 +30,22 @@ - - + + /bigobj %(AdditionalOptions) + /bigobj %(AdditionalOptions) + /bigobj %(AdditionalOptions) + /bigobj %(AdditionalOptions) + /bigobj %(AdditionalOptions) + /bigobj %(AdditionalOptions) + + + /bigobj %(AdditionalOptions) + /bigobj %(AdditionalOptions) + /bigobj %(AdditionalOptions) + /bigobj %(AdditionalOptions) + /bigobj %(AdditionalOptions) + /bigobj %(AdditionalOptions) + /bigobj %(AdditionalOptions) /bigobj %(AdditionalOptions) @@ -40,7 +54,14 @@ /bigobj %(AdditionalOptions) /bigobj %(AdditionalOptions) - + + /bigobj %(AdditionalOptions) + /bigobj %(AdditionalOptions) + /bigobj %(AdditionalOptions) + /bigobj %(AdditionalOptions) + /bigobj %(AdditionalOptions) + /bigobj %(AdditionalOptions) + diff --git a/test/include/cds_test/stat_flat_combining_out.h b/test/include/cds_test/stat_flat_combining_out.h new file mode 100644 index 00000000..d18b0a97 --- /dev/null +++ b/test/include/cds_test/stat_flat_combining_out.h @@ -0,0 +1,64 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSTEST_STAT_FLAT_COMBINING_OUT_H +#define CDSTEST_STAT_FLAT_COMBINING_OUT_H + +#include + +namespace cds_test { + + static inline property_stream& operator <<( property_stream& o, cds::algo::flat_combining::empty_stat const& /*s*/ ) + { + return o; + } + + static inline property_stream& operator <<( property_stream& o, cds::algo::flat_combining::stat<> const& s ) + { + return o + << CDSSTRESS_STAT_OUT_( "combining_factor", s.combining_factor() ) + << CDSSTRESS_STAT_OUT( s, m_nOperationCount ) + << CDSSTRESS_STAT_OUT( s, m_nCombiningCount ) + << CDSSTRESS_STAT_OUT( s, m_nCompactPublicationList ) + << CDSSTRESS_STAT_OUT( s, m_nDeactivatePubRecord ) + << CDSSTRESS_STAT_OUT( s, m_nActivatePubRecord ) + << CDSSTRESS_STAT_OUT( s, m_nPubRecordCreated ) + << CDSSTRESS_STAT_OUT( s, m_nPubRecordDeteted ) + << CDSSTRESS_STAT_OUT( s, m_nPassiveWaitCall ) + << CDSSTRESS_STAT_OUT( s, m_nPassiveWaitIteration ) + << CDSSTRESS_STAT_OUT( s, m_nPassiveWaitWakeup ) + << CDSSTRESS_STAT_OUT( s, m_nInvokeExclusive ) + << CDSSTRESS_STAT_OUT( s, m_nWakeupByNotifying ) + << CDSSTRESS_STAT_OUT( s, m_nPassiveToCombiner ); + } + +} // namespace cds_test + +#endif // #ifndef CDSTEST_STAT_SPLITLIST_OUT_H diff --git a/test/stress/pqueue/pqueue_type.h b/test/stress/pqueue/pqueue_type.h index b776a601..1df733d1 100644 --- a/test/stress/pqueue/pqueue_type.h +++ b/test/stress/pqueue/pqueue_type.h @@ -61,6 +61,7 @@ #include #include #include +#include namespace pqueue { namespace cc = cds::container; @@ -599,16 +600,6 @@ namespace pqueue { typedef details::StdPQueue< Value, std::deque, std::mutex > StdPQueue_deque_mutex; }; - - //template - //static inline void check_statistics( Stat const& /*s*/ ) - //{} - - //static inline void check_statistics( cds::container::ellen_bintree::stat<> const& s ) - //{ - // CPPUNIT_CHECK_CURRENT( s.m_nInternalNodeCreated.get() == s.m_nInternalNodeDeleted.get() ); - // CPPUNIT_CHECK_CURRENT( s.m_nUpdateDescCreated.get() == s.m_nUpdateDescDeleted.get() ); - //} } // namespace pqueue @@ -633,16 +624,7 @@ namespace cds_test { << CDSSTRESS_STAT_OUT( s, m_nPushMove ) << CDSSTRESS_STAT_OUT( s, m_nPop ) << CDSSTRESS_STAT_OUT( s, m_nFailedPop ) - << CDSSTRESS_STAT_OUT_( "combining_factor", s.combining_factor() ) - << CDSSTRESS_STAT_OUT( s, m_nOperationCount ) - << CDSSTRESS_STAT_OUT( s, m_nCombiningCount ) - << CDSSTRESS_STAT_OUT( s, m_nCompactPublicationList ) - << CDSSTRESS_STAT_OUT( s, m_nDeactivatePubRecord ) - << CDSSTRESS_STAT_OUT( s, m_nActivatePubRecord ) - << CDSSTRESS_STAT_OUT( s, m_nPubRecordCreated ) - << CDSSTRESS_STAT_OUT( s, m_nPubRecordDeteted ) - << CDSSTRESS_STAT_OUT( s, m_nAcquirePubRecCount ) - << CDSSTRESS_STAT_OUT( s, m_nReleasePubRecCount ); + << static_cast const&>(s); } static inline property_stream& operator <<( property_stream& o, cds::container::mspriority_queue::empty_stat const& /*s*/ ) diff --git a/test/stress/queue/intrusive_push_pop.cpp b/test/stress/queue/intrusive_push_pop.cpp index 35d705ed..ea229eae 100644 --- a/test/stress/queue/intrusive_push_pop.cpp +++ b/test/stress/queue/intrusive_push_pop.cpp @@ -359,76 +359,6 @@ namespace { propout() << q.statistics(); } - -/* - template - void test( Queue& q ) - { - value_array arrValue( s_nQueueSize ); - { - { - Queue q; - test_with( q, arrValue, 0, 0 ); - } - Queue::gc::force_dispose(); - } - } - - template - void test_boost() - { - value_array arrValue( s_nQueueSize ); - { - Queue q; - test_with(q, arrValue, 0, 0); - } - } - - template - void test_bounded() - { - value_array arrValue( s_nQueueSize ); - 
Queue q; - test_with(q, arrValue, 0, 0); - } - - template - void test_fcqueue() - { - value_array arrValue( s_nQueueSize ); - CPPUNIT_MSG( "Combining pass count: " << s_nFCPassCount << ", compact factor: " << s_nFCCompactFactor ); - Queue q( s_nFCCompactFactor, s_nFCPassCount ); - test_with(q, arrValue, 0, 0); - } - - template - void test_segmented() - { - value_array arrValue( s_nQueueSize ); - for ( size_t nSegmentSize = 4; nSegmentSize <= 256; nSegmentSize *= 4 ) { - CPPUNIT_MSG( "Segment size: " << nSegmentSize ); - { - Queue q( nSegmentSize ); - test_with( q, arrValue, nSegmentSize * 2, nSegmentSize ); - } - Queue::gc::force_dispose(); - } - } - - template - void test_spqueue() - { - value_array arrValue( s_nQueueSize ); - for ( size_t nArraySize = 2; nArraySize <= 64; nArraySize *= 2 ) { - CPPUNIT_MSG( "Array size: " << nArraySize ); - { - Queue q( nArraySize ); - test_with( q, arrValue, 0, 0 ); - } - Queue::gc::force_dispose(); - } - } -*/ }; #define CDSSTRESS_QUEUE_F( QueueType, NodeType ) \ @@ -489,6 +419,12 @@ namespace { CDSSTRESS_QUEUE_F(FCQueue_list_delay2_elimination_stat, boost::intrusive::list_base_hook<> ) CDSSTRESS_QUEUE_F(FCQueue_list_expbackoff_elimination, boost::intrusive::list_base_hook<> ) CDSSTRESS_QUEUE_F(FCQueue_list_expbackoff_elimination_stat, boost::intrusive::list_base_hook<> ) + CDSSTRESS_QUEUE_F(FCQueue_list_wait_ss, boost::intrusive::list_base_hook<> ) + CDSSTRESS_QUEUE_F(FCQueue_list_wait_ss_stat, boost::intrusive::list_base_hook<> ) + CDSSTRESS_QUEUE_F(FCQueue_list_wait_sm, boost::intrusive::list_base_hook<> ) + CDSSTRESS_QUEUE_F(FCQueue_list_wait_sm_stat, boost::intrusive::list_base_hook<> ) + CDSSTRESS_QUEUE_F(FCQueue_list_wait_mm, boost::intrusive::list_base_hook<> ) + CDSSTRESS_QUEUE_F(FCQueue_list_wait_mm_stat, boost::intrusive::list_base_hook<> ) #undef CDSSTRESS_QUEUE_F diff --git a/test/stress/queue/intrusive_queue_type.h b/test/stress/queue/intrusive_queue_type.h index fd380311..c2133431 100644 --- a/test/stress/queue/intrusive_queue_type.h +++ b/test/stress/queue/intrusive_queue_type.h @@ -46,6 +46,7 @@ #include #include +#include #include "print_stat.h" namespace queue { @@ -412,18 +413,18 @@ namespace queue { // FCQueue class traits_FCQueue_delay2: public cds::intrusive::fcqueue::make_traits< - cds::opt::back_off< cds::backoff::delay_of<2> > + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::backoff< cds::backoff::delay_of<2>>> >::type {}; class traits_FCQueue_delay2_elimination: public cds::intrusive::fcqueue::make_traits< - cds::opt::back_off< cds::backoff::delay_of<2> > + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::backoff< cds::backoff::delay_of<2>>> ,cds::opt::enable_elimination< true > >::type {}; class traits_FCQueue_delay2_elimination_stat: public cds::intrusive::fcqueue::make_traits< - cds::opt::back_off< cds::backoff::delay_of<2> > + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::backoff< cds::backoff::delay_of<2>>> ,cds::opt::stat< cds::intrusive::fcqueue::stat<> > ,cds::opt::enable_elimination< true > >::type @@ -442,11 +443,45 @@ namespace queue { >::type {}; + class traits_FCQueue_wait_ss: + public cds::intrusive::fcqueue::make_traits< + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_single_condvar<>> + >::type + {}; + struct traits_FCQueue_wait_ss_stat: traits_FCQueue_wait_ss + { + typedef cds::intrusive::fcqueue::stat<> stat; + }; + class traits_FCQueue_wait_sm: + public cds::intrusive::fcqueue::make_traits< + cds::opt::wait_strategy< 
cds::algo::flat_combining::wait_strategy::single_mutex_multi_condvar<>> + >::type + {}; + struct traits_FCQueue_wait_sm_stat: traits_FCQueue_wait_sm + { + typedef cds::intrusive::fcqueue::stat<> stat; + }; + class traits_FCQueue_wait_mm: + public cds::intrusive::fcqueue::make_traits< + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::multi_mutex_multi_condvar<>> + >::type + {}; + struct traits_FCQueue_wait_mm_stat: traits_FCQueue_wait_mm + { + typedef cds::intrusive::fcqueue::stat<> stat; + }; + typedef cds::intrusive::FCQueue< T, boost::intrusive::list, traits_FCQueue_delay2 > FCQueue_list_delay2; typedef cds::intrusive::FCQueue< T, boost::intrusive::list, traits_FCQueue_delay2_elimination > FCQueue_list_delay2_elimination; typedef cds::intrusive::FCQueue< T, boost::intrusive::list, traits_FCQueue_delay2_elimination_stat > FCQueue_list_delay2_elimination_stat; typedef cds::intrusive::FCQueue< T, boost::intrusive::list, traits_FCQueue_expbackoff_elimination > FCQueue_list_expbackoff_elimination; typedef cds::intrusive::FCQueue< T, boost::intrusive::list, traits_FCQueue_expbackoff_elimination_stat > FCQueue_list_expbackoff_elimination_stat; + typedef cds::intrusive::FCQueue< T, boost::intrusive::list, traits_FCQueue_wait_ss > FCQueue_list_wait_ss; + typedef cds::intrusive::FCQueue< T, boost::intrusive::list, traits_FCQueue_wait_ss_stat > FCQueue_list_wait_ss_stat; + typedef cds::intrusive::FCQueue< T, boost::intrusive::list, traits_FCQueue_wait_sm > FCQueue_list_wait_sm; + typedef cds::intrusive::FCQueue< T, boost::intrusive::list, traits_FCQueue_wait_sm_stat > FCQueue_list_wait_sm_stat; + typedef cds::intrusive::FCQueue< T, boost::intrusive::list, traits_FCQueue_wait_mm > FCQueue_list_wait_mm; + typedef cds::intrusive::FCQueue< T, boost::intrusive::list, traits_FCQueue_wait_mm_stat > FCQueue_list_wait_mm_stat; // SegmentedQueue class traits_SegmentedQueue_spin_stat: @@ -508,16 +543,7 @@ namespace cds_test { << CDSSTRESS_STAT_OUT( s, m_nDequeue ) << CDSSTRESS_STAT_OUT( s, m_nFailedDeq ) << CDSSTRESS_STAT_OUT( s, m_nCollided ) - << CDSSTRESS_STAT_OUT_( "combining_factor", s.combining_factor() ) - << CDSSTRESS_STAT_OUT( s, m_nOperationCount ) - << CDSSTRESS_STAT_OUT( s, m_nCombiningCount ) - << CDSSTRESS_STAT_OUT( s, m_nCompactPublicationList ) - << CDSSTRESS_STAT_OUT( s, m_nDeactivatePubRecord ) - << CDSSTRESS_STAT_OUT( s, m_nActivatePubRecord ) - << CDSSTRESS_STAT_OUT( s, m_nPubRecordCreated ) - << CDSSTRESS_STAT_OUT( s, m_nPubRecordDeteted ) - << CDSSTRESS_STAT_OUT( s, m_nAcquirePubRecCount ) - << CDSSTRESS_STAT_OUT( s, m_nReleasePubRecCount ); + << static_cast const&>(s); } static inline property_stream& operator <<( property_stream& o, cds::intrusive::fcqueue::empty_stat const& /*s*/ ) diff --git a/test/stress/queue/queue_type.h b/test/stress/queue/queue_type.h index 0db165ec..c4178fef 100644 --- a/test/stress/queue/queue_type.h +++ b/test/stress/queue/queue_type.h @@ -52,6 +52,7 @@ #include #include +#include #include "print_stat.h" namespace queue { @@ -417,12 +418,39 @@ namespace queue { typedef cds::container::RWQueue< Value, traits_RWQueue_mutex > RWQueue_mutex; // FCQueue - class traits_FCQueue_elimination: + struct traits_FCQueue_single_mutex_single_condvar: + public cds::container::fcqueue::make_traits< + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_single_condvar<>> + >::type + {}; + struct traits_FCQueue_single_mutex_single_condvar_stat: traits_FCQueue_single_mutex_single_condvar + { + typedef cds::container::fcqueue::stat<> 
stat; + }; + struct traits_FCQueue_single_mutex_multi_condvar: + public cds::container::fcqueue::make_traits< + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_multi_condvar<>> + >::type + {}; + struct traits_FCQueue_single_mutex_multi_condvar_stat: traits_FCQueue_single_mutex_multi_condvar + { + typedef cds::container::fcqueue::stat<> stat; + }; + struct traits_FCQueue_multi_mutex_multi_condvar: + public cds::container::fcqueue::make_traits< + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::multi_mutex_multi_condvar<>> + >::type + {}; + struct traits_FCQueue_multi_mutex_multi_condvar_stat: traits_FCQueue_multi_mutex_multi_condvar + { + typedef cds::container::fcqueue::stat<> stat; + }; + struct traits_FCQueue_elimination: public cds::container::fcqueue::make_traits< cds::opt::enable_elimination< true > >::type {}; - class traits_FCQueue_elimination_stat: + struct traits_FCQueue_elimination_stat: public cds::container::fcqueue::make_traits< cds::opt::enable_elimination< true > ,cds::opt::stat< cds::container::fcqueue::stat<> > @@ -430,10 +458,24 @@ namespace queue { {}; typedef cds::container::FCQueue< Value > FCQueue_deque; + typedef cds::container::FCQueue< Value, std::queue, traits_FCQueue_single_mutex_single_condvar> FCQueue_deque_wait_ss; + typedef cds::container::FCQueue< Value, std::queue, traits_FCQueue_single_mutex_single_condvar_stat> FCQueue_deque_wait_ss_stat; + typedef cds::container::FCQueue< Value, std::queue, traits_FCQueue_single_mutex_multi_condvar> FCQueue_deque_wait_sm; + typedef cds::container::FCQueue< Value, std::queue, traits_FCQueue_single_mutex_multi_condvar_stat> FCQueue_deque_wait_sm_stat; + typedef cds::container::FCQueue< Value, std::queue, traits_FCQueue_multi_mutex_multi_condvar> FCQueue_deque_wait_mm; + typedef cds::container::FCQueue< Value, std::queue, traits_FCQueue_multi_mutex_multi_condvar_stat> FCQueue_deque_wait_mm_stat; + typedef cds::container::FCQueue< Value, std::queue, traits_FCQueue_elimination > FCQueue_deque_elimination; typedef cds::container::FCQueue< Value, std::queue, traits_FCQueue_elimination_stat > FCQueue_deque_elimination_stat; - typedef cds::container::FCQueue< Value, std::queue > > FCQueue_list; + typedef cds::container::FCQueue< Value, std::queue >> FCQueue_list; + typedef cds::container::FCQueue< Value, std::queue>, traits_FCQueue_single_mutex_single_condvar> FCQueue_list_wait_ss; + typedef cds::container::FCQueue< Value, std::queue>, traits_FCQueue_single_mutex_single_condvar_stat> FCQueue_list_wait_ss_stat; + typedef cds::container::FCQueue< Value, std::queue>, traits_FCQueue_single_mutex_multi_condvar> FCQueue_list_wait_sm; + typedef cds::container::FCQueue< Value, std::queue>, traits_FCQueue_single_mutex_multi_condvar_stat> FCQueue_list_wait_sm_stat; + typedef cds::container::FCQueue< Value, std::queue>, traits_FCQueue_multi_mutex_multi_condvar> FCQueue_list_wait_mm; + typedef cds::container::FCQueue< Value, std::queue>, traits_FCQueue_multi_mutex_multi_condvar_stat> FCQueue_list_wait_mm_stat; + typedef cds::container::FCQueue< Value, std::queue >, traits_FCQueue_elimination > FCQueue_list_elimination; typedef cds::container::FCQueue< Value, std::queue >, traits_FCQueue_elimination_stat > FCQueue_list_elimination_stat; @@ -461,9 +503,40 @@ namespace queue { >::type {}; + struct traits_FCDeque_wait_ss: cds::container::fcdeque::traits + { + typedef cds::algo::flat_combining::wait_strategy::single_mutex_single_condvar<> wait_strategy; + }; + struct traits_FCDeque_wait_ss_stat: 
traits_FCDeque_wait_ss + { + typedef cds::container::fcdeque::stat<> stat; + }; + struct traits_FCDeque_wait_sm: cds::container::fcdeque::traits + { + typedef cds::algo::flat_combining::wait_strategy::single_mutex_multi_condvar<> wait_strategy; + }; + struct traits_FCDeque_wait_sm_stat: traits_FCDeque_wait_sm + { + typedef cds::container::fcdeque::stat<> stat; + }; + struct traits_FCDeque_wait_mm: cds::container::fcdeque::traits + { + typedef cds::algo::flat_combining::wait_strategy::multi_mutex_multi_condvar<> wait_strategy; + }; + struct traits_FCDeque_wait_mm_stat: traits_FCDeque_wait_mm + { + typedef cds::container::fcdeque::stat<> stat; + }; + typedef details::FCDequeL< Value > FCDequeL_default; typedef details::FCDequeL< Value, traits_FCDeque_mutex > FCDequeL_mutex; typedef details::FCDequeL< Value, traits_FCDeque_stat > FCDequeL_stat; + typedef details::FCDequeL< Value, traits_FCDeque_wait_ss > FCDequeL_wait_ss; + typedef details::FCDequeL< Value, traits_FCDeque_wait_ss_stat > FCDequeL_wait_ss_stat; + typedef details::FCDequeL< Value, traits_FCDeque_wait_sm > FCDequeL_wait_sm; + typedef details::FCDequeL< Value, traits_FCDeque_wait_sm_stat > FCDequeL_wait_sm_stat; + typedef details::FCDequeL< Value, traits_FCDeque_wait_mm > FCDequeL_wait_mm; + typedef details::FCDequeL< Value, traits_FCDeque_wait_mm_stat > FCDequeL_wait_mm_stat; typedef details::FCDequeL< Value, traits_FCDeque_elimination > FCDequeL_elimination; typedef details::FCDequeL< Value, traits_FCDeque_elimination_stat > FCDequeL_elimination_stat; @@ -475,6 +548,12 @@ namespace queue { typedef details::FCDequeR< Value > FCDequeR_default; typedef details::FCDequeR< Value, traits_FCDeque_mutex > FCDequeR_mutex; typedef details::FCDequeR< Value, traits_FCDeque_stat > FCDequeR_stat; + typedef details::FCDequeR< Value, traits_FCDeque_wait_ss > FCDequeR_wait_ss; + typedef details::FCDequeR< Value, traits_FCDeque_wait_ss_stat > FCDequeR_wait_ss_stat; + typedef details::FCDequeR< Value, traits_FCDeque_wait_sm > FCDequeR_wait_sm; + typedef details::FCDequeR< Value, traits_FCDeque_wait_sm_stat > FCDequeR_wait_sm_stat; + typedef details::FCDequeR< Value, traits_FCDeque_wait_mm > FCDequeR_wait_mm; + typedef details::FCDequeR< Value, traits_FCDeque_wait_mm_stat > FCDequeR_wait_mm_stat; typedef details::FCDequeR< Value, traits_FCDeque_elimination > FCDequeR_elimination; typedef details::FCDequeR< Value, traits_FCDeque_elimination_stat > FCDequeR_elimination_stat; @@ -483,6 +562,7 @@ namespace queue { typedef details::FCDequeR< Value, traits_FCDeque_elimination, boost::container::deque > FCDequeR_boost_elimination; typedef details::FCDequeR< Value, traits_FCDeque_elimination_stat, boost::container::deque > FCDequeR_boost_elimination_stat; + // STL typedef StdQueue_deque StdQueue_deque_Spinlock; typedef StdQueue_list StdQueue_list_Spinlock; typedef StdQueue_deque StdQueue_deque_Mutex; @@ -553,16 +633,7 @@ namespace cds_test { << CDSSTRESS_STAT_OUT( s, m_nDequeue ) << CDSSTRESS_STAT_OUT( s, m_nFailedDeq ) << CDSSTRESS_STAT_OUT( s, m_nCollided ) - << CDSSTRESS_STAT_OUT_( "combining_factor", s.combining_factor() ) - << CDSSTRESS_STAT_OUT( s, m_nOperationCount ) - << CDSSTRESS_STAT_OUT( s, m_nCombiningCount ) - << CDSSTRESS_STAT_OUT( s, m_nCompactPublicationList ) - << CDSSTRESS_STAT_OUT( s, m_nDeactivatePubRecord ) - << CDSSTRESS_STAT_OUT( s, m_nActivatePubRecord ) - << CDSSTRESS_STAT_OUT( s, m_nPubRecordCreated ) - << CDSSTRESS_STAT_OUT( s, m_nPubRecordDeteted ) - << CDSSTRESS_STAT_OUT( s, m_nAcquirePubRecCount ) - << CDSSTRESS_STAT_OUT( s, 
m_nReleasePubRecCount ); + << static_cast const&>(s); } static inline property_stream& operator <<( property_stream& o, cds::container::fcqueue::empty_stat const& /*s*/ ) @@ -587,16 +658,7 @@ namespace cds_test { << CDSSTRESS_STAT_OUT( s, m_nPopBack ) << CDSSTRESS_STAT_OUT( s, m_nFailedPopBack ) << CDSSTRESS_STAT_OUT( s, m_nCollided ) - << CDSSTRESS_STAT_OUT_( "combining_factor", s.combining_factor() ) - << CDSSTRESS_STAT_OUT( s, m_nOperationCount ) - << CDSSTRESS_STAT_OUT( s, m_nCombiningCount ) - << CDSSTRESS_STAT_OUT( s, m_nCompactPublicationList ) - << CDSSTRESS_STAT_OUT( s, m_nDeactivatePubRecord ) - << CDSSTRESS_STAT_OUT( s, m_nActivatePubRecord ) - << CDSSTRESS_STAT_OUT( s, m_nPubRecordCreated ) - << CDSSTRESS_STAT_OUT( s, m_nPubRecordDeteted ) - << CDSSTRESS_STAT_OUT( s, m_nAcquirePubRecCount ) - << CDSSTRESS_STAT_OUT( s, m_nReleasePubRecCount ); + << static_cast const&>(s); } } // namespace cds_test @@ -659,9 +721,21 @@ namespace cds_test { #define CDSSTRESS_FCQueue( test_fixture ) \ CDSSTRESS_Queue_F( test_fixture, FCQueue_deque ) \ + CDSSTRESS_Queue_F( test_fixture, FCQueue_deque_wait_ss ) \ + CDSSTRESS_Queue_F( test_fixture, FCQueue_deque_wait_ss_stat ) \ + CDSSTRESS_Queue_F( test_fixture, FCQueue_deque_wait_sm ) \ + CDSSTRESS_Queue_F( test_fixture, FCQueue_deque_wait_sm_stat ) \ + CDSSTRESS_Queue_F( test_fixture, FCQueue_deque_wait_mm ) \ + CDSSTRESS_Queue_F( test_fixture, FCQueue_deque_wait_mm_stat ) \ CDSSTRESS_Queue_F( test_fixture, FCQueue_deque_elimination ) \ CDSSTRESS_Queue_F( test_fixture, FCQueue_deque_elimination_stat ) \ CDSSTRESS_Queue_F( test_fixture, FCQueue_list ) \ + CDSSTRESS_Queue_F( test_fixture, FCQueue_list_wait_ss ) \ + CDSSTRESS_Queue_F( test_fixture, FCQueue_list_wait_ss_stat ) \ + CDSSTRESS_Queue_F( test_fixture, FCQueue_list_wait_sm ) \ + CDSSTRESS_Queue_F( test_fixture, FCQueue_list_wait_sm_stat ) \ + CDSSTRESS_Queue_F( test_fixture, FCQueue_list_wait_mm ) \ + CDSSTRESS_Queue_F( test_fixture, FCQueue_list_wait_mm_stat ) \ CDSSTRESS_Queue_F( test_fixture, FCQueue_list_elimination ) \ CDSSTRESS_Queue_F( test_fixture, FCQueue_list_elimination_stat ) @@ -669,6 +743,12 @@ namespace cds_test { CDSSTRESS_Queue_F( test_fixture, FCDequeL_default ) \ CDSSTRESS_Queue_F( test_fixture, FCDequeL_mutex ) \ CDSSTRESS_Queue_F( test_fixture, FCDequeL_stat ) \ + CDSSTRESS_Queue_F( test_fixture, FCDequeL_wait_ss ) \ + CDSSTRESS_Queue_F( test_fixture, FCDequeL_wait_ss_stat ) \ + CDSSTRESS_Queue_F( test_fixture, FCDequeL_wait_sm ) \ + CDSSTRESS_Queue_F( test_fixture, FCDequeL_wait_sm_stat ) \ + CDSSTRESS_Queue_F( test_fixture, FCDequeL_wait_mm ) \ + CDSSTRESS_Queue_F( test_fixture, FCDequeL_wait_mm_stat ) \ CDSSTRESS_Queue_F( test_fixture, FCDequeL_elimination ) \ CDSSTRESS_Queue_F( test_fixture, FCDequeL_elimination_stat ) \ CDSSTRESS_Queue_F( test_fixture, FCDequeL_boost ) \ @@ -678,6 +758,12 @@ namespace cds_test { CDSSTRESS_Queue_F( test_fixture, FCDequeR_default ) \ CDSSTRESS_Queue_F( test_fixture, FCDequeR_mutex ) \ CDSSTRESS_Queue_F( test_fixture, FCDequeR_stat ) \ + CDSSTRESS_Queue_F( test_fixture, FCDequeR_wait_ss ) \ + CDSSTRESS_Queue_F( test_fixture, FCDequeR_wait_ss_stat ) \ + CDSSTRESS_Queue_F( test_fixture, FCDequeR_wait_sm ) \ + CDSSTRESS_Queue_F( test_fixture, FCDequeR_wait_sm_stat ) \ + CDSSTRESS_Queue_F( test_fixture, FCDequeR_wait_mm ) \ + CDSSTRESS_Queue_F( test_fixture, FCDequeR_wait_mm_stat ) \ CDSSTRESS_Queue_F( test_fixture, FCDequeR_elimination ) \ CDSSTRESS_Queue_F( test_fixture, FCDequeR_elimination_stat ) \ CDSSTRESS_Queue_F( test_fixture, 
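The printers above fold all generic flat-combining counters into a single shift of the cds::algo::flat_combining::stat<> base instead of listing each field by hand. A minimal sketch of reading those counters straight from a container, assuming the usual statistics() accessor of the FC containers; the queue type below is illustrative, not one of the stress-test typedefs:

#include <queue>
#include <iostream>
#include <cds/container/fcqueue.h>

// Illustrative queue type: statistics plus a wait strategy (not a typedef from this patch).
typedef cds::container::FCQueue< int, std::queue<int>,
    cds::container::fcqueue::make_traits<
        cds::opt::stat< cds::container::fcqueue::stat<> >
        , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_multi_condvar<> >
    >::type
> stat_queue;

void dump_fc_counters( stat_queue const& q )
{
    // fcqueue::stat<> derives from flat_combining::stat<>, which is what the
    // operator<< overloads above rely on; the generic counters such as
    // m_nOperationCount and m_nCombiningCount live in that base.
    cds::algo::flat_combining::stat<> const& fc = q.statistics();
    std::cout << "combining factor: " << fc.combining_factor() << "\n";
}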
FCDequeR_boost ) \ diff --git a/test/stress/stack/intrusive_stack_type.h b/test/stress/stack/intrusive_stack_type.h index ffcb3931..5740e2f9 100644 --- a/test/stress/stack/intrusive_stack_type.h +++ b/test/stress/stack/intrusive_stack_type.h @@ -45,6 +45,7 @@ #include #include +#include namespace istack { @@ -411,16 +412,7 @@ namespace cds_test { << CDSSTRESS_STAT_OUT( s, m_nPop ) << CDSSTRESS_STAT_OUT( s, m_nFailedPop ) << CDSSTRESS_STAT_OUT( s, m_nCollided ) - << CDSSTRESS_STAT_OUT_( "combining_factor", s.combining_factor() ) - << CDSSTRESS_STAT_OUT( s, m_nOperationCount ) - << CDSSTRESS_STAT_OUT( s, m_nCombiningCount ) - << CDSSTRESS_STAT_OUT( s, m_nCompactPublicationList ) - << CDSSTRESS_STAT_OUT( s, m_nDeactivatePubRecord ) - << CDSSTRESS_STAT_OUT( s, m_nActivatePubRecord ) - << CDSSTRESS_STAT_OUT( s, m_nPubRecordCreated ) - << CDSSTRESS_STAT_OUT( s, m_nPubRecordDeteted ) - << CDSSTRESS_STAT_OUT( s, m_nAcquirePubRecCount ) - << CDSSTRESS_STAT_OUT( s, m_nReleasePubRecCount ); + << static_cast< cds::algo::flat_combining::stat<> const&>( s ); } } // namespace cds_test diff --git a/test/stress/stack/stack_type.h b/test/stress/stack/stack_type.h index 39d476d4..6a14e748 100644 --- a/test/stress/stack/stack_type.h +++ b/test/stress/stack/stack_type.h @@ -45,6 +45,7 @@ #include #include +#include namespace stack { @@ -455,16 +456,7 @@ namespace cds_test { << CDSSTRESS_STAT_OUT( s, m_nPop ) << CDSSTRESS_STAT_OUT( s, m_nFailedPop ) << CDSSTRESS_STAT_OUT( s, m_nCollided ) - << CDSSTRESS_STAT_OUT_( "combining_factor", s.combining_factor()) - << CDSSTRESS_STAT_OUT( s, m_nOperationCount ) - << CDSSTRESS_STAT_OUT( s, m_nCombiningCount ) - << CDSSTRESS_STAT_OUT( s, m_nCompactPublicationList ) - << CDSSTRESS_STAT_OUT( s, m_nDeactivatePubRecord ) - << CDSSTRESS_STAT_OUT( s, m_nActivatePubRecord ) - << CDSSTRESS_STAT_OUT( s, m_nPubRecordCreated ) - << CDSSTRESS_STAT_OUT( s, m_nPubRecordDeteted ) - << CDSSTRESS_STAT_OUT( s, m_nAcquirePubRecCount ) - << CDSSTRESS_STAT_OUT( s, m_nReleasePubRecCount ); + << static_cast const&>( s ); } static inline property_stream& operator <<( property_stream& o, cds::container::fcdeque::empty_stat const& /*s*/ ) @@ -484,16 +476,7 @@ namespace cds_test { << CDSSTRESS_STAT_OUT( s, m_nPopBack ) << CDSSTRESS_STAT_OUT( s, m_nFailedPopBack ) << CDSSTRESS_STAT_OUT( s, m_nCollided ) - << CDSSTRESS_STAT_OUT_( "combining_factor", s.combining_factor() ) - << CDSSTRESS_STAT_OUT( s, m_nOperationCount ) - << CDSSTRESS_STAT_OUT( s, m_nCombiningCount ) - << CDSSTRESS_STAT_OUT( s, m_nCompactPublicationList ) - << CDSSTRESS_STAT_OUT( s, m_nDeactivatePubRecord ) - << CDSSTRESS_STAT_OUT( s, m_nActivatePubRecord ) - << CDSSTRESS_STAT_OUT( s, m_nPubRecordCreated ) - << CDSSTRESS_STAT_OUT( s, m_nPubRecordDeteted ) - << CDSSTRESS_STAT_OUT( s, m_nAcquirePubRecCount ) - << CDSSTRESS_STAT_OUT( s, m_nReleasePubRecCount ); + << static_cast const&>(s); } } // namespace cds_test diff --git a/test/unit/deque/fcdeque.cpp b/test/unit/deque/fcdeque.cpp index 3c6ad1ab..154d0bbc 100644 --- a/test/unit/deque/fcdeque.cpp +++ b/test/unit/deque/fcdeque.cpp @@ -114,6 +114,30 @@ namespace { test( dq ); } + TEST_F( FCDeque, std_empty_wait_strategy ) + { + typedef cds::container::FCDeque, + cds::container::fcdeque::make_traits< + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::empty > + >::type + > deque_type; + + deque_type dq; + test( dq ); + } + + TEST_F( FCDeque, std_multi_mutex_multi_condvar ) + { + typedef cds::container::FCDeque, + cds::container::fcdeque::make_traits< + 
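A minimal FCDeque sketch in the same style as the unit tests around it; the element type and the underlying std::deque are assumptions, only the option and strategy names come from the patch:

#include <deque>
#include <cds/container/fcdeque.h>

// Assumed element/container types; multi_mutex_multi_condvar is one of the new strategies.
typedef cds::container::FCDeque< int, std::deque<int>,
    cds::container::fcdeque::make_traits<
        cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::multi_mutex_multi_condvar<> >
    >::type
> deque_type;

void deque_example()
{
    deque_type dq;
    dq.push_front( 1 );
    dq.push_back( 2 );

    int v;
    dq.pop_front( v );  // v == 1
    dq.pop_back( v );   // v == 2
}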
cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::multi_mutex_multi_condvar<>> + >::type + > deque_type; + + deque_type dq; + test( dq ); + } + TEST_F( FCDeque, std_elimination ) { typedef cds::container::FCDeque, @@ -126,6 +150,19 @@ namespace { test( dq ); } + TEST_F( FCDeque, std_elimination_single_mutex_single_condvar ) + { + typedef cds::container::FCDeque, + cds::container::fcdeque::make_traits< + cds::opt::enable_elimination< true > + , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_single_condvar<3>> + >::type + > deque_type; + + deque_type dq; + test( dq ); + } + TEST_F( FCDeque, std_statistics ) { typedef cds::container::FCDeque, @@ -138,6 +175,19 @@ namespace { test( dq ); } + TEST_F( FCDeque, std_stat_single_mutex_multi_condvar ) + { + typedef cds::container::FCDeque, + cds::container::fcdeque::make_traits< + cds::opt::stat< cds::container::fcdeque::stat<> > + , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_multi_condvar<2>> + >::type + > deque_type; + + deque_type dq; + test( dq ); + } + TEST_F( FCDeque, std_mutex ) { struct deque_traits : public @@ -161,6 +211,30 @@ namespace { test( dq ); } + TEST_F( FCDeque, boost_empty_wait_strategy ) + { + typedef cds::container::FCDeque, + cds::container::fcdeque::make_traits< + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::empty > + >::type + > deque_type; + + deque_type dq; + test( dq ); + } + + TEST_F( FCDeque, boost_single_mutex_single_condvar ) + { + typedef cds::container::FCDeque, + cds::container::fcdeque::make_traits< + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_single_condvar<>> + >::type + > deque_type; + + deque_type dq; + test( dq ); + } + TEST_F( FCDeque, boost_elimination ) { typedef cds::container::FCDeque, @@ -173,6 +247,19 @@ namespace { test( dq ); } + TEST_F( FCDeque, boost_elimination_single_mutex_multi_condvar ) + { + typedef cds::container::FCDeque, + cds::container::fcdeque::make_traits< + cds::opt::enable_elimination< true > + ,cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_multi_condvar<5>> + >::type + > deque_type; + + deque_type dq; + test( dq ); + } + TEST_F( FCDeque, boost_statistics ) { typedef cds::container::FCDeque, @@ -198,4 +285,18 @@ namespace { test( dq ); } + TEST_F( FCDeque, boost_mutex_multi_mutex_multi_condvar ) + { + typedef cds::container::FCDeque, + cds::container::fcdeque::make_traits< + cds::opt::enable_elimination< true > + , cds::opt::lock_type< std::mutex > + , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::multi_mutex_multi_condvar<>> + >::type + > deque_type; + + deque_type dq; + test( dq ); + } + } // namespace diff --git a/test/unit/pqueue/fcpqueue_boost_stable_vector.cpp b/test/unit/pqueue/fcpqueue_boost_stable_vector.cpp index 91044795..cb51b445 100644 --- a/test/unit/pqueue/fcpqueue_boost_stable_vector.cpp +++ b/test/unit/pqueue/fcpqueue_boost_stable_vector.cpp @@ -68,6 +68,63 @@ namespace cds_test { test( pq ); } + TEST_F( FCPQueue, stable_vector_empty_wait_strategy ) + { + typedef cds::container::FCPriorityQueue< + value_type + ,std::priority_queue< + value_type + ,boost::container::stable_vector + ,less + > + ,cds::container::fcpqueue::make_traits< + cds::opt::stat< cds::container::fcpqueue::stat<> > + , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::empty > + >::type + > pqueue_type; + + pqueue_type pq; + test( pq ); + } + + TEST_F( FCPQueue, 
stable_vector_single_mutex_single_condvar ) + { + typedef cds::container::FCPriorityQueue< + value_type + ,std::priority_queue< + value_type + ,boost::container::stable_vector + ,less + > + ,cds::container::fcpqueue::make_traits< + cds::opt::stat< cds::container::fcpqueue::stat<> > + , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_single_condvar<>> + >::type + > pqueue_type; + + pqueue_type pq; + test( pq ); + } + + TEST_F( FCPQueue, stable_vector_single_mutex_multi_condvar ) + { + typedef cds::container::FCPriorityQueue< + value_type + ,std::priority_queue< + value_type + ,boost::container::stable_vector + ,less + > + ,cds::container::fcpqueue::make_traits< + cds::opt::stat< cds::container::fcpqueue::stat<> > + , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_multi_condvar<4>> + >::type + > pqueue_type; + + pqueue_type pq; + test( pq ); + } + TEST_F( FCPQueue, boost_deque ) { typedef cds::container::FCPriorityQueue< @@ -101,4 +158,42 @@ namespace cds_test { test( pq ); } + TEST_F( FCPQueue, boost_deque_empty_wait_strategy ) + { + typedef cds::container::FCPriorityQueue< + value_type + ,std::priority_queue< + value_type + ,boost::container::deque + ,less + > + ,cds::container::fcpqueue::make_traits< + cds::opt::stat< cds::container::fcpqueue::stat<> > + ,cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::empty > + >::type + > pqueue_type; + + pqueue_type pq; + test( pq ); + } + + TEST_F( FCPQueue, boost_deque_multi_mutex_multi_condvar ) + { + typedef cds::container::FCPriorityQueue< + value_type + ,std::priority_queue< + value_type + ,boost::container::deque + ,less + > + ,cds::container::fcpqueue::make_traits< + cds::opt::stat< cds::container::fcpqueue::stat<> > + ,cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::multi_mutex_multi_condvar<6>> + >::type + > pqueue_type; + + pqueue_type pq; + test( pq ); + } + } // namespace cds_test diff --git a/test/unit/pqueue/fcpqueue_deque.cpp b/test/unit/pqueue/fcpqueue_deque.cpp index 0f7e0b93..1da9be98 100644 --- a/test/unit/pqueue/fcpqueue_deque.cpp +++ b/test/unit/pqueue/fcpqueue_deque.cpp @@ -67,6 +67,63 @@ namespace cds_test { test( pq ); } + TEST_F( FCPQueue, deque_stat_single_mutex_single_condvar ) + { + typedef cds::container::FCPriorityQueue< + value_type + ,std::priority_queue< + value_type + ,std::deque + ,less + > + ,cds::container::fcpqueue::make_traits< + cds::opt::stat< cds::container::fcpqueue::stat<> > + , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_single_condvar<>> + >::type + > pqueue_type; + + pqueue_type pq; + test( pq ); + } + + TEST_F( FCPQueue, deque_empty_wait_strategy ) + { + typedef cds::container::FCPriorityQueue< + value_type + ,std::priority_queue< + value_type + ,std::deque + ,less + > + ,cds::container::fcpqueue::make_traits< + cds::opt::stat< cds::container::fcpqueue::stat<> > + , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::empty > + >::type + > pqueue_type; + + pqueue_type pq; + test( pq ); + } + + TEST_F( FCPQueue, deque_single_mutex_multi_condvar ) + { + typedef cds::container::FCPriorityQueue< + value_type + ,std::priority_queue< + value_type + ,std::deque + ,less + > + ,cds::container::fcpqueue::make_traits< + cds::opt::stat< cds::container::fcpqueue::stat<> > + , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_multi_condvar<2>> + >::type + > pqueue_type; + + pqueue_type pq; + test( pq ); + } + TEST_F( FCPQueue, deque_mutex 
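The integer handed to a strategy (single_mutex_multi_condvar<2>, multi_mutex_multi_condvar<6>, single_mutex_single_condvar<1000>, and so on) appears to be a wait timeout in milliseconds, judging by test names such as slist_single_mutex_single_condvar_2ms; treat that reading as an inference from the test names rather than something stated in the patch. A hedged FCPriorityQueue sketch with an explicit timeout, using an illustrative element type and comparator:

#include <queue>
#include <vector>
#include <functional>
#include <cds/container/fcpqueue.h>

// The 10 below is assumed to mean a 10 ms parking timeout for waiting threads.
typedef cds::container::FCPriorityQueue<
    int
    , std::priority_queue< int, std::vector<int>, std::less<int> >
    , cds::container::fcpqueue::make_traits<
        cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_single_condvar<10> >
    >::type
> pqueue_type;

void pqueue_example()
{
    pqueue_type pq;
    pq.push( 5 );
    pq.push( 9 );

    int v;
    pq.pop( v );    // v == 9, the largest element comes out first
}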
) { typedef cds::container::FCPriorityQueue< @@ -84,4 +141,22 @@ namespace cds_test { test( pq ); } + TEST_F( FCPQueue, deque_multi_mutex_multi_condvar ) + { + typedef cds::container::FCPriorityQueue< + value_type + ,std::priority_queue< + value_type + ,std::deque + > + ,cds::container::fcpqueue::make_traits< + cds::opt::lock_type< std::mutex > + , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::multi_mutex_multi_condvar<1000>> + >::type + > pqueue_type; + + pqueue_type pq; + test( pq ); + } + } // namespace cds_test diff --git a/test/unit/pqueue/fcpqueue_vector.cpp b/test/unit/pqueue/fcpqueue_vector.cpp index 6de4855a..b1c9ef0f 100644 --- a/test/unit/pqueue/fcpqueue_vector.cpp +++ b/test/unit/pqueue/fcpqueue_vector.cpp @@ -40,6 +40,50 @@ namespace cds_test { test( pq ); } + TEST_F( FCPQueue, vector_empty_wait_strategy ) + { + struct pqueue_traits : public cds::container::fcpqueue::traits + { + typedef cds::container::fcpqueue::stat<> stat; + typedef cds::algo::flat_combining::wait_strategy::empty wait_strategy; + }; + + typedef cds::container::FCPriorityQueue< + value_type + , std::priority_queue< + value_type + , std::vector + , less + > + , pqueue_traits + > pqueue_type; + + pqueue_type pq; + test( pq ); + } + + TEST_F( FCPQueue, vector_multi_mutex_multi_condvar ) + { + struct pqueue_traits : public cds::container::fcpqueue::traits + { + typedef cds::container::fcpqueue::stat<> stat; + typedef cds::algo::flat_combining::wait_strategy::multi_mutex_multi_condvar<> wait_strategy; + }; + + typedef cds::container::FCPriorityQueue< + value_type + , std::priority_queue< + value_type + , std::vector + , less + > + , pqueue_traits + > pqueue_type; + + pqueue_type pq; + test( pq ); + } + TEST_F( FCPQueue, vector_stat ) { struct pqueue_traits : public cds::container::fcpqueue::traits @@ -61,6 +105,28 @@ namespace cds_test { test( pq ); } + TEST_F( FCPQueue, vector_stat_single_mutex_multi_condvar ) + { + struct pqueue_traits : public cds::container::fcpqueue::traits + { + typedef cds::container::fcpqueue::stat<> stat; + typedef cds::algo::flat_combining::wait_strategy::single_mutex_multi_condvar<42> wait_strategy; + }; + + typedef cds::container::FCPriorityQueue< + value_type + , std::priority_queue< + value_type + , std::vector + , less + > + , pqueue_traits + > pqueue_type; + + pqueue_type pq; + test( pq ); + } + TEST_F( FCPQueue, vector_mutex ) { typedef cds::container::FCPriorityQueue< @@ -75,4 +141,19 @@ namespace cds_test { test( pq ); } + TEST_F( FCPQueue, vector_single_mutex_single_condvar ) + { + typedef cds::container::FCPriorityQueue< + value_type + ,std::priority_queue< value_type > + ,cds::container::fcpqueue::make_traits< + cds::opt::lock_type< std::mutex > + , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_single_condvar<1000>> + >::type + > pqueue_type; + + pqueue_type pq; + test( pq ); + } + } // namespace cds_test diff --git a/test/unit/queue/fcqueue.cpp b/test/unit/queue/fcqueue.cpp index 7681ddeb..99f7b0c3 100644 --- a/test/unit/queue/fcqueue.cpp +++ b/test/unit/queue/fcqueue.cpp @@ -154,6 +154,30 @@ namespace { test_string( q ); } + TEST_F( FCQueue, std_empty_wait_strategy ) + { + typedef cds::container::FCQueue>, + cds::container::fcqueue::make_traits< + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::empty > + >::type + > queue_type; + + queue_type q; + test( q ); + } + + TEST_F( FCQueue, std_single_mutex_single_condvar ) + { + typedef cds::container::FCQueue>, + cds::container::fcqueue::make_traits< + 
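Elimination and a wait strategy compose in a single make_traits list, as the elimination tests nearby show. A short sketch, with assumed element and underlying container types:

#include <queue>
#include <deque>
#include <cds/container/fcqueue.h>

// Assumed element/container types; enable_elimination and wait_strategy are the
// options exercised by the surrounding tests.
typedef cds::container::FCQueue< int, std::queue< int, std::deque<int> >,
    cds::container::fcqueue::make_traits<
        cds::opt::enable_elimination< true >
        , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_multi_condvar<2> >
    >::type
> queue_type;

void queue_example()
{
    queue_type q;
    q.enqueue( 1 );

    int v;
    if ( q.dequeue( v )) {
        // v == 1; push()/pop() are provided as synonyms for enqueue()/dequeue()
    }
}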
cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_single_condvar<>> + >::type + > queue_type; + + queue_type q; + test( q ); + } + TEST_F( FCQueue, std_deque_elimination ) { typedef cds::container::FCQueue>, @@ -166,6 +190,19 @@ namespace { test( q ); } + TEST_F( FCQueue, std_deque_elimination_single_mutex_multi_condvar ) + { + typedef cds::container::FCQueue>, + cds::container::fcqueue::make_traits< + cds::opt::enable_elimination< true > + , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_multi_condvar<2>> + >::type + > queue_type; + + queue_type q; + test( q ); + } + TEST_F( FCQueue, std_deque_elimination_move ) { typedef cds::container::FCQueue>, @@ -178,6 +215,19 @@ namespace { test_string( q ); } + TEST_F( FCQueue, std_deque_elimination_move_multi_mutex_multi_condvar ) + { + typedef cds::container::FCQueue>, + cds::container::fcqueue::make_traits< + cds::opt::enable_elimination< true > + , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::multi_mutex_multi_condvar<>> + >::type + > queue_type; + + queue_type q; + test_string( q ); + } + TEST_F( FCQueue, std_deque_mutex ) { typedef cds::container::FCQueue>, @@ -206,6 +256,30 @@ namespace { test_string( q ); } + TEST_F( FCQueue, std_list_empty_wait_strategy ) + { + typedef cds::container::FCQueue >, + cds::container::fcqueue::make_traits< + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::empty > + >::type + > queue_type; + + queue_type q; + test( q ); + } + + TEST_F( FCQueue, std_list_single_mutex_single_condvar ) + { + typedef cds::container::FCQueue >, + cds::container::fcqueue::make_traits< + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_single_condvar<5>> + >::type + > queue_type; + + queue_type q; + test( q ); + } + TEST_F( FCQueue, std_list_elimination ) { typedef cds::container::FCQueue >, @@ -218,6 +292,19 @@ namespace { test( q ); } + TEST_F( FCQueue, std_list_elimination_multi_mutex_multi_condvar ) + { + typedef cds::container::FCQueue >, + cds::container::fcqueue::make_traits< + cds::opt::enable_elimination< true > + ,cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::multi_mutex_multi_condvar<5>> + >::type + > queue_type; + + queue_type q; + test( q ); + } + TEST_F( FCQueue, std_list_elimination_move ) { typedef cds::container::FCQueue >, diff --git a/test/unit/queue/intrusive_fcqueue.cpp b/test/unit/queue/intrusive_fcqueue.cpp index 189cf097..aa2db271 100644 --- a/test/unit/queue/intrusive_fcqueue.cpp +++ b/test/unit/queue/intrusive_fcqueue.cpp @@ -169,6 +169,36 @@ namespace { test( q ); } + TEST_F( IntrusiveFCQueue, base_empty_wait_strategy ) + { + typedef base_hook_item< boost::intrusive::list_base_hook<> > value_type; + struct traits: public cds::intrusive::fcqueue::traits + { + typedef IntrusiveFCQueue::disposer disposer; + typedef cds::algo::flat_combining::wait_strategy::empty wait_strategy; + typedef cds::intrusive::fcqueue::stat<> stat; + }; + typedef cds::intrusive::FCQueue< value_type, boost::intrusive::list< value_type >, traits > queue_type; + + queue_type q; + test( q ); + } + + TEST_F( IntrusiveFCQueue, base_single_mutex_single_condvar ) + { + typedef base_hook_item< boost::intrusive::list_base_hook<> > value_type; + struct traits: public cds::intrusive::fcqueue::traits + { + typedef IntrusiveFCQueue::disposer disposer; + typedef cds::algo::flat_combining::wait_strategy::single_mutex_single_condvar<> wait_strategy; + typedef cds::intrusive::fcqueue::stat<> stat; + 
}; + typedef cds::intrusive::FCQueue< value_type, boost::intrusive::list< value_type >, traits > queue_type; + + queue_type q; + test( q ); + } + TEST_F( IntrusiveFCQueue, base_mutex ) { typedef base_hook_item< boost::intrusive::list_base_hook<> > value_type; @@ -184,6 +214,22 @@ namespace { test( q ); } + TEST_F( IntrusiveFCQueue, base_mutex_single_mutex_multi_condvar ) + { + typedef base_hook_item< boost::intrusive::list_base_hook<> > value_type; + struct traits: public cds::intrusive::fcqueue::traits + { + typedef IntrusiveFCQueue::disposer disposer; + typedef std::mutex lock_type; + typedef cds::intrusive::fcqueue::stat<> stat; + typedef cds::algo::flat_combining::wait_strategy::single_mutex_multi_condvar<> wait_strategy; + }; + typedef cds::intrusive::FCQueue< value_type, boost::intrusive::list< value_type >, traits > queue_type; + + queue_type q; + test( q ); + } + TEST_F( IntrusiveFCQueue, base_elimination ) { typedef base_hook_item< boost::intrusive::list_base_hook<> > value_type; @@ -199,6 +245,22 @@ namespace { test( q ); } + TEST_F( IntrusiveFCQueue, base_elimination_multi_mutex_multi_condvar ) + { + typedef base_hook_item< boost::intrusive::list_base_hook<> > value_type; + struct traits: public + cds::intrusive::fcqueue::make_traits < + cds::intrusive::opt::disposer< disposer > + , cds::opt::enable_elimination < true > + , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_multi_condvar<>> + > ::type + {}; + typedef cds::intrusive::FCQueue< value_type, boost::intrusive::list< value_type >, traits > queue_type; + + queue_type q; + test( q ); + } + TEST_F( IntrusiveFCQueue, member ) { typedef member_hook_item< boost::intrusive::list_member_hook<> > value_type; @@ -231,6 +293,54 @@ namespace { test( q ); } + TEST_F( IntrusiveFCQueue, member_empty_wait_strategy ) + { + typedef member_hook_item< boost::intrusive::list_member_hook<> > value_type; + typedef boost::intrusive::member_hook, &value_type::hMember> member_option; + + typedef cds::intrusive::FCQueue< value_type, boost::intrusive::list< value_type, member_option >, + cds::intrusive::fcqueue::make_traits< + cds::intrusive::opt::disposer< disposer > + , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::empty > + >::type + > queue_type; + + queue_type q; + test( q ); + } + + TEST_F( IntrusiveFCQueue, member_single_mutex_single_condvar ) + { + typedef member_hook_item< boost::intrusive::list_member_hook<> > value_type; + typedef boost::intrusive::member_hook, &value_type::hMember> member_option; + + typedef cds::intrusive::FCQueue< value_type, boost::intrusive::list< value_type, member_option >, + cds::intrusive::fcqueue::make_traits< + cds::intrusive::opt::disposer< disposer > + , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_single_condvar<2>> + >::type + > queue_type; + + queue_type q; + test( q ); + } + + TEST_F( IntrusiveFCQueue, member_multi_mutex_multi_condvar ) + { + typedef member_hook_item< boost::intrusive::list_member_hook<> > value_type; + typedef boost::intrusive::member_hook, &value_type::hMember> member_option; + + typedef cds::intrusive::FCQueue< value_type, boost::intrusive::list< value_type, member_option >, + cds::intrusive::fcqueue::make_traits< + cds::intrusive::opt::disposer< disposer > + , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::multi_mutex_multi_condvar<>> + >::type + > queue_type; + + queue_type q; + test( q ); + } + TEST_F( IntrusiveFCQueue, member_elimination ) { typedef member_hook_item< 
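The intrusive tests mostly spell the options as a traits struct derived from the default traits instead of make_traits. A sketch of that style with a hypothetical node type and disposer; only the disposer, stat and wait_strategy typedef names mirror the patch:

#include <boost/intrusive/list.hpp>
#include <cds/intrusive/fcqueue.h>

// Hypothetical node type carrying its own hook.
struct node: public boost::intrusive::list_base_hook<>
{
    int payload;
};

// Hypothetical disposer, called for nodes still linked when the queue is cleared.
struct node_disposer
{
    void operator()( node* p ) const
    {
        // reclaim p here if the node was allocated dynamically
    }
};

// Traits-struct style: derive from the default traits and override individual typedefs.
struct queue_traits: public cds::intrusive::fcqueue::traits
{
    typedef node_disposer disposer;
    typedef cds::intrusive::fcqueue::stat<> stat;
    typedef cds::algo::flat_combining::wait_strategy::multi_mutex_multi_condvar<> wait_strategy;
};

typedef cds::intrusive::FCQueue< node, boost::intrusive::list< node >, queue_traits > queue_type;

void intrusive_example( node& n )
{
    queue_type q;
    q.enqueue( n );          // intrusive: the caller's node is linked, not copied
    node* p = q.dequeue();   // returns nullptr when the queue is empty
    (void) p;
}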
boost::intrusive::list_member_hook<> > value_type; @@ -247,6 +357,23 @@ namespace { test( q ); } + TEST_F( IntrusiveFCQueue, member_elimination_single_mutex_multi_condvar ) + { + typedef member_hook_item< boost::intrusive::list_member_hook<> > value_type; + typedef boost::intrusive::member_hook, &value_type::hMember> member_option; + + typedef cds::intrusive::FCQueue< value_type, boost::intrusive::list< value_type, member_option >, + cds::intrusive::fcqueue::make_traits< + cds::intrusive::opt::disposer< disposer > + ,cds::opt::enable_elimination< true > + , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_multi_condvar<2>> + >::type + > queue_type; + + queue_type q; + test( q ); + } + TEST_F( IntrusiveFCQueue, slist_base ) { typedef base_hook_item< boost::intrusive::slist_base_hook<>> value_type; diff --git a/test/unit/queue/segmented_queue_dhp.cpp b/test/unit/queue/segmented_queue_dhp.cpp index fff7e296..66370043 100644 --- a/test/unit/queue/segmented_queue_dhp.cpp +++ b/test/unit/queue/segmented_queue_dhp.cpp @@ -53,7 +53,7 @@ namespace { void TearDown() { cds::threading::Manager::detachThread(); - cds::gc::hp::GarbageCollector::Destruct(); + cds::gc::dhp::GarbageCollector::Destruct(); } }; diff --git a/test/unit/stack/fcstack.cpp b/test/unit/stack/fcstack.cpp index 03c178a7..9e612af4 100644 --- a/test/unit/stack/fcstack.cpp +++ b/test/unit/stack/fcstack.cpp @@ -101,6 +101,83 @@ namespace { test(); } + TEST_F( FCStack, deque_empty_wait_strategy ) + { + struct stack_traits: public + cds::container::fcstack::make_traits < + cds::opt::wait_strategy + > ::type + {}; + typedef cds::container::FCStack< unsigned int, std::stack>, stack_traits > stack_type; + test(); + } + + TEST_F( FCStack, deque_single_mutex_single_condvar ) + { + struct stack_traits: public + cds::container::fcstack::make_traits < + cds::opt::wait_strategy> + > ::type + {}; + typedef cds::container::FCStack< unsigned int, std::stack>, stack_traits > stack_type; + test(); + } + + TEST_F( FCStack, deque_single_mutex_multi_condvar ) + { + struct stack_traits: public + cds::container::fcstack::make_traits < + cds::opt::wait_strategy> + > ::type + {}; + typedef cds::container::FCStack< unsigned int, std::stack>, stack_traits > stack_type; + test(); + } + + TEST_F( FCStack, deque_multi_mutex_multi_condvar ) + { + struct stack_traits: public + cds::container::fcstack::make_traits < + cds::opt::wait_strategy> + > ::type + {}; + typedef cds::container::FCStack< unsigned int, std::stack>, stack_traits > stack_type; + test(); + } + + TEST_F( FCStack, deque_single_mutex_single_condvar_2ms ) + { + struct stack_traits: public + cds::container::fcstack::make_traits < + cds::opt::wait_strategy> + > ::type + {}; + typedef cds::container::FCStack< unsigned int, std::stack>, stack_traits > stack_type; + test(); + } + + TEST_F( FCStack, deque_single_mutex_multi_condvar_2ms ) + { + struct stack_traits: public + cds::container::fcstack::make_traits < + cds::opt::wait_strategy> + > ::type + {}; + typedef cds::container::FCStack< unsigned int, std::stack>, stack_traits > stack_type; + test(); + } + + TEST_F( FCStack, deque_multi_mutex_multi_condvar_3ms ) + { + struct stack_traits: public + cds::container::fcstack::make_traits < + cds::opt::wait_strategy> + > ::type + {}; + typedef cds::container::FCStack< unsigned int, std::stack>, stack_traits > stack_type; + test(); + } + TEST_F( FCStack, deque_elimination ) { struct stack_traits : public @@ -118,6 +195,46 @@ namespace { test(); } + TEST_F( FCStack, 
vector_empty_wait_strategy ) + { + typedef cds::container::FCStack< unsigned int, std::stack>, + cds::container::fcstack::make_traits< + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::empty > + >::type + > stack_type; + test(); + } + + TEST_F( FCStack, vector_multi_mutex_multi_condvar ) + { + typedef cds::container::FCStack< unsigned int, std::stack>, + cds::container::fcstack::make_traits< + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::multi_mutex_multi_condvar<>> + >::type + > stack_type; + test(); + } + + TEST_F( FCStack, vector_single_mutex_multi_condvar ) + { + typedef cds::container::FCStack< unsigned int, std::stack>, + cds::container::fcstack::make_traits< + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_multi_condvar<>> + >::type + > stack_type; + test(); + } + + TEST_F( FCStack, vector_single_mutex_single_condvar ) + { + typedef cds::container::FCStack< unsigned int, std::stack>, + cds::container::fcstack::make_traits< + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_single_condvar<>> + >::type + > stack_type; + test(); + } + TEST_F( FCStack, vector_elimination ) { typedef cds::container::FCStack< unsigned int, std::stack>, @@ -134,11 +251,52 @@ namespace { test(); } + TEST_F( FCStack, list_empty_wait_strategy ) + { + typedef cds::container::FCStack< unsigned int, std::stack>, + cds::container::fcstack::make_traits< + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::empty > + >::type + > stack_type; + test(); + } + + TEST_F( FCStack, list_single_mutex_single_condvar ) + { + typedef cds::container::FCStack< unsigned int, std::stack>, + cds::container::fcstack::make_traits< + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_single_condvar<>> + >::type + > stack_type; + test(); + } + + TEST_F( FCStack, list_single_mutex_multi_condvar ) + { + typedef cds::container::FCStack< unsigned int, std::stack>, + cds::container::fcstack::make_traits< + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_multi_condvar<>> + >::type + > stack_type; + test(); + } + + TEST_F( FCStack, list_multi_mutex_multi_condvar ) + { + typedef cds::container::FCStack< unsigned int, std::stack>, + cds::container::fcstack::make_traits< + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::multi_mutex_multi_condvar<>> + >::type + > stack_type; + test(); + } + TEST_F( FCStack, list_elimination ) { typedef cds::container::FCStack< unsigned int, std::stack>, cds::container::fcstack::make_traits< - cds::opt::enable_elimination< true > + cds::opt::enable_elimination< true > + , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::multi_mutex_multi_condvar<2>> >::type > stack_type; test(); diff --git a/test/unit/stack/intrusive_fcstack.cpp b/test/unit/stack/intrusive_fcstack.cpp index d4cce2b2..32b25bbb 100644 --- a/test/unit/stack/intrusive_fcstack.cpp +++ b/test/unit/stack/intrusive_fcstack.cpp @@ -143,6 +143,83 @@ namespace { test(); } + TEST_F( IntrusiveFCStack, slist_empty_wait_strategy ) + { + typedef base_hook_item< boost::intrusive::slist_base_hook<> > value_type; + struct stack_traits: public cds::intrusive::fcstack::traits + { + typedef cds::algo::flat_combining::wait_strategy::empty wait_strategy; + }; + typedef cds::intrusive::FCStack< value_type, boost::intrusive::slist< value_type >, stack_traits > stack_type; + test(); + } + + TEST_F( IntrusiveFCStack, slist_single_mutex_single_condvar ) + { + 
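Read together, these stack tests cover all four strategies shipped by the patch. Going by the names: empty keeps the purely spinning wait, single_mutex_single_condvar parks every waiting thread on one shared mutex/condition-variable pair, single_mutex_multi_condvar keeps one mutex but spreads waiters over several condition variables, and multi_mutex_multi_condvar gives each publication record its own pair. That summary is an interpretation of the names, not text from the patch. A compact FCStack sketch with an assumed element type:

#include <stack>
#include <deque>
#include <cds/container/fcstack.h>

namespace fc_ws = cds::algo::flat_combining::wait_strategy;

// Swap in any of the four strategies here; <3> is assumed to mean a 3 ms timeout.
typedef cds::container::FCStack< unsigned int, std::stack< unsigned int, std::deque<unsigned int> >,
    cds::container::fcstack::make_traits<
        cds::opt::wait_strategy< fc_ws::multi_mutex_multi_condvar<3> >
    >::type
> stack_type;

void stack_example()
{
    stack_type st;
    st.push( 42u );

    unsigned int v;
    st.pop( v );    // v == 42
}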
typedef base_hook_item< boost::intrusive::slist_base_hook<> > value_type; + struct stack_traits: public cds::intrusive::fcstack::traits + { + typedef cds::algo::flat_combining::wait_strategy::single_mutex_single_condvar<> wait_strategy; + }; + typedef cds::intrusive::FCStack< value_type, boost::intrusive::slist< value_type >, stack_traits > stack_type; + test(); + } + + TEST_F( IntrusiveFCStack, slist_single_mutex_multi_condvar ) + { + typedef base_hook_item< boost::intrusive::slist_base_hook<> > value_type; + struct stack_traits: public cds::intrusive::fcstack::traits + { + typedef cds::algo::flat_combining::wait_strategy::single_mutex_multi_condvar<> wait_strategy; + }; + typedef cds::intrusive::FCStack< value_type, boost::intrusive::slist< value_type >, stack_traits > stack_type; + test(); + } + + TEST_F( IntrusiveFCStack, slist_multi_mutex_multi_condvar ) + { + typedef base_hook_item< boost::intrusive::slist_base_hook<> > value_type; + struct stack_traits: public cds::intrusive::fcstack::traits + { + typedef cds::algo::flat_combining::wait_strategy::multi_mutex_multi_condvar<> wait_strategy; + }; + typedef cds::intrusive::FCStack< value_type, boost::intrusive::slist< value_type >, stack_traits > stack_type; + test(); + } + + TEST_F( IntrusiveFCStack, slist_single_mutex_single_condvar_2ms ) + { + typedef base_hook_item< boost::intrusive::slist_base_hook<> > value_type; + struct stack_traits: public cds::intrusive::fcstack::traits + { + typedef cds::algo::flat_combining::wait_strategy::single_mutex_single_condvar<2> wait_strategy; + }; + typedef cds::intrusive::FCStack< value_type, boost::intrusive::slist< value_type >, stack_traits > stack_type; + test(); + } + + TEST_F( IntrusiveFCStack, slist_single_mutex_multi_condvar_3ms ) + { + typedef base_hook_item< boost::intrusive::slist_base_hook<> > value_type; + struct stack_traits: public cds::intrusive::fcstack::traits + { + typedef cds::algo::flat_combining::wait_strategy::single_mutex_multi_condvar<3> wait_strategy; + }; + typedef cds::intrusive::FCStack< value_type, boost::intrusive::slist< value_type >, stack_traits > stack_type; + test(); + } + + TEST_F( IntrusiveFCStack, slist_multi_mutex_multi_condvar_2ms ) + { + typedef base_hook_item< boost::intrusive::slist_base_hook<> > value_type; + struct stack_traits: public cds::intrusive::fcstack::traits + { + typedef cds::algo::flat_combining::wait_strategy::multi_mutex_multi_condvar<2> wait_strategy; + }; + typedef cds::intrusive::FCStack< value_type, boost::intrusive::slist< value_type >, stack_traits > stack_type; + test(); + } + TEST_F( IntrusiveFCStack, slist_disposer ) { typedef base_hook_item< boost::intrusive::slist_base_hook<> > value_type; @@ -183,7 +260,8 @@ namespace { struct stack_traits : public cds::intrusive::fcstack::make_traits < cds::opt::enable_elimination < true >, - cds::intrusive::opt::disposer< mock_disposer > + cds::intrusive::opt::disposer< mock_disposer >, + cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::multi_mutex_multi_condvar<>> > ::type {}; typedef cds::intrusive::FCStack< value_type, boost::intrusive::slist< value_type >, stack_traits > stack_type; @@ -195,8 +273,9 @@ namespace { typedef base_hook_item< boost::intrusive::slist_base_hook<> > value_type; typedef cds::intrusive::FCStack< value_type, boost::intrusive::slist< value_type >, cds::intrusive::fcstack::make_traits< - cds::opt::enable_elimination< true > - , cds::opt::stat< cds::intrusive::fcstack::stat<> > + cds::opt::enable_elimination< true > + , cds::opt::stat< 
cds::intrusive::fcstack::stat<> >
+                , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_multi_condvar<>>
             >::type
         > stack_type;
         test< stack_type >();
@@ -211,6 +290,58 @@ namespace {
         test< stack_type >();
     }
 
+    TEST_F( IntrusiveFCStack, slist_member_empty_wait_strategy )
+    {
+        typedef member_hook_item< boost::intrusive::slist_member_hook<> > value_type;
+        typedef boost::intrusive::member_hook< value_type, boost::intrusive::slist_member_hook<>, &value_type::hMember > member_option;
+        struct stack_traits: public cds::intrusive::fcstack::traits
+        {
+            typedef cds::algo::flat_combining::wait_strategy::empty wait_strategy;
+        };
+
+        typedef cds::intrusive::FCStack< value_type, boost::intrusive::slist< value_type, member_option >, stack_traits > stack_type;
+        test< stack_type >();
+    }
+
+    TEST_F( IntrusiveFCStack, slist_member_single_mutex_single_condvar )
+    {
+        typedef member_hook_item< boost::intrusive::slist_member_hook<> > value_type;
+        typedef boost::intrusive::member_hook< value_type, boost::intrusive::slist_member_hook<>, &value_type::hMember > member_option;
+        struct stack_traits: public cds::intrusive::fcstack::traits
+        {
+            typedef cds::algo::flat_combining::wait_strategy::single_mutex_single_condvar<> wait_strategy;
+        };
+
+        typedef cds::intrusive::FCStack< value_type, boost::intrusive::slist< value_type, member_option >, stack_traits > stack_type;
+        test< stack_type >();
+    }
+
+    TEST_F( IntrusiveFCStack, slist_member_single_mutex_multi_condvar )
+    {
+        typedef member_hook_item< boost::intrusive::slist_member_hook<> > value_type;
+        typedef boost::intrusive::member_hook< value_type, boost::intrusive::slist_member_hook<>, &value_type::hMember > member_option;
+        struct stack_traits: public cds::intrusive::fcstack::traits
+        {
+            typedef cds::algo::flat_combining::wait_strategy::single_mutex_multi_condvar<> wait_strategy;
+        };
+
+        typedef cds::intrusive::FCStack< value_type, boost::intrusive::slist< value_type, member_option >, stack_traits > stack_type;
+        test< stack_type >();
+    }
+
+    TEST_F( IntrusiveFCStack, slist_member_multi_mutex_multi_condvar )
+    {
+        typedef member_hook_item< boost::intrusive::slist_member_hook<> > value_type;
+        typedef boost::intrusive::member_hook< value_type, boost::intrusive::slist_member_hook<>, &value_type::hMember > member_option;
+        struct stack_traits: public cds::intrusive::fcstack::traits
+        {
+            typedef cds::algo::flat_combining::wait_strategy::multi_mutex_multi_condvar<> wait_strategy;
+        };
+
+        typedef cds::intrusive::FCStack< value_type, boost::intrusive::slist< value_type, member_option >, stack_traits > stack_type;
+        test< stack_type >();
+    }
+
     TEST_F( IntrusiveFCStack, slist_member_disposer )
     {
         typedef member_hook_item< boost::intrusive::slist_member_hook<> > value_type;
@@ -246,6 +377,23 @@ namespace {
             cds::intrusive::fcstack::make_traits<
                 cds::opt::enable_elimination< true >
                 , cds::opt::stat< cds::intrusive::fcstack::stat<> >
+                , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_multi_condvar<>>
             >::type
         > stack_type;
         test< stack_type >();
@@ -288,6 +420,7 @@ namespace {
             cds::intrusive::fcstack::make_traits<
                 cds::opt::enable_elimination< true >
                 , cds::opt::stat< cds::intrusive::fcstack::stat<> >
+                , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::multi_mutex_multi_condvar<>>
             >::type
         > stack_type;
         test< stack_type >();
@@ -324,6 +457,7 @@ namespace {
             cds::intrusive::fcstack::make_traits<
                 cds::opt::enable_elimination< true >
                 , cds::opt::stat< cds::intrusive::fcstack::stat<> >
+                , cds::opt::wait_strategy< cds::algo::flat_combining::wait_strategy::single_mutex_single_condvar<>>
             >::type
         > stack_type;
         test< stack_type >();