Source code repo: http://github.com/khizmax/libcds/
Download: http://sourceforge.net/projects/libcds/files/
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_CUCKOO_SET_H
{
public:
// placeholder ctor
- lock_array(): lock_array_type( typename lock_array_type::select_cell_policy(2) ) {}
+ lock_array(): lock_array_type( typename lock_array_type::select_cell_policy(2)) {}
// real ctor
- lock_array( size_t nCapacity ): lock_array_type( nCapacity, typename lock_array_type::select_cell_policy(nCapacity) ) {}
+ lock_array( size_t nCapacity ): lock_array_type( nCapacity, typename lock_array_type::select_cell_policy(nCapacity)) {}
};
class scoped_lock: public std::unique_lock< lock_array_type >
if ( m_bLocked ) {
m_guard[0] = &(policy.m_Locks[0].at(nCell));
for ( unsigned int i = 1; i < c_nArity; ++i ) {
- m_guard[i] = &( policy.m_Locks[i].at( policy.m_Locks[i].lock( arrHash[i] )) );
+ m_guard[i] = &( policy.m_Locks[i].at( policy.m_Locks[i].lock( arrHash[i] )));
}
}
else {
lock_array_ptr create_lock_array( size_t nCapacity )
{
- return lock_array_ptr( lock_array_allocator().New( nCapacity ), lock_array_disposer() );
+ return lock_array_ptr( lock_array_allocator().New( nCapacity ), lock_array_disposer());
}
void acquire( size_t const * arrHash, lock_array_ptr * pLockArr, lock_type ** parrLock )
// wait while resizing
while ( true ) {
who = m_Owner.load( atomics::memory_order_acquire );
- if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask) )
+ if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask))
break;
bkoff();
m_Stat.onCellWaitResizing();
}
who = m_Owner.load( atomics::memory_order_acquire );
- if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask) ) && m_arrLocks[0] == pLockArr[0] ) {
+ if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask)) && m_arrLocks[0] == pLockArr[0] ) {
m_Stat.onCellLock();
return;
}
parrLock[0] = &(m_arrLocks[0]->at(nCell));
for ( unsigned int i = 1; i < c_nArity; ++i ) {
- parrLock[i] = &( m_arrLocks[i]->at( m_arrLocks[i]->lock( arrHash[i] & nMask)) );
+ parrLock[i] = &( m_arrLocks[i]->at( m_arrLocks[i]->lock( arrHash[i] & nMask)));
}
m_Stat.onSecondCellLock();
counter_type m_nRelocateCallCount ; ///< Count of \p relocate() function call
counter_type m_nRelocateRoundCount ; ///< Count of attempts to relocate items
counter_type m_nFalseRelocateCount ; ///< Count of unneeded attempts of \p relocate call
- counter_type m_nSuccessRelocateCount ; ///< Count of successfull item relocating
+ counter_type m_nSuccessRelocateCount ; ///< Count of successful item relocating
counter_type m_nRelocateAboveThresholdCount; ///< Count of item relocating above probeset threshold
counter_type m_nFailedRelocateCount ; ///< Count of failed relocation attempts (when the whole probeset is full)
counter_type m_nResizeCallCount ; ///< Count of \p resize() function call
counter_type m_nFalseResizeCount ; ///< Count of false \p resize() function calls (when another thread has already resized the set)
- counter_type m_nResizeSuccessNodeMove; ///< Count of successfull node moving when resizing
+ counter_type m_nResizeSuccessNodeMove; ///< Count of successful node moving when resizing
counter_type m_nResizeRelocateCall ; ///< Count of \p relocate() function call from \p resize function
- counter_type m_nInsertSuccess ; ///< Count of successfull \p insert() function call
+ counter_type m_nInsertSuccess ; ///< Count of successful \p insert() function call
counter_type m_nInsertFailed ; ///< Count of failed \p insert() function call
counter_type m_nInsertResizeCount ; ///< Count of \p resize() function call from \p insert()
counter_type m_nInsertRelocateCount ; ///< Count of \p relocate() function call from \p insert()
counter_type m_nInsertRelocateFault ; ///< Count of failed \p relocate() function call from \p insert()
counter_type m_nUpdateExistCount ; ///< Count of \p update() function calls for an existing node
- counter_type m_nUpdateSuccessCount ; ///< Count of successfull \p insert() function call for new node
+ counter_type m_nUpdateSuccessCount ; ///< Count of successful \p insert() function call for new node
counter_type m_nUpdateResizeCount ; ///< Count of \p resize() function call from \p update()
counter_type m_nUpdateRelocateCount ; ///< Count of \p relocate() function call from \p update()
counter_type m_nUpdateRelocateFault ; ///< Count of failed \p relocate() function call from \p update()
{
node_type * pPrev = itPrev.pNode;
node_type * pWhat = itWhat.pNode;
- assert( (!pPrev && pWhat == pHead) || (pPrev && pPrev->m_pNext == pWhat) );
+ assert( (!pPrev && pWhat == pHead) || (pPrev && pPrev->m_pNext == pWhat));
if ( pPrev )
pPrev->m_pNext = pWhat->m_pNext;
Otherwise, it is unordered and \p Traits should provide \p equal_to functor.
*/
static bool const c_isSorted = !( std::is_same< typename traits::compare, opt::none >::value
- && std::is_same< typename traits::less, opt::none >::value ) ;
+ && std::is_same< typename traits::less, opt::none >::value );
static size_t const c_nArity = hash::size ; ///< the arity of cuckoo hashing: the number of hash functors provided; minimum 2.
/// Key equality functor; used only for unordered probe-set
void allocate_bucket_tables( size_t nSize )
{
- assert( cds::beans::is_power2( nSize ) );
+ assert( cds::beans::is_power2( nSize ));
m_nBucketMask = nSize - 1;
bucket_table_allocator alloc;
unsigned int nTable = contains( arrPos, arrHash, val, pred );
if ( nTable != c_nUndefTable ) {
node_type& node = *arrPos[nTable].itFound;
- f( *node_traits::to_value_ptr(node) );
+ f( *node_traits::to_value_ptr(node));
bucket( nTable, arrHash[nTable]).remove( arrPos[nTable].itPrev, arrPos[nTable].itFound );
--m_ItemCounter;
m_Stat.onEraseSuccess();
return true;
}
- pVal = node_traits::to_value_ptr( *refBucket.begin() );
+ pVal = node_traits::to_value_ptr( *refBucket.begin());
copy_hash( arrHash, *pVal );
scoped_cell_trylock guard2( m_MutexPolicy, arrHash );
- if ( !guard2.locked() )
+ if ( !guard2.locked())
continue ; // try one more time
- refBucket.remove( typename bucket_entry::iterator(), refBucket.begin() );
+ refBucket.remove( typename bucket_entry::iterator(), refBucket.begin());
unsigned int i = (nTable + 1) % c_nArity;
bucket_entry& bkt = bucket( i, arrHash[i] );
if ( bkt.size() < m_nProbesetThreshold ) {
position pos;
- contains_action::find( bkt, pos, i, arrHash[i], *pVal, key_predicate() ) ; // must return false!
+ contains_action::find( bkt, pos, i, arrHash[i], *pVal, key_predicate()) ; // must return false!
bkt.insert_after( pos.itPrev, node_traits::to_node_ptr( pVal ));
m_Stat.onSuccessRelocateRound();
return true;
bucket_entry& bkt = bucket( i, arrHash[i] );
if ( bkt.size() < m_nProbesetSize ) {
position pos;
- contains_action::find( bkt, pos, i, arrHash[i], *pVal, key_predicate() ) ; // must return false!
+ contains_action::find( bkt, pos, i, arrHash[i], *pVal, key_predicate()) ; // must return false!
bkt.insert_after( pos.itPrev, node_traits::to_node_ptr( pVal ));
nTable = i;
memcpy( arrGoalHash, arrHash, sizeof(arrHash));
{
scoped_resize_lock guard( m_MutexPolicy );
- if ( nOldCapacity != bucket_count() ) {
+ if ( nOldCapacity != bucket_count()) {
m_Stat.onFalseResizeCall();
return;
}
value_type& val = *node_traits::to_value_ptr( *it );
copy_hash( arrHash, val );
- contains( arrPos, arrHash, val, key_predicate() ) ; // must return c_nUndefTable
+ contains( arrPos, arrHash, val, key_predicate()) ; // must return c_nUndefTable
for ( unsigned int i = 0; i < c_nArity; ++i ) {
bucket_entry& refBucket = bucket( i, arrHash[i] );
if ( refBucket.size() < m_nProbesetSize ) {
refBucket.insert_after( arrPos[i].itPrev, &*it );
assert( refBucket.size() > 1 );
- copy_hash( arrHash, *node_traits::to_value_ptr( *refBucket.begin()) );
+ copy_hash( arrHash, *node_traits::to_value_ptr( *refBucket.begin()));
m_Stat.onResizeRelocateCall();
relocate( i, arrHash );
break;
Probe set threshold = probe set size - 1
*/
CuckooSet()
- : m_nProbesetSize( calc_probeset_size(0) )
+ : m_nProbesetSize( calc_probeset_size(0))
, m_nProbesetThreshold( m_nProbesetSize - 1 )
, m_MutexPolicy( c_nDefaultInitialSize )
{
, unsigned int nProbesetSize ///< probe set size
, unsigned int nProbesetThreshold = 0 ///< probe set threshold, <tt>nProbesetThreshold < nProbesetSize</tt>. If 0, <tt>nProbesetThreshold = nProbesetSize - 1</tt>
)
- : m_nProbesetSize( calc_probeset_size(nProbesetSize) )
+ : m_nProbesetSize( calc_probeset_size(nProbesetSize))
, m_nProbesetThreshold( nProbesetThreshold ? nProbesetThreshold : m_nProbesetSize - 1 )
, m_MutexPolicy( cds::beans::ceil2(nInitialSize ? nInitialSize : c_nDefaultInitialSize ))
{
CuckooSet(
hash_tuple_type const& h ///< hash functor tuple of type <tt>std::tuple<H1, H2, ... Hn></tt> where <tt> n == \ref c_nArity </tt>
)
- : m_nProbesetSize( calc_probeset_size(0) )
+ : m_nProbesetSize( calc_probeset_size(0))
, m_nProbesetThreshold( m_nProbesetSize -1 )
, m_Hash( h )
, m_MutexPolicy( c_nDefaultInitialSize )
, unsigned int nProbesetThreshold ///< probe set threshold, <tt>nProbesetThreshold < nProbesetSize</tt>. If 0, <tt>nProbesetThreshold = nProbesetSize - 1</tt>
, hash_tuple_type const& h ///< hash functor tuple of type <tt>std::tuple<H1, H2, ... Hn></tt> where <tt> n == \ref c_nArity </tt>
)
- : m_nProbesetSize( calc_probeset_size(nProbesetSize) )
+ : m_nProbesetSize( calc_probeset_size(nProbesetSize))
, m_nProbesetThreshold( nProbesetThreshold ? nProbesetThreshold : m_nProbesetSize - 1)
, m_Hash( h )
, m_MutexPolicy( cds::beans::ceil2(nInitialSize ? nInitialSize : c_nDefaultInitialSize ))
CuckooSet(
hash_tuple_type&& h ///< hash functor tuple of type <tt>std::tuple<H1, H2, ... Hn></tt> where <tt> n == \ref c_nArity </tt>
)
- : m_nProbesetSize( calc_probeset_size(0) )
+ : m_nProbesetSize( calc_probeset_size(0))
, m_nProbesetThreshold( m_nProbesetSize / 2 )
- , m_Hash( std::forward<hash_tuple_type>(h) )
+ , m_Hash( std::forward<hash_tuple_type>(h))
, m_MutexPolicy( c_nDefaultInitialSize )
{
check_common_constraints();
, unsigned int nProbesetThreshold ///< probe set threshold, <tt>nProbesetThreshold < nProbesetSize</tt>. If 0, <tt>nProbesetThreshold = nProbesetSize - 1</tt>
, hash_tuple_type&& h ///< hash functor tuple of type <tt>std::tuple<H1, H2, ... Hn></tt> where <tt> n == \ref c_nArity </tt>
)
- : m_nProbesetSize( calc_probeset_size(nProbesetSize) )
+ : m_nProbesetSize( calc_probeset_size(nProbesetSize))
, m_nProbesetThreshold( nProbesetThreshold ? nProbesetThreshold : m_nProbesetSize - 1)
- , m_Hash( std::forward<hash_tuple_type>(h) )
+ , m_Hash( std::forward<hash_tuple_type>(h))
, m_MutexPolicy( cds::beans::ceil2(nInitialSize ? nInitialSize : c_nDefaultInitialSize ))
{
check_common_constraints();
{
scoped_cell_lock guard( m_MutexPolicy, arrHash );
- if ( contains( arrPos, arrHash, val, key_predicate() ) != c_nUndefTable ) {
+ if ( contains( arrPos, arrHash, val, key_predicate()) != c_nUndefTable ) {
m_Stat.onInsertFailed();
return false;
}
++m_ItemCounter;
nGoalTable = i;
assert( refBucket.size() > 1 );
- copy_hash( arrHash, *node_traits::to_value_ptr( *refBucket.begin()) );
+ copy_hash( arrHash, *node_traits::to_value_ptr( *refBucket.begin()));
goto do_relocate;
}
}
The functor may change non-key fields of the \p item.
- Returns std::pair<bool, bool> where \p first is \p true if operation is successfull,
+ Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
i.e. the node has been inserted or updated,
\p second is \p true if new item has been added or \p false if the item with \p key
already exists.
{
scoped_cell_lock guard( m_MutexPolicy, arrHash );
- unsigned int nTable = contains( arrPos, arrHash, val, key_predicate() );
+ unsigned int nTable = contains( arrPos, arrHash, val, key_predicate());
if ( nTable != c_nUndefTable ) {
func( false, *node_traits::to_value_ptr( *arrPos[nTable].itFound ), val );
m_Stat.onUpdateExist();
++m_ItemCounter;
nGoalTable = i;
assert( refBucket.size() > 1 );
- copy_hash( arrHash, *node_traits::to_value_ptr( *refBucket.begin()) );
+ copy_hash( arrHash, *node_traits::to_value_ptr( *refBucket.begin()));
goto do_relocate;
}
}
{
scoped_cell_lock guard( m_MutexPolicy, arrHash );
- unsigned int nTable = contains( arrPos, arrHash, val, key_predicate() );
+ unsigned int nTable = contains( arrPos, arrHash, val, key_predicate());
if ( nTable != c_nUndefTable && node_traits::to_value_ptr(*arrPos[nTable].itFound) == &val ) {
bucket( nTable, arrHash[nTable]).remove( arrPos[nTable].itPrev, arrPos[nTable].itFound );
--m_ItemCounter;
*/
void clear()
{
- clear_and_dispose( disposer() );
+ clear_and_dispose( disposer());
}
/// Clears the set and calls \p disposer for each item