private:
// m_rdwr packs two 16-bit counters into one atomic word:
// high 16 bits = reads started, low 16 bits = writes started
// (see the +(1<<16) bump in read_fetch and the (rd<<16)|(wr+1)
// repack in write_prepare).

// elements should generally be cache-line-size padded (to avoid false sharing) :
- nonatomic<t_element> m_array[t_size];
+ t_element m_array[t_size];
// NOTE(review): this drops the nonatomic<> wrapper — presumably a
// migration off the Relacy race-checker types to plain C++; confirm
// the plain t_element slots are still race-checked some other way.
// rdwr counts the reads & writes that have started
atomic<unsigned int> m_rdwr;
public:
// Construct an empty queue: the packed counters in m_rdwr start at
// zero, as do the m_read / m_written completion counters (declared
// outside this excerpt).
- mpmc_boundq_1_alt() : m_rdwr(0), m_read(0), m_written(0)
+ mpmc_boundq_1_alt()
{
// NOTE(review): moving init from the member-init list to body
// assignments turns each atomic init into a (seq_cst) atomic store;
// harmless before the object is shared, but the init-list form is
// the usual idiom — confirm the change is intentional.
+ m_rdwr = 0;
+ m_read = 0;
+ m_written = 0;
}
//-----------------------------------------------------
// Claim the next element to read and return a pointer to its slot;
// the caller reads through the pointer and (elsewhere, not in this
// excerpt) retires the read via m_read.
- nonatomic<t_element> * read_fetch() {
+ t_element * read_fetch() {
unsigned int rdwr = m_rdwr.load(mo_acquire);
unsigned int rd,wr;
// BUG(review): rd and wr are never assigned in this hunk before they
// are used below (wr in the wait loop, rd in the array index) — the
// unpack `rd = rdwr >> 16; wr = rdwr & 0xFFFF;` and the empty-queue
// check appear to have been dropped; as written this is UB.
for(;;) {
// +(1<<16) bumps the read-started count in the high half; on CAS
// failure compare_exchange_weak reloads rdwr with the current value,
// so the retry sees fresh state.
if ( m_rdwr.compare_exchange_weak(rdwr,rdwr+(1<<16),mo_acq_rel) )
break;
+ else
+ thrd_yield();
}
// (*1)
// NOTE(review): bo is now unused — bo.yield() below was replaced by
// thrd_yield(); this declaration (and the rl:: dependency) can go.
rl::backoff bo;
// Spin until m_written's low 16 bits reach wr, i.e. the write of
// slot wr has presumably been published — verify against the
// (out-of-view) code that advances m_written.
while ( (m_written.load(mo_acquire) & 0xFFFF) != wr ) {
- bo.yield();
+ thrd_yield();
}
- nonatomic<t_element> * p = & ( m_array[ rd % t_size ] );
+ t_element * p = & ( m_array[ rd % t_size ] );
return p;
}
//-----------------------------------------------------
// Claim the next free slot to write and return a pointer to it; the
// caller fills the slot and (elsewhere, not in this excerpt)
// publishes it via m_written.
- nonatomic<t_element> * write_prepare() {
+ t_element * write_prepare() {
unsigned int rdwr = m_rdwr.load(mo_acquire);
unsigned int rd,wr;
// BUG(review): rd and wr are used below (in the CAS desired value,
// the wait loop, and the array index) but never assigned in this
// hunk — the unpack `rd = rdwr >> 16; wr = rdwr & 0xFFFF;` and the
// queue-full check appear to be missing; as written this is UB.
for(;;) {
// Desired value repacks rd in the high half and (wr+1) mod 2^16 in
// the low half, bumping the write-started count; on failure the CAS
// reloads rdwr for the retry.
if ( m_rdwr.compare_exchange_weak(rdwr,(rd<<16) | ((wr+1)&0xFFFF),mo_acq_rel) )
break;
+ else
+ thrd_yield();
}
// (*1)
// NOTE(review): bo is unused now that bo.yield() became
// thrd_yield(); remove it along with the rl:: dependency.
rl::backoff bo;
// Spin until m_read's low 16 bits reach rd, i.e. the reader of the
// oldest slot has presumably retired it so the slot may be reused —
// verify against the (out-of-view) code that advances m_read.
while ( (m_read.load(mo_acquire) & 0xFFFF) != rd ) {
- bo.yield();
+ thrd_yield();
}
- nonatomic<t_element> * p = & ( m_array[ wr % t_size ] );
+ t_element * p = & ( m_array[ wr % t_size ] );
return p;
}