// NOTE(review): elided excerpt of mpmc_boundq_1_alt, a fixed-capacity
// multi-producer / multi-consumer bounded queue.  The stray leading integers
// on each line are extraction residue (the original file's line numbers), and
// the jumps in that numbering show interior statements are missing from this
// view (declarations of `rd`/`wr`, the `wr = rdwr & 0xFFFF;` unpack, return
// statements, closing braces).  Do not assume the bodies shown are complete.
4 template <typename t_element, size_t t_size>
5 struct mpmc_boundq_1_alt
9 // elements should generally be cache-line-size padded :
10 t_element m_array[t_size];
12 // rdwr counts the reads & writes that have started
// Two 16-bit counters packed into one atomic word: reads-started in the high
// 16 bits ((rdwr>>16) & 0xFFFF below), writes-started in the low 16 bits.
// Packing both lets a single compare_exchange reserve a slot atomically
// against both producers and consumers.
13 atomic<unsigned int> m_rdwr;
14 // "read" and "written" count the number completed
15 atomic<unsigned int> m_read;
16 atomic<unsigned int> m_written;
// NOTE(review): presumably an auxiliary model queue used only by the
// spec-checker annotations (@Commit_point... below) to express the expected
// FIFO order — confirm against the Order_queue declaration, not visible here.
30 Order_queue<unsigned int*> spec_queue;
33 //-----------------------------------------------------
// Consumer side: reserve the next occupied slot and return a pointer to it.
// Reservation CAS-increments the reads-started counter (high 16 bits of
// m_rdwr); consumption is completed by the m_read.fetch_add below.
35 t_element * read_fetch() {
36 unsigned int rdwr = m_rdwr.load(mo_acquire);
// Unpack reads-started from the high 16 bits; the matching
// `wr = rdwr & 0xFFFF;` line is elided from this excerpt.
39 rd = (rdwr>>16) & 0xFFFF;
42 if ( wr == rd ) { // empty
// Reserve one read slot: bump the high-16-bit read count.  A weak CAS is
// fine here — spurious failure simply retries with a re-read m_rdwr
// (the surrounding retry-loop structure is elided from this view).
46 if ( m_rdwr.compare_exchange_weak(rdwr,rdwr+(1<<16),mo_acq_rel) )
// Spin until all writes started at reservation time (`wr` of them) have
// completed, guaranteeing slot `rd` holds a fully-published element.
54 while ( (m_written.load(mo_acquire) & 0xFFFF) != wr ) {
58 t_element * p = & ( m_array[ rd % t_size ] );
// Spec-checker annotation (not C++): commit-point check for the read.
61 @Commit_point_Check: true
// NOTE(review): `peak` reads like a misspelling of `peek`, but it must match
// the Order_queue API's actual name — verify before renaming.
64 spec_queue.peak() == p
// Publish consume-completion so a waiting producer (write_prepare's spin on
// m_read) may reuse this slot; mo_release orders the element access before
// the counter bump.
70 m_read.fetch_add(1,mo_release);
72 @Commit_point_define: true
73 @Label: Read_Consume_Success
81 //-----------------------------------------------------
// Producer side: reserve the next free slot and return a pointer to it so
// the caller can fill in the element; the matching publish step is the
// m_written.fetch_add below.
83 t_element * write_prepare() {
84 unsigned int rdwr = m_rdwr.load(mo_acquire);
87 rd = (rdwr>>16) & 0xFFFF;
// Full when writes-started has lapped reads-started by the capacity; all
// counter arithmetic is modulo 2^16, hence the & 0xFFFF masking.
90 if ( wr == ((rd + t_size)&0xFFFF) ) // full
// Reserve one write slot: rebuild the packed word with wr+1 (mod 2^16) in
// the low 16 bits while keeping rd unchanged in the high 16 bits.
93 if ( m_rdwr.compare_exchange_weak(rdwr,(rd<<16) | ((wr+1)&0xFFFF),mo_acq_rel) )
// Spin until all reads started at reservation time (`rd` of them) have
// completed, so overwriting m_array[wr % t_size] is safe.
101 while ( (m_read.load(mo_acquire) & 0xFFFF) != rd ) {
106 t_element * p = & ( m_array[ wr % t_size ] );
// Spec-checker annotations (not C++): record this element in the model
// queue at the commit point.
109 @Commit_point_check: ANY
110 @Action: spec_queue.add(p);
// Publish write-completion: the element becomes visible to read_fetch's spin
// on m_written; mo_release orders the element store before the counter bump.
117 m_written.fetch_add(1,mo_release);
120 //-----------------------------------------------------