#include <stdatomic.h>
#include <unrelacy.h>
-
-/**
- Properties to check:
- 1. At any point, only one thread can acquire the mutex; when any thread
- nlock the mutex, he must have it in his hand.
- 2. The sequence of the lock is guaranteed, which means there is a queue for
- all the lock operations.
- ####
- 3. Should establish the happens-before relationship between unlock and lock
-*/
-
-/**
- @Begin
- @Global_define:
- # The invariant is that the thread that has a guard at the head of the
- # queue should be the one who currently has the lock.
-
- int __lock_acquired = 0;
- queue<guard*> __lock_waiting_queue;
-
- @Happens-before:
- # Without any specifying, this means the beginning of a successful Unlock()
- # happens before the end of the next successful Lock().
- Unlock -> Lock
- @End
-*/
+#include <spec_lib.h>
+#include <stdlib.h>
+#include <cdsannotate.h>
+#include <specannotation.h>
+#include <model_memory.h>
+#include "common.h"
struct mcs_node {
std::atomic<mcs_node *> next;
}
};
+
struct mcs_mutex {
public:
// tail is null when lock is not held
std::atomic<mcs_node *> m_tail;
mcs_mutex() {
+ /**
+ @Begin
+ @Entry_point
+ @End
+ */
m_tail.store( NULL );
}
~mcs_mutex() {
- ASSERT( m_tail.load() == NULL );
+ //ASSERT( m_tail.load() == NULL );
}
// Each thread will have their own guard.
~guard() { m_t->unlock(this); }
};
+ /**
+ @Begin
+ @Options:
+ LANG = CPP;
+ CLASS = mcs_mutex;
+ @Global_define:
+ @DeclareVar: bool _lock_acquired;
+ @InitVar: _lock_acquired = false;
+ @Happens_before: Unlock -> Lock
+ @End
+ */
/**
@Begin
- # This function will soon enqueue the current guard to the queue to make
- # sure it will get fair lock successfully.
@Interface: Lock
- @Commit_point_set: Lock_Enqueue_Point
- @Action:
- @Code:
- __lock_waiting_queue.enqueue(I);
- @Post_action:
- __lock_acquired++;
- @Post_check:
- # Make sure when it successfully locks, the lock is not acquired yet
- # and the locking is in a FIFO order
- __lock_acquired == 1 && I == __lock_waiting_queue.peak()
+ @Commit_point_set: Lock_Enqueue_Point1 | Lock_Enqueue_Point2
+ @Check: _lock_acquired == false;
+ @Action: _lock_acquired = true;
@End
*/
void lock(guard * I) {
me->next.store(NULL, std::mo_relaxed );
me->gate.store(1, std::mo_relaxed );
+ /** Run this in -Y mode to expose the happens-before (HB) bug */
// publish my node as the new tail :
mcs_node * pred = m_tail.exchange(me, std::mo_acq_rel);
/**
- # Once the exchange completes, the thread already claimed the next
- # available slot for the lock
@Begin
- @Commit_point_check_define: true
- @Label: Lock_Enqueue_Point
+ @Commit_point_define_check: pred == NULL
+ @Label: Lock_Enqueue_Point1
@End
*/
if ( pred != NULL ) {
// unlock of pred can see me in the tail before I fill next
// publish me to previous lock-holder :
+ // FIXME: detection miss, execution never ends
+ // If this store were relaxed, the store of 0 to gate could be
+ // observed before it, and that lock would never end.
pred->next.store(me, std::mo_release );
+ //printf("lock_miss1\n");
// (*2) pred not touched any more
// wait on predecessor setting my flag -
rl::linear_backoff bo;
int my_gate = 1;
- while ( (my_gate = me->gate.load(std::mo_acquire)) ) {
+ while (my_gate ) {
+ my_gate = me->gate.load(std::mo_acquire);
+ //if (my_gate == 0)
+ //printf("lock at gate!\n");
+ /**
+ @Begin
+ @Commit_point_define_check: my_gate == 0
+ @Label: Lock_Enqueue_Point2
+ @End
+ */
thrd_yield();
}
}
/**
@Begin
@Interface: Unlock
- @Commit_point_set:
- Unlock = Unlock_Point_Success_1 | Unlock_Point_Success_2
- @Check:
- lock_acquired == 1 && I == lock_waiting_queue.peak()
- @Action:
- @Code:
- lock_acquired--;
- lock_waiting_queue.dequeue();
+ @Commit_point_set: Unlock_Point_Success_1 | Unlock_Point_Success_2
+ @Check: _lock_acquired == true
+ @Action: _lock_acquired = false;
@End
*/
void unlock(guard * I) {
mcs_node * me = &(I->m_node);
+ // FIXME: detection miss, execution never ends
mcs_node * next = me->next.load(std::mo_acquire);
+ //printf("unlock_miss2\n");
if ( next == NULL )
{
mcs_node * tail_was_me = me;
bool success;
- if ( (success = m_tail.compare_exchange_strong(
- tail_was_me,NULL,std::mo_acq_rel)) ) {
- /**
- @Begin
- @Commit_point_check_define: __ATOMIC_RET__ == true
- @Label: Unlock_Point_Success_1
- @End
- */
+ success = m_tail.compare_exchange_strong(
+ tail_was_me,NULL,std::mo_acq_rel);
+ /**
+ @Begin
+ @Commit_point_define_check: success == true
+ @Label: Unlock_Point_Success_1
+ @End
+ */
+ if (success) {
+
// got null in tail, mutex is unlocked
return;
}
// (*1) catch the race :
rl::linear_backoff bo;
for(;;) {
+ // FIXME: detection miss, execution never ends
next = me->next.load(std::mo_acquire);
+ //printf("unlock_miss3\n");
if ( next != NULL )
break;
thrd_yield();
next->gate.store( 0, std::mo_release );
/**
@Begin
- @Commit_point_check_define: true
+ @Commit_point_define_check: true
@Label: Unlock_Point_Success_2
@End
*/
}
};
+/**
+ @Begin
+ @Class_end
+ @End
+*/