--- /dev/null
+#ifndef _SEQLOCK_H
+#define _SEQLOCK_H
+
+#include <atomic>
+
+using namespace std;
+
+typedef void* (*read_func_ptr_t)(void*);
+
+/**
+ Properties to check:
+ Every read will read the up-to-date value, and only one thread can be
+ writing.
+*/
+
+
+/**
+ @Begin
+ @Global_define:
+ Type *__data_ptr = NULL;
+ bool __is_writing = false;
+
+	static bool _is_equal(void* ptr1, void* ptr2) {
+ //... Should be defined to check if the internal data equals
+ }
+
+ @Happens-before:
+ Init -> Read
+ Init -> Write
+ Write -> Write
+ Write -> Read
+ @End
+*/
+
+/**
+	Fixed the write to be lock-free: use a CAS in write() instead of a
+	mutex. There are a couple of options for the implementation according
+	to Hans Boehm's paper "Can Seqlocks Get Along with Programming
+	Language Memory Models?".
+
+	The interesting part of the read() function is the memory ordering we
+	must impose there. Boehm's paper presents three ways to do it; we use
+	the fence-based one here, as a special case for checking programs
+	written with C++ fences.
+*/
+
+// A seqlock (sequence lock) protecting a single pointer to Type.
+// Writers claim the lock without a mutex by CAS-ing the sequence counter
+// from even to odd, store the new data pointer, then increment the counter
+// back to even with release semantics. Readers snapshot the counter, read
+// the data, and retry if the counter was odd (write in flight) or changed
+// across the read (a write completed underneath them).
+template<typename Type>
+class seqlock {
+	private:
+	// To make the write lock-free, we don't need the mutex here.
+	// Mutex to write exclusively
+
+	// Sequence for reader consistency check:
+	// even => no write in progress, odd => a writer is active.
+	atomic_int _seq;
+	// The shared data structure to be protected;
+	// It needs to be atomic to avoid data races
+	atomic<Type*> _data;
+
+	// This function is to check if the objects pointed by the two pointer are
+	// the same; it is only used for checking the correctness
+	// NOTE(review): the body is commented out, so control falls off the end
+	// without returning a bool — undefined behavior if this is ever called.
+	bool is_equal(void *ptr1, void *ptr2) {
+		// return ptr1 == NULL ? false : ptr1->equals(ptr2);
+	}
+
+	public:
+	/**
+		@Begin
+		@Interface: Init
+		@Commit_point_set: Init_Point
+		@Action:
+			@Code:
+			__data_ptr = data;
+		@End
+	*/
+	// Constructor: publishes the initial data pointer. The relaxed store of
+	// _data is ordered before the release store of _seq, so any thread that
+	// acquires _seq == 0 also sees the initial pointer.
+	seqlock(Type *data) {
+		_data.store(data, memory_order_relaxed);
+		_seq.store(0, memory_order_release);
+		/**
+			# Initialization code
+			@Begin
+			@Commit_point_check_define: true
+			@Label: Init_Point
+			@End
+		*/
+	}
+
+	~seqlock() {}
+
+	/**
+		@Begin
+		@Interface: Read
+		@Commit_point_set: Read_Success_Point
+		@Action:
+			@Code:
+			void *_Old_data_ptr = __data_ptr;
+		@Post_check:
+			_is_equal(read_func(_Old_data_ptr), __RET__)
+		@End
+	*/
+	// Reader: loops until it obtains a snapshot taken while no write was in
+	// progress. read_func is applied to the protected pointer and its result
+	// is returned once the sequence counter is confirmed unchanged.
+	void* read(read_func_ptr_t read_func) {
+		while (true) {
+			// Acquire pairs with the writer's release increment of _seq,
+			// making the data written before that increment visible here.
+			int old_seq = _seq.load(memory_order_acquire);
+			// Odd counter: a write is in flight, spin until it finishes.
+			if (old_seq % 2 == 1) continue;
+			// A customized read of something return a pointer to it
+			// Potential data race, should be atomic
+			void *res = read_func(_data.load(memory_order_relaxed));
+			// This is subtle to use an acquire fence here
+			// What we actually need is a #LoadLoad fence: it orders the
+			// data load above before the _seq re-check below (the
+			// fence-based reader variant from Boehm's seqlock paper).
+			atomic_thread_fence(memory_order_acquire);
+			int new_seq;
+			// Unchanged sequence => no writer ran during the read, so
+			// res is a consistent snapshot and can be returned.
+			if ((new_seq = _seq.load(memory_order_relaxed)) == old_seq) {
+				/**
+					@Begin
+					@Commit_point_check_define: __ATOMIC_RET__ == old_seq
+					@Label: Read_Success_Point
+					@End
+				*/
+				return res;
+			}
+			// _data has been updated, read should retry
+		}
+	}
+
+	/**
+		@Begin
+		@Interface: Write
+		@Commit_point_set: Write_Success_Point
+		@Action:
+			@Code:
+			__data_ptr = new_data;
+		@End
+	*/
+	// Writer: lock-free. Claims exclusive write access by CAS-ing _seq from
+	// an even value to odd; at most one thread can win, which gives mutual
+	// exclusion among writers without a mutex.
+	void write(Type *new_data) {
+		while (true) {
+			// This might be a relaxed too
+			// Should think about how to read the lo
+			int old_seq = _seq.load(memory_order_acquire);
+			if (old_seq % 2 == 1) {
+				// Backoff, another thread is writing
+				//thrd_yield();
+				continue; // Retry
+			}
+			// Even -> odd transition marks the write as in progress so
+			// concurrent readers will retry.
+			// Should be relaxed!!!
+			if (_seq.compare_exchange_strong(old_seq, old_seq + 1,
+				memory_order_relaxed, memory_order_relaxed))
+				break;
+		}
+		// Should be a store release, to synchronize with the fence!!!
+		_data.store(new_data, memory_order_release);
+		// Release increment back to even: publishes the new data to any
+		// reader that subsequently observes this sequence value.
+		_seq.fetch_add(1, memory_order_release);
+		/**
+			@Begin
+			@Commit_point_define: true
+			@Label: Write_Success_Point
+			@End
+		*/
+	}
+};
+
+
+#endif