// silo/circbuf.h
#pragma once

#include <atomic>
#include <vector>
#include <limits>

#include "macros.h"
#include "amd64.h"

// Thread safety is ensured for many concurrent enqueuers but only one
// concurrent dequeuer. That is, the head end is thread safe, but the tail end
// can only be manipulated by a single thread.
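//
// Illustrative usage (a sketch, not part of this header): any number of
// producer threads may call enq() concurrently, while exactly one consumer
// thread calls deq()/peek().
//
//   circbuf<int, 64> q;
//   // producer threads:
//   //   q.enq(new int(42));   // spins while the buffer is full
//   // the single consumer thread:
//   //   int *p = q.deq();     // spins until an entry arrives
//   //   delete p;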
template <typename Tp, unsigned int Capacity>
class circbuf {
public:
  circbuf()
    : head_(0), tail_(0)
  {
    // mark every slot empty; explicit atomic stores avoid memset()-ing raw
    // bytes over std::atomic objects, which is not guaranteed to be
    // well-defined
    for (unsigned i = 0; i < Capacity; i++)
      buf_[i].store(nullptr, std::memory_order_relaxed);
  }

  // best-effort check: the queue is empty when head == tail and the slot at
  // head holds no entry; under concurrency this is only a snapshot
  inline bool
  empty() const
  {
    const unsigned h = head_.load(std::memory_order_acquire);
    return h == tail_.load(std::memory_order_acquire) &&
           !buf_[h].load(std::memory_order_acquire);
  }

  // blocks until something deqs() and frees a slot
  inline void
  enq(Tp *p)
  {
    INVARIANT(p);

  retry:
    unsigned icur = head_.load(std::memory_order_acquire);
    INVARIANT(icur < Capacity);
    if (buf_[icur].load(std::memory_order_acquire)) {
      // the slot at head is still occupied: the buffer is full
      nop_pause();
      goto retry;
    }

    // found an empty spot, so we now race with other enqueuers for it
    unsigned inext = (icur + 1) % Capacity;
    if (!head_.compare_exchange_strong(icur, inext, std::memory_order_acq_rel)) {
      // lost the race to another enqueuer
      nop_pause();
      goto retry;
    }

    // we own slot icur; publish the entry
    INVARIANT(!buf_[icur].load(std::memory_order_acquire));
    buf_[icur].store(p, std::memory_order_release);
  }

  // blocks until something enqs() an entry; must only be called from the
  // single consumer thread
  inline Tp *
  deq()
  {
    while (!buf_[tail_.load(std::memory_order_acquire)].load(std::memory_order_acquire))
      nop_pause();
    Tp *ret = buf_[tail_.load(std::memory_order_acquire)].load(std::memory_order_acquire);
    // clear the slot so enqueuers can reuse it, then advance the tail
    buf_[postincr(tail_)].store(nullptr, std::memory_order_release);
    INVARIANT(ret);
    return ret;
  }

  // returns the entry at the tail without removing it, or nullptr if the
  // queue is currently empty; consumer-side only, like deq()
  inline Tp *
  peek()
  {
    return buf_[tail_.load(std::memory_order_acquire)].load(std::memory_order_acquire);
  }

  // takes a current snapshot of all entries in the queue (up to limit),
  // oldest first
  inline void
  peekall(std::vector<Tp *> &ps, size_t limit = std::numeric_limits<size_t>::max())
  {
    ps.clear();
    const unsigned t = tail_.load(std::memory_order_acquire);
    unsigned i = t;
    Tp *p;
    while ((p = buf_[i].load(std::memory_order_acquire)) && ps.size() < limit) {
      ps.push_back(p);
      postincr(i);
      if (i == t)
        // have fully wrapped around
        break;
    }
  }

private:

  // post-increment i modulo Capacity, returning the old value
  static inline unsigned
  postincr(unsigned &i)
  {
    const unsigned ret = i;
    i = (i + 1) % Capacity;
    return ret;
  }

  // post-increment an atomic index modulo Capacity, returning the old value;
  // only safe when a single thread mutates i (i.e., the tail)
  static inline unsigned
  postincr(std::atomic<unsigned> &i)
  {
    const unsigned ret = i.load(std::memory_order_acquire);
    i.store((ret + 1) % Capacity, std::memory_order_release);
    return ret;
  }

  std::atomic<Tp *> buf_[Capacity];  // slots; nullptr marks an empty slot
  std::atomic<unsigned> head_;       // next slot for enq(); many writers
  std::atomic<unsigned> tail_;       // next slot for deq(); single writer
};
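
// Example of snapshotting the queue from the consumer side (an illustrative
// sketch; q and process() are hypothetical):
//
//   std::vector<int *> snapshot;
//   q.peekall(snapshot);        // entries at snapshot time, oldest first
//   for (int *p : snapshot)
//     process(*p);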