1 /*------------------------------------------------------------------------
2 Junction: Concurrent data structures in C++
3 Copyright (c) 2016 Jeff Preshing
5 Distributed under the Simplified BSD License.
6 Original location: https://github.com/preshing/junction
8 This software is distributed WITHOUT ANY WARRANTY; without even the
9 implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
10 See the LICENSE file for more information.
11 ------------------------------------------------------------------------*/
13 #include <junction/Core.h>
14 #include <turf/CPUTimer.h>
15 #include <turf/Util.h>
16 #include <turf/extra/UniqueSequence.h>
17 #include <turf/extra/JobDispatcher.h>
18 #include <turf/extra/Options.h>
19 #include <junction/extra/MapAdapter.h>
23 using namespace turf::intTypes;
24 typedef junction::extra::MapAdapter MapAdapter;
26 static const ureg NumKeysPerThread = 16384;
27 static const ureg DefaultReadsPerWrite = 19;
28 static const ureg DefaultItersPerChunk = 128;
29 static const ureg DefaultChunks = 10;
30 static const u32 Prime = 0x4190ab09;
34 turf::extra::Random m_rand;
39 m_threshold = u32(double(0xffffffffu) * ratio);
42 void delay(ureg& workUnits) {
44 volatile ureg v = m_rand.next32();
55 ureg numKeysPerThread;
60 turf::extra::SpinKicker spinKicker;
61 turf::Atomic<u32> doneFlag;
63 SharedState(MapAdapter& adapter, ureg numThreads, ureg numKeysPerThread, ureg readsPerWrite, ureg itersPerChunk)
64 : adapter(adapter), map(NULL), numKeysPerThread(numKeysPerThread), numThreads(numThreads), readsPerWrite(readsPerWrite),
65 itersPerChunk(itersPerChunk) {
67 doneFlag.storeNonatomic(0);
73 SharedState& m_shared;
74 MapAdapter::ThreadContext m_threadCtx;
93 Stats& operator+=(const Stats& other) {
94 workUnitsDone += other.workUnitsDone;
95 mapOpsDone += other.mapOpsDone;
96 duration += other.duration;
100 bool operator<(const Stats& other) const {
101 return duration < other.duration;
107 ThreadState(SharedState& shared, ureg threadIndex, u32 rangeLo, u32 rangeHi)
108 : m_shared(shared), m_threadCtx(shared.adapter, threadIndex) {
109 m_threadIndex = threadIndex;
112 m_addIndex = rangeLo;
113 m_removeIndex = rangeLo;
116 void registerThread() {
117 m_threadCtx.registerThread();
120 void unregisterThread() {
121 m_threadCtx.unregisterThread();
124 void initialPopulate() {
125 TURF_ASSERT(m_addIndex == m_removeIndex);
126 MapAdapter::Map* map = m_shared.map;
127 for (ureg i = 0; i < m_shared.numKeysPerThread; i++) {
128 u32 key = m_addIndex * Prime;
129 map->assign(key, (void*) (key & ~uptr(3)));
130 if (++m_addIndex == m_rangeHi)
131 m_addIndex = m_rangeLo;
136 MapAdapter::Map* map = m_shared.map;
137 turf::CPUTimer::Converter converter;
138 Delay delay(m_shared.delayFactor);
140 ureg lookupIndex = m_rangeLo;
141 ureg remaining = m_shared.itersPerChunk;
142 if (m_threadIndex == 0)
143 m_shared.spinKicker.kick(m_shared.numThreads - 1);
146 m_shared.spinKicker.waitForKick();
150 turf::CPUTimer::Point start = turf::CPUTimer::get();
151 for (; remaining > 0; remaining--) {
153 delay.delay(stats.workUnitsDone);
154 if (m_shared.doneFlag.load(turf::Relaxed))
156 u32 key = m_addIndex * Prime;
158 map->assign(key, (void*) uptr(key));
161 if (++m_addIndex == m_rangeHi)
162 m_addIndex = m_rangeLo;
165 if (s32(lookupIndex - m_removeIndex) < 0)
166 lookupIndex = m_removeIndex;
167 for (ureg l = 0; l < m_shared.readsPerWrite; l++) {
168 delay.delay(stats.workUnitsDone);
169 if (m_shared.doneFlag.load(turf::Relaxed))
171 key = lookupIndex * Prime;
173 volatile void* value = map->get(key);
177 if (++lookupIndex == m_rangeHi)
178 lookupIndex = m_rangeLo;
179 if (lookupIndex == m_addIndex)
180 lookupIndex = m_removeIndex;
184 delay.delay(stats.workUnitsDone);
185 if (m_shared.doneFlag.load(turf::Relaxed))
187 key = m_removeIndex * Prime;
192 if (++m_removeIndex == m_rangeHi)
193 m_removeIndex = m_rangeLo;
196 if (s32(lookupIndex - m_removeIndex) < 0)
197 lookupIndex = m_removeIndex;
198 for (ureg l = 0; l < m_shared.readsPerWrite; l++) {
199 delay.delay(stats.workUnitsDone);
200 if (m_shared.doneFlag.load(turf::Relaxed))
202 key = lookupIndex * Prime;
204 volatile void* value = map->get(key);
208 if (++lookupIndex == m_rangeHi)
209 lookupIndex = m_rangeLo;
210 if (lookupIndex == m_addIndex)
211 lookupIndex = m_removeIndex;
214 if (m_threadIndex == 0)
215 m_shared.doneFlag.store(1, turf::Relaxed);
216 m_threadCtx.update();
217 turf::CPUTimer::Point end = turf::CPUTimer::get();
220 stats.duration = converter.toSeconds(end - start);
225 static const turf::extra::Option Options[] = {
226 {"readsPerWrite", 'r', true, "number of reads per write"},
227 {"itersPerChunk", 'i', true, "number of iterations per chunk"},
228 {"chunks", 'c', true, "number of chunks to execute"},
229 {"keepChunkFraction", 'k', true, "threshold fraction of chunk timings to keep"},
232 int main(int argc, const char** argv) {
233 turf::extra::Options options(Options, TURF_STATIC_ARRAY_SIZE(Options));
234 options.parse(argc, argv);
235 ureg readsPerWrite = options.getInteger("readsPerWrite", DefaultReadsPerWrite);
236 ureg itersPerChunk = options.getInteger("itersPerChunk", DefaultItersPerChunk);
237 ureg chunks = options.getInteger("chunks", DefaultChunks);
238 double keepChunkFraction = options.getDouble("keepChunkFraction", 1.0);
240 turf::extra::JobDispatcher dispatcher;
241 ureg numThreads = dispatcher.getNumPhysicalCores();
242 MapAdapter adapter(numThreads);
244 // Create shared state and register threads
245 SharedState shared(adapter, numThreads, NumKeysPerThread, readsPerWrite, itersPerChunk);
246 std::vector<ThreadState> threads;
247 threads.reserve(numThreads);
248 for (ureg t = 0; t < numThreads; t++) {
249 u32 rangeLo = 0xffffffffu / numThreads * t + 1;
250 u32 rangeHi = 0xffffffffu / numThreads * (t + 1) + 1;
251 threads.emplace_back(shared, t, rangeLo, rangeHi);
253 dispatcher.kickMulti(&ThreadState::registerThread, &threads[0], threads.size());
257 MapAdapter::Map map(MapAdapter::getInitialCapacity(numThreads * NumKeysPerThread));
259 dispatcher.kickMulti(&ThreadState::initialPopulate, &threads[0], threads.size());
262 printf("'mapType': '%s',\n", MapAdapter::MapName);
263 printf("'readsPerWrite': %d,\n", (int) readsPerWrite);
264 printf("'itersPerChunk': %d,\n", (int) itersPerChunk);
265 printf("'chunks': %d,\n", (int) chunks);
266 printf("'keepChunkFraction': %f,\n", keepChunkFraction);
267 printf("'labels': ('delayFactor', 'workUnitsDone', 'mapOpsDone', 'totalTime'),\n"), printf("'points': [\n");
268 for (float delayFactor = 1.f; delayFactor >= 0.0005f; delayFactor *= 0.95f) {
269 shared.delayFactor = delayFactor;
271 std::vector<ThreadState::Stats> kickTotals;
272 for (ureg c = 0; c < chunks; c++) {
273 shared.doneFlag.storeNonatomic(false);
274 dispatcher.kickMulti(&ThreadState::run, &threads[0], threads.size());
276 ThreadState::Stats kickTotal;
277 for (ureg t = 0; t < numThreads; t++)
278 kickTotal += threads[t].m_stats;
279 kickTotals.push_back(kickTotal);
282 std::sort(kickTotals.begin(), kickTotals.end());
283 ThreadState::Stats totals;
284 for (ureg t = 0; t < ureg(kickTotals.size() * keepChunkFraction); t++) {
285 totals += kickTotals[t];
288 printf(" (%f, %d, %d, %f),\n", shared.delayFactor, int(totals.workUnitsDone), int(totals.mapOpsDone),
297 dispatcher.kickMulti(&ThreadState::unregisterThread, &threads[0], threads.size());