/*------------------------------------------------------------------------
  Junction: Concurrent data structures in C++
  Copyright (c) 2016 Jeff Preshing

  Distributed under the Simplified BSD License.
  Original location: https://github.com/preshing/junction

  This software is distributed WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  See the LICENSE file for more information.
------------------------------------------------------------------------*/

#ifndef JUNCTION_DETAILS_GRAMPA_H
#define JUNCTION_DETAILS_GRAMPA_H

#include <junction/Core.h>
#include <turf/Atomic.h>
#include <junction/striped/Mutex.h>
#include <junction/striped/ManualResetEvent.h>
#include <turf/Util.h>
#include <junction/MapTraits.h>
#include <turf/Trace.h>
#include <turf/Heap.h>
#include <junction/SimpleJobCoordinator.h>
#include <junction/QSBR.h>
#include <memory.h> // for memcpy

namespace junction {
namespace details {

#if JUNCTION_TRACK_GRAMPA_STATS
struct GrampaCounter {
    turf::Atomic<ureg> total;
    turf::Atomic<sreg> current;

    void increment() {
        total.fetchAdd(1, turf::Relaxed);
        current.fetchAdd(1, turf::Relaxed);
    }

    void decrement() {
        current.fetchSub(1, turf::Relaxed);
    }
};

struct GrampaStats {
    GrampaCounter numTables;
    GrampaCounter numTableMigrations;
    GrampaCounter numFlatTrees;
    GrampaCounter numFlatTreeMigrations;

    static GrampaStats Instance; // Zero-initialized
};
#endif

TURF_TRACE_DECLARE(Grampa, 37)

template <class Map>
struct Grampa {
    typedef typename Map::Hash Hash;
    typedef typename Map::Value Value;
    typedef typename Map::KeyTraits KeyTraits;
    typedef typename Map::ValueTraits ValueTraits;

    static const ureg RedirectFlatTree = 1;
    static const ureg InitialSize = 8;
    static const ureg TableMigrationUnitSize = 32;
    static const ureg FlatTreeMigrationUnitSize = 32;
    static const ureg LinearSearchLimit = 128;
    static const ureg CellsInUseSample = LinearSearchLimit;
    TURF_STATIC_ASSERT(LinearSearchLimit > 0 && LinearSearchLimit < 256);              // Must fit in CellGroup::deltas
    TURF_STATIC_ASSERT(CellsInUseSample > 0 && CellsInUseSample <= LinearSearchLimit); // Limit sample to failed search chain

    static const ureg MinTableSize = 8;
    static const ureg LeafSizeBits = 10;
    static const ureg LeafSize = (ureg(1) << LeafSizeBits);

    struct Cell {
        // If value == Redirect, threads participate in the jobCoordinator.
        turf::Atomic<Hash> hash;
        turf::Atomic<Value> value;
    };

    struct CellGroup {
        // Every cell in the table actually represents a bucket of cells, all linked together in a probe chain.
        // Each cell in the probe chain is located within the table itself.
        // "deltas" determines the index of the next cell in the probe chain.
        // The first cell in the chain is the one that was hashed. It may or may not actually belong in the bucket.
        // The "second" cell in the chain is given by deltas 0 - 3. It's guaranteed to belong in the bucket.
        // All subsequent cells in the chain are given by deltas 4 - 7. They're also guaranteed to belong in the bucket.
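        // eg. If hash h lands on idx 5 and the next cell in h's bucket lives at idx 9,
        // then deltas[5 & 3] == 4 in cell 5's group, and lookups advance with
        // idx = (idx + delta) & sizeMask. Later hops read deltas[(idx & 3) + 4].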
        turf::Atomic<u8> deltas[8];
        Cell cells[4];
    };

    struct Table {
        // unsafeRangeShift determines how many slots are occupied by this Table in the flattree.
        // The range of hashes stored in this table is given by (1 << unsafeRangeShift).
        // eg. If the entire map is stored in a single table, then Table::unsafeRangeShift == HASH_BITS.
        // If the entire map is stored in two tables, then Table::unsafeRangeShift == (HASH_BITS - 1) for each table.
        // FlatTree::safeShift is always <= Table::unsafeRangeShift for all the tables it contains.
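        // eg. With HASH_BITS == 64, a table with unsafeRangeShift == 62 covers 1 << 62
        // hashes; in a flattree with safeShift == 60 it occupies
        // 1 << (62 - 60) == 4 consecutive slots, all pointing at the same table.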
        const ureg sizeMask; // a power of two minus one
        const Hash baseHash;
        const ureg unsafeRangeShift;
        junction::striped::ManualResetEvent isPublished; // To prevent publishing a subtree before its parent is published (happened in testing)
        junction::striped::Mutex mutex;                  // to DCLI the TableMigration (stored in the jobCoordinator)
        SimpleJobCoordinator jobCoordinator;             // makes all blocked threads participate in the migration

        Table(ureg sizeMask, Hash baseHash, ureg unsafeRangeShift)
            : sizeMask(sizeMask), baseHash(baseHash), unsafeRangeShift(unsafeRangeShift) {
        }

        static Table* create(ureg tableSize, ureg baseHash, ureg unsafeShift) {
            TURF_ASSERT(turf::util::isPowerOf2(tableSize));
            TURF_ASSERT(unsafeShift > 0 && unsafeShift <= sizeof(Hash) * 8);
            TURF_ASSERT(tableSize >= 4);
            ureg numGroups = tableSize >> 2;
            Table* table = (Table*) TURF_HEAP.alloc(sizeof(Table) + sizeof(CellGroup) * numGroups);
            new (table) Table(tableSize - 1, baseHash, (u8) unsafeShift);
            for (ureg i = 0; i < numGroups; i++) {
                CellGroup* group = table->getCellGroups() + i;
                for (ureg j = 0; j < 4; j++) {
                    group->deltas[j].storeNonatomic(0);
                    group->deltas[j + 4].storeNonatomic(0);
                    group->cells[j].hash.storeNonatomic(KeyTraits::NullHash);
                    group->cells[j].value.storeNonatomic(Value(ValueTraits::NullValue));
                }
            }
#if JUNCTION_TRACK_GRAMPA_STATS
            GrampaStats::Instance.numTables.increment();
#endif
            return table;
        }

        void destroy() {
#if JUNCTION_TRACK_GRAMPA_STATS
            GrampaStats::Instance.numTables.decrement();
#endif
            this->Table::~Table();
            TURF_HEAP.free(this);
        }

        CellGroup* getCellGroups() const {
            return (CellGroup*) (this + 1);
        }

        ureg getNumMigrationUnits() const {
            return sizeMask / TableMigrationUnitSize + 1;
        }
    };

    class TableMigration : public SimpleJobCoordinator::Job {
    public:
        struct Source {
            Table* table;
            turf::Atomic<ureg> sourceIndex;
        };

        Map& m_map;
        Hash m_baseHash; // The lowest possible hash value in this subtree; determines our index in the flattree.
        // If m_numDestinations == 1, m_safeShift == 0.
        // Otherwise, m_safeShift tells (indirectly) the size of the flattree in which our subtree would exactly fit:
        // 1 << (HASH_BITS - m_safeShift).
        // This ensures that m_safeShift is always less than sizeof(Hash) * 8, so that shifting by m_safeShift is not
        // undefined behavior.
        // To determine the subtree index for a hash during migration, we use: (hash >> m_safeShift) & (m_numDestinations - 1)
        // A mask is used since we are only migrating a subtree -- not necessarily the entire map.
        ureg m_safeShift;
        turf::Atomic<ureg> m_workerStatus; // number of workers + end flag
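        // The low bit of m_workerStatus is the "end" flag; each participating worker
        // adds 2, so (m_workerStatus >> 1) is the current worker count. See run().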
        turf::Atomic<sreg> m_overflowTableIndex;
        turf::Atomic<sreg> m_unitsRemaining;
        ureg m_numSources;
        ureg m_numDestinations; // The size of the subtree being created. Some table pointers may be repeated.

        TableMigration(Map& map) : m_map(map) {
        }

        static TableMigration* create(Map& map, ureg numSources, ureg numDestinations) {
            TableMigration* migration =
                (TableMigration*) TURF_HEAP.alloc(sizeof(TableMigration) + sizeof(TableMigration::Source) * numSources +
                                                  sizeof(Table*) * numDestinations);
            new (migration) TableMigration(map);
            migration->m_workerStatus.storeNonatomic(0);
            migration->m_overflowTableIndex.storeNonatomic(-1);
            migration->m_unitsRemaining.storeNonatomic(0);
            migration->m_numSources = numSources;
            migration->m_numDestinations = numDestinations;
            // Caller is responsible for filling in the source & destination pointers.
#if JUNCTION_TRACK_GRAMPA_STATS
            GrampaStats::Instance.numTableMigrations.increment();
#endif
            return migration;
        }

        virtual ~TableMigration() TURF_OVERRIDE {
        }

        void destroy() {
#if JUNCTION_TRACK_GRAMPA_STATS
            GrampaStats::Instance.numTableMigrations.decrement();
#endif
            // Destroy all source tables.
            for (ureg i = 0; i < m_numSources; i++)
                if (getSources()[i].table)
                    getSources()[i].table->destroy();
            // Delete the migration object itself.
            this->TableMigration::~TableMigration();
            TURF_HEAP.free(this);
        }

        ureg getUnsafeShift() const {
            return m_safeShift ? m_safeShift : (sizeof(Hash) * 8);
        }

        Source* getSources() const {
            return (Source*) (this + 1);
        }

        Table** getDestinations() const {
            return (Table**) (getSources() + m_numSources);
        }

        sreg migrateRange(Table* srcTable, ureg startIdx);
        virtual void run() TURF_OVERRIDE;
    };

    class FlatTreeMigration;

    struct FlatTree {
        // The size of the flattree is 1 << (HASH_BITS - safeShift).
        // Or, stated another way, (Hash(-1) >> safeShift) + 1.
        // To determine the flattree index for a given hash, we simply use: (hash >> safeShift)
        // Smaller shift == more significant bits used as an index == bigger flattree.
        // For example, the simplest flattree has only two entries, and only the most significant
        // bit of each hash is used as the flattree index. In that case, safeShift == HASH_BITS - 1.
        // Each time the flattree doubles in size, safeShift decreases by 1.
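        // eg. With 64-bit hashes, safeShift == 62 gives (Hash(-1) >> 62) + 1 == 4
        // leaves, indexed by the top two bits of each hash.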
        const ureg safeShift;
        junction::striped::Mutex mutex;
        FlatTreeMigration* migration; // Protected by mutex

        FlatTree(ureg safeShift) : safeShift(safeShift), migration(NULL) {
            // A flattree always has at least two tables, so the shift is always safe.
            TURF_ASSERT(safeShift < sizeof(Hash) * 8);
        }

        static FlatTree* create(ureg safeShift) {
            // A flattree always has at least two tables, so the shift is always safe.
            TURF_ASSERT(safeShift < sizeof(Hash) * 8);
            ureg numLeaves = (Hash(-1) >> safeShift) + 1;
            FlatTree* flatTree = (FlatTree*) TURF_HEAP.alloc(sizeof(FlatTree) + sizeof(turf::Atomic<Table*>) * numLeaves);
            new (flatTree) FlatTree(safeShift);
            // Caller will initialize flatTree->getTables()
#if JUNCTION_TRACK_GRAMPA_STATS
            GrampaStats::Instance.numFlatTrees.increment();
#endif
            return flatTree;
        }

        void destroy() {
#if JUNCTION_TRACK_GRAMPA_STATS
            GrampaStats::Instance.numFlatTrees.decrement();
#endif
            this->FlatTree::~FlatTree();
            TURF_HEAP.free(this);
        }

        turf::Atomic<Table*>* getTables() const {
            return (turf::Atomic<Table*>*) (this + 1);
        }

        ureg getSize() const {
            return (Hash(-1) >> safeShift) + 1;
        }

        ureg getNumMigrationUnits() const {
            ureg sizeMask = Hash(-1) >> safeShift;
            return sizeMask / FlatTreeMigrationUnitSize + 1;
        }
    };

    class FlatTreeMigration : public SimpleJobCoordinator::Job {
    public:
        Map& m_map;
        FlatTree* m_source;
        FlatTree* m_destination;
        turf::Atomic<ureg> m_workerStatus;
        turf::Atomic<ureg> m_sourceIndex;
        turf::Atomic<sreg> m_unitsRemaining;
        junction::striped::ManualResetEvent m_completed;

        FlatTreeMigration(Map& map, FlatTree* flatTree, ureg shift) : m_map(map) {
            m_source = flatTree;
            m_destination = FlatTree::create(shift);
            m_workerStatus.storeNonatomic(0);
            m_sourceIndex.storeNonatomic(0);
            m_unitsRemaining.storeNonatomic(flatTree->getNumMigrationUnits());
#if JUNCTION_TRACK_GRAMPA_STATS
            GrampaStats::Instance.numFlatTreeMigrations.increment();
#endif
        }

        virtual ~FlatTreeMigration() TURF_OVERRIDE {
#if JUNCTION_TRACK_GRAMPA_STATS
            GrampaStats::Instance.numFlatTreeMigrations.decrement();
#endif
            // Delete the source flattree.
            m_source->destroy();
        }

        void destroy() {
            delete this;
        }

        virtual void run() TURF_OVERRIDE;
    };

    static void garbageCollectTable(Table* table) {
        TURF_ASSERT(table);
        DefaultQSBR.enqueue(&Table::destroy, table);
    }

    static void garbageCollectFlatTree(FlatTree* flatTree) {
        TURF_ASSERT(flatTree);
        DefaultQSBR.enqueue(&FlatTree::destroy, flatTree);
    }

    static Cell* find(Hash hash, Table* table, ureg sizeMask) {
        TURF_TRACE(Grampa, 0, "[find] called", uptr(table), hash);
        TURF_ASSERT(table);
        TURF_ASSERT(hash != KeyTraits::NullHash);
        // Optimistically check hashed cell even though it might belong to another bucket
        ureg idx = hash & sizeMask;
        CellGroup* group = table->getCellGroups() + (idx >> 2);
        Cell* cell = group->cells + (idx & 3);
        Hash probeHash = cell->hash.load(turf::Relaxed);
        if (probeHash == hash) {
            TURF_TRACE(Grampa, 1, "[find] found existing cell optimistically", uptr(table), idx);
            return cell;
        } else if (probeHash == KeyTraits::NullHash) {
            return cell = NULL;
        }
        // Follow probe chain for our bucket
        u8 delta = group->deltas[idx & 3].load(turf::Relaxed);
        while (delta) {
            idx = (idx + delta) & sizeMask;
            group = table->getCellGroups() + (idx >> 2);
            cell = group->cells + (idx & 3);
            Hash probeHash = cell->hash.load(turf::Relaxed);
            // Note: probeHash might actually be NULL due to memory reordering of a concurrent insert,
            // but we don't check for it. We just follow the probe chain.
            if (probeHash == hash) {
                TURF_TRACE(Grampa, 2, "[find] found existing cell", uptr(table), idx);
                return cell;
            }
            delta = group->deltas[(idx & 3) + 4].load(turf::Relaxed);
        }
        // End of probe chain, not found
        return NULL;
    }
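
    // insert() below proceeds in three phases: check the hashed cell, walk the
    // existing link chain for the bucket, then linear-probe for a free cell and link
    // it back into the chain. Cells are never unlinked; overflow triggers a migration.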
    // FIXME: Possible optimization: Dedicated insert for migration? It wouldn't check for InsertResult_AlreadyFound.
    enum InsertResult {
        InsertResult_AlreadyFound,
        InsertResult_InsertedNew,
        InsertResult_Overflow
    };

    static InsertResult insert(Hash hash, Table* table, ureg sizeMask, Cell*& cell, ureg& overflowIdx) {
        TURF_TRACE(Grampa, 3, "[insert] called", uptr(table), hash);
        TURF_ASSERT(table);
        TURF_ASSERT(hash != KeyTraits::NullHash);
        ureg idx = ureg(hash);

        // Check hashed cell first, though it may not even belong to the bucket.
        CellGroup* group = table->getCellGroups() + ((idx & sizeMask) >> 2);
        cell = group->cells + (idx & 3);
        Hash probeHash = cell->hash.load(turf::Relaxed);
        if (probeHash == KeyTraits::NullHash) {
            if (cell->hash.compareExchangeStrong(probeHash, hash, turf::Relaxed)) {
                TURF_TRACE(Grampa, 4, "[insert] reserved first cell", uptr(table), idx);
                // There are no links to set. We're done.
                return InsertResult_InsertedNew;
            } else {
                TURF_TRACE(Grampa, 5, "[insert] race to reserve first cell", uptr(table), idx);
                // Fall through to check if it was the same hash...
            }
        }
        if (probeHash == hash) {
            TURF_TRACE(Grampa, 6, "[insert] found in first cell", uptr(table), idx);
            return InsertResult_AlreadyFound;
        }

        // Follow the link chain for this bucket.
        ureg maxIdx = idx + sizeMask;
        ureg linkLevel = 0;
        turf::Atomic<u8>* prevLink;
        for (;;) {
        followLink:
            prevLink = group->deltas + ((idx & 3) + linkLevel);
            linkLevel = 4;
            u8 probeDelta = prevLink->load(turf::Relaxed);
            if (probeDelta) {
                idx += probeDelta;
                // Check the hash for this cell.
                group = table->getCellGroups() + ((idx & sizeMask) >> 2);
                cell = group->cells + (idx & 3);
                probeHash = cell->hash.load(turf::Relaxed);
                if (probeHash == KeyTraits::NullHash) {
                    // Cell was linked, but hash is not visible yet.
                    // We could avoid this case (and guarantee it's visible) using acquire & release, but instead,
                    // just poll until it becomes visible.
                    TURF_TRACE(Grampa, 7, "[insert] race to read hash", uptr(table), idx);
                    do {
                        probeHash = cell->hash.load(turf::Acquire);
                    } while (probeHash == KeyTraits::NullHash);
                }
                TURF_ASSERT(((probeHash ^ hash) & sizeMask) == 0); // Only hashes in the same bucket can be linked
                if (probeHash == hash) {
                    TURF_TRACE(Grampa, 8, "[insert] found in probe chain", uptr(table), idx);
                    return InsertResult_AlreadyFound;
                }
            } else {
                // Reached the end of the link chain for this bucket.
                // Switch to linear probing until we reserve a new cell or find a late-arriving cell in the same bucket.
                ureg prevLinkIdx = idx;
                TURF_ASSERT(sreg(maxIdx - idx) >= 0); // Nobody would have linked an idx that's out of range.
                ureg linearProbesRemaining = turf::util::min(maxIdx - idx, LinearSearchLimit);
                while (linearProbesRemaining-- > 0) {
                    idx++;
                    group = table->getCellGroups() + ((idx & sizeMask) >> 2);
                    cell = group->cells + (idx & 3);
                    probeHash = cell->hash.load(turf::Relaxed);
                    if (probeHash == KeyTraits::NullHash) {
                        // It's an empty cell. Try to reserve it.
                        if (cell->hash.compareExchangeStrong(probeHash, hash, turf::Relaxed)) {
                            // Success. We've reserved the cell. Link it to the previous cell in the same bucket.
                            TURF_TRACE(Grampa, 9, "[insert] reserved cell", uptr(table), idx);
                            TURF_ASSERT(probeDelta == 0);
                            u8 desiredDelta = idx - prevLinkIdx;
                            // Note: another thread could actually set the link on our behalf (see below).
#if TURF_WITH_ASSERTS
                            probeDelta = prevLink->exchange(desiredDelta, turf::Relaxed);
                            TURF_ASSERT(probeDelta == 0 || probeDelta == desiredDelta);
#else
                            prevLink->store(desiredDelta, turf::Relaxed);
#endif
                            return InsertResult_InsertedNew;
                        } else {
                            TURF_TRACE(Grampa, 10, "[insert] race to reserve cell", uptr(table), idx);
                            // Fall through to check if it's the same hash...
                        }
                    }
                    Hash x = (probeHash ^ hash);
                    // Check for same hash.
                    if (!x) {
                        TURF_TRACE(Grampa, 11, "[insert] found outside probe chain", uptr(table), idx);
                        return InsertResult_AlreadyFound;
                    }
                    // Check for same bucket.
                    if ((x & sizeMask) == 0) {
                        TURF_TRACE(Grampa, 12, "[insert] found late-arriving cell in same bucket", uptr(table), idx);
                        // Attempt to set the link on behalf of the late-arriving cell.
                        // This is usually redundant, but if we don't attempt to set the late-arriving cell's link here,
                        // there's no guarantee that our own link chain will be well-formed by the time this function returns.
                        // (Indeed, subsequent lookups sometimes failed during testing, for this exact reason.)
                        u8 desiredDelta = idx - prevLinkIdx;
#if TURF_WITH_ASSERTS
                        probeDelta = prevLink->exchange(desiredDelta, turf::Relaxed);
                        TURF_ASSERT(probeDelta == 0 || probeDelta == desiredDelta);
                        if (probeDelta == 0)
                            TURF_TRACE(Grampa, 13, "[insert] set link on behalf of late-arriving cell", uptr(table), idx);
#else
                        prevLink->store(desiredDelta, turf::Relaxed);
#endif
                        goto followLink; // Try to follow the link chain for the bucket again.
                    }
                    // Continue linear search...
                }
                // Table is too full to insert.
                overflowIdx = idx + 1;
                TURF_TRACE(Grampa, 14, "[insert] overflow", uptr(table), overflowIdx);
                return InsertResult_Overflow;
            }
        }
    }
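
    // A caller that receives InsertResult_Overflow is expected to kick off a table
    // migration, passing overflowIdx to beginTableMigration() below so it can sample
    // cell usage near the end of the failed probe run.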

    static void beginTableMigrationToSize(Map& map, Table* table, ureg nextTableSize, ureg splitShift) {
        // Create the new migration by DCLI (double-checked lazy initialization).
        TURF_TRACE(Grampa, 15, "[beginTableMigrationToSize] called", 0, 0);
        SimpleJobCoordinator::Job* job = table->jobCoordinator.loadConsume();
        if (job) {
            TURF_TRACE(Grampa, 16, "[beginTableMigrationToSize] new migration already exists", 0, 0);
        } else {
            turf::LockGuard<junction::striped::Mutex> guard(table->mutex);
            job = table->jobCoordinator.loadConsume(); // Non-atomic would be sufficient, but that's OK.
            if (job) {
                TURF_TRACE(Grampa, 17, "[beginTableMigrationToSize] new migration already exists (double-checked)", 0, 0);
            } else {
                // Create the new migration.
                ureg numDestinations = ureg(1) << splitShift;
                TableMigration* migration = TableMigration::create(map, 1, numDestinations);
                migration->m_baseHash = table->baseHash;
                ureg migrationShift = table->unsafeRangeShift - splitShift;
                migration->m_safeShift = (migrationShift < sizeof(Hash) * 8) ? migrationShift : 0;
                migration->m_unitsRemaining.storeNonatomic(table->getNumMigrationUnits());
                migration->getSources()[0].table = table;
                migration->getSources()[0].sourceIndex.storeNonatomic(0);
                ureg subRangeShift = table->unsafeRangeShift - splitShift; // subRangeShift is also "unsafe" (possibly represents the entire range)
                ureg hashOffsetDelta = subRangeShift < (sizeof(Hash) * 8) ? (ureg(1) << subRangeShift) : 0;
                for (ureg i = 0; i < numDestinations; i++) {
                    migration->getDestinations()[i] = Table::create(nextTableSize, table->baseHash + hashOffsetDelta * i, subRangeShift);
                }
                // Publish the new migration.
                table->jobCoordinator.storeRelease(migration);
            }
        }
    }

    static void beginTableMigration(Map& map, Table* table, ureg overflowIdx) {
        // Estimate the number of cells in use based on a small sample.
        ureg sizeMask = table->sizeMask;
        ureg idx = overflowIdx - CellsInUseSample;
        ureg inUseCells = 0;
        for (ureg linearProbesRemaining = CellsInUseSample; linearProbesRemaining > 0; linearProbesRemaining--) {
            CellGroup* group = table->getCellGroups() + ((idx & sizeMask) >> 2);
            Cell* cell = group->cells + (idx & 3);
            Value value = cell->value.load(turf::Relaxed);
            if (value == Value(ValueTraits::Redirect)) {
                // Another thread kicked off the jobCoordinator. The caller will participate upon return.
                TURF_TRACE(Grampa, 18, "[beginTableMigration] redirected while determining table size", 0, 0);
                return;
            }
            if (value != Value(ValueTraits::NullValue))
                inUseCells++;
            idx++;
        }
        float inUseRatio = float(inUseCells) / CellsInUseSample;
        float estimatedInUse = (sizeMask + 1) * inUseRatio;
        ureg nextTableSize = turf::util::roundUpPowerOf2(ureg(estimatedInUse * 2));
        // FIXME: Support migrating to smaller tables.
        nextTableSize = turf::util::max(nextTableSize, sizeMask + 1);
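        // eg. If 96 of the 128 sampled cells are in use in a 1024-cell table, then
        // inUseRatio == 0.75, estimatedInUse == 768, and nextTableSize ==
        // roundUpPowerOf2(1536) == 2048.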
        // Split into multiple tables if necessary.
        ureg splitShift = 0;
        while (nextTableSize > LeafSize) {
            splitShift++;
            nextTableSize >>= 1;
        }
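        // eg. nextTableSize == 4096 with LeafSize == 1024 yields splitShift == 2:
        // four leaf tables of 1024 cells each, covering the original range in quarters.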
        beginTableMigrationToSize(map, table, nextTableSize, splitShift);
    }

    static FlatTreeMigration* createFlatTreeMigration(Map& map, FlatTree* flatTree, ureg shift) {
        turf::LockGuard<junction::striped::Mutex> guard(flatTree->mutex);
        if (!flatTree->migration) {
            flatTree->migration = new FlatTreeMigration(map, flatTree, shift);
        }
        return flatTree->migration;
    }

    static FlatTreeMigration* getExistingFlatTreeMigration(FlatTree* flatTree) {
        turf::LockGuard<junction::striped::Mutex> guard(flatTree->mutex);
        TURF_ASSERT(flatTree->migration); // Must already exist!
        return flatTree->migration;
    }
}; // Grampa

// Returns the index of the destination table that overflowed, or -1 if none.
template <class Map>
sreg Grampa<Map>::TableMigration::migrateRange(Table* srcTable, ureg startIdx) {
    ureg srcSizeMask = srcTable->sizeMask;
    ureg safeShift = m_safeShift;
    Table** dstLeafs = getDestinations();
    ureg dstLeafMask = m_numDestinations - 1;
    ureg endIdx = turf::util::min(startIdx + TableMigrationUnitSize, srcSizeMask + 1);
    // Iterate over the source range.
    for (ureg srcIdx = startIdx; srcIdx < endIdx; srcIdx++) {
        CellGroup* srcGroup = srcTable->getCellGroups() + ((srcIdx & srcSizeMask) >> 2);
        Cell* srcCell = srcGroup->cells + (srcIdx & 3);
        Hash srcHash;
        Value srcValue;
        // Fetch the srcHash and srcValue.
        for (;;) {
            srcHash = srcCell->hash.load(turf::Relaxed);
            if (srcHash == KeyTraits::NullHash) {
                // An unused cell. Try to put a Redirect marker in its value.
                srcValue = srcCell->value.compareExchange(Value(ValueTraits::NullValue), Value(ValueTraits::Redirect), turf::Relaxed);
                if (srcValue == Value(ValueTraits::Redirect)) {
                    // srcValue is already marked Redirect due to a previous incomplete migration.
                    TURF_TRACE(Grampa, 19, "[migrateRange] empty cell already redirected", uptr(srcTable), srcIdx);
                    break;
                }
                if (srcValue == Value(ValueTraits::NullValue))
                    break; // Redirect has been placed. Break inner loop, continue outer loop.
                TURF_TRACE(Grampa, 20, "[migrateRange] race to insert key", uptr(srcTable), srcIdx);
                // Otherwise, somebody just claimed the cell. Read srcHash again...
            } else {
                // Check for a deleted/uninitialized value.
                srcValue = srcCell->value.load(turf::Relaxed);
                if (srcValue == Value(ValueTraits::NullValue)) {
                    // Try to put a Redirect marker.
                    if (srcCell->value.compareExchangeStrong(srcValue, Value(ValueTraits::Redirect), turf::Relaxed))
                        break; // Redirect has been placed. Break inner loop, continue outer loop.
                    TURF_TRACE(Grampa, 21, "[migrateRange] race to insert value", uptr(srcTable), srcIdx);
                    if (srcValue == Value(ValueTraits::Redirect)) {
                        // FIXME: I don't think this will happen. Investigate & change to assert
                        TURF_TRACE(Grampa, 22, "[migrateRange] race inserted Redirect", uptr(srcTable), srcIdx);
                        break;
                    }
                } else if (srcValue == Value(ValueTraits::Redirect)) {
                    // srcValue is already marked Redirect due to a previous incomplete migration.
                    TURF_TRACE(Grampa, 23, "[migrateRange] in-use cell already redirected", uptr(srcTable), srcIdx);
                    break;
                } else {
                    // We've got a key/value pair to migrate.
                    // Reserve a destination cell in the dstTable.
                    TURF_ASSERT(srcHash != KeyTraits::NullHash);
                    TURF_ASSERT(srcValue != Value(ValueTraits::NullValue));
                    TURF_ASSERT(srcValue != Value(ValueTraits::Redirect));
                    ureg destLeafIndex = (srcHash >> safeShift) & dstLeafMask;
                    Table* dstLeaf = dstLeafs[destLeafIndex];
                    Cell* dstCell;
                    ureg overflowIdx;
                    InsertResult result = insert(srcHash, dstLeaf, dstLeaf->sizeMask, dstCell, overflowIdx);
                    // During migration, a hash can only exist in one place among all the source tables,
                    // and it is only migrated by one thread. Therefore, the hash will never already exist
                    // in the destination table:
                    TURF_ASSERT(result != InsertResult_AlreadyFound);
                    if (result == InsertResult_Overflow) {
                        // Destination overflow.
                        // This can happen for several reasons. For example, the source table could have
                        // consisted entirely of deleted cells when it overflowed, resulting in a small
                        // destination table size, but then another thread could re-insert all the same
                        // hashes before the migration completed.
                        // The caller will cancel the current migration and begin a new one.
                        return destLeafIndex;
                    }
                    // Migrate the old value to the new cell.
                    for (;;) {
                        // Copy srcValue to the destination.
                        dstCell->value.store(srcValue, turf::Relaxed);
                        // Try to place a Redirect marker in srcValue.
                        Value doubleCheckedSrcValue = srcCell->value.compareExchange(srcValue, Value(ValueTraits::Redirect), turf::Relaxed);
                        TURF_ASSERT(doubleCheckedSrcValue != Value(ValueTraits::Redirect)); // Only one thread can redirect a cell at a time.
                        if (doubleCheckedSrcValue == srcValue) {
                            // No racing writes to the src. We've successfully placed the Redirect marker.
                            // srcValue was non-NULL when we decided to migrate it, but it may have changed to NULL
                            // by a late-arriving erase.
                            if (srcValue == Value(ValueTraits::NullValue))
                                TURF_TRACE(Grampa, 24, "[migrateRange] racing update was erase", uptr(srcTable), srcIdx);
                            break;
                        }
                        // There was a late-arriving write (or erase) to the src. Migrate the new value and try again.
                        TURF_TRACE(Grampa, 25, "[migrateRange] race to update migrated value", uptr(srcTable), srcIdx);
                        srcValue = doubleCheckedSrcValue;
                    }
                    // Cell successfully migrated. Proceed to the next source cell.
                    break;
                }
            }
        }
    }
    // The range has been migrated successfully.
    return -1;
}

template <class Map>
void Grampa<Map>::TableMigration::run() {
    // Conditionally increment the shared # of workers.
    ureg probeStatus = m_workerStatus.load(turf::Relaxed);
    do {
        if (probeStatus & 1) {
            // The end flag is already set, so do nothing.
            TURF_TRACE(Grampa, 26, "[TableMigration::run] already ended", uptr(this), 0);
            return;
        }
    } while (!m_workerStatus.compareExchangeWeak(probeStatus, probeStatus + 2, turf::Relaxed, turf::Relaxed));
    // The # of workers has been incremented, and the end flag is clear.
    TURF_ASSERT((probeStatus & 1) == 0);
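    // This thread is now counted in m_workerStatus; every exit path below must
    // balance the +2 above with the fetchSub(2) at endMigration.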

    // Iterate over all source tables.
    Source* sources = getSources();
    for (ureg s = 0; s < m_numSources; s++) {
        Source& source = sources[s];
        // Loop over all migration units in this source table.
        for (;;) {
            if (m_workerStatus.load(turf::Relaxed) & 1) {
                TURF_TRACE(Grampa, 27, "[TableMigration::run] detected end flag set", uptr(this), 0);
                goto endMigration;
            }
            ureg startIdx = source.sourceIndex.fetchAdd(TableMigrationUnitSize, turf::Relaxed);
            if (startIdx >= source.table->sizeMask + 1)
                break; // No more migration units in this table. Try the next source table.
            sreg overflowTableIndex = migrateRange(source.table, startIdx);
            if (overflowTableIndex >= 0) {
                // *** FAILED MIGRATION ***
                // TableMigration failed due to destination table overflow.
                // No other thread can declare the migration successful at this point, because *this* unit
                // will never complete, hence m_unitsRemaining won't reach zero.
                // However, multiple threads can independently detect a failed migration at the same time.
                TURF_TRACE(Grampa, 28, "[TableMigration::run] destination overflow", uptr(source.table), uptr(startIdx));
                // The reason we store overflowTableIndex in a shared variable is because we must flush all the
                // worker threads before we can safely deal with the overflow. Therefore, the thread that detects
                // the failure is often different from the thread that deals with it.
                // Store overflowTableIndex unconditionally; racing writes should be rare, and it doesn't matter
                // which one wins.
                sreg oldIndex = m_overflowTableIndex.exchange(overflowTableIndex, turf::Relaxed);
                if (oldIndex >= 0)
                    TURF_TRACE(Grampa, 29, "[TableMigration::run] race to set m_overflowTableIndex", uptr(overflowTableIndex), uptr(oldIndex));
                m_workerStatus.fetchOr(1, turf::Relaxed);
                goto endMigration;
            }
            sreg prevRemaining = m_unitsRemaining.fetchSub(1, turf::Relaxed);
            TURF_ASSERT(prevRemaining > 0);
            if (prevRemaining == 1) {
                // *** SUCCESSFUL MIGRATION ***
                // That was the last chunk to migrate.
                m_workerStatus.fetchOr(1, turf::Relaxed);
                goto endMigration;
            }
        }
    }
    TURF_TRACE(Grampa, 30, "[TableMigration::run] out of migration units", uptr(this), 0);

endMigration:
    // Decrement the shared # of workers.
    probeStatus = m_workerStatus.fetchSub(2, turf::AcquireRelease); // Ensure all modifications are visible to the thread that will publish
    if (probeStatus >= 4) {
        // There are other workers remaining. Return here so that only the very last worker will proceed.
        TURF_TRACE(Grampa, 31, "[TableMigration::run] not the last worker", uptr(this), uptr(probeStatus));
        return;
    }

    // We're the very last worker thread.
    // Perform the appropriate post-migration step depending on whether the migration succeeded or failed.
    TURF_ASSERT(probeStatus == 3);
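    // probeStatus == 3 means we were the sole remaining worker (2) and the end flag (1) was set.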
    sreg overflowTableIndex = m_overflowTableIndex.loadNonatomic(); // No racing writes at this point
    if (overflowTableIndex < 0) {
        // The migration succeeded. This is the most likely outcome. Publish the new subtree.
        m_map.publishTableMigration(this);
        // End the jobCoordinator.
        sources[0].table->jobCoordinator.end();
    } else {
        // The migration failed due to the overflow of a destination table.
        Table* origTable = sources[0].table;
        ureg count = ureg(1) << (origTable->unsafeRangeShift - getUnsafeShift());
        ureg lo = overflowTableIndex & ~(count - 1);
        TURF_ASSERT(lo + count <= m_numDestinations);
        turf::LockGuard<junction::striped::Mutex> guard(origTable->mutex);
        SimpleJobCoordinator::Job* checkedJob = origTable->jobCoordinator.loadConsume();
        if (checkedJob != this) {
            TURF_TRACE(Grampa, 32, "[TableMigration::run] a new TableMigration was already started", uptr(origTable), uptr(checkedJob));
        } else {
            TableMigration* migration;
            Table* overflowedTable = getDestinations()[overflowTableIndex];
            if (overflowedTable->sizeMask + 1 < LeafSize) {
                // The entire map is contained in a small table.
                TURF_TRACE(Grampa, 33, "[TableMigration::run] overflow occurred in a small map", uptr(origTable), uptr(checkedJob));
                TURF_ASSERT(overflowedTable->unsafeRangeShift == sizeof(Hash) * 8);
                TURF_ASSERT(overflowedTable->baseHash == 0);
                TURF_ASSERT(m_numDestinations == 1);
                TURF_ASSERT(m_baseHash == 0);
                migration = TableMigration::create(m_map, m_numSources + 1, 1);
                migration->m_baseHash = 0;
                migration->m_safeShift = 0;
                // Double the destination table size.
                migration->getDestinations()[0] = Table::create((overflowedTable->sizeMask + 1) * 2, overflowedTable->baseHash, overflowedTable->unsafeRangeShift);
            } else {
                // The overflowed table is already the size of a leaf. Split it into two ranges.
                if (count == 1) {
                    TURF_TRACE(Grampa, 34, "[TableMigration::run] doubling subtree size after failure", uptr(origTable), uptr(checkedJob));
                    migration = TableMigration::create(m_map, m_numSources + 1, m_numDestinations * 2);
                    migration->m_baseHash = m_baseHash;
                    migration->m_safeShift = getUnsafeShift() - 1;
                    for (ureg i = 0; i < m_numDestinations; i++) {
                        migration->getDestinations()[i * 2] = getDestinations()[i];
                        migration->getDestinations()[i * 2 + 1] = getDestinations()[i];
                    }
                    count = 2;
                    lo *= 2;
                } else {
                    TURF_TRACE(Grampa, 35, "[TableMigration::run] keeping same subtree size after failure", uptr(origTable), uptr(checkedJob));
                    migration = TableMigration::create(m_map, m_numSources + 1, m_numDestinations);
                    migration->m_baseHash = m_baseHash;
                    migration->m_safeShift = m_safeShift;
                    memcpy(migration->getDestinations(), getDestinations(), m_numDestinations * sizeof(Table*));
                }
                Table* splitTable1 = Table::create(LeafSize, origTable->baseHash, origTable->unsafeRangeShift - 1);
                ureg i = 0;
                for (; i < count / 2; i++) {
                    migration->getDestinations()[lo + i] = splitTable1;
                }
                ureg halfNumHashes = ureg(1) << (origTable->unsafeRangeShift - 1);
                Table* splitTable2 = Table::create(LeafSize, origTable->baseHash + halfNumHashes, origTable->unsafeRangeShift - 1);
                for (; i < count; i++) {
                    migration->getDestinations()[lo + i] = splitTable2;
                }
            }
            // Transfer the source tables to the new migration.
            for (ureg i = 0; i < m_numSources; i++) {
                migration->getSources()[i].table = getSources()[i].table;
                migration->getSources()[i].sourceIndex.storeNonatomic(0);
                getSources()[i].table = NULL;
            }
            migration->getSources()[m_numSources].table = overflowedTable;
            migration->getSources()[m_numSources].sourceIndex.storeNonatomic(0);
            // Calculate the total number of migration units to move.
            ureg unitsRemaining = 0;
            for (ureg s = 0; s < migration->m_numSources; s++)
                unitsRemaining += migration->getSources()[s].table->getNumMigrationUnits();
            migration->m_unitsRemaining.storeNonatomic(unitsRemaining);
            // Publish the new migration.
            origTable->jobCoordinator.storeRelease(migration);
        }
    }

    // We're done with this TableMigration. Queue it for GC.
    DefaultQSBR.enqueue(&TableMigration::destroy, this);
}

template <class Map>
void Grampa<Map>::FlatTreeMigration::run() {
    // Conditionally increment the shared # of workers.
    ureg probeStatus = m_workerStatus.load(turf::Relaxed);
    do {
        if (probeStatus & 1) {
            // The end flag is already set, so do nothing.
            TURF_TRACE(Grampa, 36, "[FlatTreeMigration::run] already ended", uptr(this), 0);
            return;
        }
    } while (!m_workerStatus.compareExchangeWeak(probeStatus, probeStatus + 2, turf::Relaxed, turf::Relaxed));
    // The # of workers has been incremented, and the end flag is clear.
    TURF_ASSERT((probeStatus & 1) == 0);

    // Loop over all migration units.
    ureg srcSize = (Hash(-1) >> m_source->safeShift) + 1;
    // FIXME: Support migration to smaller flattrees
    TURF_ASSERT(m_destination->safeShift < m_source->safeShift);
    ureg repeat = ureg(1) << (m_source->safeShift - m_destination->safeShift);
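    // eg. When the flattree doubles (safeShift decreases by 1), repeat == 2: each
    // source slot's table pointer is copied into two adjacent destination slots.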
    for (;;) {
        ureg srcStart = m_sourceIndex.fetchAdd(FlatTreeMigrationUnitSize, turf::Relaxed);
        if (srcStart >= srcSize)
            break; // No more migration units in this flattree.
        // Migrate this range.
        ureg srcEnd = turf::util::min(srcSize, srcStart + FlatTreeMigrationUnitSize);
        ureg dst = srcStart * repeat;
        for (ureg src = srcStart; src < srcEnd; src++) {
            // Pointers in the source flattree can be changed at any time due to concurrent subtree publishing,
            // so we need to exchange them with Redirect markers.
            Table* t = m_source->getTables()[src].exchange((Table*) RedirectFlatTree, turf::Relaxed);
            TURF_ASSERT(uptr(t) != RedirectFlatTree);
            for (ureg r = repeat; r > 0; r--) {
                m_destination->getTables()[dst].storeNonatomic(t);
                dst++;
            }
        }
        // Decrement m_unitsRemaining.
        sreg prevRemaining = m_unitsRemaining.fetchSub(1, turf::Relaxed);
        if (prevRemaining == 1) {
            // *** SUCCESSFUL MIGRATION ***
            // That was the last chunk to migrate.
            m_workerStatus.fetchOr(1, turf::Relaxed);
            break;
        }
    }

    // Decrement the shared # of workers.
    probeStatus = m_workerStatus.fetchSub(2, turf::AcquireRelease); // AcquireRelease makes all previous writes visible to the last worker thread.
    if (probeStatus >= 4) {
        // There are other workers remaining. Return here so that only the very last worker will proceed.
        return;
    }

    // We're the very last worker thread.
    // Publish the new flattree.
    TURF_ASSERT(probeStatus == 3); // The end flag must be set
    m_map.publishFlatTreeMigration(this);
    m_completed.signal();

    // We're done with this FlatTreeMigration. Queue it for GC.
    DefaultQSBR.enqueue(&FlatTreeMigration::destroy, this);
}

} // namespace details
} // namespace junction

#endif // JUNCTION_DETAILS_GRAMPA_H