/*------------------------------------------------------------------------
  Junction: Concurrent data structures in C++
  Copyright (c) 2016 Jeff Preshing

  Distributed under the Simplified BSD License.
  Original location: https://github.com/preshing/junction

  This software is distributed WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  See the LICENSE file for more information.
------------------------------------------------------------------------*/

#ifndef JUNCTION_DETAILS_GRAMPA_H
#define JUNCTION_DETAILS_GRAMPA_H

#include <junction/Core.h>
#include <turf/Atomic.h>
#include <junction/striped/Mutex.h>
#include <junction/striped/ManualResetEvent.h>
#include <turf/Util.h>
#include <junction/MapTraits.h>
#include <turf/Trace.h>
#include <turf/Heap.h>
#include <junction/SimpleJobCoordinator.h>
#include <junction/QSBR.h>
#include <memory.h>

namespace junction {
namespace details {

#if JUNCTION_TRACK_GRAMPA_STATS
struct GrampaCounter {
    turf::Atomic<ureg> total;
    turf::Atomic<sreg> current;

    void increment() {
        total.fetchAdd(1, turf::Relaxed);
        current.fetchAdd(1, turf::Relaxed);
    }

    void decrement() {
        current.fetchSub(1, turf::Relaxed);
    }
};

struct GrampaStats {
    GrampaCounter numTables;
    GrampaCounter numTableMigrations;
    GrampaCounter numFlatTrees;
    GrampaCounter numFlatTreeMigrations;

    static GrampaStats Instance;    // Zero-initialized
};
#endif
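
// Example: when JUNCTION_TRACK_GRAMPA_STATS is defined, the counters above can be sampled with
// relaxed loads, e.g.
//     ureg tablesEverCreated = GrampaStats::Instance.numTables.total.load(turf::Relaxed);
//     sreg tablesAlive = GrampaStats::Instance.numTables.current.load(turf::Relaxed);
// The values are only approximate under concurrency, since all updates use relaxed atomics.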

TURF_TRACE_DECLARE(Grampa, 37)

template<class Map>
struct Grampa {
    typedef typename Map::Hash Hash;
    typedef typename Map::Value Value;
    typedef typename Map::KeyTraits KeyTraits;
    typedef typename Map::ValueTraits ValueTraits;

    static const ureg RedirectFlatTree = 1;
    static const ureg InitialSize = 8;
    static const ureg TableMigrationUnitSize = 32;
    static const ureg FlatTreeMigrationUnitSize = 32;
    static const ureg LinearSearchLimit = 128;
    static const ureg CellsInUseSample = LinearSearchLimit;
    TURF_STATIC_ASSERT(LinearSearchLimit > 0 && LinearSearchLimit < 256); // Must fit in CellGroup::links
    TURF_STATIC_ASSERT(CellsInUseSample > 0 && CellsInUseSample <= LinearSearchLimit); // Limit sample to failed search chain

    static const ureg MinTableSize = 8;
    static const ureg LeafSizeBits = 10;
    static const ureg LeafSize = (ureg(1) << LeafSizeBits);

    struct Cell {
        // If value == Redirect, threads participate in the jobCoordinator.
        turf::Atomic<Hash> hash;
        turf::Atomic<Value> value;
    };

    struct CellGroup {
        // Every cell in the table actually represents a bucket of cells, all linked together in a probe chain.
        // Each cell in the probe chain is located within the table itself.
        // "deltas" determines the index of the next cell in the probe chain.
        // The first cell in the chain is the one that was hashed. It may or may not actually belong in the bucket.
        // The "second" cell in the chain is given by deltas 0 - 3. It's guaranteed to belong in the bucket.
        // All subsequent cells in the chain are given by deltas 4 - 7. They are also guaranteed to belong in the bucket.
        turf::Atomic<u8> deltas[8];
        Cell cells[4];
    };
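
    // Worked example of the delta links (used by find() and insert() below): for a hash h in a table
    // with sizeMask == 15, the chain starts at idx = h & 15, which lives in CellGroup (idx >> 2) at
    // slot (idx & 3). The link to the second cell of the bucket is deltas[idx & 3]; every later link is
    // read from deltas[(idx & 3) + 4] of whichever group the chain is currently visiting. Each link is a
    // small forward offset: nextIdx = (idx + delta) & sizeMask, and a delta of 0 terminates the chain.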

    struct Table {
        // unsafeRangeShift determines how many slots are occupied by this Table in the flattree.
        // The number of hashes covered by this table is (1 << shift).
        // e.g. If the entire map is stored in a single table, then Table::shift == HASH_BITS.
        // If the entire map is stored in two tables, then Table::shift == (HASH_BITS - 1) for each table.
        // FlatTree::shift is always <= Table::shift for all the tables it contains.
        const ureg sizeMask;                            // a power of two minus one
        const Hash baseHash;
        const ureg unsafeRangeShift;
        junction::striped::ManualResetEvent isPublished;    // To prevent publishing a subtree before its parent is published (happened in testing)
        junction::striped::Mutex mutex;                     // used for double-checked locking (DCLI) when creating the TableMigration (stored in the jobCoordinator)
        SimpleJobCoordinator jobCoordinator;            // makes all blocked threads participate in the migration

        Table(ureg sizeMask, Hash baseHash, ureg unsafeRangeShift) : sizeMask(sizeMask), baseHash(baseHash), unsafeRangeShift(unsafeRangeShift) {
        }

        static Table* create(ureg tableSize, ureg baseHash, ureg unsafeShift) {
            TURF_ASSERT(turf::util::isPowerOf2(tableSize));
            TURF_ASSERT(unsafeShift > 0 && unsafeShift <= sizeof(Hash) * 8);
            TURF_ASSERT(tableSize >= 4);
            ureg numGroups = tableSize >> 2;
            Table* table = (Table*) TURF_HEAP.alloc(sizeof(Table) + sizeof(CellGroup) * numGroups);
            new(table) Table(tableSize - 1, baseHash, (u8) unsafeShift);
            for (ureg i = 0; i < numGroups; i++) {
                CellGroup* group = table->getCellGroups() + i;
                for (ureg j = 0; j < 4; j++) {
                    group->deltas[j].storeNonatomic(0);
                    group->deltas[j + 4].storeNonatomic(0);
                    group->cells[j].hash.storeNonatomic(KeyTraits::NullHash);
                    group->cells[j].value.storeNonatomic(Value(ValueTraits::NullValue));
                }
            }
#if JUNCTION_TRACK_GRAMPA_STATS
            GrampaStats::Instance.numTables.increment();
#endif
            return table;
        }

        void destroy() {
#if JUNCTION_TRACK_GRAMPA_STATS
            GrampaStats::Instance.numTables.decrement();
#endif
            this->Table::~Table();
            TURF_HEAP.free(this);
        }

        CellGroup* getCellGroups() const {
            return (CellGroup*) (this + 1);
        }

        ureg getNumMigrationUnits() const {
            return sizeMask / TableMigrationUnitSize + 1;
        }
    };
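
    // Memory layout note: Table::create() allocates the header and its CellGroup array in one block, so
    // getCellGroups() simply returns the memory immediately after the header. A table with tableSize
    // cells therefore occupies sizeof(Table) + (tableSize / 4) * sizeof(CellGroup) bytes.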

    class TableMigration : public SimpleJobCoordinator::Job {
    public:
        struct Source {
            Table* table;
            turf::Atomic<ureg> sourceIndex;
        };

        Map& m_map;
        Hash m_baseHash;                           // The lowest possible hash value in this subtree; determines index in flattree.
        // If m_numDestinations == 1, m_safeShift == 0.
        // Otherwise, m_safeShift tells (indirectly) the size of the flattree in which our subtree would exactly fit: 1 << (HASH_BITS - m_safeShift).
        // This ensures that m_safeShift is always less than sizeof(Hash) * 8, so that shifting by m_safeShift is not undefined behavior.
        // To determine the subtree index for a hash during migration, we use: (hash >> m_safeShift) & (m_numDestinations - 1)
        // A mask is used since we are only migrating a subtree -- not necessarily the entire map.
        ureg m_safeShift;
        turf::Atomic<ureg> m_workerStatus;          // number of workers + end flag
        turf::Atomic<sreg> m_overflowTableIndex;
        turf::Atomic<sreg> m_unitsRemaining;
        ureg m_numSources;
        ureg m_numDestinations;                 // The size of the subtree being created. Some table pointers may be repeated.

        TableMigration(Map& map) : m_map(map) {
        }

        static TableMigration* create(Map& map, ureg numSources, ureg numDestinations) {
            TableMigration* migration = (TableMigration*) TURF_HEAP.alloc(sizeof(TableMigration) + sizeof(TableMigration::Source) * numSources + sizeof(Table*) * numDestinations);
            new(migration) TableMigration(map);
            migration->m_workerStatus.storeNonatomic(0);
            migration->m_overflowTableIndex.storeNonatomic(-1);
            migration->m_unitsRemaining.storeNonatomic(0);
            migration->m_numSources = numSources;
            migration->m_numDestinations = numDestinations;
            // Caller is responsible for filling in source & destination pointers
#if JUNCTION_TRACK_GRAMPA_STATS
            GrampaStats::Instance.numTableMigrations.increment();
#endif
            return migration;
        }

        virtual ~TableMigration() TURF_OVERRIDE {
        }

        void destroy() {
#if JUNCTION_TRACK_GRAMPA_STATS
            GrampaStats::Instance.numTableMigrations.decrement();
#endif
            // Destroy all source tables.
            for (ureg i = 0; i < m_numSources; i++)
                if (getSources()[i].table)
                    getSources()[i].table->destroy();
            // Delete the migration object itself.
            this->TableMigration::~TableMigration();
            TURF_HEAP.free(this);
        }

        ureg getUnsafeShift() const {
            return m_safeShift ? m_safeShift : (sizeof(Hash) * 8);
        }

        Source* getSources() const {
            return (Source*) (this + 1);
        }

        Table** getDestinations() const {
            return (Table**) (getSources() + m_numSources);
        }

        sreg migrateRange(Table* srcTable, ureg startIdx);
        virtual void run() TURF_OVERRIDE;
    };
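
    // Memory layout note: like Table, a TableMigration is allocated in one block. The Source array
    // (m_numSources entries) immediately follows the object, and the destination Table* array
    // (m_numDestinations entries) follows the sources; getSources()/getDestinations() point into that
    // trailing storage. create() leaves both arrays uninitialized for the caller to fill in.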

    class FlatTreeMigration;

    struct FlatTree {
        // The size of the flattree is 1 << (HASH_BITS - safeShift).
        // Or, stated another way, (Hash(-1) >> shift) + 1.
        // To determine the flattree index for a given hash, we simply use: (hash >> shift)
        // Smaller shift == more significant bits used as an index == bigger flattree.
        // For example, the simplest flattree has only two entries, and only the most significant
        // bit of each hash is used as the flattree index. In that case, shift == HASH_BITS - 1.
        // Each time the flattree doubles in size, shift decreases by 1.
        const ureg safeShift;
        junction::striped::Mutex mutex;
        FlatTreeMigration* migration;   // Protected by mutex

        FlatTree(ureg safeShift) : safeShift(safeShift), migration(NULL) {
            // A FlatTree always has at least two tables, so the shift is always safe.
            TURF_ASSERT(safeShift < sizeof(Hash) * 8);
        }

        static FlatTree* create(ureg safeShift) {
            // A flattree always has at least two tables, so the shift is always safe.
            TURF_ASSERT(safeShift < sizeof(Hash) * 8);
            ureg numLeaves = (Hash(-1) >> safeShift) + 1;
            FlatTree* flatTree = (FlatTree*) TURF_HEAP.alloc(sizeof(FlatTree) + sizeof(turf::Atomic<Table*>) * numLeaves);
            new(flatTree) FlatTree(safeShift);
            // Caller will initialize flatTree->getTables()
#if JUNCTION_TRACK_GRAMPA_STATS
            GrampaStats::Instance.numFlatTrees.increment();
#endif
            return flatTree;
        }

        void destroy() {
#if JUNCTION_TRACK_GRAMPA_STATS
            GrampaStats::Instance.numFlatTrees.decrement();
#endif
            this->FlatTree::~FlatTree();
            TURF_HEAP.free(this);
        }

        turf::Atomic<Table*>* getTables() const {
            return (turf::Atomic<Table*>*) (this + 1);
        }

        ureg getSize() const {
            return (Hash(-1) >> safeShift) + 1;
        }

        ureg getNumMigrationUnits() const {
            ureg sizeMask = Hash(-1) >> safeShift;
            return sizeMask / FlatTreeMigrationUnitSize + 1;
        }
    };
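
    // Worked example: with a 64-bit Hash and safeShift == 62, the flattree has (Hash(-1) >> 62) + 1 == 4
    // leaves, and a hash selects its leaf via (hash >> 62), i.e. its top two bits. Doubling the flattree
    // means decrementing safeShift to 61, which doubles the leaf count to 8 and uses the top three bits.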

    class FlatTreeMigration : public SimpleJobCoordinator::Job {
    public:
        Map& m_map;
        FlatTree* m_source;
        FlatTree* m_destination;
        turf::Atomic<ureg> m_workerStatus;
        turf::Atomic<ureg> m_sourceIndex;
        turf::Atomic<sreg> m_unitsRemaining;
        junction::striped::ManualResetEvent m_completed;

        FlatTreeMigration(Map& map, FlatTree* flatTree, ureg shift) : m_map(map) {
            m_source = flatTree;
            m_destination = FlatTree::create(shift);
            m_workerStatus.storeNonatomic(0);
            m_sourceIndex.storeNonatomic(0);
            m_unitsRemaining.storeNonatomic(flatTree->getNumMigrationUnits());
#if JUNCTION_TRACK_GRAMPA_STATS
            GrampaStats::Instance.numFlatTreeMigrations.increment();
#endif
        }

        virtual ~FlatTreeMigration() TURF_OVERRIDE {
#if JUNCTION_TRACK_GRAMPA_STATS
            GrampaStats::Instance.numFlatTreeMigrations.decrement();
#endif
            // Delete source flattree.
            m_source->destroy();
        }

        void destroy() {
            delete this;
        }

        virtual void run() TURF_OVERRIDE;
    };

    static void garbageCollectTable(Table* table) {
        TURF_ASSERT(table);
        DefaultQSBR.enqueue(&Table::destroy, table);
    }

    static void garbageCollectFlatTree(FlatTree* flatTree) {
        TURF_ASSERT(flatTree);
        DefaultQSBR.enqueue(&FlatTree::destroy, flatTree);
    }

    static Cell* find(Hash hash, Table* table, ureg sizeMask) {
        TURF_TRACE(Grampa, 0, "[find] called", uptr(table), hash);
        TURF_ASSERT(table);
        TURF_ASSERT(hash != KeyTraits::NullHash);
        // Optimistically check hashed cell even though it might belong to another bucket
        ureg idx = hash & sizeMask;
        CellGroup* group = table->getCellGroups() + (idx >> 2);
        Cell* cell = group->cells + (idx & 3);
        Hash probeHash = cell->hash.load(turf::Relaxed);
        if (probeHash == hash) {
            TURF_TRACE(Grampa, 1, "[find] found existing cell optimistically", uptr(table), idx);
            return cell;
        } else if (probeHash == KeyTraits::NullHash) {
            return cell = NULL;
        }
        // Follow probe chain for our bucket
        u8 delta = group->deltas[idx & 3].load(turf::Relaxed);
        while (delta) {
            idx = (idx + delta) & sizeMask;
            group = table->getCellGroups() + (idx >> 2);
            cell = group->cells + (idx & 3);
            Hash probeHash = cell->hash.load(turf::Relaxed);
            // Note: probeHash might actually be NULL due to memory reordering of a concurrent insert,
            // but we don't check for it. We just follow the probe chain.
            if (probeHash == hash) {
                TURF_TRACE(Grampa, 2, "[find] found existing cell", uptr(table), idx);
                return cell;
            }
            delta = group->deltas[(idx & 3) + 4].load(turf::Relaxed);
        }
        // End of probe chain, not found
        return NULL;
    }

    // FIXME: Possible optimization: Dedicated insert for migration? It wouldn't check for InsertResult_AlreadyFound.
    enum InsertResult {
        InsertResult_AlreadyFound,
        InsertResult_InsertedNew,
        InsertResult_Overflow
    };
    static InsertResult insert(Hash hash, Table* table, ureg sizeMask, Cell*& cell, ureg& overflowIdx) {
        TURF_TRACE(Grampa, 3, "[insert] called", uptr(table), hash);
        TURF_ASSERT(table);
        TURF_ASSERT(hash != KeyTraits::NullHash);
        ureg idx = hash;

        // Check hashed cell first, though it may not even belong to the bucket.
        CellGroup* group = table->getCellGroups() + ((idx & sizeMask) >> 2);
        cell = group->cells + (idx & 3);
        Hash probeHash = cell->hash.load(turf::Relaxed);
        if (probeHash == KeyTraits::NullHash) {
            if (cell->hash.compareExchangeStrong(probeHash, hash, turf::Relaxed)) {
                TURF_TRACE(Grampa, 4, "[insert] reserved first cell", uptr(table), idx);
                // There are no links to set. We're done.
                return InsertResult_InsertedNew;
            } else {
                TURF_TRACE(Grampa, 5, "[insert] race to reserve first cell", uptr(table), idx);
                // Fall through to check if it was the same hash...
            }
        }
        if (probeHash == hash) {
            TURF_TRACE(Grampa, 6, "[insert] found in first cell", uptr(table), idx);
            return InsertResult_AlreadyFound;
        }

        // Follow the link chain for this bucket.
        ureg maxIdx = idx + sizeMask;
        ureg linkLevel = 0;
        turf::Atomic<u8>* prevLink;
        for (;;) {
        followLink:
            prevLink = group->deltas + ((idx & 3) + linkLevel);
            linkLevel = 4;
            u8 probeDelta = prevLink->load(turf::Relaxed);
            if (probeDelta) {
                idx += probeDelta;
                // Check the hash for this cell.
                group = table->getCellGroups() + ((idx & sizeMask) >> 2);
                cell = group->cells + (idx & 3);
                probeHash = cell->hash.load(turf::Relaxed);
                if (probeHash == KeyTraits::NullHash) {
                    // Cell was linked, but hash is not visible yet.
                    // We could avoid this case (and guarantee it's visible) using acquire & release, but instead,
                    // just poll until it becomes visible.
                    TURF_TRACE(Grampa, 7, "[insert] race to read hash", uptr(table), idx);
                    do {
                        probeHash = cell->hash.load(turf::Acquire);
                    } while (probeHash == KeyTraits::NullHash);
                }
                TURF_ASSERT(((probeHash ^ hash) & sizeMask) == 0);   // Only hashes in same bucket can be linked
                if (probeHash == hash) {
                    TURF_TRACE(Grampa, 8, "[insert] found in probe chain", uptr(table), idx);
                    return InsertResult_AlreadyFound;
                }
            } else {
                // Reached the end of the link chain for this bucket.
                // Switch to linear probing until we reserve a new cell or find a late-arriving cell in the same bucket.
                ureg prevLinkIdx = idx;
                TURF_ASSERT(sreg(maxIdx - idx) >= 0);    // Nobody would have linked an idx that's out of range.
                ureg linearProbesRemaining = turf::util::min(maxIdx - idx, LinearSearchLimit);
                while (linearProbesRemaining-- > 0) {
                    idx++;
                    group = table->getCellGroups() + ((idx & sizeMask) >> 2);
                    cell = group->cells + (idx & 3);
                    probeHash = cell->hash.load(turf::Relaxed);
                    if (probeHash == KeyTraits::NullHash) {
                        // It's an empty cell. Try to reserve it.
                        if (cell->hash.compareExchangeStrong(probeHash, hash, turf::Relaxed)) {
                            // Success. We've reserved the cell. Link it to previous cell in same bucket.
                            TURF_TRACE(Grampa, 9, "[insert] reserved cell", uptr(table), idx);
                            TURF_ASSERT(probeDelta == 0);
                            u8 desiredDelta = idx - prevLinkIdx;
                            // Note: another thread could actually set the link on our behalf (see below).
#if TURF_WITH_ASSERTS
                            probeDelta = prevLink->exchange(desiredDelta, turf::Relaxed);
                            TURF_ASSERT(probeDelta == 0 || probeDelta == desiredDelta);
#else
                            prevLink->store(desiredDelta, turf::Relaxed);
#endif
                            return InsertResult_InsertedNew;
                        } else {
                            TURF_TRACE(Grampa, 10, "[insert] race to reserve cell", uptr(table), idx);
                            // Fall through to check if it's the same hash...
                        }
                    }
                    Hash x = (probeHash ^ hash);
                    // Check for same hash.
                    if (!x) {
                        TURF_TRACE(Grampa, 11, "[insert] found outside probe chain", uptr(table), idx);
                        return InsertResult_AlreadyFound;
                    }
                    // Check for same bucket.
                    if ((x & sizeMask) == 0) {
                        TURF_TRACE(Grampa, 12, "[insert] found late-arriving cell in same bucket", uptr(table), idx);
                        // Attempt to set the link on behalf of the late-arriving cell.
                        // This is usually redundant, but if we don't attempt to set the late-arriving cell's link here,
                        // there's no guarantee that our own link chain will be well-formed by the time this function returns.
                        // (Indeed, subsequent lookups sometimes failed during testing, for this exact reason.)
                        u8 desiredDelta = idx - prevLinkIdx;
#if TURF_WITH_ASSERTS
                        probeDelta = prevLink->exchange(desiredDelta, turf::Relaxed);
                        TURF_ASSERT(probeDelta == 0 || probeDelta == desiredDelta);
                        if (probeDelta == 0)
                            TURF_TRACE(Grampa, 13, "[insert] set link on behalf of late-arriving cell", uptr(table), idx);
#else
                        prevLink->store(desiredDelta, turf::Relaxed);
#endif
                        goto followLink;  // Try to follow link chain for the bucket again.
                    }
                    // Continue linear search...
                }
                // Table is too full to insert.
                overflowIdx = idx + 1;
                TURF_TRACE(Grampa, 14, "[insert] overflow", uptr(table), overflowIdx);
                return InsertResult_Overflow;
            }
        }
    }
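
    // Summary of insert() outcomes: InsertResult_InsertedNew means this call reserved a fresh cell and
    // linked it into the bucket's probe chain; InsertResult_AlreadyFound means a cell for this hash
    // already exists (cell points at it); InsertResult_Overflow means no free cell was found within
    // LinearSearchLimit probes, and overflowIdx records where the search ended so that
    // beginTableMigration() can sample cell usage just below that point.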

    static void beginTableMigrationToSize(Map& map, Table* table, ureg nextTableSize, ureg splitShift) {
        // Create new migration by DCLI.
        TURF_TRACE(Grampa, 15, "[beginTableMigrationToSize] called", 0, 0);
        SimpleJobCoordinator::Job* job = table->jobCoordinator.loadConsume();
        if (job) {
            TURF_TRACE(Grampa, 16, "[beginTableMigrationToSize] new migration already exists", 0, 0);
        } else {
            turf::LockGuard<junction::striped::Mutex> guard(table->mutex);
            job = table->jobCoordinator.loadConsume();  // Non-atomic would be sufficient, but that's OK.
            if (job) {
                TURF_TRACE(Grampa, 17, "[beginTableMigrationToSize] new migration already exists (double-checked)", 0, 0);
            } else {
                // Create new migration.
                ureg numDestinations = ureg(1) << splitShift;
                TableMigration* migration = TableMigration::create(map, 1, numDestinations);
                migration->m_baseHash = table->baseHash;
                ureg migrationShift = table->unsafeRangeShift - splitShift;
                migration->m_safeShift = (migrationShift < sizeof(Hash) * 8) ? migrationShift : 0;
                migration->m_unitsRemaining.storeNonatomic(table->getNumMigrationUnits());
                migration->getSources()[0].table = table;
                migration->getSources()[0].sourceIndex.storeNonatomic(0);
                ureg subRangeShift = table->unsafeRangeShift - splitShift;      // subRangeShift is also "unsafe" (possibly represents entire range)
                ureg hashOffsetDelta = subRangeShift < (sizeof(Hash) * 8) ? (ureg(1) << subRangeShift) : 0;
                for (ureg i = 0; i < numDestinations; i++) {
                    migration->getDestinations()[i] = Table::create(nextTableSize, table->baseHash + hashOffsetDelta * i, subRangeShift);
                }
                // Publish the new migration.
                table->jobCoordinator.storeRelease(migration);
            }
        }
    }
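
    // Worked example for beginTableMigrationToSize(): if a single table covers the entire 64-bit hash
    // space (unsafeRangeShift == 64) and splitShift == 3, then numDestinations == 8, subRangeShift == 61,
    // and destination i is created with baseHash == table->baseHash + i * 2^61. migrationShift == 61 is a
    // safe shift, so m_safeShift == 61 and migrateRange() picks a destination via (hash >> 61) & 7.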

    static void beginTableMigration(Map& map, Table* table, ureg overflowIdx) {
        // Estimate number of cells in use based on a small sample.
        ureg sizeMask = table->sizeMask;
        ureg idx = overflowIdx - CellsInUseSample;
        ureg inUseCells = 0;
        for (ureg linearProbesRemaining = CellsInUseSample; linearProbesRemaining > 0; linearProbesRemaining--) {
            CellGroup* group = table->getCellGroups() + ((idx & sizeMask) >> 2);
            Cell* cell = group->cells + (idx & 3);
            Value value = cell->value.load(turf::Relaxed);
            if (value == Value(ValueTraits::Redirect)) {
                // Another thread kicked off the jobCoordinator. The caller will participate upon return.
                TURF_TRACE(Grampa, 18, "[beginTableMigration] redirected while determining table size", 0, 0);
                return;
            }
            if (value != Value(ValueTraits::NullValue))
                inUseCells++;
            idx++;
        }
        float inUseRatio = float(inUseCells) / CellsInUseSample;
        float estimatedInUse = (sizeMask + 1) * inUseRatio;
        ureg nextTableSize = turf::util::roundUpPowerOf2(ureg(estimatedInUse * 2));
        // FIXME: Support migrating to smaller tables.
        nextTableSize = turf::util::max(nextTableSize, sizeMask + 1);
        // Split into multiple tables if necessary.
        ureg splitShift = 0;
        while (nextTableSize > LeafSize) {
            splitShift++;
            nextTableSize >>= 1;
        }
        beginTableMigrationToSize(map, table, nextTableSize, splitShift);
    }
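
    // Worked example for the sizing logic above: if the 128-cell sample finds 96 cells in use in a
    // 4096-cell table, then inUseRatio == 0.75, estimatedInUse == 3072, and the doubled size rounds up
    // to 8192. Since 8192 > LeafSize (1024), the loop halves it three times, so the call becomes
    // beginTableMigrationToSize(map, table, 1024, 3): a migration into 8 leaf tables of 1024 cells.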

    static FlatTreeMigration* createFlatTreeMigration(Map& map, FlatTree* flatTree, ureg shift) {
        turf::LockGuard<junction::striped::Mutex> guard(flatTree->mutex);
        if (!flatTree->migration) {
            flatTree->migration = new FlatTreeMigration(map, flatTree, shift);
        }
        return flatTree->migration;
    }

    static FlatTreeMigration* getExistingFlatTreeMigration(FlatTree* flatTree) {
        turf::LockGuard<junction::striped::Mutex> guard(flatTree->mutex);
        TURF_ASSERT(flatTree->migration);     // Must already exist!
        return flatTree->migration;
    }
}; // Grampa

// Return index of the destination table that overflowed, or -1 if none
template<class Map>
sreg Grampa<Map>::TableMigration::migrateRange(Table* srcTable, ureg startIdx) {
    ureg srcSizeMask = srcTable->sizeMask;
    ureg safeShift = m_safeShift;
    Table** dstLeafs = getDestinations();
    ureg dstLeafMask = m_numDestinations - 1;
    ureg endIdx = turf::util::min(startIdx + TableMigrationUnitSize, srcSizeMask + 1);
    // Iterate over source range.
    for (ureg srcIdx = startIdx; srcIdx < endIdx; srcIdx++) {
        CellGroup* srcGroup = srcTable->getCellGroups() + ((srcIdx & srcSizeMask) >> 2);
        Cell* srcCell = srcGroup->cells + (srcIdx & 3);
        Hash srcHash;
        Value srcValue;
        // Fetch the srcHash and srcValue.
        for (;;) {
            srcHash = srcCell->hash.load(turf::Relaxed);
            if (srcHash == KeyTraits::NullHash) {
                // An unused cell. Try to put a Redirect marker in its value.
                srcValue = srcCell->value.compareExchange(Value(ValueTraits::NullValue), Value(ValueTraits::Redirect), turf::Relaxed);
                if (srcValue == Value(ValueTraits::Redirect)) {
                    // srcValue is already marked Redirect due to previous incomplete migration.
                    TURF_TRACE(Grampa, 19, "[migrateRange] empty cell already redirected", uptr(srcTable), srcIdx);
                    break;
                }
                if (srcValue == Value(ValueTraits::NullValue))
                    break;  // Redirect has been placed. Break inner loop, continue outer loop.
                TURF_TRACE(Grampa, 20, "[migrateRange] race to insert key", uptr(srcTable), srcIdx);
                // Otherwise, somebody just claimed the cell. Read srcHash again...
            } else {
                // Check for deleted/uninitialized value.
                srcValue = srcCell->value.load(turf::Relaxed);
                if (srcValue == Value(ValueTraits::NullValue)) {
                    // Try to put a Redirect marker.
                    if (srcCell->value.compareExchangeStrong(srcValue, Value(ValueTraits::Redirect), turf::Relaxed))
                        break;  // Redirect has been placed. Break inner loop, continue outer loop.
                    TURF_TRACE(Grampa, 21, "[migrateRange] race to insert value", uptr(srcTable), srcIdx);
                    if (srcValue == Value(ValueTraits::Redirect)) {
                        // FIXME: I don't think this will happen. Investigate & change to assert
                        TURF_TRACE(Grampa, 22, "[migrateRange] race inserted Redirect", uptr(srcTable), srcIdx);
                        break;
                    }
                } else if (srcValue == Value(ValueTraits::Redirect)) {
                    // srcValue is already marked Redirect due to previous incomplete migration.
                    TURF_TRACE(Grampa, 23, "[migrateRange] in-use cell already redirected", uptr(srcTable), srcIdx);
                    break;
                }

                // We've got a key/value pair to migrate.
                // Reserve a destination cell in dstTable.
                TURF_ASSERT(srcHash != KeyTraits::NullHash);
                TURF_ASSERT(srcValue != Value(ValueTraits::NullValue));
                TURF_ASSERT(srcValue != Value(ValueTraits::Redirect));
                ureg destLeafIndex = (srcHash >> safeShift) & dstLeafMask;
                Table* dstLeaf = dstLeafs[destLeafIndex];
                Cell* dstCell;
                ureg overflowIdx;
                InsertResult result = insert(srcHash, dstLeaf, dstLeaf->sizeMask, dstCell, overflowIdx);
                // During migration, a hash can only exist in one place among all the source tables,
                // and it is only migrated by one thread. Therefore, the hash will never already exist
                // in the destination table:
                TURF_ASSERT(result != InsertResult_AlreadyFound);
                if (result == InsertResult_Overflow) {
                    // Destination overflow.
                    // This can happen for several reasons. For example, the source table could have
                    // consisted almost entirely of deleted cells when it overflowed, resulting in a small
                    // destination table size, but then another thread could re-insert all the same hashes
                    // before the migration completed.
                    // Caller will cancel the current migration and begin a new one.
                    return destLeafIndex;
                }
                // Migrate the old value to the new cell.
                for (;;) {
                    // Copy srcValue to the destination.
                    dstCell->value.store(srcValue, turf::Relaxed);
                    // Try to place a Redirect marker in srcValue.
                    Value doubleCheckedSrcValue = srcCell->value.compareExchange(srcValue, Value(ValueTraits::Redirect), turf::Relaxed);
                    TURF_ASSERT(doubleCheckedSrcValue != Value(ValueTraits::Redirect)); // Only one thread can redirect a cell at a time.
                    if (doubleCheckedSrcValue == srcValue) {
                        // No racing writes to the src. We've successfully placed the Redirect marker.
                        // srcValue was non-NULL when we decided to migrate it, but it may have changed to NULL
                        // by a late-arriving erase.
                        if (srcValue == Value(ValueTraits::NullValue))
                            TURF_TRACE(Grampa, 24, "[migrateRange] racing update was erase", uptr(srcTable), srcIdx);
                        break;
                    }
                    // There was a late-arriving write (or erase) to the src. Migrate the new value and try again.
                    TURF_TRACE(Grampa, 25, "[migrateRange] race to update migrated value", uptr(srcTable), srcIdx);
                    srcValue = doubleCheckedSrcValue;
                }
                // Cell successfully migrated. Proceed to next source cell.
                break;
            }
        }
    }
    // Range has been migrated successfully.
    return -1;
}
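
// Note on migrateRange(): every source cell it visits is left holding the Redirect value, whether the
// cell was empty, deleted, or migrated. Writers that subsequently observe Redirect in the source table
// know the cell has moved and participate in the migration via the table's jobCoordinator (see the
// comment on Cell::value above).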

template <class Map>
void Grampa<Map>::TableMigration::run() {
    // Conditionally increment the shared # of workers.
    ureg probeStatus = m_workerStatus.load(turf::Relaxed);
    do {
        if (probeStatus & 1) {
            // End flag is already set, so do nothing.
            TURF_TRACE(Grampa, 26, "[TableMigration::run] already ended", uptr(this), 0);
            return;
        }
    } while (!m_workerStatus.compareExchangeWeak(probeStatus, probeStatus + 2, turf::Relaxed, turf::Relaxed));
    // # of workers has been incremented, and the end flag is clear.
    TURF_ASSERT((probeStatus & 1) == 0);
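    // m_workerStatus encoding: bit 0 is the "end" flag; the remaining bits hold twice the number of
    // active workers. Entering run() adds 2 (one worker) and finishing subtracts 2, so a worker whose
    // final fetchSub(2) returns 3 (one worker + end flag set) is the last one out, and it alone
    // performs the publish / overflow-recovery step below.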

    // Iterate over all source tables.
    Source* sources = getSources();
    for (ureg s = 0; s < m_numSources; s++) {
        Source& source = sources[s];
        // Loop over all migration units in this source table.
        for (;;) {
            if (m_workerStatus.load(turf::Relaxed) & 1) {
                TURF_TRACE(Grampa, 27, "[TableMigration::run] detected end flag set", uptr(this), 0);
                goto endMigration;
            }
            ureg startIdx = source.sourceIndex.fetchAdd(TableMigrationUnitSize, turf::Relaxed);
            if (startIdx >= source.table->sizeMask + 1)
                break;   // No more migration units in this table. Try next source table.
            sreg overflowTableIndex = migrateRange(source.table, startIdx);
            if (overflowTableIndex >= 0) {
                // *** FAILED MIGRATION ***
                // TableMigration failed due to destination table overflow.
                // No other thread can declare the migration successful at this point, because *this* unit will never complete, hence m_unitsRemaining won't reach zero.
                // However, multiple threads can independently detect a failed migration at the same time.
                TURF_TRACE(Grampa, 28, "[TableMigration::run] destination overflow", uptr(source.table), uptr(startIdx));
                // The reason we store overflowTableIndex in a shared variable is because we must flush all the worker threads before
                // we can safely deal with the overflow. Therefore, the thread that detects the failure is often different from the thread
                // that deals with it.
                // Store overflowTableIndex unconditionally; racing writes should be rare, and it doesn't matter which one wins.
                sreg oldIndex = m_overflowTableIndex.exchange(overflowTableIndex, turf::Relaxed);
                if (oldIndex >= 0)
                    TURF_TRACE(Grampa, 29, "[TableMigration::run] race to set m_overflowTableIndex", uptr(overflowTableIndex), uptr(oldIndex));
                m_workerStatus.fetchOr(1, turf::Relaxed);
                goto endMigration;
            }
            sreg prevRemaining = m_unitsRemaining.fetchSub(1, turf::Relaxed);
            TURF_ASSERT(prevRemaining > 0);
            if (prevRemaining == 1) {
                // *** SUCCESSFUL MIGRATION ***
                // That was the last chunk to migrate.
                m_workerStatus.fetchOr(1, turf::Relaxed);
                goto endMigration;
            }
        }
    }
    TURF_TRACE(Grampa, 30, "[TableMigration::run] out of migration units", uptr(this), 0);

endMigration:
    // Decrement the shared # of workers.
    probeStatus = m_workerStatus.fetchSub(2, turf::AcquireRelease);  // Ensure all modifications are visible to the thread that will publish
    if (probeStatus >= 4) {
        // There are other workers remaining. Return here so that only the very last worker will proceed.
        TURF_TRACE(Grampa, 31, "[TableMigration::run] not the last worker", uptr(this), uptr(probeStatus));
        return;
    }

    // We're the very last worker thread.
    // Perform the appropriate post-migration step depending on whether the migration succeeded or failed.
    TURF_ASSERT(probeStatus == 3);
    sreg overflowTableIndex = m_overflowTableIndex.loadNonatomic();  // No racing writes at this point
    if (overflowTableIndex < 0) {
        // The migration succeeded. This is the most likely outcome. Publish the new subtree.
        m_map.publishTableMigration(this);
        // End the jobCoordinator.
        sources[0].table->jobCoordinator.end();
    } else {
        // The migration failed due to the overflow of a destination table.
        Table* origTable = sources[0].table;
        ureg count = ureg(1) << (origTable->unsafeRangeShift - getUnsafeShift());
        ureg lo = overflowTableIndex & ~(count - 1);
        TURF_ASSERT(lo + count <= m_numDestinations);
        turf::LockGuard<junction::striped::Mutex> guard(origTable->mutex);
        SimpleJobCoordinator::Job* checkedJob = origTable->jobCoordinator.loadConsume();
        if (checkedJob != this) {
            TURF_TRACE(Grampa, 32, "[TableMigration::run] a new TableMigration was already started", uptr(origTable), uptr(checkedJob));
        } else {
            TableMigration* migration;
            Table* overflowedTable = getDestinations()[overflowTableIndex];
            if (overflowedTable->sizeMask + 1 < LeafSize) {
                // The entire map is contained in a small table.
                TURF_TRACE(Grampa, 33, "[TableMigration::run] overflow occurred in a small map", uptr(origTable), uptr(checkedJob));
                TURF_ASSERT(overflowedTable->unsafeRangeShift == sizeof(Hash) * 8);
                TURF_ASSERT(overflowedTable->baseHash == 0);
                TURF_ASSERT(m_numDestinations == 1);
                TURF_ASSERT(m_baseHash == 0);
                migration = TableMigration::create(m_map, m_numSources + 1, 1);
                migration->m_baseHash = 0;
                migration->m_safeShift = 0;
                // Double the destination table size.
                migration->getDestinations()[0] = Table::create((overflowedTable->sizeMask + 1) * 2, overflowedTable->baseHash, overflowedTable->unsafeRangeShift);
            } else {
                // The overflowed table is already the size of a leaf. Split it into two ranges.
                if (count == 1) {
                    TURF_TRACE(Grampa, 34, "[TableMigration::run] doubling subtree size after failure", uptr(origTable), uptr(checkedJob));
                    migration = TableMigration::create(m_map, m_numSources + 1, m_numDestinations * 2);
                    migration->m_baseHash = m_baseHash;
                    migration->m_safeShift = getUnsafeShift() - 1;
                    for (ureg i = 0; i < m_numDestinations; i++) {
                        migration->getDestinations()[i * 2] = getDestinations()[i];
                        migration->getDestinations()[i * 2 + 1] = getDestinations()[i];
                    }
                    count = 2;
                } else {
                    TURF_TRACE(Grampa, 35, "[TableMigration::run] keeping same subtree size after failure", uptr(origTable), uptr(checkedJob));
                    migration = TableMigration::create(m_map, m_numSources + 1, m_numDestinations);
                    migration->m_baseHash = m_baseHash;
                    migration->m_safeShift = m_safeShift;
                    memcpy(migration->getDestinations(), getDestinations(), m_numDestinations * sizeof(Table*));
                }
                Table* splitTable1 = Table::create(LeafSize, origTable->baseHash, origTable->unsafeRangeShift - 1);
                ureg i = 0;
                for (; i < count / 2; i++) {
                    migration->getDestinations()[lo + i] = splitTable1;
                }
                ureg halfNumHashes = ureg(1) << (origTable->unsafeRangeShift - 1);
                Table* splitTable2 = Table::create(LeafSize, origTable->baseHash + halfNumHashes, origTable->unsafeRangeShift - 1);
                for (; i < count; i++) {
                    migration->getDestinations()[lo + i] = splitTable2;
                }
            }
            // Transfer source tables to the new migration.
            for (ureg i = 0; i < m_numSources; i++) {
                migration->getSources()[i].table = getSources()[i].table;
                migration->getSources()[i].sourceIndex.storeNonatomic(0);
                getSources()[i].table = NULL;
            }
            migration->getSources()[m_numSources].table = overflowedTable;
            migration->getSources()[m_numSources].sourceIndex.storeNonatomic(0);
            // Calculate total number of migration units to move.
            ureg unitsRemaining = 0;
            for (ureg s = 0; s < migration->m_numSources; s++)
                unitsRemaining += migration->getSources()[s].table->getNumMigrationUnits();
            migration->m_unitsRemaining.storeNonatomic(unitsRemaining);
            // Publish the new migration.
            origTable->jobCoordinator.storeRelease(migration);
        }
    }

    // We're done with this TableMigration. Queue it for GC.
    DefaultQSBR.enqueue(&TableMigration::destroy, this);
}

template<class Map>
void Grampa<Map>::FlatTreeMigration::run() {
    // Conditionally increment the shared # of workers.
    ureg probeStatus = m_workerStatus.load(turf::Relaxed);
    do {
        if (probeStatus & 1) {
            // End flag is already set, so do nothing.
            TURF_TRACE(Grampa, 36, "[FlatTreeMigration::run] already ended", uptr(this), 0);
            return;
        }
    } while (!m_workerStatus.compareExchangeWeak(probeStatus, probeStatus + 2, turf::Relaxed, turf::Relaxed));
    // # of workers has been incremented, and the end flag is clear.
    TURF_ASSERT((probeStatus & 1) == 0);

    // Loop over all migration units
    ureg srcSize = (Hash(-1) >> m_source->safeShift) + 1;
    // FIXME: Support migration to smaller flattrees
    TURF_ASSERT(m_destination->safeShift < m_source->safeShift);
    ureg repeat = ureg(1) << (m_source->safeShift - m_destination->safeShift);
    for (;;) {
        ureg srcStart = m_sourceIndex.fetchAdd(FlatTreeMigrationUnitSize, turf::Relaxed);
        if (srcStart >= srcSize)
            break;      // No more migration units in this flattree.
        // Migrate this range
        ureg srcEnd = turf::util::min(srcSize, srcStart + FlatTreeMigrationUnitSize);
        ureg dst = srcStart * repeat;
        for (ureg src = srcStart; src < srcEnd; src++) {
            // Pointers in the source table can be changed at any time due to concurrent subtree publishing,
            // so we need to exchange them with Redirect markers.
            Table* t = m_source->getTables()[src].exchange((Table*) RedirectFlatTree, turf::Relaxed);
            TURF_ASSERT(uptr(t) != RedirectFlatTree);
            for (ureg r = repeat; r > 0; r--) {
                m_destination->getTables()[dst].storeNonatomic(t);
                dst++;
            }
        }
        // Decrement m_unitsRemaining
        sreg prevRemaining = m_unitsRemaining.fetchSub(1, turf::Relaxed);
        if (prevRemaining == 1) {
            // *** SUCCESSFUL MIGRATION ***
            // That was the last chunk to migrate.
            m_workerStatus.fetchOr(1, turf::Relaxed);
            break;
        }
    }

    // Decrement the shared # of workers.
    probeStatus = m_workerStatus.fetchSub(2, turf::AcquireRelease);    // AcquireRelease makes all previous writes visible to the last worker thread.
    if (probeStatus >= 4) {
        // There are other workers remaining. Return here so that only the very last worker will proceed.
        return;
    }

    // We're the very last worker thread.
    // Publish the new flattree.
    TURF_ASSERT(probeStatus == 3);      // End flag must be set
    m_map.publishFlatTreeMigration(this);
    m_completed.signal();

    // We're done with this FlatTreeMigration. Queue it for GC.
    DefaultQSBR.enqueue(&FlatTreeMigration::destroy, this);
}
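
// Worked example for FlatTreeMigration::run(): migrating from safeShift == 62 (4 leaves) to
// safeShift == 61 (8 leaves) gives repeat == 2, so each Table* exchanged out of the source (and
// replaced there by the RedirectFlatTree marker) is written into 2 consecutive slots of the
// destination flattree.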

} // namespace details
} // namespace junction

#endif // JUNCTION_DETAILS_GRAMPA_H