Skip to content

Commit c78fb8d

Browse files
[ADT] Simplify SmallDenseMap::grow (NFC) (#167829)
Without this patch, SmallDenseMap::grow has two separate code paths to grow the bucket array. The code path to handle the small mode has its own traversal over the bucket array. This patch simplifies this logic as follows: 1. Allocate a temporary instance of SmallDenseMap. 2. Move valid key/value pairs to the temporary instance. 3. Move LargeRep to *this. Remarks: - This patch adds moveFromImpl to move key/value pairs. moveFromOldBuckets is updated to use the new helper function. - This patch adds a private constructor to SmallDenseMap that takes an exact number of buckets, accompanied by tag ExactBucketCount. - This patch adds a fast path to deallocateBuckets in case getLargeRep()->NumBuckets == 0, just like destroyAll. This path is used to destruct zombie instances after moves. - In somewhat rare cases, we "grow" from the small mode to the small mode when there are many tombstones in the inline storage. This is handled with another call to moveFrom.
1 parent 12322b2 commit c78fb8d

File tree

1 file changed

+33
-42
lines changed

1 file changed

+33
-42
lines changed

llvm/include/llvm/ADT/DenseMap.h

Lines changed: 33 additions & 42 deletions

Original file line number | Diff line number | Diff line change
@@ -413,9 +413,7 @@ class DenseMapBase : public DebugEpochBase {
413413
return NextPowerOf2(NumEntries * 4 / 3 + 1);
414414
}
415415

416-
void moveFromOldBuckets(iterator_range<BucketT *> OldBuckets) {
417-
initEmpty();
418-
416+
void moveFromImpl(iterator_range<BucketT *> OldBuckets) {
419417
// Insert all the old elements.
420418
const KeyT EmptyKey = KeyInfoT::getEmptyKey();
421419
const KeyT TombstoneKey = KeyInfoT::getTombstoneKey();
@@ -438,6 +436,14 @@ class DenseMapBase : public DebugEpochBase {
438436
}
439437
}
440438

439+
void moveFromOldBuckets(iterator_range<BucketT *> OldBuckets) {
440+
initEmpty();
441+
moveFromImpl(OldBuckets);
442+
}
443+
444+
// Move key/value from Other to *this. Other will be in a zombie state.
445+
void moveFrom(DerivedT &Other) { moveFromImpl(Other.buckets()); }
446+
441447
void copyFrom(const DerivedT &other) {
442448
this->destroyAll();
443449
derived().deallocateBuckets();
@@ -889,6 +895,12 @@ class SmallDenseMap
889895
/// a large bucket. This union will be discriminated by the 'Small' bit.
890896
AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
891897

898+
struct ExactBucketCount {};
899+
SmallDenseMap(unsigned NumBuckets, ExactBucketCount) {
900+
allocateBuckets(NumBuckets);
901+
this->BaseT::initEmpty();
902+
}
903+
892904
public:
893905
explicit SmallDenseMap(unsigned NumElementsToReserve = 0) {
894906
init(NumElementsToReserve);
@@ -1065,7 +1077,10 @@ class SmallDenseMap
10651077
}
10661078

10671079
void deallocateBuckets() {
1068-
if (Small)
1080+
// Fast path to deallocateBuckets in case getLargeRep()->NumBuckets == 0,
1081+
// just like destroyAll. This path is used to destruct zombie instances
1082+
// after moves.
1083+
if (Small || getLargeRep()->NumBuckets == 0)
10691084
return;
10701085

10711086
deallocate_buffer(getLargeRep()->Buckets,
@@ -1096,46 +1111,22 @@ class SmallDenseMap
10961111
if (AtLeast > InlineBuckets)
10971112
AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast - 1));
10981113

1099-
if (Small) {
1100-
// First move the inline buckets into a temporary storage.
1101-
AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
1102-
BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
1103-
BucketT *TmpEnd = TmpBegin;
1114+
SmallDenseMap Tmp(AtLeast, ExactBucketCount{});
1115+
Tmp.moveFrom(*this);
11041116

1105-
// Loop over the buckets, moving non-empty, non-tombstones into the
1106-
// temporary storage. Have the loop move the TmpEnd forward as it goes.
1107-
const KeyT EmptyKey = KeyInfoT::getEmptyKey();
1108-
const KeyT TombstoneKey = KeyInfoT::getTombstoneKey();
1109-
for (BucketT &B : inlineBuckets()) {
1110-
if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
1111-
!KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
1112-
assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
1113-
"Too many inline buckets!");
1114-
::new (&TmpEnd->getFirst()) KeyT(std::move(B.getFirst()));
1115-
::new (&TmpEnd->getSecond()) ValueT(std::move(B.getSecond()));
1116-
++TmpEnd;
1117-
B.getSecond().~ValueT();
1118-
}
1119-
B.getFirst().~KeyT();
1120-
}
1121-
1122-
// AtLeast == InlineBuckets can happen if there are many tombstones,
1123-
// and grow() is used to remove them. Usually we always switch to the
1124-
// large rep here.
1125-
allocateBuckets(AtLeast);
1126-
this->moveFromOldBuckets(llvm::make_range(TmpBegin, TmpEnd));
1127-
return;
1117+
if (Tmp.Small) {
1118+
// Use moveFrom in those rare cases where we stay in the small mode. This
1119+
// can happen when we have many tombstones.
1120+
this->BaseT::initEmpty();
1121+
this->moveFrom(Tmp);
1122+
Tmp.Small = false;
1123+
Tmp.getLargeRep()->NumBuckets = 0;
1124+
} else {
1125+
Small = false;
1126+
NumTombstones = 0;
1127+
*getLargeRep() = std::move(*Tmp.getLargeRep());
1128+
Tmp.getLargeRep()->NumBuckets = 0;
11281129
}
1129-
1130-
LargeRep OldRep = std::move(*getLargeRep());
1131-
getLargeRep()->~LargeRep();
1132-
allocateBuckets(AtLeast);
1133-
1134-
this->moveFromOldBuckets(OldRep.buckets());
1135-
1136-
// Free the old table.
1137-
deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
1138-
alignof(BucketT));
11391130
}
11401131

11411132
// Plan how to shrink the bucket table. Return:

0 commit comments

Comments (0)