Commit 7ce40a5

[GC] Rework the bin's batch allocation algorithm.
1 parent 7f9e897 commit 7ce40a5

File tree: 1 file changed (+79, -94)

sdlib/d/gc/bin.d

Lines changed: 79 additions & 94 deletions
@@ -35,62 +35,100 @@ struct Bin {
 		       "Invalid arena or sizeClass!");
 		assert(slotSize == binInfos[sizeClass].slotSize, "Invalid slot size!");
 
-		mutex.lock();
-		scope(exit) mutex.unlock();
+		assert(bottom < top, "Invalid stack boundaries!");
+		assert((top - bottom) < uint.max, "Invalid stack size!");
 
-		return (cast(Bin*) &this)
-			.batchAllocateImpl(filler, emap, sizeClass, top, bottom, slotSize);
-	}
+		/**
+		 * When we run out of slab with free space, we allocate a fresh slab.
+		 * However, while we do so, another thread may have returned slabs to
+		 * the bin, so we might end up not using our fresh slab.
+		 */
+		Extent* freshSlab = null;
+
+		/**
+		 * Allocating fresh slab might fail, for instance if the system
+		 * runs out of memory. Before we attempt to allocate one, we make
+		 * sure we made progress since the last attempt.
+		 */
+		bool progressed = true;
+
+		/**
+		 * We insert from the bottom up!
+		 */
+		auto insert = bottom;
 
-	uint batchFree(const(void*)[] worklist, PageDescriptor* pds,
-	               Extent** dallocSlabs, ref uint ndalloc) shared {
-		mutex.lock();
-		scope(exit) mutex.unlock();
+		Refill: {
+			mutex.lock();
+			scope(exit) mutex.unlock();
 
-		return (cast(Bin*) &this)
-			.batchFreeImpl(worklist, pds, dallocSlabs, ndalloc);
-	}
+			auto slabs = &(cast(Bin*) &this).slabs;
 
-private:
-	void** batchAllocateImpl(
-		shared(PageFiller)* filler,
-		ref CachedExtentMap emap,
-		ubyte sizeClass,
-		void** top,
-		void** bottom,
-		size_t slotSize,
-	) {
-		// FIXME: in contract.
-		assert(mutex.isHeld(), "Mutex not held!");
-		assert(bottom < top, "Invalid stack boundaries!");
-		assert((top - bottom) < uint.max, "Invalid stack size!");
+			while (insert !is top) {
+				assert(insert < top, "Insert out of bounds!");
 
-		auto insert = bottom;
-		while (insert !is top) {
-			assert(insert < top, "Insert out of bounds!");
+				auto e = slabs.top;
+				if (unlikely(e is null)) {
+					if (freshSlab is null) {
+						// Let's go fetch a new fresh slab.
+						goto Refresh;
+					}
 
-			auto e = getSlab(filler, emap, sizeClass);
-			if (unlikely(e is null)) {
-				break;
-			}
+					// We have a fresh slab, use it!
+					slabs.insert(freshSlab);
+					freshSlab = null;
+					continue;
+				}
+
+				assert(e.nfree > 0);
+				uint nfill = (top - insert) & uint.max;
+				insert = e.batchAllocate(insert, nfill, slotSize);
+				assert(bottom <= insert && insert <= top);
 
-			assert(e.nfree > 0);
-			uint nfill = (top - insert) & uint.max;
-			insert = e.batchAllocate(insert, nfill, slotSize);
-			assert(bottom <= insert && insert <= top);
+				progressed = true;
 
-			// If the slab is not full, we are done.
-			if (e.nfree > 0) {
-				break;
+				// If the slab is not full, we are done.
+				if (e.nfree > 0) {
+					goto Exit;
+				}
+
+				// The slab is full, remove from the heap.
+				slabs.remove(e);
 			}
+		}
 
-			// The slab is full, remove from the heap.
-			slabs.remove(e);
+	Exit:
+		if (freshSlab !is null) {
+			filler.freeExtent(emap, freshSlab);
 		}
 
 		return insert;
+
+	Refresh:
+		assert(insert !is top);
+		assert(freshSlab is null);
+
+		if (!progressed) {
+			goto Exit;
+		}
+
+		freshSlab = filler.allocSlab(emap, sizeClass);
+		auto nslots = binInfos[sizeClass].nslots;
+		assert(freshSlab is null || freshSlab.nfree == nslots);
+
+		progressed = false;
+		goto Refill;
 	}
 
+	uint batchFree(const(void*)[] worklist, PageDescriptor* pds,
+	               Extent** dallocSlabs, ref uint ndalloc) shared {
+		mutex.lock();
+		scope(exit) mutex.unlock();
+
+		return (cast(Bin*) &this)
+			.batchFreeImpl(worklist, pds, dallocSlabs, ndalloc);
+	}
+
+private:
 	uint batchFreeImpl(const(void*)[] worklist, PageDescriptor* pds,
 	                   Extent** dallocSlabs, ref uint ndalloc) {
 		// FIXME: in contract.
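
The hunk above replaces the old per-slab getSlab dance with a single state machine: the bin's mutex is held only inside the Refill block, slab allocation happens at the Refresh label with the lock released, and any fresh slab that ends up unused is handed back to the PageFiller at Exit. The progressed flag bounds the loop: a new allocation is attempted only if at least one slot was filled since the previous attempt, so repeated allocSlab failures cannot spin forever. The toy D program below is a sketch of just that control flow under simplifying assumptions: ToySlab, refill and the fixed-size backing array are invented stand-ins for SDC's Extent, PageFiller and intrusive heap, and there is no locking since a single thread exercises the state machine.

import core.stdc.stdio : printf;

struct ToySlab {
	uint nfree; // number of free slots left in this slab
}

// Stand-in for filler.allocSlab: may fail (returns null) to model OOM.
ToySlab* allocSlab(ToySlab[] backing, ref size_t next, uint nslots) {
	if (next >= backing.length) {
		return null; // "out of memory"
	}

	backing[next] = ToySlab(nslots);
	return &backing[next++];
}

// Fill `wanted` slots from the slabs on hand, allocating fresh slabs
// on demand. Returns how many slots were actually filled.
uint refill(ref ToySlab*[] slabs, ToySlab[] backing, ref size_t next,
            uint wanted, uint nslots) {
	ToySlab* freshSlab = null;
	bool progressed = true;
	uint filled = 0;

	Refill: {
		// In the committed code, the bin's mutex is held in this block only.
		while (filled < wanted) {
			if (slabs.length == 0) {
				if (freshSlab is null) {
					// No slab on hand: go allocate one, lock released.
					goto Refresh;
				}

				// Use the fresh slab we brought back.
				slabs ~= freshSlab;
				freshSlab = null;
				continue;
			}

			auto e = slabs[$ - 1];
			uint take = wanted - filled;
			if (take > e.nfree) {
				take = e.nfree;
			}

			e.nfree -= take;
			filled += take;
			progressed = true;

			// The slab still has room: we are done.
			if (e.nfree > 0) {
				goto Exit;
			}

			// The slab is exhausted: drop it.
			slabs = slabs[0 .. $ - 1];
		}
	}

Exit:
	// The committed code hands an unused freshSlab back to the
	// PageFiller here; the GC reclaims it in this toy version.
	return filled;

Refresh:
	// Only retry allocation if we filled at least one slot since the
	// previous attempt; this bounds the loop when allocation keeps failing.
	if (!progressed) {
		goto Exit;
	}

	freshSlab = allocSlab(backing, next, nslots);
	progressed = false;
	goto Refill;
}

void main() {
	auto backing = new ToySlab[](2); // room for exactly two slabs
	size_t next = 0;
	ToySlab*[] slabs;

	auto got = refill(slabs, backing, next, 20, 8);
	printf("filled %u of 20 slots\n", got); // filled 16 of 20 slots
}

Asking for 20 slots when the backing array only has room for two 8-slot slabs fills 16: the third allocation fails, no slot is filled after that failure, and the !progressed check exits instead of retrying.
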
@@ -145,59 +183,6 @@ private:
 		return ndeferred;
 	}
 
-	auto getSlab(shared(PageFiller)* filler, ref CachedExtentMap emap,
-	             ubyte sizeClass) {
-		// FIXME: in contract.
-		assert(mutex.isHeld(), "Mutex not held!");
-
-		auto slab = slabs.top;
-		if (slab !is null) {
-			return slab;
-		}
-
-		{
-			// Release the lock while we allocate a slab.
-			mutex.unlock();
-			scope(exit) mutex.lock();
-
-			// We don't have a suitable slab, so allocate one.
-			slab = filler.allocSlab(emap, sizeClass);
-		}
-
-		if (unlikely(slab is null)) {
-			// Another thread might have been successful
-			// while we did not hold the lock.
-			return slabs.top;
-		}
-
-		// We may have allocated the slab we need when the lock was released.
-		if (likely(slabs.top is null)) {
-			slabs.insert(slab);
-			return slab;
-		}
-
-		// We are about to release the freshly allocated slab.
-		// We do not want another thread stealing the slab we intend
-		// to use from under our feets, so we keep it around.
-		auto current = slabs.pop();
-
-		assert(slab !is current);
-		assert(slab.nfree == binInfos[sizeClass].nslots);
-
-		{
-			// Release the lock while we release the slab.
-			mutex.unlock();
-			scope(exit) mutex.lock();
-
-			filler.freeExtent(emap, slab);
-		}
-
-		// Now we put it back, which ensure we have at least one
-		// slab available that we can return.
-		slabs.insert(current);
-		return slabs.top;
-	}
-
 	/**
 	 * GC facilities.
 	 */
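
For contrast, the deleted getSlab had to juggle the lock once per slab: release the mutex around allocSlab, revalidate slabs.top after reacquiring it because another thread may have supplied a slab in the meantime, and, on losing that race, free the redundant slab (unlocking yet again) while keeping one slab pinned so it could not be stolen from under it. The sketch below models that general lock-juggling pattern with invented names (SlabCache, allocateSlowly, integer slabs); it is not SDC's API.

import core.sync.mutex : Mutex;

// A toy "get or create" cache illustrating the pattern the removed
// getSlab used. All names here are invented for illustration.
final class SlabCache {
	private Mutex mtx;
	private int[] slabs; // stand-in for the heap of slabs

	this() {
		mtx = new Mutex();
	}

	int getSlab() {
		mtx.lock();
		scope(exit) mtx.unlock();

		if (slabs.length > 0) {
			return slabs[$ - 1];
		}

		int fresh;
		{
			// Release the lock while we allocate.
			mtx.unlock();
			scope(exit) mtx.lock();

			fresh = allocateSlowly();
		}

		// Revalidate: another thread may have raced us while the lock
		// was released, making our fresh slab redundant. The removed
		// code then had to free it, unlocking yet again, while keeping
		// one slab pinned so it could not be stolen in the meantime.
		if (slabs.length > 0) {
			return slabs[$ - 1];
		}

		slabs ~= fresh;
		return fresh;
	}

	private int allocateSlowly() {
		return 42; // placeholder for the slow filler.allocSlab path
	}
}

void main() {
	auto cache = new SlabCache();
	assert(cache.getSlab() == 42);
}

Hoisting this into batchAllocate lets the new code drop the lock at most once per Refresh round instead of inside every getSlab call.
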
