Skip to content

Commit 7f9e897

Browse files
committed
[GC] Move post-batch-allocate thread bin maintenance logic into the thread bin.
1 parent 2381be3 commit 7f9e897

File tree

2 files changed

+41
-45
lines changed

2 files changed

+41
-45
lines changed

sdlib/d/gc/bin.d

Lines changed: 4 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -35,51 +35,11 @@ struct Bin {
3535
"Invalid arena or sizeClass!");
3636
assert(slotSize == binInfos[sizeClass].slotSize, "Invalid slot size!");
3737

38-
void** insert;
39-
40-
{
41-
mutex.lock();
42-
scope(exit) mutex.unlock();
43-
44-
insert = (cast(Bin*) &this)
45-
.batchAllocateImpl(filler, emap, sizeClass, top, bottom,
46-
slotSize);
47-
}
48-
49-
/**
50-
* Note: If we are worried about security, we might want to shuffle
51-
* our allocations around. This makes the uses of techniques
52-
* like Heap Feng Shui difficult.
53-
* We do not think it is worth the complication and performance
54-
* hit in the general case, but something we might want to add
55-
* in the future for security sensitive applications.
56-
*
57-
* http://www.phreedom.org/research/heap-feng-shui/heap-feng-shui.html
58-
*/
59-
60-
// We filled the whole stack, done.
61-
if (likely(insert is top)) {
62-
return bottom;
63-
}
64-
65-
/**
66-
* We could simplify this code by inserting from top to bottom,
67-
* in order to avoid moving all the elements when the stack has not
68-
* been filled.
69-
*
70-
* However, because we allocate from the best slab to the worse one,
71-
* this would result in a stack that allocate from the worse slab
72-
* before the best ones.
73-
*
74-
* So we allocate from the bottom to the top, and move the whole stack
75-
* if we did not quite reach the top.
76-
*/
77-
while (insert > bottom) {
78-
*(--top) = *(--insert);
79-
}
38+
mutex.lock();
39+
scope(exit) mutex.unlock();
8040

81-
assert(bottom <= top);
82-
return top;
41+
return (cast(Bin*) &this)
42+
.batchAllocateImpl(filler, emap, sizeClass, top, bottom, slotSize);
8343
}
8444

8545
uint batchFree(const(void*)[] worklist, PageDescriptor* pds,

sdlib/d/gc/tbin.d

Lines changed: 37 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -86,8 +86,44 @@ public:
8686
auto insert = _head - nfill;
8787
assert(available <= insert);
8888

89-
_head = arena.batchAllocSmall(emap, sizeClass, _head, insert, slotSize);
89+
auto filled =
90+
arena.batchAllocSmall(emap, sizeClass, _head, insert, slotSize);
9091
state.refilled = true;
92+
93+
/**
94+
* Note: If we are worried about security, we might want to shuffle
95+
* our allocations around. This makes the uses of techniques
96+
* like Heap Feng Shui difficult.
97+
* We do not think it is worth the complication and performance
98+
* hit in the general case, but something we might want to add
99+
* in the future for security sensitive applications.
100+
*
101+
* http://www.phreedom.org/research/heap-feng-shui/heap-feng-shui.html
102+
*/
103+
104+
// The whole space was filled. We are done.
105+
if (likely(filled is _head)) {
106+
_head = insert;
107+
return;
108+
}
109+
110+
/**
111+
* We could simplify this code by inserting from top to bottom,
112+
* in order to avoid moving all the elements when the stack has not
113+
* been filled.
114+
*
115+
* However, because we allocate from the best slab to the worst one,
116+
* this would result in a stack that allocates from the worst slabs
117+
* before the best ones.
118+
*
119+
* So we allocate from the bottom to the top, and move the whole stack
120+
* if we did not quite reach the top.
121+
*/
122+
while (filled > insert) {
123+
*(--_head) = *(--filled);
124+
}
125+
126+
assert(insert <= _head);
91127
}
92128

93129
bool free(void* ptr) {

0 commit comments

Comments
 (0)