// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/rastack_p.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// RAStackAllocator - Slots
// ========================
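// Allocates and registers a new stack slot with the given size, alignment, and flags, anchored to `baseRegId`,
// and updates the maximum alignment tracked by the allocator. Returns nullptr on out-of-memory.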
RAStackSlot* RAStackAllocator::newSlot(uint32_t baseRegId, uint32_t size, uint32_t alignment, uint32_t flags) noexcept {
  if (ASMJIT_UNLIKELY(_slots.willGrow(allocator(), 1) != kErrorOk))
    return nullptr;

  RAStackSlot* slot = allocator()->allocT<RAStackSlot>();
  if (ASMJIT_UNLIKELY(!slot))
    return nullptr;

  slot->_baseRegId = uint8_t(baseRegId);
  slot->_alignment = uint8_t(Support::max<uint32_t>(alignment, 1));
  slot->_flags = uint16_t(flags);
  slot->_useCount = 0;
  slot->_size = size;
  slot->_weight = 0;
  slot->_offset = 0;

  _alignment = Support::max<uint32_t>(_alignment, alignment);
  _slots.appendUnsafe(slot);
  return slot;
}

// RAStackAllocator - Utilities
// ============================
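
// A gap (hole) in the stack area, described by its offset and size. Gaps appear when a slot has to be aligned
// or when a slot fills only part of an existing gap, and they may later be reused by smaller slots.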
struct RAStackGap {
  inline RAStackGap() noexcept
    : offset(0),
      size(0) {}

  inline RAStackGap(uint32_t offset, uint32_t size) noexcept
    : offset(offset),
      size(size) {}

  inline RAStackGap(const RAStackGap& other) noexcept
    : offset(other.offset),
      size(other.size) {}

  uint32_t offset;
  uint32_t size;
};

Error RAStackAllocator::calculateStackFrame() noexcept {
  // Base weight added to all registers regardless of their size and alignment.
  uint32_t kBaseRegWeight = 16;

  // STEP 1:
  //
  // Compute the weight of each slot from its use count and its size/alignment. Smaller slots are boosted so that
  // a 32-bit register home gets a higher priority than a 128-bit one; however, a 128-bit register that is used
  // roughly 4 times more often than a comparable 32-bit register will still outweigh it.
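  //
  // Illustrative example (numbers assumed, not taken from any real compilation): with the formula below, a
  // register-home slot with 4-byte alignment (power == 2) used 10 times weighs 16 + 10 * (7 - 2) == 66, while a
  // 16-byte aligned slot (power == 4) used 10 times weighs 16 + 10 * (7 - 4) == 46, so the smaller slot wins
  // unless the larger one is used considerably more often.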
  for (RAStackSlot* slot : _slots) {
    uint32_t alignment = slot->alignment();
    ASMJIT_ASSERT(alignment > 0);

    uint32_t power = Support::min<uint32_t>(Support::ctz(alignment), 6);
    uint64_t weight;

    if (slot->isRegHome())
      weight = kBaseRegWeight + (uint64_t(slot->useCount()) * (7 - power));
    else
      weight = power;

    // If the weight overflows (which has less chance than winning a lottery), just clamp it to the maximum
    // possible value; at that point the exact weight hardly matters.
    if (weight > 0xFFFFFFFFu)
      weight = 0xFFFFFFFFu;

    slot->setWeight(uint32_t(weight));
  }

  // STEP 2:
  //
  // Sort stack slots based on their newly calculated weight (in descending order).
  _slots.sort([](const RAStackSlot* a, const RAStackSlot* b) noexcept {
    return a->weight() >  b->weight() ? 1 :
           a->weight() == b->weight() ? 0 : -1;
  });

  // STEP 3:
  //
  // Calculate the offset of each slot. We start with the slot that has the highest weight and advance towards
  // slots with lower weight. It may look like offsets simply start at the first slot in the list and keep
  // increasing, but that's not always the case: since slots are sorted by weight rather than by size & alignment,
  // aligning a slot can leave a hole before it. Such holes are recorded in `gaps` so that smaller slots processed
  // later can fill them.
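  //
  // Illustrative example (offsets assumed): if the current offset is 4 and the next slot needs 16-byte alignment,
  // the slot is placed at offset 16 and the 12 skipped bytes are recorded as a 4-byte gap at offset 4 and an
  // 8-byte gap at offset 8; a later 4-byte or 8-byte slot can then reuse one of these gaps instead of growing
  // the stack area.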
  uint32_t offset = 0;
  ZoneVector<RAStackGap> gaps[kSizeCount - 1];

  for (RAStackSlot* slot : _slots) {
    if (slot->isStackArg())
      continue;

    uint32_t slotAlignment = slot->alignment();
    uint32_t alignedOffset = Support::alignUp(offset, slotAlignment);

    // Try to place the slot into an existing gap first, before advancing the `offset`.
    bool foundGap = false;
    uint32_t gapSize = 0;
    uint32_t gapOffset = 0;

    {
      uint32_t slotSize = slot->size();
      if (slotSize < (1u << uint32_t(ASMJIT_ARRAY_SIZE(gaps)))) {
        // Iterate from the smallest gap size that can hold the slot to the largest size we track.
        uint32_t index = Support::ctz(slotSize);
        do {
          if (!gaps[index].empty()) {
            RAStackGap gap = gaps[index].pop();

            ASMJIT_ASSERT(Support::isAligned(gap.offset, slotAlignment));
            slot->setOffset(int32_t(gap.offset));

            // The unused remainder of the gap starts right after the slot we just placed.
            gapSize = gap.size - slotSize;
            gapOffset = gap.offset + slotSize;

            foundGap = true;
            break;
          }
        } while (++index < uint32_t(ASMJIT_ARRAY_SIZE(gaps)));
      }
    }

    // No gap found, so we may create new gap(s) if the current offset is not aligned for this slot.
    if (!foundGap && offset != alignedOffset) {
      // Record the bytes skipped between the old offset and the aligned offset as a gap.
      gapSize = alignedOffset - offset;
      gapOffset = offset;

      offset = alignedOffset;
    }

    // Non-zero if we found a gap and did not fill all of it, or if we had to align the current offset.
    if (gapSize) {
      uint32_t gapEnd = gapSize + gapOffset;
      while (gapOffset < gapEnd) {
        // Split the gap into power-of-two blocks, each naturally aligned to its own size.
        uint32_t index = Support::ctz(gapOffset);
        uint32_t slotSize = 1u << index;

        // Weird case, better to bail...
        if (gapEnd - gapOffset < slotSize)
          break;

        ASMJIT_PROPAGATE(gaps[index].append(allocator(), RAStackGap(gapOffset, slotSize)));
        gapOffset += slotSize;
      }
    }

    if (!foundGap) {
      ASMJIT_ASSERT(Support::isAligned(offset, slotAlignment));
      slot->setOffset(int32_t(offset));
      offset += slot->size();
    }
  }

  _stackSize = Support::alignUp(offset, _alignment);
  return kErrorOk;
}
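
// Shifts the offsets of all non-argument slots by `offset`, presumably called once the final position of the
// local stack area within the function frame is known.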
Error RAStackAllocator::adjustSlotOffsets(int32_t offset) noexcept {
  for (RAStackSlot* slot : _slots)
    if (!slot->isStackArg())
      slot->_offset += offset;
  return kErrorOk;
}

ASMJIT_END_NAMESPACE
#endif // !ASMJIT_NO_COMPILER