/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/EffectiveAddressAnalysis.h"
#include "jit/IonAnalysis.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"

using namespace js;
using namespace jit;

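// Fold an int32 left-shift whose shift amount fits an addressing-mode scale,
// together with the single-use chain of truncated int32 adds that consumes it,
// into a single MEffectiveAddress node (base + index * scale + displacement).
// If no non-constant base is found, instead try to prove that a following
// bitwise-and mask is redundant with the low bits already cleared by the
// shift, and bypass it.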
static void
AnalyzeLsh(TempAllocator& alloc, MLsh* lsh)
{
    if (lsh->specialization() != MIRType::Int32)
        return;

    if (lsh->isRecoveredOnBailout())
        return;

    MDefinition* index = lsh->lhs();
    MOZ_ASSERT(index->type() == MIRType::Int32);

    MConstant* shiftValue = lsh->rhs()->maybeConstantValue();
    if (!shiftValue)
        return;

    if (shiftValue->type() != MIRType::Int32 || !IsShiftInScaleRange(shiftValue->toInt32()))
        return;

    Scale scale = ShiftToScale(shiftValue->toInt32());

    int32_t displacement = 0;
    MInstruction* last = lsh;
    MDefinition* base = nullptr;
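    // Walk the single-use chain of truncated int32 adds that consumes the
    // shift, accumulating constant operands into the displacement and
    // remembering at most one non-constant operand as the base.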
    while (true) {
        if (!last->hasOneUse())
            break;

        MUseIterator use = last->usesBegin();
        if (!use->consumer()->isDefinition() || !use->consumer()->toDefinition()->isAdd())
            break;

        MAdd* add = use->consumer()->toDefinition()->toAdd();
        if (add->specialization() != MIRType::Int32 || !add->isTruncated())
            break;

        MDefinition* other = add->getOperand(1 - add->indexOf(*use));

        if (MConstant* otherConst = other->maybeConstantValue()) {
            displacement += otherConst->toInt32();
        } else {
            if (base)
                break;
            base = other;
        }

        last = add;
        if (last->isRecoveredOnBailout())
            return;
    }

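    // With no non-constant base, the shifted value (plus any constant
    // displacement) may still feed a bitwise-and. If the mask only clears bits
    // that the shift already guarantees to be zero (the displacement must be a
    // multiple of the element size for this to hold), the mask is a no-op and
    // its uses can be redirected to the unmasked value. For example, with
    // scale 8 the low three bits are zero, so a mask of 0xfffffff8 clears
    // nothing new.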
    if (!base) {
        uint32_t elemSize = 1 << ScaleToShift(scale);
        if (displacement % elemSize != 0)
            return;

        if (!last->hasOneUse())
            return;

        MUseIterator use = last->usesBegin();
        if (!use->consumer()->isDefinition() || !use->consumer()->toDefinition()->isBitAnd())
            return;

        MBitAnd* bitAnd = use->consumer()->toDefinition()->toBitAnd();
        if (bitAnd->isRecoveredOnBailout())
            return;

        MDefinition* other = bitAnd->getOperand(1 - bitAnd->indexOf(*use));
        MConstant* otherConst = other->maybeConstantValue();
        if (!otherConst || otherConst->type() != MIRType::Int32)
            return;

        uint32_t bitsClearedByShift = elemSize - 1;
        uint32_t bitsClearedByMask = ~uint32_t(otherConst->toInt32());
        if ((bitsClearedByShift & bitsClearedByMask) != bitsClearedByMask)
            return;

        bitAnd->replaceAllUsesWith(last);
        return;
    }

    if (base->isRecoveredOnBailout())
        return;

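    // Otherwise collapse the whole chain into a single effective-address
    // computation: base + index * scale + displacement.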
    MEffectiveAddress* eaddr = MEffectiveAddress::New(alloc, base, index, scale, displacement);
    last->replaceAllUsesWith(eaddr);
    last->block()->insertAfter(last, eaddr);
}

// Transform:
//
//   [AddI]
//   addl       $9, %esi
//   [LoadUnboxedScalar]
//   movsd      0x0(%rbx,%rsi,8), %xmm4
//
// into:
//
//   [LoadUnboxedScalar]
//   movsd      0x48(%rbx,%rsi,8), %xmm4
//
// This is possible when the AddI is only used by the LoadUnboxedScalar opcode.
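// In the example above, the constant 9 added to the index is scaled by the
// 8-byte element size of the double load, giving the 0x48 (= 72) byte
// displacement folded into the load's offset.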
static void
AnalyzeLoadUnboxedScalar(TempAllocator& alloc, MLoadUnboxedScalar* load)
{
    if (load->isRecoveredOnBailout())
        return;

    if (!load->getOperand(1)->isAdd())
        return;

    JitSpew(JitSpew_EAA, "analyze: %s%u", load->opName(), load->id());

    MAdd* add = load->getOperand(1)->toAdd();

    if (add->specialization() != MIRType::Int32 || !add->hasUses() ||
        add->truncateKind() != MDefinition::TruncateKind::Truncate)
    {
        return;
    }

    MDefinition* lhs = add->lhs();
    MDefinition* rhs = add->rhs();
    MDefinition* constant = nullptr;
    MDefinition* node = nullptr;

    if (lhs->isConstant()) {
        constant = lhs;
        node = rhs;
    } else if (rhs->isConstant()) {
        constant = rhs;
        node = lhs;
    } else
        return;

    MOZ_ASSERT(constant->type() == MIRType::Int32);

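    // Scale the constant operand by the storage size and fold it into the
    // load's existing offset adjustment, giving up if either the multiply or
    // the add overflows int32.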
    size_t storageSize = Scalar::byteSize(load->storageType());
    int32_t c1 = load->offsetAdjustment();
    int32_t c2 = 0;
    if (!SafeMul(constant->maybeConstantValue()->toInt32(), storageSize, &c2))
        return;

    int32_t offset = 0;
    if (!SafeAdd(c1, c2, &offset))
        return;

    JitSpew(JitSpew_EAA, "set offset: %d + %d = %d on: %s%u", c1, c2, offset,
            load->opName(), load->id());
    load->setOffsetAdjustment(offset);
    load->replaceOperand(1, node);

    if (!add->hasLiveDefUses() && DeadIfUnused(add) && add->canRecoverOnBailout()) {
        JitSpew(JitSpew_EAA, "mark as recovered on bailout: %s%u",
                add->opName(), add->id());
        add->setRecoveredOnBailoutUnchecked();
    }
}

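// Try to fold the constant displacement o into ins's wasm offset. This is
// only attempted for huge-memory builds (the #ifdef below), where any offset
// below the offset guard limit is covered by the guard region; in other
// configurations the function always declines.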
template<typename AsmJSMemoryAccess>
bool
EffectiveAddressAnalysis::tryAddDisplacement(AsmJSMemoryAccess* ins, int32_t o)
{
#ifdef WASM_HUGE_MEMORY
    // Compute the new offset. Check for overflow.
    uint32_t oldOffset = ins->offset();
    uint32_t newOffset = oldOffset + o;
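    // The unsigned addition wraps modulo 2^32: if a negative delta failed to
    // decrease the offset it wrapped below zero, and if a non-negative delta
    // decreased it, it wrapped past UINT32_MAX; reject both cases.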
    if (o < 0 ? (newOffset >= oldOffset) : (newOffset < oldOffset))
        return false;

    // The offset must ultimately be written into the offset immediate of a
    // load or store instruction, so don't allow folding if the new offset
    // would reach the offset guard limit.
    if (newOffset >= wasm::OffsetGuardLimit)
        return false;

    // Everything checks out. This is the new offset.
    ins->setOffset(newOffset);
    return true;
#else
    return false;
#endif
}

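// Fold constant components of an asm.js/wasm heap access's base into the
// access's offset immediate, and drop the bounds check when a constant
// index provably stays within the minimum heap length.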
template<typename AsmJSMemoryAccess>
void
EffectiveAddressAnalysis::analyzeAsmJSHeapAccess(AsmJSMemoryAccess* ins)
{
    MDefinition* base = ins->base();

    if (base->isConstant()) {
        // Look for heap[i] where i is a constant offset, and fold the offset.
        // By doing the folding now, we simplify the task of codegen; the offset
        // is always the address mode immediate. This also allows it to avoid
        // a situation where the sum of a constant pointer value and a non-zero
        // offset doesn't actually fit into the address mode immediate.
        int32_t imm = base->toConstant()->toInt32();
        if (imm != 0 && tryAddDisplacement(ins, imm)) {
            MInstruction* zero = MConstant::New(graph_.alloc(), Int32Value(0));
            ins->block()->insertBefore(ins, zero);
            ins->replaceBase(zero);
        }

        // If the index is within the minimum heap length, we can optimize
        // away the bounds check.
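        // (The end >= imm test guards against the int32 computation of the
        // access's end wrapping around.)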
        if (imm >= 0) {
            int32_t end = (uint32_t)imm + ins->byteSize();
            if (end >= imm && (uint32_t)end <= mir_->minWasmHeapLength())
                ins->removeBoundsCheck();
        }
    } else if (base->isAdd()) {
        // Look for heap[a+i] where i is a constant offset, and fold the offset.
        // Alignment masks have already been moved out of the way by the
        // Alignment Mask Analysis pass.
        MDefinition* op0 = base->toAdd()->getOperand(0);
        MDefinition* op1 = base->toAdd()->getOperand(1);
        if (op0->isConstant())
            mozilla::Swap(op0, op1);
        if (op1->isConstant()) {
            int32_t imm = op1->toConstant()->toInt32();
            if (tryAddDisplacement(ins, imm))
                ins->replaceBase(op0);
        }
    }
}

// This analysis converts patterns of the form:
//   truncate(x + (y << {0,1,2,3}))
//   truncate(x + (y << {0,1,2,3}) + imm32)
// into a single lea instruction, and patterns of the form:
//   asmload(x + imm32)
//   asmload(x << {0,1,2,3})
//   asmload((x << {0,1,2,3}) + imm32)
//   asmload((x << {0,1,2,3}) & mask)            (where mask is redundant with shift)
//   asmload(((x << {0,1,2,3}) + imm32) & mask)  (where mask is redundant with shift + imm32)
// into a single asmload instruction (and for asmstore too).
//
// Additionally, we should consider the general forms:
//   truncate(x + y + imm32)
//   truncate((y << {0,1,2,3}) + imm32)
bool
EffectiveAddressAnalysis::analyze()
{
    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
        for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
            if (!graph_.alloc().ensureBallast())
                return false;

            // Note that we don't check for MAsmJSCompareExchangeHeap
            // or MAsmJSAtomicBinopHeap, because the backend and the OOB
            // mechanism don't support non-zero offsets for them yet
            // (TODO bug 1254935).
            if (i->isLsh())
                AnalyzeLsh(graph_.alloc(), i->toLsh());
            else if (i->isLoadUnboxedScalar())
                AnalyzeLoadUnboxedScalar(graph_.alloc(), i->toLoadUnboxedScalar());
            else if (i->isAsmJSLoadHeap())
                analyzeAsmJSHeapAccess(i->toAsmJSLoadHeap());
            else if (i->isAsmJSStoreHeap())
                analyzeAsmJSHeapAccess(i->toAsmJSStoreHeap());
        }
    }
    return true;
}