Line data Source code
1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : * vim: set ts=8 sts=4 et sw=4 tw=99:
3 : * This Source Code Form is subject to the terms of the Mozilla Public
4 : * License, v. 2.0. If a copy of the MPL was not distributed with this
5 : * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 :
7 : #include "jit/IonAnalysis.h"
8 :
9 : #include "mozilla/SizePrintfMacros.h"
10 :
11 : #include "jit/AliasAnalysis.h"
12 : #include "jit/BaselineInspector.h"
13 : #include "jit/BaselineJIT.h"
14 : #include "jit/FlowAliasAnalysis.h"
15 : #include "jit/Ion.h"
16 : #include "jit/IonBuilder.h"
17 : #include "jit/IonOptimizationLevels.h"
18 : #include "jit/LIR.h"
19 : #include "jit/Lowering.h"
20 : #include "jit/MIRGraph.h"
21 : #include "vm/RegExpObject.h"
22 : #include "vm/SelfHosting.h"
23 :
24 : #include "jsobjinlines.h"
25 : #include "jsopcodeinlines.h"
26 : #include "jsscriptinlines.h"
27 :
28 : #include "jit/shared/Lowering-shared-inl.h"
29 :
30 : using namespace js;
31 : using namespace js::jit;
32 :
33 : using mozilla::DebugOnly;
34 :
35 : typedef Vector<MPhi*, 16, SystemAllocPolicy> MPhiVector;
36 :
37 : static bool
38 119 : FlagPhiInputsAsHavingRemovedUses(MIRGenerator* mir, MBasicBlock* block, MBasicBlock* succ,
39 : MPhiVector& worklist)
40 : {
41 : // When removing an edge between 2 blocks, we might remove the ability of
42 : // later phases to figure out that the uses of a Phi should be considered as
43 : // a use of all its inputs. Thus we need to mark the Phi inputs as having
44 : // removed uses iff the phi has any uses.
45 : //
46 : //
47 : // +--------------------+ +---------------------+
48 : // |12 MFoo 6 | |32 MBar 5 |
49 : // | | | |
50 : // | ... | | ... |
51 : // | | | |
52 : // |25 MGoto Block 4 | |43 MGoto Block 4 |
53 : // +--------------------+ +---------------------+
54 : // | |
55 : // | | |
56 : // | | |
57 : // | +-----X------------------------+
58 : // | Edge |
59 : // | Removed |
60 : // | |
61 : // | +------------v-----------+
62 : // | |50 MPhi 12 32 |
63 : // | | |
64 : // | | ... |
65 : // | | |
66 : // | |70 MReturn 50 |
67 : // | +------------------------+
68 : // |
69 : // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
70 : // |
71 : // v
72 : //
73 : // ^ +--------------------+ +---------------------+
74 : // /!\ |12 MConst opt-out | |32 MBar 5 |
75 : // '---' | | | |
76 : // | ... | | ... |
77 : // |78 MBail | | |
78 : // |80 MUnreachable | |43 MGoto Block 4 |
79 : // +--------------------+ +---------------------+
80 : // |
81 : // |
82 : // |
83 : // +---------------+
84 : // |
85 : // |
86 : // |
87 : // +------------v-----------+
88 : // |50 MPhi 32 |
89 : // | |
90 : // | ... |
91 : // | |
92 : // |70 MReturn 50 |
93 : // +------------------------+
94 : //
95 : //
96 : // If the inputs of the Phi are not flagged as having removed uses, then
97 : // later compilation phase might optimize them out. The problem is that a
98 : // bailout will use this value and give it back to baseline, which will then
99 : // use the OptimizedOut magic value in a computation.
100 :
101 : // Conservative upper limit for the number of Phi instructions which are
102 : // visited while looking for uses.
103 119 : const size_t conservativeUsesLimit = 128;
104 :
105 119 : MOZ_ASSERT(worklist.empty());
106 119 : size_t predIndex = succ->getPredecessorIndex(block);
107 119 : MPhiIterator end = succ->phisEnd();
108 119 : MPhiIterator it = succ->phisBegin();
109 391 : for (; it != end; it++) {
110 136 : MPhi* phi = *it;
111 :
112 136 : if (mir->shouldCancel("FlagPhiInputsAsHavingRemovedUses outer loop"))
113 0 : return false;
114 :
115 : // We are looking to mark the Phi inputs which are used across the edge
116 : // between the |block| and its successor |succ|.
117 136 : MDefinition* def = phi->getOperand(predIndex);
118 136 : if (def->isUseRemoved())
119 70 : continue;
120 :
121 66 : phi->setInWorklist();
122 66 : if (!worklist.append(phi))
123 0 : return false;
124 :
125 : // Fill the work list with all the Phi nodes uses until we reach either:
126 : // - A resume point which uses the Phi as an observable operand.
127 : // - An explicit use of the Phi instruction.
128 : // - An implicit use of the Phi instruction.
129 66 : bool isUsed = false;
130 159 : for (size_t idx = 0; !isUsed && idx < worklist.length(); idx++) {
131 128 : phi = worklist[idx];
132 :
133 128 : if (mir->shouldCancel("FlagPhiInputsAsHavingRemovedUses inner loop 1"))
134 0 : return false;
135 :
136 128 : if (phi->isUseRemoved() || phi->isImplicitlyUsed()) {
137 : // The phi is implicitly used.
138 35 : isUsed = true;
139 70 : break;
140 : }
141 :
142 93 : MUseIterator usesEnd(phi->usesEnd());
143 1285 : for (MUseIterator use(phi->usesBegin()); use != usesEnd; use++) {
144 1223 : MNode* consumer = (*use)->consumer();
145 :
146 1223 : if (mir->shouldCancel("FlagPhiInputsAsHavingRemovedUses inner loop 2"))
147 0 : return false;
148 :
149 1223 : if (consumer->isResumePoint()) {
150 1038 : MResumePoint* rp = consumer->toResumePoint();
151 1038 : if (rp->isObservableOperand(*use)) {
152 : // The phi is observable via a resume point operand.
153 0 : isUsed = true;
154 0 : break;
155 : }
156 1038 : continue;
157 : }
158 :
159 185 : MDefinition* cdef = consumer->toDefinition();
160 185 : if (!cdef->isPhi()) {
161 : // The phi is explicitly used.
162 31 : isUsed = true;
163 31 : break;
164 : }
165 :
166 154 : phi = cdef->toPhi();
167 154 : if (phi->isInWorklist())
168 48 : continue;
169 :
170 106 : phi->setInWorklist();
171 106 : if (!worklist.append(phi))
172 0 : return false;
173 : }
174 :
175 : // Use a conservative upper bound to avoid iterating too many times
176 : // on very large graphs.
177 93 : if (idx >= conservativeUsesLimit) {
178 0 : isUsed = true;
179 0 : break;
180 : }
181 : }
182 :
183 66 : if (isUsed)
184 66 : def->setUseRemoved();
185 :
186 : // Remove all the InWorklist flags.
187 410 : while (!worklist.empty()) {
188 172 : phi = worklist.popCopy();
189 172 : phi->setNotInWorklist();
190 : }
191 : }
192 :
193 119 : return true;
194 : }
195 :
196 : static bool
197 105 : FlagAllOperandsAsHavingRemovedUses(MIRGenerator* mir, MBasicBlock* block)
198 : {
199 105 : const CompileInfo& info = block->info();
200 :
201 : // Flag all instructions operands as having removed uses.
202 105 : MInstructionIterator end = block->end();
203 494 : for (MInstructionIterator it = block->begin(); it != end; it++) {
204 389 : if (mir->shouldCancel("FlagAllOperandsAsHavingRemovedUses loop 1"))
205 0 : return false;
206 :
207 389 : MInstruction* ins = *it;
208 678 : for (size_t i = 0, e = ins->numOperands(); i < e; i++)
209 289 : ins->getOperand(i)->setUseRemovedUnchecked();
210 :
211 : // Flag observable resume point operands as having removed uses.
212 389 : if (MResumePoint* rp = ins->resumePoint()) {
213 : // Note: no need to iterate over the caller's of the resume point as
214 : // this is the same as the entry resume point.
215 1979 : for (size_t i = 0, e = rp->numOperands(); i < e; i++) {
216 1863 : if (info.isObservableSlot(i))
217 163 : rp->getOperand(i)->setUseRemovedUnchecked();
218 : }
219 : }
220 : }
221 :
222 : // Flag observable operands of the entry resume point as having removed uses.
223 105 : MResumePoint* rp = block->entryResumePoint();
224 381 : while (rp) {
225 138 : if (mir->shouldCancel("FlagAllOperandsAsHavingRemovedUses loop 2"))
226 0 : return false;
227 :
228 2353 : for (size_t i = 0, e = rp->numOperands(); i < e; i++) {
229 2215 : if (info.isObservableSlot(i))
230 196 : rp->getOperand(i)->setUseRemovedUnchecked();
231 : }
232 :
233 138 : rp = rp->caller();
234 : }
235 :
236 : // Flag Phi inputs of the successors has having removed uses.
237 210 : MPhiVector worklist;
238 224 : for (size_t i = 0, e = block->numSuccessors(); i < e; i++) {
239 119 : if (mir->shouldCancel("FlagAllOperandsAsHavingRemovedUses loop 3"))
240 0 : return false;
241 :
242 119 : if (!FlagPhiInputsAsHavingRemovedUses(mir, block, block->getSuccessor(i), worklist))
243 0 : return false;
244 : }
245 :
246 105 : return true;
247 : }
248 :
249 : static void
250 105 : RemoveFromSuccessors(MBasicBlock* block)
251 : {
252 : // Remove this block from its successors.
253 105 : size_t numSucc = block->numSuccessors();
254 343 : while (numSucc--) {
255 119 : MBasicBlock* succ = block->getSuccessor(numSucc);
256 119 : if (succ->isDead())
257 88 : continue;
258 31 : JitSpew(JitSpew_Prune, "Remove block edge %d -> %d.", block->id(), succ->id());
259 31 : succ->removePredecessor(block);
260 : }
261 105 : }
262 :
263 : static void
264 35 : ConvertToBailingBlock(TempAllocator& alloc, MBasicBlock* block)
265 : {
266 : // Add a bailout instruction.
267 35 : MBail* bail = MBail::New(alloc, Bailout_FirstExecution);
268 35 : MInstruction* bailPoint = block->safeInsertTop();
269 35 : block->insertBefore(block->safeInsertTop(), bail);
270 :
271 : // Discard all remaining instructions.
272 35 : MInstructionIterator clearStart = block->begin(bailPoint);
273 35 : block->discardAllInstructionsStartingAt(clearStart);
274 35 : if (block->outerResumePoint())
275 0 : block->clearOuterResumePoint();
276 :
277 : // And replace the last instruction by the unreachable control instruction.
278 35 : block->end(MUnreachable::New(alloc));
279 35 : }
280 :
281 : bool
282 8 : jit::PruneUnusedBranches(MIRGenerator* mir, MIRGraph& graph)
283 : {
284 8 : MOZ_ASSERT(!mir->compilingWasm(), "wasm compilation has no code coverage support.");
285 :
286 : // We do a reverse-post-order traversal, marking basic blocks when the block
287 : // have to be converted into bailing blocks, and flagging block as
288 : // unreachable if all predecessors are flagged as bailing or unreachable.
289 8 : bool someUnreachable = false;
290 652 : for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
291 644 : if (mir->shouldCancel("Prune unused branches (main loop)"))
292 0 : return false;
293 :
294 644 : JitSpew(JitSpew_Prune, "Investigate Block %d:", block->id());
295 749 : JitSpewIndent indent(JitSpew_Prune);
296 :
297 : // Do not touch entry basic blocks.
298 644 : if (*block == graph.osrBlock() || *block == graph.entryBlock()) {
299 11 : JitSpew(JitSpew_Prune, "Block %d is an entry point.", block->id());
300 11 : continue;
301 : }
302 :
303 : // Compute if all the predecessors of this block are either bailling out
304 : // or are already flagged as unreachable.
305 633 : bool isUnreachable = true;
306 633 : bool isLoopHeader = block->isLoopHeader();
307 633 : size_t numPred = block->numPredecessors();
308 633 : size_t i = 0;
309 853 : for (; i < numPred; i++) {
310 673 : if (mir->shouldCancel("Prune unused branches (inner loop 1)"))
311 0 : return false;
312 :
313 673 : MBasicBlock* pred = block->getPredecessor(i);
314 :
315 : // The backedge is visited after the loop header, but if the loop
316 : // header is unreachable, then we can assume that the backedge would
317 : // be unreachable too.
318 673 : if (isLoopHeader && pred == block->backedge())
319 0 : continue;
320 :
321 : // Break if any of the predecessor can continue in this block.
322 673 : if (!pred->isMarked() && !pred->unreachable()) {
323 563 : isUnreachable = false;
324 563 : break;
325 : }
326 : }
327 :
328 : // Compute if the block should bailout, based on the trivial heuristic
329 : // which is that if the block never got visited before, then it is
330 : // likely to not be visited after.
331 : bool shouldBailout =
332 1237 : block->getHitState() == MBasicBlock::HitState::Count &&
333 1237 : block->getHitCount() == 0;
334 :
335 : // Check if the predecessors got accessed a large number of times in
336 : // comparisons of the current block, in order to know if our attempt at
337 : // removing this block is not premature.
338 633 : if (!isUnreachable && shouldBailout) {
339 86 : size_t p = numPred;
340 86 : size_t predCount = 0;
341 86 : size_t numSuccessorsOfPreds = 1;
342 86 : bool isLoopExit = false;
343 268 : while (p--) {
344 91 : if (mir->shouldCancel("Prune unused branches (inner loop 2)"))
345 0 : return false;
346 :
347 91 : MBasicBlock* pred = block->getPredecessor(p);
348 91 : if (pred->getHitState() == MBasicBlock::HitState::Count)
349 89 : predCount += pred->getHitCount();
350 91 : isLoopExit |= pred->isLoopHeader() && pred->backedge() != *block;
351 91 : numSuccessorsOfPreds += pred->numSuccessors() - 1;
352 : }
353 :
354 : // Iterate over the approximated set of dominated blocks and count
355 : // the number of instructions which are dominated. Note that this
356 : // approximation has issues with OSR blocks, but this should not be
357 : // a big deal.
358 86 : size_t numDominatedInst = 0;
359 86 : size_t numEffectfulInst = 0;
360 86 : int numInOutEdges = block->numPredecessors();
361 86 : size_t branchSpan = 0;
362 86 : ReversePostorderIterator it(block);
363 3433 : do {
364 1777 : if (mir->shouldCancel("Prune unused branches (inner loop 3)"))
365 0 : return false;
366 :
367 : // Iterate over dominated blocks, and visit exit blocks as well.
368 1777 : numInOutEdges -= it->numPredecessors();
369 1777 : if (numInOutEdges < 0)
370 69 : break;
371 1708 : numInOutEdges += it->numSuccessors();
372 :
373 : // Collect information about the instructions within the block.
374 5858 : for (MDefinitionIterator def(*it); def; def++) {
375 4150 : numDominatedInst++;
376 4150 : if (def->isEffectful())
377 497 : numEffectfulInst++;
378 : }
379 :
380 1708 : it++;
381 1708 : branchSpan++;
382 5107 : } while(numInOutEdges > 0 && it != graph.rpoEnd());
383 :
384 : // The goal of branch pruning is to remove branches which are
385 : // preventing other optimization, while keeping branches which would
386 : // be costly if we were to bailout. The following heuristics are
387 : // made to prevent bailouts in branches when we estimate that the
388 : // confidence is not enough to compensate for the cost of a bailout.
389 : //
390 : // 1. Confidence for removal varies with the number of hit counts
391 : // of the predecessor. The reason being that the likelyhood of
392 : // taking this branch is decreasing with the number of hit
393 : // counts of the predecessor.
394 : //
395 : // 2. Confidence for removal varies with the number of dominated
396 : // instructions. The reason being that the complexity of the
397 : // branch increases with the number of instructions, thus
398 : // working against other optimizations.
399 : //
400 : // 3. Confidence for removal varies with the span of the
401 : // branch. The reason being that a branch that spans over a
402 : // large set of blocks is likely to remove optimization
403 : // opportunity as it prevents instructions from the other
404 : // branches to dominate the blocks which are after.
405 : //
406 : // 4. Confidence for removal varies with the number of effectful
407 : // instructions. The reason being that an effectful instruction
408 : // can remove optimization opportunities based on Scalar
409 : // Replacement, and based on Alias Analysis.
410 : //
411 : // The following converts various units in some form of arbitrary
412 : // score, such that we can compare it to a threshold.
413 86 : size_t score = 0;
414 86 : MOZ_ASSERT(numSuccessorsOfPreds >= 1);
415 86 : score += predCount * JitOptions.branchPruningHitCountFactor / numSuccessorsOfPreds;
416 86 : score += numDominatedInst * JitOptions.branchPruningInstFactor;
417 86 : score += branchSpan * JitOptions.branchPruningBlockSpanFactor;
418 86 : score += numEffectfulInst * JitOptions.branchPruningEffectfulInstFactor;
419 86 : if (score < JitOptions.branchPruningThreshold)
420 31 : shouldBailout = false;
421 :
422 : // If the predecessors do not have enough hit counts, keep the
423 : // branch, until we recompile this function later, with more
424 : // information.
425 86 : if (predCount / numSuccessorsOfPreds < 50)
426 43 : shouldBailout = false;
427 :
428 : // There is only a single successors to the predecessors, thus the
429 : // decision should be taken as part of the previous block
430 : // investigation, and this block should be unreachable.
431 86 : if (numSuccessorsOfPreds == 1)
432 11 : shouldBailout = false;
433 :
434 : // If this is the exit block of a loop, then keep this basic
435 : // block. This heuristic is useful as a bailout is often much more
436 : // costly than a simple exit sequence.
437 86 : if (isLoopExit)
438 1 : shouldBailout = false;
439 :
440 : // Interpreters are often implemented as a table switch within a for
441 : // loop. What might happen is that the interpreter heats up in a
442 : // subset of instructions, but might need other instructions for the
443 : // rest of the evaluation.
444 86 : if (numSuccessorsOfPreds > 8)
445 0 : shouldBailout = false;
446 :
447 86 : JitSpew(JitSpew_Prune, "info: block %d,"
448 : " predCount: %" PRIuSIZE ", domInst: %" PRIuSIZE
449 : ", span: %" PRIuSIZE ", effectful: %" PRIuSIZE ", "
450 : " isLoopExit: %s, numSuccessorsOfPred: %" PRIuSIZE "."
451 : " (score: %" PRIuSIZE ", shouldBailout: %s)",
452 : block->id(), predCount, numDominatedInst, branchSpan, numEffectfulInst,
453 : isLoopExit ? "true" : "false", numSuccessorsOfPreds,
454 86 : score, shouldBailout ? "true" : "false");
455 : }
456 :
457 : // Continue to the next basic block if the current basic block can
458 : // remain unchanged.
459 633 : if (!isUnreachable && !shouldBailout)
460 528 : continue;
461 :
462 105 : someUnreachable = true;
463 105 : if (isUnreachable) {
464 70 : JitSpew(JitSpew_Prune, "Mark block %d as unreachable.", block->id());
465 70 : block->setUnreachable();
466 : // If the block is unreachable, then there is no need to convert it
467 : // to a bailing block.
468 35 : } else if (shouldBailout) {
469 35 : JitSpew(JitSpew_Prune, "Mark block %d as bailing block.", block->id());
470 35 : block->markUnchecked();
471 : }
472 :
473 : // When removing a loop header, we should ensure that its backedge is
474 : // removed first, otherwise this triggers an assertion in
475 : // removePredecessorsWithoutPhiOperands.
476 105 : if (block->isLoopHeader()) {
477 0 : JitSpew(JitSpew_Prune, "Mark block %d as bailing block. (loop backedge)", block->backedge()->id());
478 0 : block->backedge()->markUnchecked();
479 : }
480 : }
481 :
482 : // Returns early if nothing changed.
483 8 : if (!someUnreachable)
484 4 : return true;
485 :
486 4 : JitSpew(JitSpew_Prune, "Convert basic block to bailing blocks, and remove unreachable blocks:");
487 8 : JitSpewIndent indent(JitSpew_Prune);
488 :
489 : // As we are going to remove edges and basic block, we have to mark
490 : // instructions which would be needed by baseline if we were to bailout.
491 597 : for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
492 593 : if (mir->shouldCancel("Prune unused branches (marking loop)"))
493 0 : return false;
494 :
495 593 : MBasicBlock* block = *it++;
496 593 : if (!block->isMarked() && !block->unreachable())
497 488 : continue;
498 :
499 105 : FlagAllOperandsAsHavingRemovedUses(mir, block);
500 : }
501 :
502 : // Remove the blocks in post-order such that consumers are visited before
503 : // the predecessors, the only exception being the Phi nodes of loop headers.
504 597 : for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
505 593 : if (mir->shouldCancel("Prune unused branches (removal loop)"))
506 0 : return false;
507 :
508 593 : MBasicBlock* block = *it++;
509 593 : if (!block->isMarked() && !block->unreachable())
510 488 : continue;
511 :
512 105 : JitSpew(JitSpew_Prune, "Remove / Replace block %d.", block->id());
513 210 : JitSpewIndent indent(JitSpew_Prune);
514 :
515 : // As we are going to replace/remove the last instruction, we first have
516 : // to remove this block from the predecessor list of its successors.
517 105 : RemoveFromSuccessors(block);
518 :
519 : // Convert the current basic block to a bailing block which ends with an
520 : // Unreachable control instruction.
521 105 : if (block->isMarked()) {
522 35 : JitSpew(JitSpew_Prune, "Convert Block %d to a bailing block.", block->id());
523 35 : if (!graph.alloc().ensureBallast())
524 0 : return false;
525 35 : ConvertToBailingBlock(graph.alloc(), block);
526 35 : block->unmark();
527 : }
528 :
529 : // Remove all instructions.
530 105 : if (block->unreachable()) {
531 70 : JitSpew(JitSpew_Prune, "Remove Block %d.", block->id());
532 140 : JitSpewIndent indent(JitSpew_Prune);
533 70 : graph.removeBlock(block);
534 : }
535 : }
536 :
537 4 : return true;
538 : }
539 :
540 : static bool
541 3390 : SplitCriticalEdgesForBlock(MIRGraph& graph, MBasicBlock* block)
542 : {
543 3390 : if (block->numSuccessors() < 2)
544 2532 : return true;
545 2794 : for (size_t i = 0; i < block->numSuccessors(); i++) {
546 1936 : MBasicBlock* target = block->getSuccessor(i);
547 1936 : if (target->numPredecessors() < 2)
548 1922 : continue;
549 :
550 : // Create a simple new block which contains a goto and which split the
551 : // edge between block and target.
552 14 : MBasicBlock* split = MBasicBlock::NewSplitEdge(graph, block, i, target);
553 14 : if (!split)
554 0 : return false;
555 : }
556 858 : return true;
557 : }
558 :
559 : // A critical edge is an edge which is neither its successor's only predecessor
560 : // nor its predecessor's only successor. Critical edges must be split to
561 : // prevent copy-insertion and code motion from affecting other edges.
562 : bool
563 141 : jit::SplitCriticalEdges(MIRGraph& graph)
564 : {
565 3509 : for (MBasicBlockIterator iter(graph.begin()); iter != graph.end(); iter++) {
566 3368 : MBasicBlock* block = *iter;
567 3368 : if (!SplitCriticalEdgesForBlock(graph, block))
568 0 : return false;
569 : }
570 141 : return true;
571 : }
572 :
573 : bool
574 13 : jit::IsUint32Type(const MDefinition* def)
575 : {
576 13 : if (def->isBeta())
577 0 : def = def->getOperand(0);
578 :
579 13 : if (def->type() != MIRType::Int32)
580 0 : return false;
581 :
582 13 : return def->isUrsh() && def->getOperand(1)->isConstant() &&
583 13 : def->getOperand(1)->toConstant()->type() == MIRType::Int32 &&
584 13 : def->getOperand(1)->toConstant()->toInt32() == 0;
585 : }
586 :
587 : // Return whether a block simply computes the specified constant value.
588 : static bool
589 26 : BlockComputesConstant(MBasicBlock* block, MDefinition* value, bool* constBool)
590 : {
591 : // Look for values with no uses. This is used to eliminate constant
592 : // computing blocks in condition statements, and the phi which used to
593 : // consume the constant has already been removed.
594 26 : if (value->hasUses())
595 20 : return false;
596 :
597 6 : if (!value->isConstant() || value->block() != block)
598 6 : return false;
599 0 : if (!block->phisEmpty())
600 0 : return false;
601 0 : for (MInstructionIterator iter = block->begin(); iter != block->end(); ++iter) {
// NOTE(review): this condition requires every instruction to BOTH be |value|
// AND a goto, which no single instruction can satisfy, so the loop rejects
// any non-empty block and the function can seemingly never return true past
// this point. Was '&&' intended ("each instruction is either the constant or
// a goto")? Confirm against upstream before changing -- flipping it would
// re-enable constant-branch elimination in MaybeFoldConditionBlock.
602 0 : if (*iter != value || !iter->isGoto())
603 0 : return false;
604 : }
// Convert the constant to a boolean, storing it in *constBool on success.
605 0 : return value->toConstant()->valueToBoolean(constBool);
606 : }
607 :
608 : // Find phis that are redundant:
609 : //
610 : // 1) phi(a, a)
611 : // can get replaced by a
612 : //
613 : // 2) phi(filtertypeset(a, type1), filtertypeset(a, type1))
614 : // equals filtertypeset(a, type1)
615 : //
616 : // 3) phi(a, filtertypeset(a, type1))
617 : // equals filtertypeset(a, type1 union type(a))
618 : // equals filtertypeset(a, type(a))
619 : // equals a
620 : //
621 : // 4) phi(filtertypeset(a, type1), filtertypeset(a, type2))
622 : // equals filtertypeset(a, type1 union type2)
623 : //
624 : // This is the special case. We can only replace this with 'a' iff
625 : // type(a) == type1 union type2. Since optimizations could have
626 : // happened based on a more specific phi type.
627 : static bool
628 8 : IsPhiRedudantFilter(MPhi* phi)
629 : {
630 : // Handle (1) and (2)
631 8 : if (phi->operandIfRedundant())
632 0 : return true;
633 :
634 : // Handle (3)
635 8 : bool onlyFilters = false;
636 8 : MDefinition* a = phi->getOperand(0);
637 8 : if (a->isFilterTypeSet()) {
638 0 : a = a->toFilterTypeSet()->input();
639 0 : onlyFilters = true;
640 : }
641 :
642 16 : for (size_t i = 1; i < phi->numOperands(); i++) {
643 8 : MDefinition* operand = phi->getOperand(i);
644 8 : if (operand == a) {
645 0 : onlyFilters = false;
646 0 : continue;
647 : }
648 8 : if (operand->isFilterTypeSet() && operand->toFilterTypeSet()->input() == a)
649 8 : continue;
650 0 : return false;
651 : }
652 8 : if (!onlyFilters)
653 8 : return true;
654 :
655 : // Handle (4)
656 0 : MOZ_ASSERT(onlyFilters);
657 0 : return EqualTypes(a->type(), a->resultTypeSet(),
658 0 : phi->type(), phi->resultTypeSet());
659 : }
660 :
661 : // Determine whether phiBlock/testBlock simply compute a phi and perform a
662 : // test on it.
663 : static bool
664 22 : BlockIsSingleTest(MBasicBlock* phiBlock, MBasicBlock* testBlock, MPhi** pphi, MTest** ptest)
665 : {
666 22 : *pphi = nullptr;
667 22 : *ptest = nullptr;
668 :
669 22 : if (phiBlock != testBlock) {
670 4 : MOZ_ASSERT(phiBlock->numSuccessors() == 1 && phiBlock->getSuccessor(0) == testBlock);
671 4 : if (!phiBlock->begin()->isGoto())
672 0 : return false;
673 : }
674 :
675 22 : MInstruction* ins = *testBlock->begin();
676 22 : if (!ins->isTest())
677 7 : return false;
678 15 : MTest* test = ins->toTest();
679 15 : if (!test->input()->isPhi())
680 0 : return false;
681 15 : MPhi* phi = test->input()->toPhi();
682 15 : if (phi->block() != phiBlock)
683 2 : return false;
684 :
685 39 : for (MUseIterator iter = phi->usesBegin(); iter != phi->usesEnd(); ++iter) {
686 26 : MUse* use = *iter;
687 26 : if (use->consumer() == test)
688 13 : continue;
689 13 : if (use->consumer()->isResumePoint()) {
690 13 : MBasicBlock* useBlock = use->consumer()->block();
691 13 : if (useBlock == phiBlock || useBlock == testBlock)
692 13 : continue;
693 : }
694 0 : return false;
695 : }
696 :
697 30 : for (MPhiIterator iter = phiBlock->phisBegin(); iter != phiBlock->phisEnd(); ++iter) {
698 17 : if (*iter == phi)
699 13 : continue;
700 :
701 4 : if (IsPhiRedudantFilter(*iter))
702 4 : continue;
703 :
704 0 : return false;
705 : }
706 :
707 13 : if (phiBlock != testBlock && !testBlock->phisEmpty())
708 0 : return false;
709 :
710 13 : *pphi = phi;
711 13 : *ptest = test;
712 :
713 13 : return true;
714 : }
715 :
716 : // Change block so that it ends in a goto to the specific target block.
717 : // existingPred is an existing predecessor of the block.
718 : static void
719 13 : UpdateGotoSuccessor(TempAllocator& alloc, MBasicBlock* block, MBasicBlock* target,
720 : MBasicBlock* existingPred)
721 : {
722 13 : MInstruction* ins = block->lastIns();
723 13 : MOZ_ASSERT(ins->isGoto());
724 13 : ins->toGoto()->target()->removePredecessor(block);
725 13 : block->discardLastIns();
726 :
727 13 : MGoto* newGoto = MGoto::New(alloc, target);
728 13 : block->end(newGoto);
729 :
730 13 : target->addPredecessorSameInputsAs(block, existingPred);
731 13 : }
732 :
733 : // Change block so that it ends in a test of the specified value, going to
734 : // either ifTrue or ifFalse. existingPred is an existing predecessor of ifTrue
735 : // or ifFalse with the same values incoming to ifTrue/ifFalse as block.
736 : // existingPred is not required to be a predecessor of ifTrue/ifFalse if block
737 : // already ends in a test going to that block on a true/false result.
738 : static void
739 26 : UpdateTestSuccessors(TempAllocator& alloc, MBasicBlock* block,
740 : MDefinition* value, MBasicBlock* ifTrue, MBasicBlock* ifFalse,
741 : MBasicBlock* existingPred)
742 : {
743 26 : MInstruction* ins = block->lastIns();
744 26 : if (ins->isTest()) {
745 13 : MTest* test = ins->toTest();
746 13 : MOZ_ASSERT(test->input() == value);
747 :
748 13 : if (ifTrue != test->ifTrue()) {
749 0 : test->ifTrue()->removePredecessor(block);
750 0 : ifTrue->addPredecessorSameInputsAs(block, existingPred);
751 0 : MOZ_ASSERT(test->ifTrue() == test->getSuccessor(0));
752 0 : test->replaceSuccessor(0, ifTrue);
753 : }
754 :
755 13 : if (ifFalse != test->ifFalse()) {
756 0 : test->ifFalse()->removePredecessor(block);
757 0 : ifFalse->addPredecessorSameInputsAs(block, existingPred);
758 0 : MOZ_ASSERT(test->ifFalse() == test->getSuccessor(1));
759 0 : test->replaceSuccessor(1, ifFalse);
760 : }
761 :
762 13 : return;
763 : }
764 :
765 13 : MOZ_ASSERT(ins->isGoto());
766 13 : ins->toGoto()->target()->removePredecessor(block);
767 13 : block->discardLastIns();
768 :
769 13 : MTest* test = MTest::New(alloc, value, ifTrue, ifFalse);
770 13 : block->end(test);
771 :
772 13 : ifTrue->addPredecessorSameInputsAs(block, existingPred);
773 13 : ifFalse->addPredecessorSameInputsAs(block, existingPred);
774 : }
775 :
776 : static bool
777 439 : MaybeFoldConditionBlock(MIRGraph& graph, MBasicBlock* initialBlock)
778 : {
779 : // Optimize the MIR graph to improve the code generated for conditional
780 : // operations. A test like 'if (a ? b : c)' normally requires four blocks,
781 : // with a phi for the intermediate value. This can be improved to use three
782 : // blocks with no phi value, and if either b or c is constant,
783 : // e.g. 'if (a ? b : 0)', then the block associated with that constant
784 : // can be eliminated.
785 :
786 : /*
787 : * Look for a diamond pattern:
788 : *
789 : * initialBlock
790 : * / \
791 : * trueBranch falseBranch
792 : * \ /
793 : * phiBlock
794 : * |
795 : * testBlock
796 : *
797 : * Where phiBlock contains a single phi combining values pushed onto the
798 : * stack by trueBranch and falseBranch, and testBlock contains a test on
799 : * that phi. phiBlock and testBlock may be the same block; generated code
800 : * will use different blocks if the (?:) op is in an inlined function.
801 : */
802 :
803 439 : MInstruction* ins = initialBlock->lastIns();
804 439 : if (!ins->isTest())
805 293 : return true;
806 146 : MTest* initialTest = ins->toTest();
807 :
808 146 : MBasicBlock* trueBranch = initialTest->ifTrue();
809 146 : if (trueBranch->numPredecessors() != 1 || trueBranch->numSuccessors() != 1)
810 67 : return true;
811 79 : MBasicBlock* falseBranch = initialTest->ifFalse();
812 79 : if (falseBranch->numPredecessors() != 1 || falseBranch->numSuccessors() != 1)
813 27 : return true;
814 52 : MBasicBlock* phiBlock = trueBranch->getSuccessor(0);
815 52 : if (phiBlock != falseBranch->getSuccessor(0))
816 9 : return true;
817 43 : if (phiBlock->numPredecessors() != 2)
818 0 : return true;
819 :
820 43 : if (initialBlock->isLoopBackedge() || trueBranch->isLoopBackedge() || falseBranch->isLoopBackedge())
821 0 : return true;
822 :
823 43 : MBasicBlock* testBlock = phiBlock;
824 43 : if (testBlock->numSuccessors() == 1) {
825 25 : if (testBlock->isLoopBackedge())
826 0 : return true;
827 25 : testBlock = testBlock->getSuccessor(0);
828 25 : if (testBlock->numPredecessors() != 1)
829 21 : return true;
830 : }
831 :
832 : // Make sure the test block does not have any outgoing loop backedges.
833 22 : if (!SplitCriticalEdgesForBlock(graph, testBlock))
834 0 : return false;
835 :
836 : MPhi* phi;
837 : MTest* finalTest;
838 22 : if (!BlockIsSingleTest(phiBlock, testBlock, &phi, &finalTest))
839 9 : return true;
840 :
841 13 : MDefinition* trueResult = phi->getOperand(phiBlock->indexForPredecessor(trueBranch));
842 13 : MDefinition* falseResult = phi->getOperand(phiBlock->indexForPredecessor(falseBranch));
843 :
844 : // OK, we found the desired pattern, now transform the graph.
845 :
846 : // Patch up phis that filter their input.
847 30 : for (MPhiIterator iter = phiBlock->phisBegin(); iter != phiBlock->phisEnd(); ++iter) {
848 17 : if (*iter == phi)
849 13 : continue;
850 :
851 4 : MOZ_ASSERT(IsPhiRedudantFilter(*iter));
852 4 : MDefinition* redundant = (*iter)->operandIfRedundant();
853 :
854 4 : if (!redundant) {
855 4 : redundant = (*iter)->getOperand(0);
856 4 : if (redundant->isFilterTypeSet())
857 0 : redundant = redundant->toFilterTypeSet()->input();
858 : }
859 :
860 4 : (*iter)->replaceAllUsesWith(redundant);
861 : }
862 :
863 : // Remove the phi from phiBlock.
864 13 : phiBlock->discardPhi(*phiBlock->phisBegin());
865 :
866 : // If either trueBranch or falseBranch just computes a constant for the
867 : // test, determine the block that branch will end up jumping to and eliminate
868 : // the branch. Otherwise, change the end of the block to a test that jumps
869 : // directly to successors of testBlock, rather than to testBlock itself.
870 :
871 13 : MBasicBlock* trueTarget = trueBranch;
872 : bool constBool;
873 13 : if (BlockComputesConstant(trueBranch, trueResult, &constBool)) {
874 0 : trueTarget = constBool ? finalTest->ifTrue() : finalTest->ifFalse();
875 0 : phiBlock->removePredecessor(trueBranch);
876 0 : graph.removeBlock(trueBranch);
877 13 : } else if (initialTest->input() == trueResult) {
878 6 : UpdateGotoSuccessor(graph.alloc(), trueBranch, finalTest->ifTrue(), testBlock);
879 : } else {
880 7 : UpdateTestSuccessors(graph.alloc(), trueBranch, trueResult,
881 7 : finalTest->ifTrue(), finalTest->ifFalse(), testBlock);
882 : }
883 :
884 13 : MBasicBlock* falseTarget = falseBranch;
885 13 : if (BlockComputesConstant(falseBranch, falseResult, &constBool)) {
886 0 : falseTarget = constBool ? finalTest->ifTrue() : finalTest->ifFalse();
887 0 : phiBlock->removePredecessor(falseBranch);
888 0 : graph.removeBlock(falseBranch);
889 13 : } else if (initialTest->input() == falseResult) {
890 7 : UpdateGotoSuccessor(graph.alloc(), falseBranch, finalTest->ifFalse(), testBlock);
891 : } else {
892 6 : UpdateTestSuccessors(graph.alloc(), falseBranch, falseResult,
893 6 : finalTest->ifTrue(), finalTest->ifFalse(), testBlock);
894 : }
895 :
896 : // Short circuit the initial test to skip any constant branch eliminated above.
897 13 : UpdateTestSuccessors(graph.alloc(), initialBlock, initialTest->input(),
898 13 : trueTarget, falseTarget, testBlock);
899 :
900 : // Remove phiBlock, if different from testBlock.
901 13 : if (phiBlock != testBlock) {
902 0 : testBlock->removePredecessor(phiBlock);
903 0 : graph.removeBlock(phiBlock);
904 : }
905 :
906 : // Remove testBlock itself.
907 13 : finalTest->ifTrue()->removePredecessor(testBlock);
908 13 : finalTest->ifFalse()->removePredecessor(testBlock);
909 13 : graph.removeBlock(testBlock);
910 :
911 13 : return true;
912 : }
913 :
914 : bool
915 8 : jit::FoldTests(MIRGraph& graph)
916 : {
917 447 : for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
918 439 : if (!MaybeFoldConditionBlock(graph, *block))
919 0 : return false;
920 : }
921 8 : return true;
922 : }
923 :
// Remove forwarding blocks: a block with exactly one predecessor and one
// successor, no phis, no outer resume point, and a single instruction can be
// bypassed by rewiring its predecessor directly to its successor.
bool
jit::FoldEmptyBlocks(MIRGraph& graph)
{
    for (MBasicBlockIterator iter(graph.begin()); iter != graph.end(); ) {
        MBasicBlock* block = *iter;
        // Advance before mutating the graph so removing |block| cannot
        // invalidate the iterator.
        iter++;

        if (block->numPredecessors() != 1 || block->numSuccessors() != 1)
            continue;

        if (!block->phisEmpty())
            continue;

        if (block->outerResumePoint())
            continue;

        // Only one instruction in the block (begin == rbegin), i.e. just the
        // block's terminator.
        if (*block->begin() != *block->rbegin())
            continue;

        MBasicBlock* succ = block->getSuccessor(0);
        MBasicBlock* pred = block->getPredecessor(0);

        // Only fold when |succ| has a single predecessor; otherwise its phis
        // would need rewriting.
        if (succ->numPredecessors() != 1)
            continue;

        // Retarget |pred|'s control instruction from |block| to |succ|.
        size_t pos = pred->getSuccessorIndex(block);
        pred->lastIns()->replaceSuccessor(pos, succ);

        graph.removeBlock(block);

        // Transfer the predecessor slot (and its phi inputs) from |block| to
        // |pred|, then drop the stale |block| entry.
        succ->addPredecessorSameInputsAs(pred, block);
        succ->removePredecessor(block);
    }
    return true;
}
959 :
960 : static void
961 614 : EliminateTriviallyDeadResumePointOperands(MIRGraph& graph, MResumePoint* rp)
962 : {
963 : // If we will pop the top of the stack immediately after resuming,
964 : // then don't preserve the top value in the resume point.
965 614 : if (rp->mode() != MResumePoint::ResumeAt || *rp->pc() != JSOP_POP)
966 611 : return;
967 :
968 3 : size_t top = rp->stackDepth() - 1;
969 3 : MOZ_ASSERT(!rp->isObservableOperand(top));
970 :
971 3 : MDefinition* def = rp->getOperand(top);
972 3 : if (def->isConstant())
973 3 : return;
974 :
975 0 : MConstant* constant = rp->block()->optimizedOutConstant(graph.alloc());
976 0 : rp->replaceOperand(top, constant);
977 : }
978 :
979 : // Operands to a resume point which are dead at the point of the resume can be
980 : // replaced with a magic value. This analysis supports limited detection of
981 : // dead operands, pruning those which are defined in the resume point's basic
982 : // block and have no uses outside the block or at points later than the resume
983 : // point.
984 : //
985 : // This is intended to ensure that extra resume points within a basic block
986 : // will not artificially extend the lifetimes of any SSA values. This could
987 : // otherwise occur if the new resume point captured a value which is created
988 : // between the old and new resume point and is dead at the new resume point.
bool
jit::EliminateDeadResumePointOperands(MIRGenerator* mir, MIRGraph& graph)
{
    // If we are compiling try blocks, locals and arguments may be observable
    // from catch or finally blocks (which Ion does not compile). For now just
    // disable the pass in this case.
    if (graph.hasTryBlock())
        return true;

    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Dead Resume Point Operands (main loop)"))
            return false;

        // Prune the trivially dead stack-top operand of the block's entry
        // resume point, if any.
        if (MResumePoint* rp = block->entryResumePoint()) {
            if (!graph.alloc().ensureBallast())
                return false;
            EliminateTriviallyDeadResumePointOperands(graph, rp);
        }

        // The logic below can get confused on infinite loops.
        if (block->isLoopHeader() && block->backedge() == *block)
            continue;

        for (MInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            if (MResumePoint* rp = ins->resumePoint()) {
                if (!graph.alloc().ensureBallast())
                    return false;
                EliminateTriviallyDeadResumePointOperands(graph, rp);
            }

            // No benefit to replacing constant operands with other constants.
            if (ins->isConstant())
                continue;

            // Scanning uses does not give us sufficient information to tell
            // where instructions that are involved in box/unbox operations or
            // parameter passing might be live. Rewriting uses of these terms
            // in resume points may affect the interpreter's behavior. Rather
            // than doing a more sophisticated analysis, just ignore these.
            if (ins->isUnbox() || ins->isParameter() || ins->isTypeBarrier() ||
                ins->isComputeThis() || ins->isFilterTypeSet())
            {
                continue;
            }

            // Early intermediate values captured by resume points, such as
            // TypedObject, ArrayState and its allocation, may be legitimately
            // dead in Ion code, but are still needed if we bail out. They can
            // recover on bailout.
            if (ins->isNewDerivedTypedObject() || ins->isRecoveredOnBailout()) {
                MOZ_ASSERT(ins->canRecoverOnBailout());
                continue;
            }

            // If the instruction's behavior has been constant folded into a
            // separate instruction, we can't determine precisely where the
            // instruction becomes dead and can't eliminate its uses.
            if (ins->isImplicitlyUsed() || ins->isUseRemoved())
                continue;

            // Check if this instruction's result is only used within the
            // current block, and keep track of its last use in a definition
            // (not resume point). This requires the instructions in the block
            // to be numbered, ensured by running this immediately after alias
            // analysis.
            uint32_t maxDefinition = 0;
            for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd(); uses++) {
                MNode* consumer = uses->consumer();
                if (consumer->isResumePoint()) {
                    // If the instruction is captured by one of the resume
                    // points, then it might be observed indirectly while the
                    // frame is live on the stack, so it has to be computed.
                    MResumePoint* resume = consumer->toResumePoint();
                    if (resume->isObservableOperand(*uses)) {
                        maxDefinition = UINT32_MAX;
                        break;
                    }
                    continue;
                }

                // Uses outside this block, or by boxes/phis, keep the value
                // live past any point we can reason about here.
                MDefinition* def = consumer->toDefinition();
                if (def->block() != *block || def->isBox() || def->isPhi()) {
                    maxDefinition = UINT32_MAX;
                    break;
                }
                maxDefinition = Max(maxDefinition, def->id());
            }
            if (maxDefinition == UINT32_MAX)
                continue;

            // Walk the uses a second time, removing any in resume points after
            // the last use in a definition.
            for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd(); ) {
                MUse* use = *uses++;
                if (use->consumer()->isDefinition())
                    continue;
                MResumePoint* mrp = use->consumer()->toResumePoint();
                if (mrp->block() != *block ||
                    !mrp->instruction() ||
                    mrp->instruction() == *ins ||
                    mrp->instruction()->id() <= maxDefinition)
                {
                    continue;
                }

                if (!graph.alloc().ensureBallast())
                    return false;

                // Store an optimized out magic value in place of all dead
                // resume point operands. Making any such substitution can in
                // general alter the interpreter's behavior, even though the
                // code is dead, as the interpreter will still execute opcodes
                // whose effects cannot be observed. If the magic value were
                // to flow to, say, a dead property access the interpreter
                // could throw an exception; we avoid this problem by removing
                // dead operands before removing dead code.
                MConstant* constant = MConstant::New(graph.alloc(), MagicValue(JS_OPTIMIZED_OUT));
                block->insertBefore(*(block->begin()), constant);
                use->replaceProducer(constant);
            }
        }
    }

    return true;
}
1114 :
1115 : // Test whether |def| would be needed if it had no uses.
1116 : bool
1117 2977 : js::jit::DeadIfUnused(const MDefinition* def)
1118 : {
1119 5611 : return !def->isEffectful() &&
1120 5288 : (!def->isGuard() || def->block() == def->block()->graph().osrBlock()) &&
1121 4938 : !def->isGuardRangeBailouts() &&
1122 9314 : !def->isControlInstruction() &&
1123 7045 : (!def->isInstruction() || !def->toInstruction()->resumePoint());
1124 : }
1125 :
1126 : // Test whether |def| may be safely discarded, due to being dead or due to being
1127 : // located in a basic block which has itself been marked for discarding.
1128 : bool
1129 7162 : js::jit::IsDiscardable(const MDefinition* def)
1130 : {
1131 7162 : return !def->hasUses() && (DeadIfUnused(def) || def->block()->isMarked());
1132 : }
1133 :
1134 : // Instructions are useless if they are unused and have no side effects.
1135 : // This pass eliminates useless instructions.
1136 : // The graph itself is unchanged.
1137 : bool
1138 8 : jit::EliminateDeadCode(MIRGenerator* mir, MIRGraph& graph)
1139 : {
1140 : // Traverse in postorder so that we hit uses before definitions.
1141 : // Traverse instruction list backwards for the same reason.
1142 411 : for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
1143 403 : if (mir->shouldCancel("Eliminate Dead Code (main loop)"))
1144 0 : return false;
1145 :
1146 : // Remove unused instructions.
1147 1875 : for (MInstructionReverseIterator iter = block->rbegin(); iter != block->rend(); ) {
1148 1472 : MInstruction* inst = *iter++;
1149 1472 : if (js::jit::IsDiscardable(inst))
1150 : {
1151 0 : block->discard(inst);
1152 : }
1153 : }
1154 : }
1155 :
1156 8 : return true;
1157 : }
1158 :
1159 : static inline bool
1160 1298 : IsPhiObservable(MPhi* phi, Observability observe)
1161 : {
1162 : // If the phi has uses which are not reflected in SSA, then behavior in the
1163 : // interpreter may be affected by removing the phi.
1164 1298 : if (phi->isImplicitlyUsed() || phi->isUseRemoved())
1165 220 : return true;
1166 :
1167 : // Check for uses of this phi node outside of other phi nodes.
1168 : // Note that, initially, we skip reading resume points, which we
1169 : // don't count as actual uses. If the only uses are resume points,
1170 : // then the SSA name is never consumed by the program. However,
1171 : // after optimizations have been performed, it's possible that the
1172 : // actual uses in the program have been (incorrectly) optimized
1173 : // away, so we must be more conservative and consider resume
1174 : // points as well.
1175 10170 : for (MUseIterator iter(phi->usesBegin()); iter != phi->usesEnd(); iter++) {
1176 9517 : MNode* consumer = iter->consumer();
1177 9517 : if (consumer->isResumePoint()) {
1178 8244 : MResumePoint* resume = consumer->toResumePoint();
1179 8244 : if (observe == ConservativeObservability)
1180 425 : return true;
1181 8244 : if (resume->isObservableOperand(*iter))
1182 180 : return true;
1183 : } else {
1184 1273 : MDefinition* def = consumer->toDefinition();
1185 1273 : if (!def->isPhi())
1186 245 : return true;
1187 : }
1188 : }
1189 :
1190 653 : return false;
1191 : }
1192 :
1193 : // Handles cases like:
1194 : // x is phi(a, x) --> a
1195 : // x is phi(a, a) --> a
1196 : static inline MDefinition*
1197 3571 : IsPhiRedundant(MPhi* phi)
1198 : {
1199 3571 : MDefinition* first = phi->operandIfRedundant();
1200 3571 : if (first == nullptr)
1201 2031 : return nullptr;
1202 :
1203 : // Propagate the ImplicitlyUsed flag if |phi| is replaced with another phi.
1204 1540 : if (phi->isImplicitlyUsed())
1205 30 : first->setImplicitlyUsedUnchecked();
1206 :
1207 1540 : return first;
1208 : }
1209 :
bool
jit::EliminatePhis(MIRGenerator* mir, MIRGraph& graph,
                   Observability observe)
{
    // Eliminates redundant or unobservable phis from the graph. A
    // redundant phi is something like b = phi(a, a) or b = phi(a, b),
    // both of which can be replaced with a. An unobservable phi is
    // one whose value is never used in the program.
    //
    // Note that we must be careful not to eliminate phis representing
    // values that the interpreter will require later. When the graph
    // is first constructed, we can be more aggressive, because there
    // is a greater correspondence between the CFG and the bytecode.
    // After optimizations such as GVN have been performed, however,
    // the bytecode and CFG may not correspond as closely to one
    // another. In that case, we must be more conservative. The flag
    // |conservativeObservability| is used to indicate that eliminate
    // phis is being run after some optimizations have been performed,
    // and thus we should use more conservative rules about
    // observability. The particular danger is that we can optimize
    // away uses of a phi because we think they are not executable,
    // but the foundation for that assumption is false TI information
    // that will eventually be invalidated. Therefore, if
    // |conservativeObservability| is set, we will consider any use
    // from a resume point to be observable. Otherwise, we demand a
    // use from an actual instruction.

    Vector<MPhi*, 16, SystemAllocPolicy> worklist;

    // Add all observable phis to a worklist. We use the "in worklist" bit to
    // mean "this phi is live".
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            // Advance before a possible discardPhi below invalidates |iter|.
            MPhi* phi = *iter++;

            if (mir->shouldCancel("Eliminate Phis (populate loop)"))
                return false;

            // Flag all as unused, only observable phis would be marked as used
            // when processed by the work list.
            phi->setUnused();

            // If the phi is redundant, remove it here.
            if (MDefinition* redundant = IsPhiRedundant(phi)) {
                phi->justReplaceAllUsesWith(redundant);
                block->discardPhi(phi);
                continue;
            }

            // Enqueue observable Phis.
            if (IsPhiObservable(phi, observe)) {
                phi->setInWorklist();
                if (!worklist.append(phi))
                    return false;
            }
        }
    }

    // Iteratively mark all phis reachable from live phis.
    while (!worklist.empty()) {
        if (mir->shouldCancel("Eliminate Phis (worklist)"))
            return false;

        MPhi* phi = worklist.popCopy();
        MOZ_ASSERT(phi->isUnused());
        phi->setNotInWorklist();

        // The removal of Phis can produce newly redundant phis.
        if (MDefinition* redundant = IsPhiRedundant(phi)) {
            // Add to the worklist the used phis which are impacted.
            for (MUseDefIterator it(phi); it; it++) {
                if (it.def()->isPhi()) {
                    MPhi* use = it.def()->toPhi();
                    if (!use->isUnused()) {
                        // Re-queue so its liveness is recomputed against the
                        // rewritten operand.
                        use->setUnusedUnchecked();
                        use->setInWorklist();
                        if (!worklist.append(use))
                            return false;
                    }
                }
            }
            phi->justReplaceAllUsesWith(redundant);
        } else {
            // Otherwise flag them as used.
            phi->setNotUnused();
        }

        // The current phi is/was used, so all its operands are used.
        for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
            MDefinition* in = phi->getOperand(i);
            if (!in->isPhi() || !in->isUnused() || in->isInWorklist())
                continue;
            in->setInWorklist();
            if (!worklist.append(in->toPhi()))
                return false;
        }
    }

    // Sweep dead phis.
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            MPhi* phi = *iter++;
            if (phi->isUnused()) {
                // Replace remaining (resume-point) uses before discarding.
                if (!phi->optimizeOutAllUses(graph.alloc()))
                    return false;
                block->discardPhi(phi);
            }
        }
    }

    return true;
}
1324 :
namespace {

// The type analysis algorithm inserts conversions and box/unbox instructions
// to make the IR graph well-typed for future passes.
//
// Phi adjustment: If a phi's inputs are all the same type, the phi is
// specialized to return that type.
//
// Input adjustment: Each input is asked to apply conversion operations to its
// inputs. This may include Box, Unbox, or other instruction-specific type
// conversion operations.
//
class TypeAnalyzer
{
    MIRGenerator* mir;
    MIRGraph& graph;
    // Phis whose freshly assigned type still needs propagating to consumers.
    Vector<MPhi*, 0, SystemAllocPolicy> phiWorklist_;

    TempAllocator& alloc() const {
        return graph.alloc();
    }

    // Queue |phi| for type propagation; the in-worklist flag prevents
    // duplicate entries. Returns false on allocation failure.
    bool addPhiToWorklist(MPhi* phi) {
        if (phi->isInWorklist())
            return true;
        if (!phiWorklist_.append(phi))
            return false;
        phi->setInWorklist();
        return true;
    }
    // Pop the next queued phi, clearing its in-worklist flag.
    MPhi* popPhi() {
        MPhi* phi = phiWorklist_.popCopy();
        phi->setNotInWorklist();
        return phi;
    }

    bool respecialize(MPhi* phi, MIRType type);
    bool propagateSpecialization(MPhi* phi);
    bool specializePhis();
    void replaceRedundantPhi(MPhi* phi);
    bool adjustPhiInputs(MPhi* phi);
    bool adjustInputs(MDefinition* def);
    bool insertConversions();

    bool checkFloatCoherency();
    bool graphContainsFloat32();
    bool markPhiConsumers();
    bool markPhiProducers();
    bool specializeValidFloatOps();
    bool tryEmitFloatOperations();

  public:
    TypeAnalyzer(MIRGenerator* mir, MIRGraph& graph)
      : mir(mir), graph(graph)
    { }

    bool analyze();
};

} /* anonymous namespace */
1385 :
// Try to specialize this phi based on its non-cyclic inputs. Returns the
// merged type of the observed operands, MIRType::Value when they are
// incompatible, or MIRType::None when no operand's type is known yet.
// |*hasInputsWithEmptyTypes| is set when some non-phi operand has an empty
// (never-observed) typeset and was therefore ignored.
static MIRType
GuessPhiType(MPhi* phi, bool* hasInputsWithEmptyTypes)
{
#ifdef DEBUG
    // Check that different magic constants aren't flowing together. Ignore
    // JS_OPTIMIZED_OUT, since an operand could be legitimately optimized
    // away.
    MIRType magicType = MIRType::None;
    for (size_t i = 0; i < phi->numOperands(); i++) {
        MDefinition* in = phi->getOperand(i);
        if (in->type() == MIRType::MagicOptimizedArguments ||
            in->type() == MIRType::MagicHole ||
            in->type() == MIRType::MagicIsConstructing)
        {
            if (magicType == MIRType::None)
                magicType = in->type();
            MOZ_ASSERT(magicType == in->type());
        }
    }
#endif

    *hasInputsWithEmptyTypes = false;

    MIRType type = MIRType::None;
    bool convertibleToFloat32 = false;
    bool hasPhiInputs = false;
    for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
        MDefinition* in = phi->getOperand(i);
        if (in->isPhi()) {
            hasPhiInputs = true;
            if (!in->toPhi()->triedToSpecialize())
                continue;
            if (in->type() == MIRType::None) {
                // The operand is a phi we tried to specialize, but we were
                // unable to guess its type. propagateSpecialization will
                // propagate the type to this phi when it becomes known.
                continue;
            }
        }

        // Ignore operands which we've never observed.
        if (in->resultTypeSet() && in->resultTypeSet()->empty()) {
            *hasInputsWithEmptyTypes = true;
            continue;
        }

        // First observed operand fixes the initial type guess.
        if (type == MIRType::None) {
            type = in->type();
            if (in->canProduceFloat32())
                convertibleToFloat32 = true;
            continue;
        }
        if (type != in->type()) {
            if (convertibleToFloat32 && in->type() == MIRType::Float32) {
                // If we only saw definitions that can be converted into
                // Float32 before and encounter a Float32 value, promote
                // previous values to Float32.
                type = MIRType::Float32;
            } else if (IsTypeRepresentableAsDouble(type) &&
                       IsTypeRepresentableAsDouble(in->type()))
            {
                // Specialize phis with int32 and double operands as double.
                type = MIRType::Double;
                convertibleToFloat32 &= in->canProduceFloat32();
            } else {
                // Incompatible operand types: fall back to a boxed Value.
                return MIRType::Value;
            }
        }
    }

    if (type == MIRType::None && !hasPhiInputs) {
        // All inputs are non-phis with empty typesets. Use MIRType::Value
        // in this case, as it's impossible to get better type information.
        MOZ_ASSERT(*hasInputsWithEmptyTypes);
        type = MIRType::Value;
    }

    return type;
}
1465 :
1466 : bool
1467 95 : TypeAnalyzer::respecialize(MPhi* phi, MIRType type)
1468 : {
1469 95 : if (phi->type() == type)
1470 5 : return true;
1471 90 : phi->specialize(type);
1472 90 : return addPhiToWorklist(phi);
1473 : }
1474 :
// Push |phi|'s (now known) type to each phi that consumes it, respecializing
// consumers whose type disagrees. Returns false on allocation failure.
bool
TypeAnalyzer::propagateSpecialization(MPhi* phi)
{
    MOZ_ASSERT(phi->type() != MIRType::None);

    // Verify that this specialization matches any phis depending on it.
    for (MUseDefIterator iter(phi); iter; iter++) {
        if (!iter.def()->isPhi())
            continue;
        MPhi* use = iter.def()->toPhi();
        if (!use->triedToSpecialize())
            continue;
        if (use->type() == MIRType::None) {
            // We tried to specialize this phi, but were unable to guess its
            // type. Now that we know the type of one of its operands, we can
            // specialize it.
            if (!respecialize(use, phi->type()))
                return false;
            continue;
        }
        if (use->type() != phi->type()) {
            // Specialize phis with int32 that can be converted to float and
            // float operands as floats.
            if ((use->type() == MIRType::Int32 && use->canProduceFloat32() && phi->type() == MIRType::Float32) ||
                (phi->type() == MIRType::Int32 && phi->canProduceFloat32() && use->type() == MIRType::Float32))
            {
                if (!respecialize(use, MIRType::Float32))
                    return false;
                continue;
            }

            // Specialize phis with int32 and double operands as double.
            if (IsTypeRepresentableAsDouble(use->type()) &&
                IsTypeRepresentableAsDouble(phi->type()))
            {
                if (!respecialize(use, MIRType::Double))
                    return false;
                continue;
            }

            // This phi in our use chain can now no longer be specialized.
            if (!respecialize(use, MIRType::Value))
                return false;
        }
    }

    return true;
}
1522 :
// Assign a specialized MIRType to every phi in the graph, then run the
// propagation worklist to a fixpoint so dependent phis agree.
bool
TypeAnalyzer::specializePhis()
{
    Vector<MPhi*, 0, SystemAllocPolicy> phisWithEmptyInputTypes;

    for (PostorderIterator block(graph.poBegin()); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Specialize Phis (main loop)"))
            return false;

        for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
            if (mir->shouldCancel("Specialize Phis (inner loop)"))
                return false;

            bool hasInputsWithEmptyTypes;
            MIRType type = GuessPhiType(*phi, &hasInputsWithEmptyTypes);
            phi->specialize(type);
            if (type == MIRType::None) {
                // We tried to guess the type but failed because all operands are
                // phis we still have to visit. Set the triedToSpecialize flag but
                // don't propagate the type to other phis, propagateSpecialization
                // will do that once we know the type of one of the operands.

                // Edge case: when this phi has a non-phi input with an empty
                // typeset, it's possible for two phis to have a cyclic
                // dependency and they will both have MIRType::None. Specialize
                // such phis to MIRType::Value later on.
                if (hasInputsWithEmptyTypes && !phisWithEmptyInputTypes.append(*phi))
                    return false;
                continue;
            }
            if (!propagateSpecialization(*phi))
                return false;
        }
    }

    // Iterate to a fixpoint: respecialization queues more phis, and handling
    // empty-input phis can in turn refill the worklist.
    do {
        while (!phiWorklist_.empty()) {
            if (mir->shouldCancel("Specialize Phis (worklist)"))
                return false;

            MPhi* phi = popPhi();
            if (!propagateSpecialization(phi))
                return false;
        }

        // When two phis have a cyclic dependency and inputs that have an empty
        // typeset (which are ignored by GuessPhiType), we may still have to
        // specialize these to MIRType::Value.
        while (!phisWithEmptyInputTypes.empty()) {
            if (mir->shouldCancel("Specialize Phis (phisWithEmptyInputTypes)"))
                return false;

            MPhi* phi = phisWithEmptyInputTypes.popCopy();
            if (phi->type() == MIRType::None) {
                phi->specialize(MIRType::Value);
                if (!propagateSpecialization(phi))
                    return false;
            }
        }
    } while (!phiWorklist_.empty());

    return true;
}
1586 :
// Make every operand of |phi| match the phi's specialized type by inserting
// conversion/box/unbox instructions at the end of the predecessor blocks.
bool
TypeAnalyzer::adjustPhiInputs(MPhi* phi)
{
    MIRType phiType = phi->type();
    MOZ_ASSERT(phiType != MIRType::None);

    // If we specialized a type that's not Value, there are 3 cases:
    // 1. Every input is of that type.
    // 2. Every observed input is of that type (i.e., some inputs haven't been executed yet).
    // 3. Inputs were doubles and int32s, and was specialized to double.
    if (phiType != MIRType::Value) {
        for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
            MDefinition* in = phi->getOperand(i);
            if (in->type() == phiType)
                continue;

            if (!alloc().ensureBallast())
                return false;

            if (in->isBox() && in->toBox()->input()->type() == phiType) {
                // Peek through a box whose payload already has the right type.
                phi->replaceOperand(i, in->toBox()->input());
            } else {
                MInstruction* replacement;

                if (phiType == MIRType::Double && IsFloatType(in->type())) {
                    // Convert int32 operands to double.
                    replacement = MToDouble::New(alloc(), in);
                } else if (phiType == MIRType::Float32) {
                    if (in->type() == MIRType::Int32 || in->type() == MIRType::Double) {
                        replacement = MToFloat32::New(alloc(), in);
                    } else {
                        // See comment below
                        if (in->type() != MIRType::Value) {
                            MBox* box = MBox::New(alloc(), in);
                            in->block()->insertBefore(in->block()->lastIns(), box);
                            in = box;
                        }

                        MUnbox* unbox = MUnbox::New(alloc(), in, MIRType::Double, MUnbox::Fallible);
                        in->block()->insertBefore(in->block()->lastIns(), unbox);
                        // NOTE(review): |unbox| is inserted but is not the
                        // input of the MToFloat32 below — |in| is passed
                        // instead. Confirm whether |unbox| was intended here.
                        replacement = MToFloat32::New(alloc(), in);
                    }
                } else {
                    // If we know this branch will fail to convert to phiType,
                    // insert a box that'll immediately fail in the fallible unbox
                    // below.
                    if (in->type() != MIRType::Value) {
                        MBox* box = MBox::New(alloc(), in);
                        in->block()->insertBefore(in->block()->lastIns(), box);
                        in = box;
                    }

                    // Be optimistic and insert unboxes when the operand is a
                    // value.
                    replacement = MUnbox::New(alloc(), in, phiType, MUnbox::Fallible);
                }

                in->block()->insertBefore(in->block()->lastIns(), replacement);
                phi->replaceOperand(i, replacement);
            }
        }

        return true;
    }

    // Box every typed input.
    for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
        MDefinition* in = phi->getOperand(i);
        if (in->type() == MIRType::Value)
            continue;

        // The input is being explicitly unboxed, so sneak past and grab
        // the original box.
        if (in->isUnbox() && phi->typeIncludes(in->toUnbox()->input()))
            in = in->toUnbox()->input();

        if (in->type() != MIRType::Value) {
            if (!alloc().ensureBallast())
                return false;

            // Box at the end of the predecessor feeding this operand slot.
            MBasicBlock* pred = phi->block()->getPredecessor(i);
            in = AlwaysBoxAt(alloc(), pred->lastIns(), in);
        }

        phi->replaceOperand(i, in);
    }

    return true;
}
1676 :
1677 : bool
1678 2304 : TypeAnalyzer::adjustInputs(MDefinition* def)
1679 : {
1680 : // Definitions such as MPhi have no type policy.
1681 2304 : if (!def->isInstruction())
1682 0 : return true;
1683 :
1684 2304 : MInstruction* ins = def->toInstruction();
1685 2304 : TypePolicy* policy = ins->typePolicy();
1686 2304 : if (policy && !policy->adjustInputs(alloc(), ins))
1687 0 : return false;
1688 2304 : return true;
1689 : }
1690 :
1691 : void
1692 4 : TypeAnalyzer::replaceRedundantPhi(MPhi* phi)
1693 : {
1694 4 : MBasicBlock* block = phi->block();
1695 4 : js::Value v;
1696 4 : switch (phi->type()) {
1697 : case MIRType::Undefined:
1698 4 : v = UndefinedValue();
1699 4 : break;
1700 : case MIRType::Null:
1701 0 : v = NullValue();
1702 0 : break;
1703 : case MIRType::MagicOptimizedArguments:
1704 0 : v = MagicValue(JS_OPTIMIZED_ARGUMENTS);
1705 0 : break;
1706 : case MIRType::MagicOptimizedOut:
1707 0 : v = MagicValue(JS_OPTIMIZED_OUT);
1708 0 : break;
1709 : case MIRType::MagicUninitializedLexical:
1710 0 : v = MagicValue(JS_UNINITIALIZED_LEXICAL);
1711 0 : break;
1712 : default:
1713 0 : MOZ_CRASH("unexpected type");
1714 : }
1715 4 : MConstant* c = MConstant::New(alloc(), v);
1716 : // The instruction pass will insert the box
1717 4 : block->insertBefore(*(block->begin()), c);
1718 4 : phi->justReplaceAllUsesWith(c);
1719 4 : }
1720 :
bool
TypeAnalyzer::insertConversions()
{
    // Instructions are processed in reverse postorder: all defs are seen
    // before uses. This ensures that output adjustment (which may rewrite
    // inputs of uses) does not conflict with input adjustment.
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        if (mir->shouldCancel("Insert Conversions"))
            return false;

        // Phis of a singleton value type are replaced by an equivalent
        // constant and discarded; all other phis get their inputs adjusted
        // to match the phi's type. The iterator is advanced before the phi
        // is (possibly) discarded so it stays valid.
        for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ) {
            MPhi* phi = *iter++;
            if (phi->type() == MIRType::Undefined ||
                phi->type() == MIRType::Null ||
                phi->type() == MIRType::MagicOptimizedArguments ||
                phi->type() == MIRType::MagicOptimizedOut ||
                phi->type() == MIRType::MagicUninitializedLexical)
            {
                replaceRedundantPhi(phi);
                block->discardPhi(phi);
            } else {
                if (!adjustPhiInputs(phi))
                    return false;
            }
        }

        // AdjustInputs can add/remove/mutate instructions before and after the
        // current instruction. Only increment the iterator after it is finished.
        for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
            // ensureBallast keeps a reserve in the LifoAlloc so the
            // adjustment below cannot fail on a small allocation.
            if (!alloc().ensureBallast())
                return false;

            if (!adjustInputs(*iter))
                return false;
        }
    }
    return true;
}
1759 :
1760 : // This function tries to emit Float32 specialized operations whenever it's possible.
1761 : // MIR nodes are flagged as:
1762 : // - Producers, when they can create Float32 that might need to be coerced into a Double.
1763 : // Loads in Float32 arrays and conversions to Float32 are producers.
1764 : // - Consumers, when they can have Float32 as inputs and validate a legal use of a Float32.
1765 : // Stores in Float32 arrays and conversions to Float32 are consumers.
1766 : // - Float32 commutative, when using the Float32 instruction instead of the Double instruction
1767 : // does not result in a compound loss of precision. This is the case for +, -, /, * with 2
1768 : // operands, for instance. However, an addition with 3 operands is not commutative anymore,
1769 : // so an intermediate coercion is needed.
1770 : // Except for phis, all these flags are known after Ion building, so they cannot change during
1771 : // the process.
1772 : //
1773 : // The idea behind the algorithm is easy: whenever we can prove that a commutative operation
1774 : // has only producers as inputs and consumers as uses, we can specialize the operation as a
1775 : // float32 operation. Otherwise, we have to convert all float32 inputs to doubles. Even
1776 : // if a lot of conversions are produced, GVN will take care of eliminating the redundant ones.
1777 : //
1778 : // Phis have a special status. Phis need to be flagged as producers or consumers as they can
1779 : // be inputs or outputs of commutative instructions. Fortunately, producers and consumers
1780 : // properties are such that we can deduce the property using all non phis inputs first (which form
1781 : // an initial phi graph) and then propagate all properties from one phi to another using a
1782 : // fixed point algorithm. The algorithm is ensured to terminate as each iteration has less or as
1783 : // many flagged phis as the previous iteration (so the worst steady state case is all phis being
1784 : // flagged as false).
1785 : //
1786 : // In a nutshell, the algorithm applies three passes:
1787 : // 1 - Determine which phis are consumers. Each phi gets an initial value by making a global AND on
1788 : // all its non-phi inputs. Then each phi propagates its value to other phis. If after propagation,
1789 : // the flag value changed, we have to reapply the algorithm on all phi operands, as a phi is a
1790 : // consumer if all of its uses are consumers.
1791 : // 2 - Determine which phis are producers. It's the same algorithm, except that we have to reapply
1792 : // the algorithm on all phi uses, as a phi is a producer if all of its operands are producers.
1793 : // 3 - Go through all commutative operations and ensure their inputs are all producers and their
1794 : // uses are all consumers.
bool
TypeAnalyzer::markPhiConsumers()
{
    // Compute, for every phi, whether all of its uses can consume a Float32
    // value. Non-phi uses are checked directly; uses that are themselves
    // phis are resolved by the fixed-point loop below.
    MOZ_ASSERT(phiWorklist_.empty());

    // Iterate in postorder so worklist is initialized to RPO.
    for (PostorderIterator block(graph.poBegin()); block != graph.poEnd(); ++block) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Consumer Phis - Initial state"))
            return false;

        for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); ++phi) {
            MOZ_ASSERT(!phi->isInWorklist());
            // Optimistic initial value: a phi is a consumer if all of its
            // non-phi uses can consume Float32.
            bool canConsumeFloat32 = true;
            for (MUseDefIterator use(*phi); canConsumeFloat32 && use; use++) {
                MDefinition* usedef = use.def();
                canConsumeFloat32 &= usedef->isPhi() || usedef->canConsumeFloat32(use.use());
            }
            phi->setCanConsumeFloat32(canConsumeFloat32);
            if (canConsumeFloat32 && !addPhiToWorklist(*phi))
                return false;
        }
    }

    // Fixed point: a phi used by a non-consumer phi is itself not a
    // consumer; propagate invalidations until no flag changes.
    while (!phiWorklist_.empty()) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Consumer Phis - Fixed point"))
            return false;

        MPhi* phi = popPhi();
        MOZ_ASSERT(phi->canConsumeFloat32(nullptr /* unused */));

        bool validConsumer = true;
        for (MUseDefIterator use(phi); use; use++) {
            MDefinition* def = use.def();
            if (def->isPhi() && !def->canConsumeFloat32(use.use())) {
                validConsumer = false;
                break;
            }
        }

        if (validConsumer)
            continue;

        // Propagate invalidated phis: phi operands still flagged as
        // consumers must be re-examined now that this phi is not one.
        phi->setCanConsumeFloat32(false);
        for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
            MDefinition* input = phi->getOperand(i);
            if (input->isPhi() && !input->isInWorklist() && input->canConsumeFloat32(nullptr /* unused */))
            {
                if (!addPhiToWorklist(input->toPhi()))
                    return false;
            }
        }
    }
    return true;
}
1850 :
1851 : bool
1852 0 : TypeAnalyzer::markPhiProducers()
1853 : {
1854 0 : MOZ_ASSERT(phiWorklist_.empty());
1855 :
1856 : // Iterate in reverse postorder so worklist is initialized to PO.
1857 0 : for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
1858 0 : if (mir->shouldCancel("Ensure Float32 commutativity - Producer Phis - initial state"))
1859 0 : return false;
1860 :
1861 0 : for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); ++phi) {
1862 0 : MOZ_ASSERT(!phi->isInWorklist());
1863 0 : bool canProduceFloat32 = true;
1864 0 : for (size_t i = 0, e = phi->numOperands(); canProduceFloat32 && i < e; ++i) {
1865 0 : MDefinition* input = phi->getOperand(i);
1866 0 : canProduceFloat32 &= input->isPhi() || input->canProduceFloat32();
1867 : }
1868 0 : phi->setCanProduceFloat32(canProduceFloat32);
1869 0 : if (canProduceFloat32 && !addPhiToWorklist(*phi))
1870 0 : return false;
1871 : }
1872 : }
1873 :
1874 0 : while (!phiWorklist_.empty()) {
1875 0 : if (mir->shouldCancel("Ensure Float32 commutativity - Producer Phis - Fixed point"))
1876 0 : return false;
1877 :
1878 0 : MPhi* phi = popPhi();
1879 0 : MOZ_ASSERT(phi->canProduceFloat32());
1880 :
1881 0 : bool validProducer = true;
1882 0 : for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
1883 0 : MDefinition* input = phi->getOperand(i);
1884 0 : if (input->isPhi() && !input->canProduceFloat32()) {
1885 0 : validProducer = false;
1886 0 : break;
1887 : }
1888 : }
1889 :
1890 0 : if (validProducer)
1891 0 : continue;
1892 :
1893 : // Propagate invalidated phis
1894 0 : phi->setCanProduceFloat32(false);
1895 0 : for (MUseDefIterator use(phi); use; use++) {
1896 0 : MDefinition* def = use.def();
1897 0 : if (def->isPhi() && !def->isInWorklist() && def->canProduceFloat32())
1898 : {
1899 0 : if (!addPhiToWorklist(def->toPhi()))
1900 0 : return false;
1901 : }
1902 : }
1903 : }
1904 0 : return true;
1905 : }
1906 :
1907 : bool
1908 0 : TypeAnalyzer::specializeValidFloatOps()
1909 : {
1910 0 : for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
1911 0 : if (mir->shouldCancel("Ensure Float32 commutativity - Instructions"))
1912 0 : return false;
1913 :
1914 0 : for (MInstructionIterator ins(block->begin()); ins != block->end(); ++ins) {
1915 0 : if (!ins->isFloat32Commutative())
1916 0 : continue;
1917 :
1918 0 : if (ins->type() == MIRType::Float32)
1919 0 : continue;
1920 :
1921 0 : if (!alloc().ensureBallast())
1922 0 : return false;
1923 :
1924 : // This call will try to specialize the instruction iff all uses are consumers and
1925 : // all inputs are producers.
1926 0 : ins->trySpecializeFloat32(alloc());
1927 : }
1928 : }
1929 0 : return true;
1930 : }
1931 :
1932 : bool
1933 8 : TypeAnalyzer::graphContainsFloat32()
1934 : {
1935 461 : for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
1936 2506 : for (MDefinitionIterator def(*block); def; def++) {
1937 2053 : if (mir->shouldCancel("Ensure Float32 commutativity - Graph contains Float32"))
1938 0 : return false;
1939 :
1940 2053 : if (def->type() == MIRType::Float32)
1941 0 : return true;
1942 : }
1943 : }
1944 8 : return false;
1945 : }
1946 :
1947 : bool
1948 8 : TypeAnalyzer::tryEmitFloatOperations()
1949 : {
1950 : // Asm.js uses the ahead of time type checks to specialize operations, no need to check
1951 : // them again at this point.
1952 8 : if (mir->compilingWasm())
1953 0 : return true;
1954 :
1955 : // Check ahead of time that there is at least one definition typed as Float32, otherwise we
1956 : // don't need this pass.
1957 8 : if (!graphContainsFloat32())
1958 8 : return true;
1959 :
1960 0 : if (!markPhiConsumers())
1961 0 : return false;
1962 0 : if (!markPhiProducers())
1963 0 : return false;
1964 0 : if (!specializeValidFloatOps())
1965 0 : return false;
1966 0 : return true;
1967 : }
1968 :
bool
TypeAnalyzer::checkFloatCoherency()
{
#ifdef DEBUG
    // Asserts that all Float32 instructions are flowing into Float32 consumers or specialized
    // operations. This is a debug-only consistency walk; release builds
    // simply return true.
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
        if (mir->shouldCancel("Check Float32 coherency"))
            return false;

        for (MDefinitionIterator def(*block); def; def++) {
            if (def->type() != MIRType::Float32)
                continue;

            // Every use of a Float32-typed definition must accept a Float32
            // input.
            for (MUseDefIterator use(*def); use; use++) {
                MDefinition* consumer = use.def();
                MOZ_ASSERT(consumer->isConsistentFloat32Use(use.use()));
            }
        }
    }
#endif
    return true;
}
1992 :
1993 : bool
1994 8 : TypeAnalyzer::analyze()
1995 : {
1996 8 : if (!tryEmitFloatOperations())
1997 0 : return false;
1998 8 : if (!specializePhis())
1999 0 : return false;
2000 8 : if (!insertConversions())
2001 0 : return false;
2002 8 : if (!checkFloatCoherency())
2003 0 : return false;
2004 8 : return true;
2005 : }
2006 :
2007 : bool
2008 8 : jit::ApplyTypeInformation(MIRGenerator* mir, MIRGraph& graph)
2009 : {
2010 16 : TypeAnalyzer analyzer(mir, graph);
2011 :
2012 8 : if (!analyzer.analyze())
2013 0 : return false;
2014 :
2015 8 : return true;
2016 : }
2017 :
2018 : // Check if `def` is only the N-th operand of `useDef`.
2019 : static inline size_t
2020 0 : IsExclusiveNthOperand(MDefinition* useDef, size_t n, MDefinition* def)
2021 : {
2022 0 : uint32_t num = useDef->numOperands();
2023 0 : if (n >= num || useDef->getOperand(n) != def)
2024 0 : return false;
2025 :
2026 0 : for (uint32_t i = 0; i < num; i++) {
2027 0 : if (i == n)
2028 0 : continue;
2029 0 : if (useDef->getOperand(i) == def)
2030 0 : return false;
2031 : }
2032 :
2033 0 : return true;
2034 : }
2035 :
2036 : static size_t
2037 0 : IsExclusiveThisArg(MCall* call, MDefinition* def)
2038 : {
2039 0 : return IsExclusiveNthOperand(call, MCall::IndexOfThis(), def);
2040 : }
2041 :
2042 : static size_t
2043 0 : IsExclusiveFirstArg(MCall* call, MDefinition* def)
2044 : {
2045 0 : return IsExclusiveNthOperand(call, MCall::IndexOfArgument(0), def);
2046 : }
2047 :
// Returns true when `call` is one of the self-hosted RegExp helpers that is
// treated as safe for hoisting: it takes the RegExp `def` only in the
// expected (exclusive) argument position.
static bool
IsRegExpHoistableCall(MCall* call, MDefinition* def)
{
    if (call->isConstructing())
        return false;

    // Resolve the callee name: either from a known single target (which must
    // be a self-hosted builtin), or from a GetIntrinsicValue possibly wrapped
    // in debug-check / type-barrier nodes.
    JSAtom* name;
    if (WrappedFunction* fun = call->getSingleTarget()) {
        if (!fun->isSelfHostedBuiltin())
            return false;
        name = GetSelfHostedFunctionName(fun->rawJSFunction());
    } else {
        MDefinition* funDef = call->getFunction();
        if (funDef->isDebugCheckSelfHosted())
            funDef = funDef->toDebugCheckSelfHosted()->input();
        if (funDef->isTypeBarrier())
            funDef = funDef->toTypeBarrier()->input();

        if (!funDef->isCallGetIntrinsicValue())
            return false;
        name = funDef->toCallGetIntrinsicValue()->name();
    }

    // Hoistable only if the RegExp is the first argument of RegExpBuiltinExec.
    CompileRuntime* runtime = GetJitContext()->runtime;
    if (name == runtime->names().RegExpBuiltinExec ||
        name == runtime->names().UnwrapAndCallRegExpBuiltinExec ||
        name == runtime->names().RegExpMatcher ||
        name == runtime->names().RegExpTester ||
        name == runtime->names().RegExpSearcher)
    {
        return IsExclusiveFirstArg(call, def);
    }

    // RegExp_prototype_Exec takes the RegExp as its `this` value instead.
    if (name == runtime->names().RegExp_prototype_Exec)
        return IsExclusiveThisArg(call, def);

    return false;
}
2087 :
2088 : static bool
2089 0 : CanCompareRegExp(MCompare* compare, MDefinition* def)
2090 : {
2091 : MDefinition* value;
2092 0 : if (compare->lhs() == def) {
2093 0 : value = compare->rhs();
2094 : } else {
2095 0 : MOZ_ASSERT(compare->rhs() == def);
2096 0 : value = compare->lhs();
2097 : }
2098 :
2099 : // Comparing two regexp that weren't cloned will give different result
2100 : // than if they were cloned.
2101 0 : if (value->mightBeType(MIRType::Object))
2102 0 : return false;
2103 :
2104 : // Make sure @@toPrimitive is not called which could notice
2105 : // the difference between a not cloned/cloned regexp.
2106 :
2107 0 : JSOp op = compare->jsop();
2108 : // Strict equality comparison won't invoke @@toPrimitive.
2109 0 : if (op == JSOP_STRICTEQ || op == JSOP_STRICTNE)
2110 0 : return true;
2111 :
2112 0 : if (op != JSOP_EQ && op != JSOP_NE) {
2113 : // Relational comparison always invoke @@toPrimitive.
2114 0 : MOZ_ASSERT(op == JSOP_GT || op == JSOP_GE || op == JSOP_LT || op == JSOP_LE);
2115 0 : return false;
2116 : }
2117 :
2118 : // Loose equality comparison can invoke @@toPrimitive.
2119 0 : if (value->mightBeType(MIRType::Boolean) || value->mightBeType(MIRType::String) ||
2120 0 : value->mightBeType(MIRType::Int32) ||
2121 0 : value->mightBeType(MIRType::Double) || value->mightBeType(MIRType::Float32) ||
2122 0 : value->mightBeType(MIRType::Symbol))
2123 : {
2124 0 : return false;
2125 : }
2126 :
2127 0 : return true;
2128 : }
2129 :
2130 : static inline void
2131 0 : SetNotInWorklist(MDefinitionVector& worklist)
2132 : {
2133 0 : for (size_t i = 0; i < worklist.length(); i++)
2134 0 : worklist[i]->setNotInWorklist();
2135 0 : }
2136 :
// Decide whether `regexp` can be hoisted: every (transitive) use must be
// unable to mutate the RegExp object or pass it to unknown code, except for
// writes to its `lastIndex` slot. On success `*hoistable` is set; false is
// only returned on OOM or compilation cancellation.
static bool
IsRegExpHoistable(MIRGenerator* mir, MDefinition* regexp, MDefinitionVector& worklist,
                  bool* hoistable)
{
    MOZ_ASSERT(worklist.length() == 0);

    if (!worklist.append(regexp))
        return false;
    regexp->setInWorklist();

    // The worklist grows while iterating: whitelisted forwarding ops are
    // appended so their uses are examined too.
    for (size_t i = 0; i < worklist.length(); i++) {
        MDefinition* def = worklist[i];
        if (mir->shouldCancel("IsRegExpHoistable outer loop"))
            return false;

        for (MUseIterator use = def->usesBegin(); use != def->usesEnd(); use++) {
            if (mir->shouldCancel("IsRegExpHoistable inner loop"))
                return false;

            // Ignore resume points. At this point all uses are listed.
            // No DCE or GVN or something has happened.
            if (use->consumer()->isResumePoint())
                continue;

            MDefinition* useDef = use->consumer()->toDefinition();

            // Step through a few white-listed ops: these forward the same
            // object, so their own uses must be checked as well.
            if (useDef->isPhi() || useDef->isFilterTypeSet() || useDef->isGuardShape()) {
                if (useDef->isInWorklist())
                    continue;

                if (!worklist.append(useDef))
                    return false;
                useDef->setInWorklist();
                continue;
            }

            // Instructions that doesn't invoke unknown code that may modify
            // RegExp instance or pass it to elsewhere.
            if (useDef->isRegExpMatcher() || useDef->isRegExpTester() ||
                useDef->isRegExpSearcher())
            {
                if (IsExclusiveNthOperand(useDef, 0, def))
                    continue;
            } else if (useDef->isLoadFixedSlot() || useDef->isTypeOf()) {
                continue;
            } else if (useDef->isCompare()) {
                if (CanCompareRegExp(useDef->toCompare(), def))
                    continue;
            }
            // Instructions that modifies `lastIndex` property.
            else if (useDef->isStoreFixedSlot()) {
                if (IsExclusiveNthOperand(useDef, 0, def)) {
                    MStoreFixedSlot* store = useDef->toStoreFixedSlot();
                    if (store->slot() == RegExpObject::lastIndexSlot())
                        continue;
                }
            } else if (useDef->isSetPropertyCache()) {
                // Only a store to a constant "lastIndex" property id is safe.
                if (IsExclusiveNthOperand(useDef, 0, def)) {
                    MSetPropertyCache* setProp = useDef->toSetPropertyCache();
                    if (setProp->idval()->isConstant()) {
                        Value propIdVal = setProp->idval()->toConstant()->toJSValue();
                        if (propIdVal.isString()) {
                            CompileRuntime* runtime = GetJitContext()->runtime;
                            if (propIdVal.toString() == runtime->names().lastIndex)
                                continue;
                        }
                    }
                }
            }
            // MCall is safe only for some known safe functions.
            else if (useDef->isCall()) {
                if (IsRegExpHoistableCall(useDef->toCall(), def))
                    continue;
            }

            // Everything else is unsafe.
            SetNotInWorklist(worklist);
            worklist.clear();
            *hoistable = false;

            return true;
        }
    }

    // Every transitive use proved safe.
    SetNotInWorklist(worklist);
    worklist.clear();
    *hoistable = true;
    return true;
}
2227 :
// Mark each hoistable MRegExp in the graph as movable and non-cloning (see
// IsRegExpHoistable for the safety analysis). Returns false only on
// OOM/cancellation.
bool
jit::MakeMRegExpHoistable(MIRGenerator* mir, MIRGraph& graph)
{
    MDefinitionVector worklist(graph.alloc());

    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        if (mir->shouldCancel("MakeMRegExpHoistable outer loop"))
            return false;

        for (MDefinitionIterator iter(*block); iter; iter++) {
            // Diagnostic crash for a previously-observed null iterator entry.
            if (!*iter)
                MOZ_CRASH("confirm bug 1263794.");

            if (mir->shouldCancel("MakeMRegExpHoistable inner loop"))
                return false;

            if (!iter->isRegExp())
                continue;

            MRegExp* regexp = iter->toRegExp();

            bool hoistable = false;
            if (!IsRegExpHoistable(mir, regexp, worklist, &hoistable))
                return false;

            if (!hoistable)
                continue;

            // Make MRegExp hoistable
            regexp->setMovable();
            regexp->setDoNotClone();

            // That would be incorrect for global/sticky, because lastIndex
            // could be wrong. Therefore setting the lastIndex to 0. That is
            // faster than a not movable regexp.
            RegExpObject* source = regexp->source();
            if (source->sticky() || source->global()) {
                if (!graph.alloc().ensureBallast())
                    return false;
                // Insert `regexp.lastIndex = 0` right after the MRegExp.
                MConstant* zero = MConstant::New(graph.alloc(), Int32Value(0));
                regexp->block()->insertAfter(regexp, zero);

                MStoreFixedSlot* lastIndex =
                    MStoreFixedSlot::New(graph.alloc(), regexp, RegExpObject::lastIndexSlot(), zero);
                regexp->block()->insertAfter(zero, lastIndex);
            }
        }
    }

    return true;
}
2279 :
2280 : void
2281 141 : jit::RenumberBlocks(MIRGraph& graph)
2282 : {
2283 141 : size_t id = 0;
2284 3509 : for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++)
2285 3368 : block->setId(id++);
2286 141 : }
2287 :
2288 : // A utility for code which deletes blocks. Renumber the remaining blocks,
2289 : // recompute dominators, and optionally recompute AliasAnalysis dependencies.
bool
jit::AccountForCFGChanges(MIRGenerator* mir, MIRGraph& graph, bool updateAliasAnalysis,
                          bool underValueNumberer)
{
    // Renumber the blocks and clear out the old dominator info.
    size_t id = 0;
    for (ReversePostorderIterator i(graph.rpoBegin()), e(graph.rpoEnd()); i != e; ++i) {
        i->clearDominatorInfo();
        i->setId(id++);
    }

    // Recompute dominator info.
    if (!BuildDominatorTree(graph))
        return false;

    // If needed, update alias analysis dependencies.
    if (updateAliasAnalysis) {
        // The AutoTraceLog scopes the whole analysis for trace logging.
        TraceLoggerThread* logger = TraceLoggerForCurrentThread();
        AutoTraceLog log(logger, TraceLogger_AliasAnalysis);

        // A runtime option selects which of the two alias-analysis
        // implementations to run.
        if (JitOptions.disableFlowAA) {
            if (!AliasAnalysis(mir, graph).analyze())
                return false;
        } else {
            if (!FlowAliasAnalysis(mir, graph).analyze())
                return false;
        }
    }

    AssertExtendedGraphCoherency(graph, underValueNumberer);
    return true;
}
2323 : // Remove all blocks not marked with isMarked(). Unmark all remaining blocks.
2324 : // Alias analysis dependencies may be invalid after calling this function.
bool
jit::RemoveUnmarkedBlocks(MIRGenerator* mir, MIRGraph& graph, uint32_t numMarkedBlocks)
{
    // Remove every block not marked with isMarked(), unmark the survivors,
    // then renumber blocks and rebuild the dominator tree.
    if (numMarkedBlocks == graph.numBlocks()) {
        // If all blocks are marked, no blocks need removal. Just clear the
        // marks. We'll still need to update the dominator tree below though,
        // since we may have removed edges even if we didn't remove any blocks.
        graph.unmarkBlocks();
    } else {
        // As we are going to remove edges and basic blocks, we have to mark
        // instructions which would be needed by baseline if we were to
        // bailout.
        for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
            // Advance before inspecting, in case the block is mutated.
            MBasicBlock* block = *it++;
            if (!block->isMarked())
                continue;

            FlagAllOperandsAsHavingRemovedUses(mir, block);
        }

        // Find unmarked blocks and remove them.
        for (ReversePostorderIterator iter(graph.rpoBegin()); iter != graph.rpoEnd();) {
            // Advance the iterator before (possibly) removing the block.
            MBasicBlock* block = *iter++;

            if (block->isMarked()) {
                block->unmark();
                continue;
            }

            // The block is unreachable. Clear out the loop header flag, as
            // we're doing the sweep of a mark-and-sweep here, so we no longer
            // need to worry about whether an unmarked block is a loop or not.
            if (block->isLoopHeader())
                block->clearLoopHeader();

            for (size_t i = 0, e = block->numSuccessors(); i != e; ++i)
                block->getSuccessor(i)->removePredecessor(block);
            graph.removeBlockIncludingPhis(block);
        }
    }

    // Renumber the blocks and update the dominator tree.
    return AccountForCFGChanges(mir, graph, /*updateAliasAnalysis=*/false);
}
2369 :
2370 : // A Simple, Fast Dominance Algorithm by Cooper et al.
2371 : // Modified to support empty intersections for OSR, and in RPO.
2372 : static MBasicBlock*
2373 1401 : IntersectDominators(MBasicBlock* block1, MBasicBlock* block2)
2374 : {
2375 1401 : MBasicBlock* finger1 = block1;
2376 1401 : MBasicBlock* finger2 = block2;
2377 :
2378 1401 : MOZ_ASSERT(finger1);
2379 1401 : MOZ_ASSERT(finger2);
2380 :
2381 : // In the original paper, the block ID comparisons are on the postorder index.
2382 : // This implementation iterates in RPO, so the comparisons are reversed.
2383 :
2384 : // For this function to be called, the block must have multiple predecessors.
2385 : // If a finger is then found to be self-dominating, it must therefore be
2386 : // reachable from multiple roots through non-intersecting control flow.
2387 : // nullptr is returned in this case, to denote an empty intersection.
2388 :
2389 4247 : while (finger1->id() != finger2->id()) {
2390 7319 : while (finger1->id() > finger2->id()) {
2391 2951 : MBasicBlock* idom = finger1->immediateDominator();
2392 2951 : if (idom == finger1)
2393 6 : return nullptr; // Empty intersection.
2394 2945 : finger1 = idom;
2395 : }
2396 :
2397 4739 : while (finger2->id() > finger1->id()) {
2398 1658 : MBasicBlock* idom = finger2->immediateDominator();
2399 1658 : if (idom == finger2)
2400 0 : return nullptr; // Empty intersection.
2401 1658 : finger2 = idom;
2402 : }
2403 : }
2404 1395 : return finger1;
2405 : }
2406 :
2407 : void
2408 0 : jit::ClearDominatorTree(MIRGraph& graph)
2409 : {
2410 0 : for (MBasicBlockIterator iter = graph.begin(); iter != graph.end(); iter++)
2411 0 : iter->clearDominatorInfo();
2412 0 : }
2413 :
// Iterative dataflow computation of each block's immediate dominator
// (Cooper et al. style): repeatedly intersect the dominators of each block's
// predecessors until no block's immediate dominator changes.
static void
ComputeImmediateDominators(MIRGraph& graph)
{
    // The default start block is a root and therefore only self-dominates.
    MBasicBlock* startBlock = graph.entryBlock();
    startBlock->setImmediateDominator(startBlock);

    // Any OSR block is a root and therefore only self-dominates.
    MBasicBlock* osrBlock = graph.osrBlock();
    if (osrBlock)
        osrBlock->setImmediateDominator(osrBlock);

    bool changed = true;

    while (changed) {
        changed = false;

        ReversePostorderIterator block = graph.rpoBegin();

        // For each block in RPO, intersect all dominators.
        for (; block != graph.rpoEnd(); block++) {
            // If a node has once been found to have no exclusive dominator,
            // it will never have an exclusive dominator, so it may be skipped.
            if (block->immediateDominator() == *block)
                continue;

            // A block with no predecessors is not reachable from any entry, so
            // it self-dominates.
            if (MOZ_UNLIKELY(block->numPredecessors() == 0)) {
                block->setImmediateDominator(*block);
                continue;
            }

            MBasicBlock* newIdom = block->getPredecessor(0);

            // Find the first common dominator.
            for (size_t i = 1; i < block->numPredecessors(); i++) {
                MBasicBlock* pred = block->getPredecessor(i);
                // Skip predecessors not yet processed in this fixed point.
                if (pred->immediateDominator() == nullptr)
                    continue;

                newIdom = IntersectDominators(pred, newIdom);

                // If there is no common dominator, the block self-dominates.
                if (newIdom == nullptr) {
                    block->setImmediateDominator(*block);
                    changed = true;
                    break;
                }
            }

            if (newIdom && block->immediateDominator() != newIdom) {
                block->setImmediateDominator(newIdom);
                changed = true;
            }
        }
    }

#ifdef DEBUG
    // Assert that all blocks have dominator information.
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        MOZ_ASSERT(block->immediateDominator() != nullptr);
    }
#endif
}
2479 :
// Build the explicit dominator tree (children lists, subtree sizes, and
// pre-order indices) on top of the immediate dominators computed above.
// Returns false on OOM.
bool
jit::BuildDominatorTree(MIRGraph& graph)
{
    ComputeImmediateDominators(graph);

    Vector<MBasicBlock*, 4, JitAllocPolicy> worklist(graph.alloc());

    // Traversing through the graph in post-order means that every non-phi use
    // of a definition is visited before the def itself. Since a def
    // dominates its uses, by the time we reach a particular
    // block, we have processed all of its dominated children, so
    // block->numDominated() is accurate.
    for (PostorderIterator i(graph.poBegin()); i != graph.poEnd(); i++) {
        MBasicBlock* child = *i;
        MBasicBlock* parent = child->immediateDominator();

        // Dominance is defined such that blocks always dominate themselves.
        child->addNumDominated(1);

        // If the block only self-dominates, it has no definite parent.
        // Add it to the worklist as a root for pre-order traversal.
        // This includes all roots. Order does not matter.
        if (child == parent) {
            if (!worklist.append(child))
                return false;
            continue;
        }

        if (!parent->addImmediatelyDominatedBlock(child))
            return false;

        // Accumulate subtree sizes bottom-up.
        parent->addNumDominated(child->numDominated());
    }

#ifdef DEBUG
    // If compiling with OSR, many blocks will self-dominate.
    // Without OSR, there is only one root block which dominates all.
    if (!graph.osrBlock())
        MOZ_ASSERT(graph.entryBlock()->numDominated() == graph.numBlocks());
#endif
    // Now, iterate through the dominator tree in pre-order and annotate every
    // block with its index in the traversal.
    size_t index = 0;
    while (!worklist.empty()) {
        MBasicBlock* block = worklist.popCopy();
        block->setDomIndex(index);

        if (!worklist.append(block->immediatelyDominatedBlocksBegin(),
                             block->immediatelyDominatedBlocksEnd())) {
            return false;
        }
        index++;
    }

    return true;
}
2536 :
// Build the "successor with phis" reverse mapping for the whole graph.
//
// After this pass, every predecessor of a phi-carrying block records (a)
// which of its successors carries phis and (b) the index of its own edge
// into those phis, making lookup of a specific phi operand O(1).
//
// Infallible in practice; the bool return matches the other pass signatures.
bool
jit::BuildPhiReverseMapping(MIRGraph& graph)
{
    // Build a mapping such that given a basic block, whose successor has one or
    // more phis, we can find our specific input to that phi. To make this fast
    // mapping work we rely on a specific property of our structured control
    // flow graph: For a block with phis, its predecessors each have only one
    // successor with phis. Consider each case:
    //   * Blocks with less than two predecessors cannot have phis.
    //   * Breaks. A break always has exactly one successor, and the break
    //     catch block has exactly one predecessor for each break, as
    //     well as a final predecessor for the actual loop exit.
    //   * Continues. A continue always has exactly one successor, and the
    //     continue catch block has exactly one predecessor for each
    //     continue, as well as a final predecessor for the actual
    //     loop continuation. The continue itself has exactly one
    //     successor.
    //   * An if. Each branch has exactly one predecessor.
    //   * A switch. Each branch has exactly one predecessor.
    //   * Loop tail. A new block is always created for the exit, and if a
    //     break statement is present, the exit block will forward
    //     directly to the break block.
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        if (block->phisEmpty())
            continue;

        // Assert on the above.
        for (size_t j = 0; j < block->numPredecessors(); j++) {
            MBasicBlock* pred = block->getPredecessor(j);

#ifdef DEBUG
            // Verify the structural property stated above: at most one
            // successor of |pred| may carry phis.
            size_t numSuccessorsWithPhis = 0;
            for (size_t k = 0; k < pred->numSuccessors(); k++) {
                MBasicBlock* successor = pred->getSuccessor(k);
                if (!successor->phisEmpty())
                    numSuccessorsWithPhis++;
            }
            MOZ_ASSERT(numSuccessorsWithPhis <= 1);
#endif

            // Record that |block| is |pred|'s phi successor and that |pred|
            // feeds operand |j| of each of |block|'s phis.
            pred->setSuccessorWithPhis(*block, j);
        }
    }

    return true;
}
2583 :
2584 : #ifdef DEBUG
2585 : static bool
2586 17405 : CheckSuccessorImpliesPredecessor(MBasicBlock* A, MBasicBlock* B)
2587 : {
2588 : // Assuming B = succ(A), verify A = pred(B).
2589 22036 : for (size_t i = 0; i < B->numPredecessors(); i++) {
2590 22036 : if (A == B->getPredecessor(i))
2591 17405 : return true;
2592 : }
2593 0 : return false;
2594 : }
2595 :
2596 : static bool
2597 17405 : CheckPredecessorImpliesSuccessor(MBasicBlock* A, MBasicBlock* B)
2598 : {
2599 : // Assuming B = pred(A), verify A = succ(B).
2600 21658 : for (size_t i = 0; i < B->numSuccessors(); i++) {
2601 21658 : if (A == B->getSuccessor(i))
2602 17405 : return true;
2603 : }
2604 0 : return false;
2605 : }
2606 :
2607 : // If you have issues with the usesBalance assertions, then define the macro
2608 : // _DEBUG_CHECK_OPERANDS_USES_BALANCE to spew information on the error output.
2609 : // This output can then be processed with the following awk script to filter and
2610 : // highlight which checks are missing or if there is an unexpected operand /
2611 : // use.
2612 : //
2613 : // define _DEBUG_CHECK_OPERANDS_USES_BALANCE 1
2614 : /*
2615 :
2616 : $ ./js 2>stderr.log
2617 : $ gawk '
2618 : /^==Check/ { context = ""; state = $2; }
2619 : /^[a-z]/ { context = context "\n\t" $0; }
2620 : /^==End/ {
2621 : if (state == "Operand") {
2622 : list[context] = list[context] - 1;
2623 : } else if (state == "Use") {
2624 : list[context] = list[context] + 1;
2625 : }
2626 : }
2627 : END {
2628 : for (ctx in list) {
2629 : if (list[ctx] > 0) {
2630 : print "Missing operand check", ctx, "\n"
2631 : }
2632 : if (list[ctx] < 0) {
2633 : print "Missing use check", ctx, "\n"
2634 : }
2635 : };
2636 : }' < stderr.log
2637 :
2638 : */
2639 :
2640 : static void
2641 477809 : CheckOperand(const MNode* consumer, const MUse* use, int32_t* usesBalance)
2642 : {
2643 477809 : MOZ_ASSERT(use->hasProducer());
2644 477809 : MDefinition* producer = use->producer();
2645 477809 : MOZ_ASSERT(!producer->isDiscarded());
2646 477809 : MOZ_ASSERT(producer->block() != nullptr);
2647 477809 : MOZ_ASSERT(use->consumer() == consumer);
2648 : #ifdef _DEBUG_CHECK_OPERANDS_USES_BALANCE
2649 : Fprinter print(stderr);
2650 : print.printf("==Check Operand\n");
2651 : use->producer()->dump(print);
2652 : print.printf(" index: %" PRIuSIZE "\n", use->consumer()->indexOf(use));
2653 : use->consumer()->dump(print);
2654 : print.printf("==End\n");
2655 : #endif
2656 477809 : --*usesBalance;
2657 477809 : }
2658 :
2659 : static void
2660 477809 : CheckUse(const MDefinition* producer, const MUse* use, int32_t* usesBalance)
2661 : {
2662 477809 : MOZ_ASSERT(!use->consumer()->block()->isDead());
2663 477809 : MOZ_ASSERT_IF(use->consumer()->isDefinition(),
2664 : !use->consumer()->toDefinition()->isDiscarded());
2665 477809 : MOZ_ASSERT(use->consumer()->block() != nullptr);
2666 477809 : MOZ_ASSERT(use->consumer()->getOperand(use->index()) == producer);
2667 : #ifdef _DEBUG_CHECK_OPERANDS_USES_BALANCE
2668 : Fprinter print(stderr);
2669 : print.printf("==Check Use\n");
2670 : use->producer()->dump(print);
2671 : print.printf(" index: %" PRIuSIZE "\n", use->consumer()->indexOf(use));
2672 : use->consumer()->dump(print);
2673 : print.printf("==End\n");
2674 : #endif
2675 477809 : ++*usesBalance;
2676 477809 : }
2677 :
// To properly encode entry resume points, we have to ensure that all the
// operands of the entry resume point are located before the safeInsertTop
// location.
static void
AssertOperandsBeforeSafeInsertTop(MResumePoint* resume)
{
    MBasicBlock* block = resume->block();
    // The OSR block is exempt from this check.
    if (block == block->graph().osrBlock())
        return;
    MInstruction* stop = block->safeInsertTop();
    for (size_t i = 0, e = resume->numOperands(); i < e; ++i) {
        MDefinition* def = resume->getOperand(i);
        // Operands defined in another block necessarily precede this block.
        if (def->block() != block)
            continue;
        // Phis always come before any instruction in their block.
        if (def->isPhi())
            continue;

        // Scan from the top of the block: |def| must be reached before
        // |stop| (the safe insertion point), otherwise the operand is
        // positioned too late to be encoded.
        for (MInstructionIterator ins = block->begin(); true; ins++) {
            if (*ins == def)
                break;
            MOZ_ASSERT(*ins != stop,
                       "Resume point operand located after the safeInsertTop location");
        }
    }
}
2703 : #endif // DEBUG
2704 :
// Debug-only validation of basic MIR graph invariants: entry/OSR block
// shape, successor/predecessor list symmetry, resume point placement, phi
// arity, per-definition block ownership, and a global operand/use balance
// (every operand edge must be matched by exactly one use edge).
void
jit::AssertBasicGraphCoherency(MIRGraph& graph, bool force)
{
#ifdef DEBUG
    if (!JitOptions.fullDebugChecks && !force)
        return;

    MOZ_ASSERT(graph.entryBlock()->numPredecessors() == 0);
    MOZ_ASSERT(graph.entryBlock()->phisEmpty());
    MOZ_ASSERT(!graph.entryBlock()->unreachable());

    if (MBasicBlock* osrBlock = graph.osrBlock()) {
        MOZ_ASSERT(osrBlock->numPredecessors() == 0);
        MOZ_ASSERT(osrBlock->phisEmpty());
        MOZ_ASSERT(osrBlock != graph.entryBlock());
        MOZ_ASSERT(!osrBlock->unreachable());
    }

    if (MResumePoint* resumePoint = graph.entryResumePoint())
        MOZ_ASSERT(resumePoint->block() == graph.entryBlock());

    // Assert successor and predecessor list coherency.
    uint32_t count = 0;
    int32_t usesBalance = 0;
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        count++;

        MOZ_ASSERT(&block->graph() == &graph);
        MOZ_ASSERT(!block->isDead());
        MOZ_ASSERT_IF(block->outerResumePoint() != nullptr,
                      block->entryResumePoint() != nullptr);

        for (size_t i = 0; i < block->numSuccessors(); i++)
            MOZ_ASSERT(CheckSuccessorImpliesPredecessor(*block, block->getSuccessor(i)));

        for (size_t i = 0; i < block->numPredecessors(); i++)
            MOZ_ASSERT(CheckPredecessorImpliesSuccessor(*block, block->getPredecessor(i)));

        // The entry resume point (if any) must belong to this block and not
        // be attached to an instruction; its operands must precede the safe
        // insertion point.
        if (MResumePoint* resume = block->entryResumePoint()) {
            MOZ_ASSERT(!resume->instruction());
            MOZ_ASSERT(resume->block() == *block);
            AssertOperandsBeforeSafeInsertTop(resume);
        }
        if (MResumePoint* resume = block->outerResumePoint()) {
            MOZ_ASSERT(!resume->instruction());
            MOZ_ASSERT(resume->block() == *block);
        }
        for (MResumePointIterator iter(block->resumePointsBegin()); iter != block->resumePointsEnd(); iter++) {
            // We cannot yet assert that, if there is no instruction, this is
            // the entry resume point, because we are still storing resume
            // points in the InlinePropertyTable.
            MOZ_ASSERT_IF(iter->instruction(), iter->instruction()->block() == *block);
            for (uint32_t i = 0, e = iter->numOperands(); i < e; i++)
                CheckOperand(*iter, iter->getUseFor(i), &usesBalance);
        }
        for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
            // One phi operand per incoming CFG edge.
            MOZ_ASSERT(phi->numOperands() == block->numPredecessors());
            MOZ_ASSERT(!phi->isRecoveredOnBailout());
            MOZ_ASSERT(phi->type() != MIRType::None);
            MOZ_ASSERT(phi->dependency() == nullptr);
        }
        for (MDefinitionIterator iter(*block); iter; iter++) {
            MOZ_ASSERT(iter->block() == *block);
            MOZ_ASSERT_IF(iter->hasUses(), iter->type() != MIRType::None);
            MOZ_ASSERT(!iter->isDiscarded());
            // Start/Parameter/Osr* instructions may only appear in the entry
            // or OSR blocks.
            MOZ_ASSERT_IF(iter->isStart(),
                          *block == graph.entryBlock() || *block == graph.osrBlock());
            MOZ_ASSERT_IF(iter->isParameter(),
                          *block == graph.entryBlock() || *block == graph.osrBlock());
            MOZ_ASSERT_IF(iter->isOsrEntry(), *block == graph.osrBlock());
            MOZ_ASSERT_IF(iter->isOsrValue(), *block == graph.osrBlock());

            // Assert that use chains are valid for this instruction.
            for (uint32_t i = 0, end = iter->numOperands(); i < end; i++)
                CheckOperand(*iter, iter->getUseFor(i), &usesBalance);
            for (MUseIterator use(iter->usesBegin()); use != iter->usesEnd(); use++)
                CheckUse(*iter, *use, &usesBalance);

            if (iter->isInstruction()) {
                if (MResumePoint* resume = iter->toInstruction()->resumePoint()) {
                    MOZ_ASSERT(resume->instruction() == *iter);
                    MOZ_ASSERT(resume->block() == *block);
                    MOZ_ASSERT(resume->block()->entryResumePoint() != nullptr);
                }
            }

            // Recovered instructions may only be used by other recovered
            // instructions or resume points, never by live code.
            if (iter->isRecoveredOnBailout())
                MOZ_ASSERT(!iter->hasLiveDefUses());
        }

        // The control instruction is not visited by the MDefinitionIterator.
        MControlInstruction* control = block->lastIns();
        MOZ_ASSERT(control->block() == *block);
        MOZ_ASSERT(!control->hasUses());
        MOZ_ASSERT(control->type() == MIRType::None);
        MOZ_ASSERT(!control->isDiscarded());
        MOZ_ASSERT(!control->isRecoveredOnBailout());
        MOZ_ASSERT(control->resumePoint() == nullptr);
        for (uint32_t i = 0, end = control->numOperands(); i < end; i++)
            CheckOperand(control, control->getUseFor(i), &usesBalance);
        for (size_t i = 0; i < control->numSuccessors(); i++)
            MOZ_ASSERT(control->getSuccessor(i));
    }

    // In case of issues, see the _DEBUG_CHECK_OPERANDS_USES_BALANCE macro above.
    MOZ_ASSERT(usesBalance <= 0, "More use checks than operand checks");
    MOZ_ASSERT(usesBalance >= 0, "More operand checks than use checks");
    MOZ_ASSERT(graph.numBlocks() == count);
#endif
}
2815 :
2816 : #ifdef DEBUG
2817 : static void
2818 214 : AssertReversePostorder(MIRGraph& graph)
2819 : {
2820 : // Check that every block is visited after all its predecessors (except backedges).
2821 11571 : for (ReversePostorderIterator iter(graph.rpoBegin()); iter != graph.rpoEnd(); ++iter) {
2822 11357 : MBasicBlock* block = *iter;
2823 11357 : MOZ_ASSERT(!block->isMarked());
2824 :
2825 24947 : for (size_t i = 0; i < block->numPredecessors(); i++) {
2826 13590 : MBasicBlock* pred = block->getPredecessor(i);
2827 13590 : if (!pred->isMarked()) {
2828 135 : MOZ_ASSERT(pred->isLoopBackedge());
2829 135 : MOZ_ASSERT(block->backedge() == pred);
2830 : }
2831 : }
2832 :
2833 11357 : block->mark();
2834 : }
2835 :
2836 214 : graph.unmarkBlocks();
2837 214 : }
2838 : #endif
2839 :
2840 : #ifdef DEBUG
// Debug-only validation of the dominator tree: immediate dominators, the
// immediately-dominated child lists, and the numDominated() subtree sizes
// must all be mutually consistent with the graph.
static void
AssertDominatorTree(MIRGraph& graph)
{
    // Check dominators.

    // The entry (and OSR, if present) block is its own immediate dominator,
    // i.e. a root of the dominator forest.
    MOZ_ASSERT(graph.entryBlock()->immediateDominator() == graph.entryBlock());
    if (MBasicBlock* osrBlock = graph.osrBlock())
        MOZ_ASSERT(osrBlock->immediateDominator() == osrBlock);
    else
        MOZ_ASSERT(graph.entryBlock()->numDominated() == graph.numBlocks());

    // |i| counts down the number of blocks not yet visited; a block's
    // dominated subtree can never be larger than that remainder.
    size_t i = graph.numBlocks();
    size_t totalNumDominated = 0;
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        MOZ_ASSERT(block->dominates(*block));

        MBasicBlock* idom = block->immediateDominator();
        MOZ_ASSERT(idom->dominates(*block));
        MOZ_ASSERT(idom == *block || idom->id() < block->id());

        if (idom == *block) {
            // A root: its subtree contributes directly to the total.
            totalNumDominated += block->numDominated();
        } else {
            // A non-root must be listed among its idom's children.
            bool foundInParent = false;
            for (size_t j = 0; j < idom->numImmediatelyDominatedBlocks(); j++) {
                if (idom->getImmediatelyDominatedBlock(j) == *block) {
                    foundInParent = true;
                    break;
                }
            }
            MOZ_ASSERT(foundInParent);
        }

        // numDominated() must equal 1 (for the block itself) plus the sum of
        // its children's subtree sizes.
        size_t numDominated = 1;
        for (size_t j = 0; j < block->numImmediatelyDominatedBlocks(); j++) {
            MBasicBlock* dom = block->getImmediatelyDominatedBlock(j);
            MOZ_ASSERT(block->dominates(dom));
            MOZ_ASSERT(dom->id() > block->id());
            MOZ_ASSERT(dom->immediateDominator() == *block);

            numDominated += dom->numDominated();
        }
        MOZ_ASSERT(block->numDominated() == numDominated);
        MOZ_ASSERT(block->numDominated() <= i);
        // A block with no successors can dominate only itself.
        MOZ_ASSERT(block->numSuccessors() != 0 || block->numDominated() == 1);
        i--;
    }
    MOZ_ASSERT(i == 0);
    // Every block belongs to exactly one root's subtree.
    MOZ_ASSERT(totalNumDominated == graph.numBlocks());
}
2891 : #endif
2892 :
2893 : void
2894 214 : jit::AssertGraphCoherency(MIRGraph& graph, bool force)
2895 : {
2896 : #ifdef DEBUG
2897 214 : if (!JitOptions.checkGraphConsistency)
2898 0 : return;
2899 214 : if (!JitOptions.fullDebugChecks && !force)
2900 0 : return;
2901 214 : AssertBasicGraphCoherency(graph, force);
2902 214 : AssertReversePostorder(graph);
2903 : #endif
2904 : }
2905 :
2906 : #ifdef DEBUG
// Return whether a value of |type| can be encoded in a snapshot and thus be
// recovered at bailout time.
static bool
IsResumableMIRType(MIRType type)
{
    // see CodeGeneratorShared::encodeAllocation
    switch (type) {
      case MIRType::Undefined:
      case MIRType::Null:
      case MIRType::Boolean:
      case MIRType::Int32:
      case MIRType::Double:
      case MIRType::Float32:
      case MIRType::String:
      case MIRType::Symbol:
      case MIRType::Object:
      case MIRType::MagicOptimizedArguments:
      case MIRType::MagicOptimizedOut:
      case MIRType::MagicUninitializedLexical:
      case MIRType::MagicIsConstructing:
      case MIRType::Value:
      case MIRType::Int32x4:
      case MIRType::Int16x8:
      case MIRType::Int8x16:
      case MIRType::Float32x4:
      case MIRType::Bool32x4:
      case MIRType::Bool16x8:
      case MIRType::Bool8x16:
        return true;

      // Compiler-internal types (raw pointers, shapes, unboxed slots, ...)
      // have no snapshot encoding and cannot be resumed.
      case MIRType::MagicHole:
      case MIRType::ObjectOrNull:
      case MIRType::None:
      case MIRType::Slots:
      case MIRType::Elements:
      case MIRType::Pointer:
      case MIRType::Shape:
      case MIRType::ObjectGroup:
      case MIRType::Doublex2: // NYI, see also RSimdBox::recover
      case MIRType::SinCosDouble:
      case MIRType::Int64:
        return false;
    }
    MOZ_CRASH("Unknown MIRType.");
}
2950 :
2951 : static void
2952 14063 : AssertResumableOperands(MNode* node)
2953 : {
2954 230895 : for (size_t i = 0, e = node->numOperands(); i < e; ++i) {
2955 216832 : MDefinition* op = node->getOperand(i);
2956 216832 : if (op->isRecoveredOnBailout())
2957 2338 : continue;
2958 214494 : MOZ_ASSERT(IsResumableMIRType(op->type()),
2959 : "Resume point cannot encode its operands");
2960 : }
2961 14063 : }
2962 :
2963 : static void
2964 30086 : AssertIfResumableInstruction(MDefinition* def)
2965 : {
2966 30086 : if (!def->isRecoveredOnBailout())
2967 28974 : return;
2968 1112 : AssertResumableOperands(def);
2969 : }
2970 :
2971 : static void
2972 12951 : AssertResumePointDominatedByOperands(MResumePoint* resume)
2973 : {
2974 225998 : for (size_t i = 0, e = resume->numOperands(); i < e; ++i) {
2975 213047 : MDefinition* op = resume->getOperand(i);
2976 213047 : if (op->type() == MIRType::MagicOptimizedArguments)
2977 249 : continue;
2978 212798 : MOZ_ASSERT(op->block()->dominates(resume->block()),
2979 : "Resume point is not dominated by its operands");
2980 : }
2981 12951 : }
2982 : #endif // DEBUG
2983 :
// Debug-only validation of the extended invariants that hold after the graph
// has been fully built: no critical edges, well-formed loop headers, correct
// successorWithPhis bookkeeping, dominance of instructions / phis / resume
// points by their operands, and resumability of resume point operands.
void
jit::AssertExtendedGraphCoherency(MIRGraph& graph, bool underValueNumberer, bool force)
{
    // Checks the basic GraphCoherency but also other conditions that
    // do not hold immediately (such as the fact that critical edges
    // are split)

#ifdef DEBUG
    if (!JitOptions.checkGraphConsistency)
        return;
    if (!JitOptions.fullDebugChecks && !force)
        return;

    AssertGraphCoherency(graph, force);

    AssertDominatorTree(graph);

    DebugOnly<uint32_t> idx = 0;
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        // Block ids must match the iteration (RPO) order.
        MOZ_ASSERT(block->id() == idx);
        ++idx;

        // No critical edges: a block with several successors may only branch
        // to blocks with a single predecessor.
        if (block->numSuccessors() > 1)
            for (size_t i = 0; i < block->numSuccessors(); i++)
                MOZ_ASSERT(block->getSuccessor(i)->numPredecessors() == 1);

        if (block->isLoopHeader()) {
            if (underValueNumberer && block->numPredecessors() == 3) {
                // Fixup block.
                MOZ_ASSERT(block->getPredecessor(1)->numPredecessors() == 0);
                MOZ_ASSERT(graph.osrBlock(),
                           "Fixup blocks should only exists if we have an osr block.");
            } else {
                // A loop header has exactly the entry edge and the backedge.
                MOZ_ASSERT(block->numPredecessors() == 2);
            }
            MBasicBlock* backedge = block->backedge();
            MOZ_ASSERT(backedge->id() >= block->id());
            MOZ_ASSERT(backedge->numSuccessors() == 1);
            MOZ_ASSERT(backedge->getSuccessor(0) == *block);
        }

        // Each predecessor of a phi-carrying block must point back at it via
        // the successorWithPhis mapping (see BuildPhiReverseMapping).
        if (!block->phisEmpty()) {
            for (size_t i = 0; i < block->numPredecessors(); i++) {
                MBasicBlock* pred = block->getPredecessor(i);
                MOZ_ASSERT(pred->successorWithPhis() == *block);
                MOZ_ASSERT(pred->positionInPhiSuccessor() == i);
            }
        }

        // At most one successor of any block may carry phis.
        uint32_t successorWithPhis = 0;
        for (size_t i = 0; i < block->numSuccessors(); i++)
            if (!block->getSuccessor(i)->phisEmpty())
                successorWithPhis++;

        MOZ_ASSERT(successorWithPhis <= 1);
        MOZ_ASSERT((successorWithPhis != 0) == (block->successorWithPhis() != nullptr));

        // Verify that phi operands dominate the corresponding CFG predecessor
        // edges.
        for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ++iter) {
            MPhi* phi = *iter;
            for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
                MOZ_ASSERT(phi->getOperand(i)->block()->dominates(block->getPredecessor(i)),
                           "Phi input is not dominated by its operand");
            }
        }

        // Verify that instructions are dominated by their operands.
        for (MInstructionIterator iter(block->begin()), end(block->end()); iter != end; ++iter) {
            MInstruction* ins = *iter;
            for (size_t i = 0, e = ins->numOperands(); i < e; ++i) {
                MDefinition* op = ins->getOperand(i);
                MBasicBlock* opBlock = op->block();
                MOZ_ASSERT(opBlock->dominates(*block),
                           "Instruction is not dominated by its operands");

                // If the operand is an instruction in the same block, check
                // that it comes first.
                if (opBlock == *block && !op->isPhi()) {
                    MInstructionIterator opIter = block->begin(op->toInstruction());
                    do {
                        ++opIter;
                        MOZ_ASSERT(opIter != block->end(),
                                   "Operand in same block as instruction does not precede");
                    } while (*opIter != ins);
                }
            }
            AssertIfResumableInstruction(ins);
            if (MResumePoint* resume = ins->resumePoint()) {
                AssertResumePointDominatedByOperands(resume);
                AssertResumableOperands(resume);
            }
        }

        // Verify that the block resume points are dominated by their operands.
        if (MResumePoint* resume = block->entryResumePoint()) {
            AssertResumePointDominatedByOperands(resume);
            AssertResumableOperands(resume);
        }
        if (MResumePoint* resume = block->outerResumePoint()) {
            AssertResumePointDominatedByOperands(resume);
            AssertResumableOperands(resume);
        }
    }
#endif
}
3091 :
3092 :
// Record of a bounds check eligible to subsume later checks with the same
// hash: the check itself plus the pre-order block index (exclusive) up to
// which the check's block dominates the traversal.
struct BoundsCheckInfo
{
    MBoundsCheck* check;    // Representative dominating check.
    uint32_t validEnd;      // First pre-order index no longer dominated.
};

// Map from BoundsCheckHashIgnoreOffset() hash to the currently-dominating
// bounds check for that (index term, length) pair.
typedef HashMap<uint32_t,
                BoundsCheckInfo,
                DefaultHasher<uint32_t>,
                JitAllocPolicy> BoundsCheckMap;
3103 :
3104 : // Compute a hash for bounds checks which ignores constant offsets in the index.
3105 : static HashNumber
3106 1 : BoundsCheckHashIgnoreOffset(MBoundsCheck* check)
3107 : {
3108 1 : SimpleLinearSum indexSum = ExtractLinearSum(check->index());
3109 1 : uintptr_t index = indexSum.term ? uintptr_t(indexSum.term) : 0;
3110 1 : uintptr_t length = uintptr_t(check->length());
3111 1 : return index ^ length;
3112 : }
3113 :
3114 : static MBoundsCheck*
3115 1 : FindDominatingBoundsCheck(BoundsCheckMap& checks, MBoundsCheck* check, size_t index)
3116 : {
3117 : // Since we are traversing the dominator tree in pre-order, when we
3118 : // are looking at the |index|-th block, the next numDominated() blocks
3119 : // we traverse are precisely the set of blocks that are dominated.
3120 : //
3121 : // So, this value is visible in all blocks if:
3122 : // index <= index + ins->block->numDominated()
3123 : // and becomes invalid after that.
3124 1 : HashNumber hash = BoundsCheckHashIgnoreOffset(check);
3125 1 : BoundsCheckMap::Ptr p = checks.lookup(hash);
3126 1 : if (!p || index >= p->value().validEnd) {
3127 : // We didn't find a dominating bounds check.
3128 : BoundsCheckInfo info;
3129 1 : info.check = check;
3130 1 : info.validEnd = index + check->block()->numDominated();
3131 :
3132 1 : if(!checks.put(hash, info))
3133 0 : return nullptr;
3134 :
3135 1 : return check;
3136 : }
3137 :
3138 0 : return p->value().check;
3139 : }
3140 :
3141 : static MathSpace
3142 14 : ExtractMathSpace(MDefinition* ins)
3143 : {
3144 14 : MOZ_ASSERT(ins->isAdd() || ins->isSub());
3145 14 : MBinaryArithInstruction* arith = nullptr;
3146 14 : if (ins->isAdd())
3147 14 : arith = ins->toAdd();
3148 : else
3149 0 : arith = ins->toSub();
3150 14 : switch (arith->truncateKind()) {
3151 : case MDefinition::NoTruncate:
3152 : case MDefinition::TruncateAfterBailouts:
3153 : // TruncateAfterBailouts is considered as infinite space because the
3154 : // LinearSum will effectively remove the bailout check.
3155 13 : return MathSpace::Infinite;
3156 : case MDefinition::IndirectTruncate:
3157 : case MDefinition::Truncate:
3158 1 : return MathSpace::Modulo;
3159 : }
3160 0 : MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unknown TruncateKind");
3161 : }
3162 :
// Extract a linear sum from ins, if possible (otherwise giving the sum 'ins + 0').
//
// Recursively decomposes chains of int32 adds/subs into a single
// (term, constant) pair, but only while every operation lives in the same
// arithmetic |space| (wrap-around modulo vs. overflow-checked infinite).
SimpleLinearSum
jit::ExtractLinearSum(MDefinition* ins, MathSpace space)
{
    // Beta nodes only refine range information; look through them.
    if (ins->isBeta())
        ins = ins->getOperand(0);

    if (ins->type() != MIRType::Int32)
        return SimpleLinearSum(ins, 0);

    if (ins->isConstant())
        return SimpleLinearSum(nullptr, ins->toConstant()->toInt32());

    if (!ins->isAdd() && !ins->isSub())
        return SimpleLinearSum(ins, 0);

    // Only allow math which are in the same space.
    MathSpace insSpace = ExtractMathSpace(ins);
    if (space == MathSpace::Unknown)
        space = insSpace;
    else if (space != insSpace)
        return SimpleLinearSum(ins, 0);
    MOZ_ASSERT(space == MathSpace::Modulo || space == MathSpace::Infinite);

    MDefinition* lhs = ins->getOperand(0);
    MDefinition* rhs = ins->getOperand(1);
    if (lhs->type() != MIRType::Int32 || rhs->type() != MIRType::Int32)
        return SimpleLinearSum(ins, 0);

    // Extract linear sums of each operand.
    SimpleLinearSum lsum = ExtractLinearSum(lhs, space);
    SimpleLinearSum rsum = ExtractLinearSum(rhs, space);

    // LinearSum only considers a single term operand, if both sides have
    // terms, then ignore extracted linear sums.
    if (lsum.term && rsum.term)
        return SimpleLinearSum(ins, 0);

    // Check if this is of the form <SUM> + n or n + <SUM>.
    if (ins->isAdd()) {
        int32_t constant;
        // In modulo space wrap-around is the defined semantics; otherwise
        // the constants must combine without overflow.
        if (space == MathSpace::Modulo)
            constant = lsum.constant + rsum.constant;
        else if (!SafeAdd(lsum.constant, rsum.constant, &constant))
            return SimpleLinearSum(ins, 0);
        return SimpleLinearSum(lsum.term ? lsum.term : rsum.term, constant);
    }

    MOZ_ASSERT(ins->isSub());
    // Check if this is of the form <SUM> - n.
    if (lsum.term) {
        int32_t constant;
        if (space == MathSpace::Modulo)
            constant = lsum.constant - rsum.constant;
        else if (!SafeSub(lsum.constant, rsum.constant, &constant))
            return SimpleLinearSum(ins, 0);
        return SimpleLinearSum(lsum.term, constant);
    }

    // Ignore any of the form n - <SUM>.
    return SimpleLinearSum(ins, 0);
}
3225 :
// Extract a linear inequality holding when a boolean test goes in the
// specified direction, of the form 'lhs + lhsN <= rhs' (or >=).
//
// Returns false when the test is not an int32 comparison that can be put in
// this normalized form. On success, *plhs receives the left-hand linear sum,
// *prhs the right-hand term (may be null for a pure constant), and
// *plessEqual the direction of the inequality.
bool
jit::ExtractLinearInequality(MTest* test, BranchDirection direction,
                             SimpleLinearSum* plhs, MDefinition** prhs, bool* plessEqual)
{
    if (!test->getOperand(0)->isCompare())
        return false;

    MCompare* compare = test->getOperand(0)->toCompare();

    MDefinition* lhs = compare->getOperand(0);
    MDefinition* rhs = compare->getOperand(1);

    // TODO: optimize Compare_UInt32
    if (!compare->isInt32Comparison())
        return false;

    MOZ_ASSERT(lhs->type() == MIRType::Int32);
    MOZ_ASSERT(rhs->type() == MIRType::Int32);

    // Taking the false branch of 'a < b' is the same as taking the true
    // branch of 'a >= b', so negate the comparison in that case.
    JSOp jsop = compare->jsop();
    if (direction == FALSE_BRANCH)
        jsop = NegateCompareOp(jsop);

    SimpleLinearSum lsum = ExtractLinearSum(lhs);
    SimpleLinearSum rsum = ExtractLinearSum(rhs);

    // Fold the right-hand constant into the left-hand sum; give up on
    // overflow.
    if (!SafeSub(lsum.constant, rsum.constant, &lsum.constant))
        return false;

    // Normalize operations to use <= or >=.
    switch (jsop) {
      case JSOP_LE:
        *plessEqual = true;
        break;
      case JSOP_LT:
        /* x < y ==> x + 1 <= y */
        if (!SafeAdd(lsum.constant, 1, &lsum.constant))
            return false;
        *plessEqual = true;
        break;
      case JSOP_GE:
        *plessEqual = false;
        break;
      case JSOP_GT:
        /* x > y ==> x - 1 >= y */
        if (!SafeSub(lsum.constant, 1, &lsum.constant))
            return false;
        *plessEqual = false;
        break;
      default:
        // Equality and other operators cannot be expressed as <= / >=.
        return false;
    }

    *plhs = lsum;
    *prhs = rsum.term;

    return true;
}
3286 :
// Try to eliminate |dominated| by widening an earlier bounds check with the
// same (index term, length) recorded in |checks|. On success, *eliminated is
// set and the dominating check covers both ranges. Returns false on OOM or
// when the range arithmetic overflows, aborting the pass.
static bool
TryEliminateBoundsCheck(BoundsCheckMap& checks, size_t blockIndex, MBoundsCheck* dominated, bool* eliminated)
{
    MOZ_ASSERT(!*eliminated);

    // Replace all uses of the bounds check with the actual index.
    // This is (a) necessary, because we can coalesce two different
    // bounds checks and would otherwise use the wrong index and
    // (b) helps register allocation. Note that this is safe since
    // no other pass after bounds check elimination moves instructions.
    dominated->replaceAllUsesWith(dominated->index());

    // Non-movable or infallible checks cannot be coalesced; leave them.
    if (!dominated->isMovable())
        return true;

    if (!dominated->fallible())
        return true;

    MBoundsCheck* dominating = FindDominatingBoundsCheck(checks, dominated, blockIndex);
    if (!dominating)
        return false;

    if (dominating == dominated) {
        // We didn't find a dominating bounds check.
        return true;
    }

    // We found two bounds checks with the same hash number, but we still have
    // to make sure the lengths and index terms are equal.
    if (dominating->length() != dominated->length())
        return true;

    SimpleLinearSum sumA = ExtractLinearSum(dominating->index());
    SimpleLinearSum sumB = ExtractLinearSum(dominated->index());

    // Both terms should be nullptr or the same definition.
    if (sumA.term != sumB.term)
        return true;

    // This bounds check is redundant.
    *eliminated = true;

    // Normalize the ranges according to the constant offsets in the two indexes.
    int32_t minimumA, maximumA, minimumB, maximumB;
    if (!SafeAdd(sumA.constant, dominating->minimum(), &minimumA) ||
        !SafeAdd(sumA.constant, dominating->maximum(), &maximumA) ||
        !SafeAdd(sumB.constant, dominated->minimum(), &minimumB) ||
        !SafeAdd(sumB.constant, dominated->maximum(), &maximumB))
    {
        return false;
    }

    // Update the dominating check to cover both ranges, denormalizing the
    // result per the constant offset in the index.
    int32_t newMinimum, newMaximum;
    if (!SafeSub(Min(minimumA, minimumB), sumA.constant, &newMinimum) ||
        !SafeSub(Max(maximumA, maximumB), sumA.constant, &newMaximum))
    {
        return false;
    }

    dominating->setMinimum(newMinimum);
    dominating->setMaximum(newMaximum);
    return true;
}
3352 :
// Try to eliminate |barrier| given a dominating test. |filtersNull| /
// |filtersUndefined| state which values the barrier filters out; if the
// dominating |test| (taken in |direction|) already rules those values out
// for the barrier's input, the barrier is redundant and *eliminated is set.
static void
TryEliminateTypeBarrierFromTest(MTypeBarrier* barrier, bool filtersNull, bool filtersUndefined,
                                MTest* test, BranchDirection direction, bool* eliminated)
{
    MOZ_ASSERT(filtersNull || filtersUndefined);

    // Watch for code patterns similar to 'if (x.f) { ... = x.f }'. If x.f
    // is either an object or null/undefined, there will be a type barrier on
    // the latter read as the null/undefined value is never realized there.
    // The type barrier can be eliminated, however, by looking at tests
    // performed on the result of the first operation that filter out all
    // types that have been seen in the first access but not the second.

    // A test 'if (x.f)' filters both null and undefined.

    // Disregard the possible unbox added before the Typebarrier for checking.
    MDefinition* input = barrier->input();
    MUnbox* inputUnbox = nullptr;
    if (input->isUnbox() && input->toUnbox()->mode() != MUnbox::Fallible) {
        inputUnbox = input->toUnbox();
        input = inputUnbox->input();
    }

    MDefinition* subject = nullptr;
    bool removeUndefined;
    bool removeNull;
    test->filtersUndefinedOrNull(direction == TRUE_BRANCH, &subject, &removeUndefined, &removeNull);

    // The Test doesn't filter undefined nor null.
    if (!subject)
        return;

    // Make sure the subject equals the input to the TypeBarrier.
    if (subject != input)
        return;

    // When the TypeBarrier filters undefined, the test must at least also do,
    // this, before the TypeBarrier can get removed.
    if (!removeUndefined && filtersUndefined)
        return;

    // When the TypeBarrier filters null, the test must at least also do,
    // this, before the TypeBarrier can get removed.
    if (!removeNull && filtersNull)
        return;

    // Eliminate the TypeBarrier. The possible TypeBarrier unboxing is kept,
    // but made infallible.
    *eliminated = true;
    if (inputUnbox)
        inputUnbox->makeInfallible();
    barrier->replaceAllUsesWith(barrier->input());
}
3406 :
// Try to eliminate |barrier| by walking up the dominator chain looking for a
// test that already filters out the null/undefined values the barrier would
// filter. Sets *eliminated on success. Always returns true (the bool return
// matches the surrounding pass signatures).
static bool
TryEliminateTypeBarrier(MTypeBarrier* barrier, bool* eliminated)
{
    MOZ_ASSERT(!*eliminated);

    const TemporaryTypeSet* barrierTypes = barrier->resultTypeSet();
    const TemporaryTypeSet* inputTypes = barrier->input()->resultTypeSet();

    // Disregard the possible unbox added before the Typebarrier.
    if (barrier->input()->isUnbox() && barrier->input()->toUnbox()->mode() != MUnbox::Fallible)
        inputTypes = barrier->input()->toUnbox()->input()->resultTypeSet();

    // Without type sets on both sides nothing can be proven.
    if (!barrierTypes || !inputTypes)
        return true;

    bool filtersNull = barrierTypes->filtersType(inputTypes, TypeSet::NullType());
    bool filtersUndefined = barrierTypes->filtersType(inputTypes, TypeSet::UndefinedType());

    // Only barriers that filter null and/or undefined can be removed by a
    // dominating test.
    if (!filtersNull && !filtersUndefined)
        return true;

    // Walk the immediate-dominator chain up to the root (a block that is its
    // own immediate dominator), trying each dominating branch.
    MBasicBlock* block = barrier->block();
    while (true) {
        BranchDirection direction;
        MTest* test = block->immediateDominatorBranch(&direction);

        if (test) {
            TryEliminateTypeBarrierFromTest(barrier, filtersNull, filtersUndefined,
                                            test, direction, eliminated);
        }

        MBasicBlock* previous = block->immediateDominator();
        if (previous == block)
            break;
        block = previous;
    }

    return true;
}
3446 :
// If |def| is a boxed load that can only produce object or null values, and
// all of its uses tolerate the narrower representation, retype it (and any
// dependent type barriers) as MIRType::ObjectOrNull. MToObjectOrNull uses
// become redundant and are queued on |peliminateList| for later removal.
// Returns false only on OOM.
static bool
TryOptimizeLoadObjectOrNull(MDefinition* def, MDefinitionVector* peliminateList)
{
    if (def->type() != MIRType::Value)
        return true;

    // Check if this definition can only produce object or null values.
    TemporaryTypeSet* types = def->resultTypeSet();
    if (!types)
        return true;
    if (types->baseFlags() & ~(TYPE_FLAG_NULL | TYPE_FLAG_ANYOBJECT))
        return true;

    MDefinitionVector eliminateList(def->block()->graph().alloc());

    // First pass: bail out (returning true, i.e. "no change") unless every
    // use is on the whitelist of operations that can consume an
    // ObjectOrNull-typed value.
    for (MUseDefIterator iter(def); iter; ++iter) {
        MDefinition* ndef = iter.def();
        switch (ndef->op()) {
          case MDefinition::Op_Compare:
            if (ndef->toCompare()->compareType() != MCompare::Compare_Null)
                return true;
            break;
          case MDefinition::Op_Test:
            break;
          case MDefinition::Op_PostWriteBarrier:
            break;
          case MDefinition::Op_StoreFixedSlot:
            break;
          case MDefinition::Op_StoreSlot:
            break;
          case MDefinition::Op_ToObjectOrNull:
            if (!eliminateList.append(ndef->toToObjectOrNull()))
                return false;
            break;
          case MDefinition::Op_Unbox:
            if (ndef->type() != MIRType::Object)
                return true;
            break;
          case MDefinition::Op_TypeBarrier:
            // For now, only handle type barriers which are not consumed
            // anywhere and only test that the value is null.
            if (ndef->hasUses() || ndef->resultTypeSet()->getKnownMIRType() != MIRType::Null)
                return true;
            break;
          default:
            return true;
        }
    }

    // On punboxing systems we are better off leaving the value boxed if it
    // is only stored back to the heap.
#ifdef JS_PUNBOX64
    bool foundUse = false;
    for (MUseDefIterator iter(def); iter; ++iter) {
        MDefinition* ndef = iter.def();
        if (!ndef->isStoreFixedSlot() && !ndef->isStoreSlot()) {
            foundUse = true;
            break;
        }
    }
    if (!foundUse)
        return true;
#endif // JS_PUNBOX64

    def->setResultType(MIRType::ObjectOrNull);

    // Fixup the result type of MTypeBarrier uses.
    for (MUseDefIterator iter(def); iter; ++iter) {
        MDefinition* ndef = iter.def();
        if (ndef->isTypeBarrier())
            ndef->setResultType(MIRType::ObjectOrNull);
    }

    // Eliminate MToObjectOrNull instruction uses.
    for (size_t i = 0; i < eliminateList.length(); i++) {
        MDefinition* ndef = eliminateList[i];
        ndef->replaceAllUsesWith(def);
        if (!peliminateList->append(ndef))
            return false;
    }

    return true;
}
3530 :
3531 : static inline MDefinition*
3532 1074 : PassthroughOperand(MDefinition* def)
3533 : {
3534 1074 : if (def->isConvertElementsToDoubles())
3535 0 : return def->toConvertElementsToDoubles()->elements();
3536 1074 : if (def->isMaybeCopyElementsForWrite())
3537 0 : return def->toMaybeCopyElementsForWrite()->object();
3538 1074 : if (def->isConvertUnboxedObjectToNative())
3539 0 : return def->toConvertUnboxedObjectToNative()->object();
3540 1074 : return nullptr;
3541 : }
3542 :
3543 : // Eliminate checks which are redundant given each other or other instructions.
3544 : //
3545 : // A type barrier is considered redundant if all missing types have been tested
3546 : // for by earlier control instructions.
3547 : //
3548 : // A bounds check is considered redundant if it's dominated by another bounds
3549 : // check with the same length and the indexes differ by only a constant amount.
3550 : // In this case we eliminate the redundant bounds check and update the other one
3551 : // to cover the ranges of both checks.
3552 : //
3553 : // Bounds checks are added to a hash map and since the hash function ignores
3554 : // differences in constant offset, this offers a fast way to find redundant
3555 : // checks.
bool
jit::EliminateRedundantChecks(MIRGraph& graph)
{
    // Hash map of bounds checks seen so far; the hash ignores constant
    // offsets so checks differing only by a constant collide and can be
    // merged.
    BoundsCheckMap checks(graph.alloc());

    if (!checks.init())
        return false;

    // Stack for pre-order CFG traversal.
    Vector<MBasicBlock*, 1, JitAllocPolicy> worklist(graph.alloc());

    // The index of the current block in the CFG traversal.
    size_t index = 0;

    // Add all self-dominating blocks to the worklist.
    // This includes all roots. Order does not matter.
    for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
        MBasicBlock* block = *i;
        if (block->immediateDominator() == block) {
            if (!worklist.append(block))
                return false;
        }
    }

    // Definitions that must be discarded after the traversal (discarding
    // during the walk would invalidate iterators/uses).
    MDefinitionVector eliminateList(graph.alloc());

    // Starting from each self-dominating block, traverse the CFG in pre-order.
    while (!worklist.empty()) {
        MBasicBlock* block = worklist.popCopy();

        // Add all immediate dominators to the front of the worklist.
        if (!worklist.append(block->immediatelyDominatedBlocksBegin(),
                             block->immediatelyDominatedBlocksEnd())) {
            return false;
        }

        for (MDefinitionIterator iter(block); iter; ) {
            // Advance before possibly discarding |def| below.
            MDefinition* def = *iter++;

            bool eliminated = false;

            switch (def->op()) {
              case MDefinition::Op_BoundsCheck:
                if (!TryEliminateBoundsCheck(checks, index, def->toBoundsCheck(), &eliminated))
                    return false;
                break;
              case MDefinition::Op_TypeBarrier:
                if (!TryEliminateTypeBarrier(def->toTypeBarrier(), &eliminated))
                    return false;
                break;
              case MDefinition::Op_LoadFixedSlot:
              case MDefinition::Op_LoadSlot:
              case MDefinition::Op_LoadUnboxedObjectOrNull:
                if (!TryOptimizeLoadObjectOrNull(def, &eliminateList))
                    return false;
                break;
              default:
                // Now that code motion passes have finished, replace
                // instructions which pass through one of their operands
                // (and perform additional checks) with that operand.
                if (MDefinition* passthrough = PassthroughOperand(def))
                    def->replaceAllUsesWith(passthrough);
                break;
            }

            if (eliminated)
                block->discardDef(def);
        }
        index++;
    }

    // Every block must have been visited exactly once.
    MOZ_ASSERT(index == graph.numBlocks());

    for (size_t i = 0; i < eliminateList.length(); i++) {
        MDefinition* def = eliminateList[i];
        def->block()->discardDef(def);
    }

    return true;
}
3636 :
// Determine whether |use| of |slotsOrElements| needs an explicit
// MKeepAliveObject after it. It does not if the use is in the same block and
// every instruction between the slots/elements definition and the use is on
// the whitelist of operations known not to trigger GC.
static bool
NeedsKeepAlive(MInstruction* slotsOrElements, MInstruction* use)
{
    MOZ_ASSERT(slotsOrElements->type() == MIRType::Elements ||
               slotsOrElements->type() == MIRType::Slots);

    // A use in another block may run arbitrary code in between; be
    // conservative.
    if (slotsOrElements->block() != use->block())
        return true;

    MBasicBlock* block = use->block();
    MInstructionIterator iter(block->begin(slotsOrElements));
    MOZ_ASSERT(*iter == slotsOrElements);
    ++iter;

    // Scan forward from the definition to the use; any instruction not on
    // the whitelist below could GC and move the slots/elements pointer.
    while (true) {
        if (*iter == use)
            return false;

        switch (iter->op()) {
          case MDefinition::Op_Nop:
          case MDefinition::Op_Constant:
          case MDefinition::Op_KeepAliveObject:
          case MDefinition::Op_Unbox:
          case MDefinition::Op_LoadSlot:
          case MDefinition::Op_StoreSlot:
          case MDefinition::Op_LoadFixedSlot:
          case MDefinition::Op_StoreFixedSlot:
          case MDefinition::Op_LoadElement:
          case MDefinition::Op_StoreElement:
          case MDefinition::Op_InitializedLength:
          case MDefinition::Op_ArrayLength:
          case MDefinition::Op_BoundsCheck:
            iter++;
            break;
          default:
            return true;
        }
    }

    // |use| is in this block, so the loop above must terminate.
    MOZ_CRASH("Unreachable");
}
3678 :
// Insert MKeepAliveObject instructions so that the object owning a raw
// Slots/Elements pointer stays alive across uses of that pointer which might
// otherwise let the object be collected. Returns false only on OOM.
bool
jit::AddKeepAliveInstructions(MIRGraph& graph)
{
    for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
        MBasicBlock* block = *i;

        for (MInstructionIterator insIter(block->begin()); insIter != block->end(); insIter++) {
            MInstruction* ins = *insIter;
            // Only raw slots/elements pointers need keep-alive handling.
            if (ins->type() != MIRType::Elements && ins->type() != MIRType::Slots)
                continue;

            // Find the object owning the slots/elements storage.
            MDefinition* ownerObject;
            switch (ins->op()) {
              case MDefinition::Op_ConstantElements:
                continue;
              case MDefinition::Op_ConvertElementsToDoubles:
                // EliminateRedundantChecks should have replaced all uses.
                MOZ_ASSERT(!ins->hasUses());
                continue;
              case MDefinition::Op_Elements:
              case MDefinition::Op_TypedArrayElements:
              case MDefinition::Op_TypedObjectElements:
                MOZ_ASSERT(ins->numOperands() == 1);
                ownerObject = ins->getOperand(0);
                break;
              case MDefinition::Op_Slots:
                ownerObject = ins->toSlots()->object();
                break;
              default:
                MOZ_CRASH("Unexpected op");
            }

            MOZ_ASSERT(ownerObject->type() == MIRType::Object);

            if (ownerObject->isConstant()) {
                // Constants are kept alive by other pointers, for instance
                // ImmGCPtr in JIT code.
                continue;
            }

            for (MUseDefIterator uses(ins); uses; uses++) {
                MInstruction* use = uses.def()->toInstruction();

                if (use->isStoreElementHole()) {
                    // StoreElementHole has an explicit object operand. If GVN
                    // is disabled, we can get different unbox instructions with
                    // the same object as input, so we check for that case.
                    MOZ_ASSERT_IF(!use->toStoreElementHole()->object()->isUnbox() && !ownerObject->isUnbox(),
                                  use->toStoreElementHole()->object() == ownerObject);
                    continue;
                }

                if (use->isFallibleStoreElement()) {
                    // See StoreElementHole case above.
                    MOZ_ASSERT_IF(!use->toFallibleStoreElement()->object()->isUnbox() && !ownerObject->isUnbox(),
                                  use->toFallibleStoreElement()->object() == ownerObject);
                    continue;
                }

                if (use->isInArray()) {
                    // See StoreElementHole case above.
                    MOZ_ASSERT_IF(!use->toInArray()->object()->isUnbox() && !ownerObject->isUnbox(),
                                  use->toInArray()->object() == ownerObject);
                    continue;
                }

                // Skip uses already proven safe by the instruction whitelist.
                if (!NeedsKeepAlive(ins, use))
                    continue;

                if (!graph.alloc().ensureBallast())
                    return false;
                MKeepAliveObject* keepAlive = MKeepAliveObject::New(graph.alloc(), ownerObject);
                use->block()->insertAfter(use, keepAlive);
            }
        }
    }

    return true;
}
3758 :
3759 : bool
3760 3 : LinearSum::multiply(int32_t scale)
3761 : {
3762 9 : for (size_t i = 0; i < terms_.length(); i++) {
3763 6 : if (!SafeMul(scale, terms_[i].scale, &terms_[i].scale))
3764 0 : return false;
3765 : }
3766 3 : return SafeMul(scale, constant_, &constant_);
3767 : }
3768 :
3769 : bool
3770 0 : LinearSum::divide(uint32_t scale)
3771 : {
3772 0 : MOZ_ASSERT(scale > 0);
3773 :
3774 0 : for (size_t i = 0; i < terms_.length(); i++) {
3775 0 : if (terms_[i].scale % scale != 0)
3776 0 : return false;
3777 : }
3778 0 : if (constant_ % scale != 0)
3779 0 : return false;
3780 :
3781 0 : for (size_t i = 0; i < terms_.length(); i++)
3782 0 : terms_[i].scale /= scale;
3783 0 : constant_ /= scale;
3784 :
3785 0 : return true;
3786 : }
3787 :
3788 : bool
3789 3 : LinearSum::add(const LinearSum& other, int32_t scale /* = 1 */)
3790 : {
3791 6 : for (size_t i = 0; i < other.terms_.length(); i++) {
3792 3 : int32_t newScale = scale;
3793 3 : if (!SafeMul(scale, other.terms_[i].scale, &newScale))
3794 0 : return false;
3795 3 : if (!add(other.terms_[i].term, newScale))
3796 0 : return false;
3797 : }
3798 3 : int32_t newConstant = scale;
3799 3 : if (!SafeMul(scale, other.constant_, &newConstant))
3800 0 : return false;
3801 3 : return add(newConstant);
3802 : }
3803 :
3804 : bool
3805 0 : LinearSum::add(SimpleLinearSum other, int32_t scale)
3806 : {
3807 0 : if (other.term && !add(other.term, scale))
3808 0 : return false;
3809 :
3810 : int32_t constant;
3811 0 : if (!SafeMul(other.constant, scale, &constant))
3812 0 : return false;
3813 :
3814 0 : return add(constant);
3815 : }
3816 :
// Add |term * scale| to the sum. Constant definitions are folded straight
// into the constant part; a term already present has its scale merged (and
// is removed if the combined scale becomes zero). Returns false on int32
// overflow.
bool
LinearSum::add(MDefinition* term, int32_t scale)
{
    MOZ_ASSERT(term);

    // A zero-scaled term contributes nothing.
    if (scale == 0)
        return true;

    // Fold constant terms into constant_.
    if (MConstant* termConst = term->maybeConstantValue()) {
        int32_t constant = termConst->toInt32();
        if (!SafeMul(constant, scale, &constant))
            return false;
        return add(constant);
    }

    // If the term already appears in the sum, merge the scales.
    for (size_t i = 0; i < terms_.length(); i++) {
        if (term == terms_[i].term) {
            if (!SafeAdd(scale, terms_[i].scale, &terms_[i].scale))
                return false;
            if (terms_[i].scale == 0) {
                // The term cancelled out: swap-remove it with the last entry.
                terms_[i] = terms_.back();
                terms_.popBack();
            }
            return true;
        }
    }

    // Otherwise append a new term. Callers cannot recover from OOM here, so
    // crash deliberately rather than returning false.
    AutoEnterOOMUnsafeRegion oomUnsafe;
    if (!terms_.append(LinearTerm(term, scale)))
        oomUnsafe.crash("LinearSum::add");

    return true;
}
3850 :
// Add |constant| to the constant part of the sum; returns false on int32
// overflow.
bool
LinearSum::add(int32_t constant)
{
    return SafeAdd(constant, constant_, &constant_);
}
3856 :
// Print the sum in human-readable form, e.g. "#1+2*#3-4", where #id names
// the MIR definition with that id.
void
LinearSum::dump(GenericPrinter& out) const
{
    for (size_t i = 0; i < terms_.length(); i++) {
        int32_t scale = terms_[i].scale;
        int32_t id = terms_[i].term->id();
        // Zero-scaled terms should have been removed by add().
        MOZ_ASSERT(scale);
        if (scale > 0) {
            if (i)
                out.printf("+");
            if (scale == 1)
                out.printf("#%d", id);
            else
                out.printf("%d*#%d", scale, id);
        } else if (scale == -1) {
            out.printf("-#%d", id);
        } else {
            // Negative scales other than -1 carry their own minus sign.
            out.printf("%d*#%d", scale, id);
        }
    }
    // The constant prints its own sign when negative.
    if (constant_ > 0)
        out.printf("+%d", constant_);
    else if (constant_ < 0)
        out.printf("%d", constant_);
}
3882 :
// Debugging convenience: print the sum to stderr.
void
LinearSum::dump() const
{
    Fprinter out(stderr);
    dump(out);
    out.finish();
}
3890 :
// Materialize |sum| as MIR appended to |block|, returning the definition
// holding its value. All generated arithmetic is int32-specialized. The
// constant part is only emitted when |convertConstant| is set; otherwise
// callers are expected to account for it separately.
MDefinition*
jit::ConvertLinearSum(TempAllocator& alloc, MBasicBlock* block, const LinearSum& sum, bool convertConstant)
{
    MDefinition* def = nullptr;

    for (size_t i = 0; i < sum.numTerms(); i++) {
        LinearTerm term = sum.term(i);
        // Constant terms are folded into sum.constant() by LinearSum::add.
        MOZ_ASSERT(!term.term->isConstant());
        if (term.scale == 1) {
            if (def) {
                def = MAdd::New(alloc, def, term.term);
                def->toAdd()->setInt32Specialization();
                block->insertAtEnd(def->toInstruction());
                def->computeRange(alloc);
            } else {
                // First unit-scaled term: reuse the definition directly.
                def = term.term;
            }
        } else if (term.scale == -1) {
            // Emit (def - term); start from 0 when this is the first term.
            if (!def) {
                def = MConstant::New(alloc, Int32Value(0));
                block->insertAtEnd(def->toInstruction());
                def->computeRange(alloc);
            }
            def = MSub::New(alloc, def, term.term);
            def->toSub()->setInt32Specialization();
            block->insertAtEnd(def->toInstruction());
            def->computeRange(alloc);
        } else {
            // General case: multiply by the scale, then accumulate.
            MOZ_ASSERT(term.scale != 0);
            MConstant* factor = MConstant::New(alloc, Int32Value(term.scale));
            block->insertAtEnd(factor);
            MMul* mul = MMul::New(alloc, term.term, factor);
            mul->setInt32Specialization();
            block->insertAtEnd(mul);
            mul->computeRange(alloc);
            if (def) {
                def = MAdd::New(alloc, def, mul);
                def->toAdd()->setInt32Specialization();
                block->insertAtEnd(def->toInstruction());
                def->computeRange(alloc);
            } else {
                def = mul;
            }
        }
    }

    if (convertConstant && sum.constant()) {
        MConstant* constant = MConstant::New(alloc, Int32Value(sum.constant()));
        block->insertAtEnd(constant);
        constant->computeRange(alloc);
        if (def) {
            def = MAdd::New(alloc, def, constant);
            def->toAdd()->setInt32Specialization();
            block->insertAtEnd(def->toInstruction());
            def->computeRange(alloc);
        } else {
            def = constant;
        }
    }

    // An empty sum materializes as the constant 0.
    if (!def) {
        def = MConstant::New(alloc, Int32Value(0));
        block->insertAtEnd(def->toInstruction());
        def->computeRange(alloc);
    }

    return def;
}
3959 :
// Materialize the inequality |sum >= 0| as an MCompare appended to |block|.
// Where possible a -1-scaled term is moved to the right-hand side, and a
// constant is folded into the comparison operator (JSOP_GE vs JSOP_GT) or
// into the rhs constant, to keep the generated MIR small.
MCompare*
jit::ConvertLinearInequality(TempAllocator& alloc, MBasicBlock* block, const LinearSum& sum)
{
    LinearSum lhs(sum);

    // Look for a term with a -1 scale which we can use for the rhs.
    MDefinition* rhsDef = nullptr;
    for (size_t i = 0; i < lhs.numTerms(); i++) {
        if (lhs.term(i).scale == -1) {
            AutoEnterOOMUnsafeRegion oomUnsafe;
            rhsDef = lhs.term(i).term;
            // Cancel the term out of the lhs by adding it back with scale 1.
            if (!lhs.add(rhsDef, 1))
                oomUnsafe.crash("ConvertLinearInequality");
            break;
        }
    }

    MDefinition* lhsDef = nullptr;
    JSOp op = JSOP_GE;

    // do/while(false) lets the cases below break out once lhsDef is built.
    do {
        if (!lhs.numTerms()) {
            // No variable terms: lhs is just the constant.
            lhsDef = MConstant::New(alloc, Int32Value(lhs.constant()));
            block->insertAtEnd(lhsDef->toInstruction());
            lhsDef->computeRange(alloc);
            break;
        }

        lhsDef = ConvertLinearSum(alloc, block, lhs);
        if (lhs.constant() == 0)
            break;

        // lhs - 1 >= rhs is equivalent to lhs > rhs.
        if (lhs.constant() == -1) {
            op = JSOP_GT;
            break;
        }

        // No rhs term yet: move the (negated) constant to the rhs.
        if (!rhsDef) {
            int32_t constant = lhs.constant();
            if (SafeMul(constant, -1, &constant)) {
                rhsDef = MConstant::New(alloc, Int32Value(constant));
                block->insertAtEnd(rhsDef->toInstruction());
                rhsDef->computeRange(alloc);
                break;
            }
        }

        // Fall back to adding the constant to the lhs explicitly.
        MDefinition* constant = MConstant::New(alloc, Int32Value(lhs.constant()));
        block->insertAtEnd(constant->toInstruction());
        constant->computeRange(alloc);
        lhsDef = MAdd::New(alloc, lhsDef, constant);
        lhsDef->toAdd()->setInt32Specialization();
        block->insertAtEnd(lhsDef->toInstruction());
        lhsDef->computeRange(alloc);
    } while (false);

    if (!rhsDef) {
        rhsDef = MConstant::New(alloc, Int32Value(0));
        block->insertAtEnd(rhsDef->toInstruction());
        rhsDef->computeRange(alloc);
    }

    MCompare* compare = MCompare::New(alloc, lhsDef, rhsDef, op);
    block->insertAtEnd(compare);
    compare->setCompareType(MCompare::Compare_Int32);

    return compare;
}
4028 :
// Inspect a single use |ins| of the |this| value inside a constructor run
// under 'new', and record its effect on the set of definite properties being
// accumulated on |baseobj|. |*phandled| is set when the use is benign (or
// was fully processed); a false return indicates OOM or another hard error.
static bool
AnalyzePoppedThis(JSContext* cx, ObjectGroup* group,
                  MDefinition* thisValue, MInstruction* ins, bool definitelyExecuted,
                  HandlePlainObject baseobj,
                  Vector<TypeNewScript::Initializer>* initializerList,
                  Vector<PropertyName*>* accessedProperties,
                  bool* phandled)
{
    // Determine the effect that a use of the |this| value when calling |new|
    // on a script has on the properties definitely held by the new object.

    if (ins->isCallSetProperty()) {
        MCallSetProperty* setprop = ins->toCallSetProperty();

        if (setprop->object() != thisValue)
            return true;

        // Writes to these names have special semantics and are never treated
        // as definite properties.
        if (setprop->name() == cx->names().prototype ||
            setprop->name() == cx->names().proto ||
            setprop->name() == cx->names().constructor)
        {
            return true;
        }

        // Ignore assignments to properties that were already written to.
        if (baseobj->lookup(cx, NameToId(setprop->name()))) {
            *phandled = true;
            return true;
        }

        // Don't add definite properties for properties that were already
        // read in the constructor.
        for (size_t i = 0; i < accessedProperties->length(); i++) {
            if ((*accessedProperties)[i] == setprop->name())
                return true;
        }

        // Assignments to new properties must always execute.
        if (!definitelyExecuted)
            return true;

        RootedId id(cx, NameToId(setprop->name()));
        if (!AddClearDefiniteGetterSetterForPrototypeChain(cx, group, id)) {
            // The prototype chain already contains a getter/setter for this
            // property, or type information is too imprecise.
            return true;
        }

        // Add the property to the object, being careful not to update type information.
        DebugOnly<unsigned> slotSpan = baseobj->slotSpan();
        MOZ_ASSERT(!baseobj->containsPure(id));
        if (!NativeObject::addDataProperty(cx, baseobj, id, baseobj->slotSpan(), JSPROP_ENUMERATE))
            return false;
        MOZ_ASSERT(baseobj->slotSpan() != slotSpan);
        MOZ_ASSERT(!baseobj->inDictionaryMode());

        // Collect the chain of inlined-call resume points enclosing this
        // write, outermost last, so the initializer list can replay them.
        Vector<MResumePoint*> callerResumePoints(cx);
        for (MResumePoint* rp = ins->block()->callerResumePoint();
             rp;
             rp = rp->block()->callerResumePoint())
        {
            if (!callerResumePoints.append(rp))
                return false;
        }

        // Emit SETPROP_FRAME entries from the outermost frame inward.
        for (int i = callerResumePoints.length() - 1; i >= 0; i--) {
            MResumePoint* rp = callerResumePoints[i];
            JSScript* script = rp->block()->info().script();
            TypeNewScript::Initializer entry(TypeNewScript::Initializer::SETPROP_FRAME,
                                             script->pcToOffset(rp->pc()));
            if (!initializerList->append(entry))
                return false;
        }

        JSScript* script = ins->block()->info().script();
        TypeNewScript::Initializer entry(TypeNewScript::Initializer::SETPROP,
                                         script->pcToOffset(setprop->resumePoint()->pc()));
        if (!initializerList->append(entry))
            return false;

        *phandled = true;
        return true;
    }

    if (ins->isCallGetProperty()) {
        MCallGetProperty* get = ins->toCallGetProperty();

        /*
         * Properties can be read from the 'this' object if the following hold:
         *
         * - The read is not on a getter along the prototype chain, which
         *   could cause 'this' to escape.
         *
         * - The accessed property is either already a definite property or
         *   is not later added as one. Since the definite properties are
         *   added to the object at the point of its creation, reading a
         *   definite property before it is assigned could incorrectly hit.
         */
        RootedId id(cx, NameToId(get->name()));
        if (!baseobj->lookup(cx, id) && !accessedProperties->append(get->name()))
            return false;

        if (!AddClearDefiniteGetterSetterForPrototypeChain(cx, group, id)) {
            // The |this| value can escape if any property reads it does go
            // through a getter.
            return true;
        }

        *phandled = true;
        return true;
    }

    // Write barriers observe the value but cannot let it escape.
    if (ins->isPostWriteBarrier()) {
        *phandled = true;
        return true;
    }

    // Any other use is left unhandled; the caller stops the analysis there.
    return true;
}
4148 :
4149 : static int
4150 1 : CmpInstructions(const void* a, const void* b)
4151 : {
4152 2 : return (*static_cast<MInstruction * const*>(a))->id() -
4153 2 : (*static_cast<MInstruction * const*>(b))->id();
4154 : }
4155 :
// Compile |fun| with a lightweight Ion analysis pass and inspect every use of
// |this| to discover properties that are definitely added to objects created
// by 'new fun(...)' before they can escape. Discovered properties are added
// to |baseobj| and replay instructions to |initializerList|. Returns false
// only on hard errors (OOM, over-recursion); "analysis gave up" paths return
// true with no changes.
bool
jit::AnalyzeNewScriptDefiniteProperties(JSContext* cx, HandleFunction fun,
                                        ObjectGroup* group, HandlePlainObject baseobj,
                                        Vector<TypeNewScript::Initializer>* initializerList)
{
    MOZ_ASSERT(cx->zone()->types.activeAnalysis);

    // When invoking 'new' on the specified script, try to find some properties
    // which will definitely be added to the created object before it has a
    // chance to escape and be accessed elsewhere.

    RootedScript script(cx, JSFunction::getOrCreateScript(cx, fun));
    if (!script)
        return false;

    // The analysis requires a baseline-compilable script and active JITs.
    if (!jit::IsIonEnabled(cx) || !jit::IsBaselineEnabled(cx) || !script->canBaselineCompile())
        return true;

    // Skip very large scripts; the analysis cost would outweigh the benefit.
    static const uint32_t MAX_SCRIPT_SIZE = 2000;
    if (script->length() > MAX_SCRIPT_SIZE)
        return true;

    TraceLoggerThread* logger = TraceLoggerForCurrentThread(cx);
    TraceLoggerEvent event(TraceLogger_AnnotateScripts, script);
    AutoTraceLog logScript(logger, event);
    AutoTraceLog logCompile(logger, TraceLogger_IonAnalysis);

    Vector<PropertyName*> accessedProperties(cx);

    LifoAlloc alloc(TempAllocator::PreferredLifoChunkSize);
    TempAllocator temp(&alloc);
    JitContext jctx(cx, &temp);

    if (!jit::CanLikelyAllocateMoreExecutableMemory())
        return true;

    if (!cx->compartment()->ensureJitCompartmentExists(cx))
        return false;

    // IonBuilder consumes baseline caches, so ensure a baseline script exists.
    if (!script->hasBaselineScript()) {
        MethodStatus status = BaselineCompile(cx, script);
        if (status == Method_Error)
            return false;
        if (status != Method_Compiled)
            return true;
    }

    TypeScript::SetThis(cx, script, TypeSet::ObjectType(group));

    MIRGraph graph(&temp);
    InlineScriptTree* inlineScriptTree = InlineScriptTree::New(&temp, nullptr, nullptr, script);
    if (!inlineScriptTree)
        return false;

    CompileInfo info(script, fun,
                     /* osrPc = */ nullptr,
                     Analysis_DefiniteProperties,
                     script->needsArgsObj(),
                     inlineScriptTree);

    const OptimizationInfo* optimizationInfo = IonOptimizations.get(OptimizationLevel::Normal);

    CompilerConstraintList* constraints = NewCompilerConstraintList(temp);
    if (!constraints) {
        ReportOutOfMemory(cx);
        return false;
    }

    BaselineInspector inspector(script);
    const JitCompileOptions options(cx);

    IonBuilder builder(cx, CompileCompartment::get(cx->compartment()), options, &temp, &graph, constraints,
                       &inspector, &info, optimizationInfo, /* baselineFrame = */ nullptr);

    AbortReasonOr<Ok> buildResult = builder.build();
    if (buildResult.isErr()) {
        AbortReason reason = buildResult.unwrapErr();
        // Propagate genuine errors; other abort reasons simply end the
        // analysis without results.
        if (cx->isThrowingOverRecursed() || cx->isThrowingOutOfMemory())
            return false;
        if (reason == AbortReason::Alloc) {
            ReportOutOfMemory(cx);
            return false;
        }
        MOZ_ASSERT(!cx->isExceptionPending());
        return true;
    }

    FinishDefinitePropertiesAnalysis(cx, constraints);

    if (!SplitCriticalEdges(graph)) {
        ReportOutOfMemory(cx);
        return false;
    }

    RenumberBlocks(graph);

    if (!BuildDominatorTree(graph)) {
        ReportOutOfMemory(cx);
        return false;
    }

    if (!EliminatePhis(&builder, graph, AggressiveObservability)) {
        ReportOutOfMemory(cx);
        return false;
    }

    MDefinition* thisValue = graph.entryBlock()->getSlot(info.thisSlot());

    // Get a list of instructions using the |this| value in the order they
    // appear in the graph.
    Vector<MInstruction*> instructions(cx);

    for (MUseDefIterator uses(thisValue); uses; uses++) {
        MDefinition* use = uses.def();

        // Don't track |this| through assignments to phis.
        if (!use->isInstruction())
            return true;

        if (!instructions.append(use->toInstruction()))
            return false;
    }

    // Sort the instructions to visit in increasing order.
    qsort(instructions.begin(), instructions.length(),
          sizeof(MInstruction*), CmpInstructions);

    // Find all exit blocks in the graph.
    Vector<MBasicBlock*> exitBlocks(cx);
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        if (!block->numSuccessors() && !exitBlocks.append(*block))
            return false;
    }

    // id of the last block which added a new property.
    size_t lastAddedBlock = 0;

    for (size_t i = 0; i < instructions.length(); i++) {
        MInstruction* ins = instructions[i];

        // Track whether the use of |this| is in unconditional code, i.e.
        // the block dominates all graph exits.
        bool definitelyExecuted = true;
        for (size_t i = 0; i < exitBlocks.length(); i++) {
            // Walk each exit's dominator chain; if it reaches the root
            // without hitting ins->block(), that block is conditional.
            for (MBasicBlock* exit = exitBlocks[i];
                 exit != ins->block();
                 exit = exit->immediateDominator())
            {
                if (exit == exit->immediateDominator()) {
                    definitelyExecuted = false;
                    break;
                }
            }
        }

        // Also check to see if the instruction is inside a loop body. Even if
        // an access will always execute in the script, if it executes multiple
        // times then we can get confused when rolling back objects while
        // clearing the new script information.
        if (ins->block()->loopDepth() != 0)
            definitelyExecuted = false;

        bool handled = false;
        size_t slotSpan = baseobj->slotSpan();
        if (!AnalyzePoppedThis(cx, group, thisValue, ins, definitelyExecuted,
                               baseobj, initializerList, &accessedProperties, &handled))
        {
            return false;
        }
        // Stop at the first use that could let |this| escape.
        if (!handled)
            break;

        if (slotSpan != baseobj->slotSpan()) {
            MOZ_ASSERT(ins->block()->id() >= lastAddedBlock);
            lastAddedBlock = ins->block()->id();
        }
    }

    if (baseobj->slotSpan() != 0) {
        // We found some definite properties, but their correctness is still
        // contingent on the correct frames being inlined. Add constraints to
        // invalidate the definite properties if additional functions could be
        // called at the inline frame sites.
        Vector<MBasicBlock*> exitBlocks(cx);
        for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
            // Inlining decisions made after the last new property was added to
            // the object don't need to be frozen.
            if (block->id() > lastAddedBlock)
                break;
            if (MResumePoint* rp = block->callerResumePoint()) {
                if (block->numPredecessors() == 1 && block->getPredecessor(0) == rp->block()) {
                    JSScript* script = rp->block()->info().script();
                    if (!AddClearDefiniteFunctionUsesInScript(cx, group, script, block->info().script()))
                        return false;
                }
            }
        }
    }

    return true;
}
4357 :
4358 : static bool
4359 1027 : ArgumentsUseCanBeLazy(JSContext* cx, JSScript* script, MInstruction* ins, size_t index,
4360 : bool* argumentsContentsObserved)
4361 : {
4362 : // We can read the frame's arguments directly for f.apply(x, arguments).
4363 1027 : if (ins->isCall()) {
4364 141 : if (*ins->toCall()->resumePoint()->pc() == JSOP_FUNAPPLY &&
4365 91 : ins->toCall()->numActualArgs() == 2 &&
4366 41 : index == MCall::IndexOfArgument(1))
4367 : {
4368 41 : *argumentsContentsObserved = true;
4369 41 : return true;
4370 : }
4371 : }
4372 :
4373 : // arguments[i] can read fp->canonicalActualArg(i) directly.
4374 986 : if (ins->isCallGetElement() && index == 0) {
4375 756 : *argumentsContentsObserved = true;
4376 756 : return true;
4377 : }
4378 :
4379 : // MGetArgumentsObjectArg needs to be considered as a use that allows laziness.
4380 230 : if (ins->isGetArgumentsObjectArg() && index == 0)
4381 6 : return true;
4382 :
4383 : // arguments.length length can read fp->numActualArgs() directly.
4384 : // arguments.callee can read fp->callee() directly if the arguments object
4385 : // is mapped.
4386 654 : if (ins->isCallGetProperty() && index == 0 &&
4387 215 : (ins->toCallGetProperty()->name() == cx->names().length ||
4388 0 : (script->hasMappedArgsObj() && ins->toCallGetProperty()->name() == cx->names().callee)))
4389 : {
4390 215 : return true;
4391 : }
4392 :
4393 9 : return false;
4394 : }
4395 :
// Determine whether |scriptArg| can use "lazy arguments", i.e. whether every
// use of |arguments| in the script can be satisfied by reading the frame
// directly so that no arguments object needs to be allocated. Builds a
// throwaway MIR graph for the script and inspects all uses of the arguments
// slot. On success the script's needs-args-obj flag reflects the analysis
// result; returns false only on OOM or over-recursion (with an exception or
// OOM reported on |cx|). All other bail-out cases conservatively keep the
// arguments object and return true.
bool
jit::AnalyzeArgumentsUsage(JSContext* cx, JSScript* scriptArg)
{
    RootedScript script(cx, scriptArg);
    AutoEnterAnalysis enter(cx);

    MOZ_ASSERT(!script->analyzedArgsUsage());

    // Treat the script as needing an arguments object until we determine it
    // does not need one. This both allows us to easily see where the arguments
    // object can escape through assignments to the function's named arguments,
    // and also simplifies handling of early returns.
    script->setNeedsArgsObj(true);

    // Always construct arguments objects when in debug mode, for generator
    // scripts (generators can be suspended when speculation fails) or when
    // direct eval is present.
    //
    // FIXME: Don't build arguments for ES6 generator expressions.
    if (scriptArg->isDebuggee() ||
        script->isStarGenerator() ||
        script->isLegacyGenerator() ||
        script->isAsync() ||
        script->bindingsAccessedDynamically())
    {
        return true;
    }

    // The analysis below relies on Ion's MIR pipeline being available.
    if (!jit::IsIonEnabled(cx))
        return true;

    // Don't spend time building MIR for very large scripts; keep the
    // (conservative) arguments object instead.
    static const uint32_t MAX_SCRIPT_SIZE = 10000;
    if (script->length() > MAX_SCRIPT_SIZE)
        return true;

    if (!script->ensureHasTypes(cx))
        return false;

    TraceLoggerThread* logger = TraceLoggerForCurrentThread(cx);
    TraceLoggerEvent event(TraceLogger_AnnotateScripts, script);
    AutoTraceLog logScript(logger, event);
    AutoTraceLog logCompile(logger, TraceLogger_IonAnalysis);

    // Set up a temporary allocator and JIT context for this analysis-only
    // compilation; everything allocated below dies with |alloc|.
    LifoAlloc alloc(TempAllocator::PreferredLifoChunkSize);
    TempAllocator temp(&alloc);
    JitContext jctx(cx, &temp);

    if (!jit::CanLikelyAllocateMoreExecutableMemory())
        return true;

    if (!cx->compartment()->ensureJitCompartmentExists(cx))
        return false;

    MIRGraph graph(&temp);
    InlineScriptTree* inlineScriptTree = InlineScriptTree::New(&temp, nullptr, nullptr, script);
    if (!inlineScriptTree) {
        ReportOutOfMemory(cx);
        return false;
    }

    // Build in Analysis_ArgumentsUsage mode with needsArgsObj forced on, so
    // the graph contains explicit uses of the arguments-object slot.
    CompileInfo info(script, script->functionNonDelazifying(),
                     /* osrPc = */ nullptr,
                     Analysis_ArgumentsUsage,
                     /* needsArgsObj = */ true,
                     inlineScriptTree);

    const OptimizationInfo* optimizationInfo = IonOptimizations.get(OptimizationLevel::Normal);

    CompilerConstraintList* constraints = NewCompilerConstraintList(temp);
    if (!constraints) {
        ReportOutOfMemory(cx);
        return false;
    }

    BaselineInspector inspector(script);
    const JitCompileOptions options(cx);

    IonBuilder builder(nullptr, CompileCompartment::get(cx->compartment()), options, &temp, &graph, constraints,
                       &inspector, &info, optimizationInfo, /* baselineFrame = */ nullptr);

    AbortReasonOr<Ok> buildResult = builder.build();
    if (buildResult.isErr()) {
        AbortReason reason = buildResult.unwrapErr();
        // Propagate genuine failures (over-recursion / OOM); any other abort
        // just means we give up on the analysis and keep the arguments object.
        if (cx->isThrowingOverRecursed() || cx->isThrowingOutOfMemory())
            return false;
        if (reason == AbortReason::Alloc) {
            ReportOutOfMemory(cx);
            return false;
        }
        MOZ_ASSERT(!cx->isExceptionPending());
        return true;
    }

    if (!SplitCriticalEdges(graph)) {
        ReportOutOfMemory(cx);
        return false;
    }

    RenumberBlocks(graph);

    if (!BuildDominatorTree(graph)) {
        ReportOutOfMemory(cx);
        return false;
    }

    // Eliminate unobservable phis so that the use list of the arguments value
    // below only contains real uses.
    if (!EliminatePhis(&builder, graph, AggressiveObservability)) {
        ReportOutOfMemory(cx);
        return false;
    }

    MDefinition* argumentsValue = graph.entryBlock()->getSlot(info.argsObjSlot());

    bool argumentsContentsObserved = false;

    // Every remaining use of the arguments value must be one that can read
    // the frame lazily; otherwise bail out and keep the arguments object.
    for (MUseDefIterator uses(argumentsValue); uses; uses++) {
        MDefinition* use = uses.def();

        // Don't track |arguments| through assignments to phis.
        if (!use->isInstruction())
            return true;

        if (!ArgumentsUseCanBeLazy(cx, script, use->toInstruction(), use->indexOf(uses.use()),
                                   &argumentsContentsObserved))
        {
            return true;
        }
    }

    // If a script explicitly accesses the contents of 'arguments', and has
    // formals which may be stored as part of a call object, don't use lazy
    // arguments. The compiler can then assume that accesses through
    // arguments[i] will be on unaliased variables.
    if (script->funHasAnyAliasedFormal() && argumentsContentsObserved)
        return true;

    script->setNeedsArgsObj(false);
    return true;
}
4533 : }
4534 :
// Mark all the blocks that are in the loop with the given header.
// Returns the number of blocks marked. Set *canOsr to true if the loop is
// reachable from both the normal entry and the OSR entry.
//
// Callers must pass a graph with no blocks marked; on return, exactly the
// loop's blocks (including |header| and its backedge) are marked, unless the
// "loop" turns out not to be one, in which case all marks are cleared and 0
// is returned.
size_t
jit::MarkLoopBlocks(MIRGraph& graph, MBasicBlock* header, bool* canOsr)
{
#ifdef DEBUG
    for (ReversePostorderIterator i = graph.rpoBegin(), e = graph.rpoEnd(); i != e; ++i)
        MOZ_ASSERT(!i->isMarked(), "Some blocks already marked");
#endif

    MBasicBlock* osrBlock = graph.osrBlock();
    *canOsr = false;

    // The blocks are in RPO; start at the loop backedge, which marks the bottom
    // of the loop, and walk up until we get to the header. Loops may be
    // discontiguous, so we trace predecessors to determine which blocks are
    // actually part of the loop. The backedge is always part of the loop, and
    // so are its predecessors, transitively, up to the loop header or an OSR
    // entry.
    MBasicBlock* backedge = header->backedge();
    backedge->mark();
    size_t numMarked = 1;
    // Walk the graph in postorder starting from the backedge; this visits
    // blocks bottom-up, so each block's successors are processed before it.
    for (PostorderIterator i = graph.poBegin(backedge); ; ++i) {
        MOZ_ASSERT(i != graph.poEnd(),
                   "Reached the end of the graph while searching for the loop header");
        MBasicBlock* block = *i;
        // If we've reached the loop header, we're done.
        if (block == header)
            break;
        // A block not marked by the time we reach it is not in the loop.
        if (!block->isMarked())
            continue;

        // This block is in the loop; trace to its predecessors.
        for (size_t p = 0, e = block->numPredecessors(); p != e; ++p) {
            MBasicBlock* pred = block->getPredecessor(p);
            if (pred->isMarked())
                continue;

            // Blocks dominated by the OSR entry are not part of the loop
            // (unless they aren't reachable from the normal entry).
            if (osrBlock && pred != header &&
                osrBlock->dominates(pred) && !osrBlock->dominates(header))
            {
                *canOsr = true;
                continue;
            }

            MOZ_ASSERT(pred->id() >= header->id() && pred->id() <= backedge->id(),
                       "Loop block not between loop header and loop backedge");

            pred->mark();
            ++numMarked;

            // A nested loop may not exit back to the enclosing loop at its
            // bottom. If we just marked its header, then the whole nested loop
            // is part of the enclosing loop.
            if (pred->isLoopHeader()) {
                MBasicBlock* innerBackedge = pred->backedge();
                if (!innerBackedge->isMarked()) {
                    // Mark its backedge so that we add all of its blocks to the
                    // outer loop as we walk upwards.
                    innerBackedge->mark();
                    ++numMarked;

                    // If the nested loop is not contiguous, we may have already
                    // passed its backedge. If this happens, back up.
                    if (innerBackedge->id() > block->id()) {
                        // Restart the walk just before the inner backedge; the
                        // --i compensates for the ++i in the loop header.
                        i = graph.poBegin(innerBackedge);
                        --i;
                    }
                }
            }
        }
    }

    // If there's no path connecting the header to the backedge, then this isn't
    // actually a loop. This can happen when the code starts with a loop but GVN
    // folds some branches away.
    if (!header->isMarked()) {
        jit::UnmarkLoopBlocks(graph, header);
        return 0;
    }

    return numMarked;
}
4622 :
4623 : // Unmark all the blocks that are in the loop with the given header.
4624 : void
4625 10 : jit::UnmarkLoopBlocks(MIRGraph& graph, MBasicBlock* header)
4626 : {
4627 10 : MBasicBlock* backedge = header->backedge();
4628 636 : for (ReversePostorderIterator i = graph.rpoBegin(header); ; ++i) {
4629 636 : MOZ_ASSERT(i != graph.rpoEnd(),
4630 : "Reached the end of the graph while searching for the backedge");
4631 636 : MBasicBlock* block = *i;
4632 636 : if (block->isMarked()) {
4633 586 : block->unmark();
4634 586 : if (block == backedge)
4635 10 : break;
4636 : }
4637 626 : }
4638 :
4639 : #ifdef DEBUG
4640 772 : for (ReversePostorderIterator i = graph.rpoBegin(), e = graph.rpoEnd(); i != e; ++i)
4641 762 : MOZ_ASSERT(!i->isMarked(), "Not all blocks got unmarked");
4642 : #endif
4643 10 : }
4644 :
// Reorder the blocks in the loop starting at the given header to be contiguous.
// Precondition: MarkLoopBlocks has marked exactly the |numMarked| blocks of
// the loop. Marked blocks keep their relative order and get ids
// [headerId, headerId + numMarked); unmarked blocks found between header and
// backedge are moved (in order, preserving RPO) to just after the backedge.
static void
MakeLoopContiguous(MIRGraph& graph, MBasicBlock* header, size_t numMarked)
{
    MBasicBlock* backedge = header->backedge();

    MOZ_ASSERT(header->isMarked(), "Loop header is not part of loop");
    MOZ_ASSERT(backedge->isMarked(), "Loop backedge is not part of loop");

    // If there are any blocks between the loop header and the loop backedge
    // that are not part of the loop, prepare to move them to the end. We keep
    // them in order, which preserves RPO.
    ReversePostorderIterator insertIter = graph.rpoBegin(backedge);
    insertIter++;
    MBasicBlock* insertPt = *insertIter;

    // Visit all the blocks from the loop header to the loop backedge.
    size_t headerId = header->id();
    size_t inLoopId = headerId;
    size_t notInLoopId = inLoopId + numMarked;
    ReversePostorderIterator i = graph.rpoBegin(header);
    for (;;) {
        // Advance the iterator before possibly moving |block|, so the
        // traversal is not disturbed by moveBlockBefore below.
        MBasicBlock* block = *i++;
        MOZ_ASSERT(block->id() >= header->id() && block->id() <= backedge->id(),
                   "Loop backedge should be last block in loop");

        if (block->isMarked()) {
            // This block is in the loop.
            block->unmark();
            block->setId(inLoopId++);
            // If we've reached the loop backedge, we're done!
            if (block == backedge)
                break;
        } else {
            // This block is not in the loop. Move it to the end.
            graph.moveBlockBefore(insertPt, block);
            block->setId(notInLoopId++);
        }
    }
    MOZ_ASSERT(header->id() == headerId, "Loop header id changed");
    MOZ_ASSERT(inLoopId == headerId + numMarked, "Wrong number of blocks kept in loop");
    MOZ_ASSERT(notInLoopId == (insertIter != graph.rpoEnd() ? insertPt->id() : graph.numBlocks()),
               "Wrong number of blocks moved out of loop");
}
4689 :
4690 : // Reorder the blocks in the graph so that loops are contiguous.
4691 : bool
4692 8 : jit::MakeLoopsContiguous(MIRGraph& graph)
4693 : {
4694 : // Visit all loop headers (in any order).
4695 411 : for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
4696 403 : MBasicBlock* header = *i;
4697 403 : if (!header->isLoopHeader())
4698 796 : continue;
4699 :
4700 : // Mark all blocks that are actually part of the loop.
4701 : bool canOsr;
4702 5 : size_t numMarked = MarkLoopBlocks(graph, header, &canOsr);
4703 :
4704 : // If the loop isn't a loop, don't try to optimize it.
4705 5 : if (numMarked == 0)
4706 0 : continue;
4707 :
4708 : // If there's an OSR block entering the loop in the middle, it's tricky,
4709 : // so don't try to handle it, for now.
4710 5 : if (canOsr) {
4711 0 : UnmarkLoopBlocks(graph, header);
4712 0 : continue;
4713 : }
4714 :
4715 : // Move all blocks between header and backedge that aren't marked to
4716 : // the end of the loop, making the loop itself contiguous.
4717 5 : MakeLoopContiguous(graph, header, numMarked);
4718 : }
4719 :
4720 8 : return true;
4721 : }
4722 :
4723 8 : MRootList::MRootList(TempAllocator& alloc)
4724 : {
4725 : #define INIT_VECTOR(name, _0, _1) \
4726 : roots_[JS::RootKind::name].emplace(alloc);
4727 8 : JS_FOR_EACH_TRACEKIND(INIT_VECTOR)
4728 : #undef INIT_VECTOR
4729 8 : }
4730 :
4731 : template <typename T>
4732 : static void
4733 22 : TraceVector(JSTracer* trc, const MRootList::RootVector& vector, const char* name)
4734 : {
4735 160 : for (auto ptr : vector) {
4736 138 : T ptrT = static_cast<T>(ptr);
4737 138 : TraceManuallyBarrieredEdge(trc, &ptrT, name);
4738 138 : MOZ_ASSERT(ptr == ptrT, "Shouldn't move without updating MIR pointers");
4739 : }
4740 22 : }
4741 :
// Trace all collected roots, one vector per GC trace kind, with an edge name
// of the form "mir-root-<kind>".
void
MRootList::trace(JSTracer* trc)
{
#define TRACE_ROOTS(name, type, _) \
    TraceVector<type*>(trc, *roots_[JS::RootKind::name], "mir-root-" #name);
    JS_FOR_EACH_TRACEKIND(TRACE_ROOTS)
#undef TRACE_ROOTS
}
4750 :
4751 : MOZ_MUST_USE bool
4752 8 : jit::CreateMIRRootList(IonBuilder& builder)
4753 : {
4754 8 : MOZ_ASSERT(!builder.info().isAnalysis());
4755 :
4756 8 : TempAllocator& alloc = builder.alloc();
4757 8 : MIRGraph& graph = builder.graph();
4758 :
4759 8 : MRootList* roots = new(alloc.fallible()) MRootList(alloc);
4760 8 : if (!roots)
4761 0 : return false;
4762 :
4763 8 : JSScript* prevScript = nullptr;
4764 :
4765 652 : for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
4766 :
4767 644 : JSScript* script = block->info().script();
4768 644 : if (script != prevScript) {
4769 48 : if (!roots->append(script))
4770 0 : return false;
4771 48 : prevScript = script;
4772 : }
4773 :
4774 3107 : for (MInstructionIterator iter(block->begin()), end(block->end()); iter != end; iter++) {
4775 2463 : if (!iter->appendRoots(*roots))
4776 0 : return false;
4777 : }
4778 : }
4779 :
4780 8 : builder.setRootList(*roots);
4781 8 : return true;
4782 : }
4783 :
4784 : static void
4785 0 : DumpDefinition(GenericPrinter& out, MDefinition* def, size_t depth)
4786 : {
4787 0 : MDefinition::PrintOpcodeName(out, def->op());
4788 :
4789 0 : if (depth == 0)
4790 0 : return;
4791 :
4792 0 : for (size_t i = 0; i < def->numOperands(); i++) {
4793 0 : out.printf(" (");
4794 0 : DumpDefinition(out, def->getOperand(i), depth - 1);
4795 0 : out.printf(")");
4796 : }
4797 : }
4798 :
4799 : void
4800 8 : jit::DumpMIRExpressions(MIRGraph& graph)
4801 : {
4802 8 : if (!JitSpewEnabled(JitSpew_MIRExpressions))
4803 8 : return;
4804 :
4805 0 : size_t depth = 2;
4806 :
4807 0 : Fprinter& out = JitSpewPrinter();
4808 0 : for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
4809 0 : for (MInstructionIterator iter(block->begin()), end(block->end()); iter != end; iter++) {
4810 0 : DumpDefinition(out, *iter, depth);
4811 0 : out.printf("\n");
4812 : }
4813 : }
4814 : }
|