LCOV - code coverage report
Current view: top level - js/src/jit - RangeAnalysis.cpp (source / functions)
Test:         output.info
Date:         2017-07-14 16:53:18
                          Hit      Total     Coverage
Lines:                    700      1782      39.3 %
Functions:                68       163       41.7 %
Legend:       Lines: hit / not hit

          Line data    Source code
       1             : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
       2             :  * vim: set ts=8 sts=4 et sw=4 tw=99:
       3             :  * This Source Code Form is subject to the terms of the Mozilla Public
       4             :  * License, v. 2.0. If a copy of the MPL was not distributed with this
       5             :  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
       6             : 
       7             : #include "jit/RangeAnalysis.h"
       8             : 
       9             : #include "mozilla/MathAlgorithms.h"
      10             : 
      11             : #include "jit/Ion.h"
      12             : #include "jit/IonAnalysis.h"
      13             : #include "jit/JitSpewer.h"
      14             : #include "jit/MIR.h"
      15             : #include "jit/MIRGenerator.h"
      16             : #include "jit/MIRGraph.h"
      17             : #include "js/Conversions.h"
      18             : #include "vm/TypedArrayObject.h"
      19             : 
      20             : #include "jsopcodeinlines.h"
      21             : 
      22             : using namespace js;
      23             : using namespace js::jit;
      24             : 
      25             : using mozilla::Abs;
      26             : using mozilla::CountLeadingZeroes32;
      27             : using mozilla::NumberEqualsInt32;
      28             : using mozilla::ExponentComponent;
      29             : using mozilla::FloorLog2;
      30             : using mozilla::IsInfinite;
      31             : using mozilla::IsNaN;
      32             : using mozilla::IsNegative;
      33             : using mozilla::IsNegativeZero;
      34             : using mozilla::NegativeInfinity;
      35             : using mozilla::PositiveInfinity;
      36             : using mozilla::Swap;
      37             : using JS::GenericNaN;
      38             : using JS::ToInt32;
      39             : 
      40             : // This algorithm is based on the paper "Eliminating Range Checks Using
       41             : // Static Single Assignment Form" by Gough and Klaeren.
      42             : //
      43             : // We associate a range object with each SSA name, and the ranges are consulted
      44             : // in order to determine whether overflow is possible for arithmetic
      45             : // computations.
      46             : //
       47             : // An important source of range information, and one that requires care to
       48             : // exploit, is conditional control flow. Consider the code below:
      49             : //
      50             : // if (x < 0) {
      51             : //   y = x + 2000000000;
      52             : // } else {
      53             : //   if (x < 1000000000) {
      54             : //     y = x * 2;
      55             : //   } else {
      56             : //     y = x - 3000000000;
      57             : //   }
      58             : // }
      59             : //
      60             : // The arithmetic operations in this code cannot overflow, but it is not
      61             : // sufficient to simply associate each name with a range, since the information
      62             : // differs between basic blocks. The traditional dataflow approach would be
       63             : // to associate ranges with (name, basic block) pairs. This solution is not
      64             : // satisfying, since we lose the benefit of SSA form: in SSA form, each
      65             : // definition has a unique name, so there is no need to track information about
      66             : // the control flow of the program.
      67             : //
      68             : // The approach used here is to add a new form of pseudo operation called a
      69             : // beta node, which associates range information with a value. These beta
      70             : // instructions take one argument and additionally have an auxiliary constant
      71             : // range associated with them. Operationally, beta nodes are just copies, but
      72             : // the invariant expressed by beta node copies is that the output will fall
      73             : // inside the range given by the beta node.  Gough and Klaeren refer to SSA
      74             : // extended with these beta nodes as XSA form. The following shows the example
      75             : // code transformed into XSA form:
      76             : //
      77             : // if (x < 0) {
      78             : //   x1 = Beta(x, [INT_MIN, -1]);
      79             : //   y1 = x1 + 2000000000;
      80             : // } else {
      81             : //   x2 = Beta(x, [0, INT_MAX]);
      82             : //   if (x2 < 1000000000) {
      83             : //     x3 = Beta(x2, [INT_MIN, 999999999]);
      84             : //     y2 = x3*2;
      85             : //   } else {
      86             : //     x4 = Beta(x2, [1000000000, INT_MAX]);
      87             : //     y3 = x4 - 3000000000;
      88             : //   }
      89             : //   y4 = Phi(y2, y3);
      90             : // }
      91             : // y = Phi(y1, y4);
      92             : //
      93             : // We insert beta nodes for the purposes of range analysis (they might also be
       94             : // useful for other forms of bounds check elimination) and remove them
      95             : // after range analysis is performed. The remaining compiler phases do not ever
      96             : // encounter beta nodes.
      97             : 
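A minimal, self-contained sketch (not the actual MBeta/Range API) of the
refinement idea described above: on the true branch of "x < bound", the beta
copy of x carries what was already known about x, narrowed by the branch
constraint. The Interval type and helper names below are illustrative
assumptions only.

    #include <algorithm>
    #include <cstdint>

    struct Interval {
        int32_t lower;
        int32_t upper;
    };

    // Constraint interval implied by "x < bound" holding (integer case); the
    // real code guards the bound-1 computation with SafeSub at INT32_MIN.
    static Interval constraintForLessThan(int32_t bound) {
        return Interval{INT32_MIN, bound == INT32_MIN ? INT32_MIN : bound - 1};
    }

    // The beta copy is operationally just a copy, but its associated range is
    // the intersection of the known interval with the branch constraint.
    static Interval betaInterval(const Interval& known, const Interval& constraint) {
        return Interval{std::max(known.lower, constraint.lower),
                        std::min(known.upper, constraint.upper)};
    }
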
      98             : static bool
      99        3814 : IsDominatedUse(MBasicBlock* block, MUse* use)
     100             : {
     101        3814 :     MNode* n = use->consumer();
     102        3814 :     bool isPhi = n->isDefinition() && n->toDefinition()->isPhi();
     103             : 
     104        3814 :     if (isPhi) {
     105           0 :         MPhi* phi = n->toDefinition()->toPhi();
     106           0 :         return block->dominates(phi->block()->getPredecessor(phi->indexOf(use)));
     107             :     }
     108             : 
     109        3814 :     return block->dominates(n->block());
     110             : }
     111             : 
     112             : static inline void
     113        1295 : SpewRange(MDefinition* def)
     114             : {
     115             : #ifdef JS_JITSPEW
     116        1295 :     if (JitSpewEnabled(JitSpew_Range) && def->type() != MIRType::None && def->range()) {
     117           0 :         JitSpewHeader(JitSpew_Range);
     118           0 :         Fprinter& out = JitSpewPrinter();
     119           0 :         def->printName(out);
     120           0 :         out.printf(" has range ");
     121           0 :         def->range()->dump(out);
     122             :     }
     123             : #endif
     124        1295 : }
     125             : 
     126             : static inline void
     127           2 : SpewTruncate(MDefinition* def, MDefinition::TruncateKind kind, bool shouldClone)
     128             : {
     129             : #ifdef JS_JITSPEW
     130           2 :     if (JitSpewEnabled(JitSpew_Range)) {
     131           0 :         JitSpewHeader(JitSpew_Range);
     132           0 :         Fprinter& out = JitSpewPrinter();
     133           0 :         out.printf("truncating ");
     134           0 :         def->printName(out);
     135           0 :         out.printf(" (kind: %s, clone: %d)\n", MDefinition::TruncateKindString(kind), shouldClone);
     136             :     }
     137             : #endif
     138           2 : }
     139             : 
     140             : TempAllocator&
     141        2891 : RangeAnalysis::alloc() const
     142             : {
     143        2891 :     return graph_.alloc();
     144             : }
     145             : 
     146             : void
     147          50 : RangeAnalysis::replaceDominatedUsesWith(MDefinition* orig, MDefinition* dom,
     148             :                                             MBasicBlock* block)
     149             : {
     150        3914 :     for (MUseIterator i(orig->usesBegin()); i != orig->usesEnd(); ) {
     151        3864 :         MUse* use = *i++;
     152        3864 :         if (use->consumer() != dom && IsDominatedUse(block, use))
     153         421 :             use->replaceProducer(dom);
     154             :     }
     155          50 : }
     156             : 
     157             : bool
     158           8 : RangeAnalysis::addBetaNodes()
     159             : {
     160           8 :     JitSpew(JitSpew_Range, "Adding beta nodes");
     161             : 
     162         411 :     for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
     163         403 :         MBasicBlock* block = *i;
     164         403 :         JitSpew(JitSpew_Range, "Looking at block %d", block->id());
     165             : 
     166             :         BranchDirection branch_dir;
     167         403 :         MTest* test = block->immediateDominatorBranch(&branch_dir);
     168             : 
     169         403 :         if (!test || !test->getOperand(0)->isCompare())
     170         694 :             continue;
     171             : 
     172          78 :         MCompare* compare = test->getOperand(0)->toCompare();
     173             : 
     174          78 :         if (!compare->isNumericComparison())
     175           0 :             continue;
     176             : 
     177             :         // TODO: support unsigned comparisons
     178          78 :         if (compare->compareType() == MCompare::Compare_UInt32)
     179           0 :             continue;
     180             : 
     181          78 :         MDefinition* left = compare->getOperand(0);
     182          78 :         MDefinition* right = compare->getOperand(1);
     183             :         double bound;
     184          78 :         double conservativeLower = NegativeInfinity<double>();
     185          78 :         double conservativeUpper = PositiveInfinity<double>();
     186          78 :         MDefinition* val = nullptr;
     187             : 
     188          78 :         JSOp jsop = compare->jsop();
     189             : 
     190          78 :         if (branch_dir == FALSE_BRANCH) {
     191          39 :             jsop = NegateCompareOp(jsop);
     192          39 :             conservativeLower = GenericNaN();
     193          39 :             conservativeUpper = GenericNaN();
     194             :         }
     195             : 
     196          78 :         MConstant* leftConst = left->maybeConstantValue();
     197          78 :         MConstant* rightConst = right->maybeConstantValue();
     198          78 :         if (leftConst && leftConst->isTypeRepresentableAsDouble()) {
     199           2 :             bound = leftConst->numberToDouble();
     200           2 :             val = right;
     201           2 :             jsop = ReverseCompareOp(jsop);
     202          76 :         } else if (rightConst && rightConst->isTypeRepresentableAsDouble()) {
     203          60 :             bound = rightConst->numberToDouble();
     204          60 :             val = left;
     205          16 :         } else if (left->type() == MIRType::Int32 && right->type() == MIRType::Int32) {
     206          16 :             MDefinition* smaller = nullptr;
     207          16 :             MDefinition* greater = nullptr;
     208          16 :             if (jsop == JSOP_LT) {
     209           8 :                 smaller = left;
     210           8 :                 greater = right;
     211           8 :             } else if (jsop == JSOP_GT) {
     212           0 :                 smaller = right;
     213           0 :                 greater = left;
     214             :             }
     215          16 :             if (smaller && greater) {
     216           8 :                 if (!alloc().ensureBallast())
     217           0 :                     return false;
     218             : 
     219             :                 MBeta* beta;
     220           8 :                 beta = MBeta::New(alloc(), smaller,
     221          16 :                                   Range::NewInt32Range(alloc(), JSVAL_INT_MIN, JSVAL_INT_MAX-1));
     222           8 :                 block->insertBefore(*block->begin(), beta);
     223           8 :                 replaceDominatedUsesWith(smaller, beta, block);
     224           8 :                 JitSpew(JitSpew_Range, "Adding beta node for smaller %d", smaller->id());
     225           8 :                 beta = MBeta::New(alloc(), greater,
     226          16 :                                   Range::NewInt32Range(alloc(), JSVAL_INT_MIN+1, JSVAL_INT_MAX));
     227           8 :                 block->insertBefore(*block->begin(), beta);
     228           8 :                 replaceDominatedUsesWith(greater, beta, block);
     229           8 :                 JitSpew(JitSpew_Range, "Adding beta node for greater %d", greater->id());
     230             :             }
     231          16 :             continue;
     232             :         } else {
     233           0 :             continue;
     234             :         }
     235             : 
      236             :         // At this point, one of the operands of the compare is a constant, and
     237             :         // val is the other operand.
     238          62 :         MOZ_ASSERT(val);
     239             : 
     240          62 :         Range comp;
     241          62 :         switch (jsop) {
     242             :           case JSOP_LE:
     243           2 :             comp.setDouble(conservativeLower, bound);
     244           2 :             break;
     245             :           case JSOP_LT:
     246             :             // For integers, if x < c, the upper bound of x is c-1.
     247           0 :             if (val->type() == MIRType::Int32) {
     248             :                 int32_t intbound;
     249           0 :                 if (NumberEqualsInt32(bound, &intbound) && SafeSub(intbound, 1, &intbound))
     250           0 :                     bound = intbound;
     251             :             }
     252           0 :             comp.setDouble(conservativeLower, bound);
     253             : 
     254             :             // Negative zero is not less than zero.
     255           0 :             if (bound == 0)
     256           0 :                 comp.refineToExcludeNegativeZero();
     257           0 :             break;
     258             :           case JSOP_GE:
     259           0 :             comp.setDouble(bound, conservativeUpper);
     260           0 :             break;
     261             :           case JSOP_GT:
     262             :             // For integers, if x > c, the lower bound of x is c+1.
     263           2 :             if (val->type() == MIRType::Int32) {
     264             :                 int32_t intbound;
     265           2 :                 if (NumberEqualsInt32(bound, &intbound) && SafeAdd(intbound, 1, &intbound))
     266           2 :                     bound = intbound;
     267             :             }
     268           2 :             comp.setDouble(bound, conservativeUpper);
     269             : 
     270             :             // Negative zero is not greater than zero.
     271           2 :             if (bound == 0)
     272           0 :                 comp.refineToExcludeNegativeZero();
     273           2 :             break;
     274             :           case JSOP_STRICTEQ:
     275             :             // A strict comparison can test for things other than numeric value.
     276          29 :             if (!compare->isNumericComparison())
     277           0 :                 continue;
     278             :             // Otherwise fall through to handle JSOP_STRICTEQ the same as JSOP_EQ.
     279             :             MOZ_FALLTHROUGH;
     280             :           case JSOP_EQ:
     281          29 :             comp.setDouble(bound, bound);
     282          29 :             break;
     283             :           case JSOP_STRICTNE:
     284             :             // A strict comparison can test for things other than numeric value.
     285          29 :             if (!compare->isNumericComparison())
     286           0 :                 continue;
     287             :             // Otherwise fall through to handle JSOP_STRICTNE the same as JSOP_NE.
     288             :             MOZ_FALLTHROUGH;
     289             :           case JSOP_NE:
     290             :             // Negative zero is not not-equal to zero.
     291          29 :             if (bound == 0) {
     292           1 :                 comp.refineToExcludeNegativeZero();
     293           1 :                 break;
     294             :             }
     295          28 :             continue; // well, we could have
     296             :                       // [-\inf, bound-1] U [bound+1, \inf] but we only use contiguous ranges.
     297             :           default:
     298           0 :             continue;
     299             :         }
     300             : 
     301          34 :         if (JitSpewEnabled(JitSpew_Range)) {
     302           0 :             JitSpewHeader(JitSpew_Range);
     303           0 :             Fprinter& out = JitSpewPrinter();
     304           0 :             out.printf("Adding beta node for %d with range ", val->id());
     305           0 :             comp.dump(out);
     306             :         }
     307             : 
     308          34 :         if (!alloc().ensureBallast())
     309           0 :             return false;
     310             : 
     311          34 :         MBeta* beta = MBeta::New(alloc(), val, new(alloc()) Range(comp));
     312          34 :         block->insertBefore(*block->begin(), beta);
     313          34 :         replaceDominatedUsesWith(val, beta, block);
     314             :     }
     315             : 
     316           8 :     return true;
     317             : }
     318             : 
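A worked instance of addBetaNodes above, assuming the constant sits on the
right of the compare: for the true branch of (x > 5) with x of type Int32, the
conservative bounds stay infinite, JSOP_GT bumps the bound to 5 + 1 = 6 via
SafeAdd, and comp.setDouble(6, +Infinity) produces a range with int32 lower
bound 6 and no int32 upper bound; the MBeta node inserted at the top of the
block then carries that range for every use of x dominated by the block.
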
     319             : bool
     320           8 : RangeAnalysis::removeBetaNodes()
     321             : {
     322           8 :     JitSpew(JitSpew_Range, "Removing beta nodes");
     323             : 
     324         411 :     for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
     325         403 :         MBasicBlock* block = *i;
     326         453 :         for (MDefinitionIterator iter(*i); iter; ) {
     327         331 :             MDefinition* def = *iter++;
     328         331 :             if (def->isBeta()) {
     329          50 :                 MDefinition* op = def->getOperand(0);
     330          50 :                 JitSpew(JitSpew_Range, "Removing beta node %d for %d",
     331          50 :                         def->id(), op->id());
     332          50 :                 def->justReplaceAllUsesWith(op);
     333          50 :                 block->discardDef(def);
     334             :             } else {
     335             :                 // We only place Beta nodes at the beginning of basic
     336             :                 // blocks, so if we see something else, we can move on
     337             :                 // to the next block.
     338         281 :                 break;
     339             :             }
     340             :         }
     341             :     }
     342           8 :     return true;
     343             : }
     344             : 
     345             : void
     346           0 : SymbolicBound::dump(GenericPrinter& out) const
     347             : {
     348           0 :     if (loop)
     349           0 :         out.printf("[loop] ");
     350           0 :     sum.dump(out);
     351           0 : }
     352             : 
     353             : void
     354           0 : SymbolicBound::dump() const
     355             : {
     356           0 :     Fprinter out(stderr);
     357           0 :     dump(out);
     358           0 :     out.printf("\n");
     359           0 :     out.finish();
     360           0 : }
     361             : 
     362             : // Test whether the given range's exponent tells us anything that its lower
     363             : // and upper bound values don't.
     364             : static bool
     365           0 : IsExponentInteresting(const Range* r)
     366             : {
     367             :    // If it lacks either a lower or upper bound, the exponent is interesting.
     368           0 :    if (!r->hasInt32Bounds())
     369           0 :        return true;
     370             : 
     371             :    // Otherwise if there's no fractional part, the lower and upper bounds,
     372             :    // which are integers, are perfectly precise.
     373           0 :    if (!r->canHaveFractionalPart())
     374           0 :        return false;
     375             : 
     376             :    // Otherwise, if the bounds are conservatively rounded across a power-of-two
     377             :    // boundary, the exponent may imply a tighter range.
     378           0 :    return FloorLog2(Max(Abs(r->lower()), Abs(r->upper()))) > r->exponent();
     379             : }
     380             : 
     381             : void
     382           0 : Range::dump(GenericPrinter& out) const
     383             : {
     384           0 :     assertInvariants();
     385             : 
     386             :     // Floating-point or Integer subset.
     387           0 :     if (canHaveFractionalPart_)
     388           0 :         out.printf("F");
     389             :     else
     390           0 :         out.printf("I");
     391             : 
     392           0 :     out.printf("[");
     393             : 
     394           0 :     if (!hasInt32LowerBound_)
     395           0 :         out.printf("?");
     396             :     else
     397           0 :         out.printf("%d", lower_);
     398           0 :     if (symbolicLower_) {
     399           0 :         out.printf(" {");
     400           0 :         symbolicLower_->dump(out);
     401           0 :         out.printf("}");
     402             :     }
     403             : 
     404           0 :     out.printf(", ");
     405             : 
     406           0 :     if (!hasInt32UpperBound_)
     407           0 :         out.printf("?");
     408             :     else
     409           0 :         out.printf("%d", upper_);
     410           0 :     if (symbolicUpper_) {
     411           0 :         out.printf(" {");
     412           0 :         symbolicUpper_->dump(out);
     413           0 :         out.printf("}");
     414             :     }
     415             : 
     416           0 :     out.printf("]");
     417             : 
     418           0 :     bool includesNaN = max_exponent_ == IncludesInfinityAndNaN;
     419           0 :     bool includesNegativeInfinity = max_exponent_ >= IncludesInfinity && !hasInt32LowerBound_;
     420           0 :     bool includesPositiveInfinity = max_exponent_ >= IncludesInfinity && !hasInt32UpperBound_;
     421           0 :     bool includesNegativeZero = canBeNegativeZero_;
     422             : 
     423           0 :     if (includesNaN ||
     424           0 :         includesNegativeInfinity ||
     425           0 :         includesPositiveInfinity ||
     426             :         includesNegativeZero)
     427             :     {
     428           0 :         out.printf(" (");
     429           0 :         bool first = true;
     430           0 :         if (includesNaN) {
     431           0 :             if (first)
     432           0 :                 first = false;
     433             :             else
     434           0 :                 out.printf(" ");
     435           0 :             out.printf("U NaN");
     436             :         }
     437           0 :         if (includesNegativeInfinity) {
     438           0 :             if (first)
     439           0 :                 first = false;
     440             :             else
     441           0 :                 out.printf(" ");
     442           0 :             out.printf("U -Infinity");
     443             :         }
     444           0 :         if (includesPositiveInfinity) {
     445           0 :             if (first)
     446           0 :                 first = false;
     447             :             else
     448           0 :                 out.printf(" ");
     449           0 :             out.printf("U Infinity");
     450             :         }
     451           0 :         if (includesNegativeZero) {
     452           0 :             if (first)
     453           0 :                 first = false;
     454             :             else
     455           0 :                 out.printf(" ");
     456           0 :             out.printf("U -0");
     457             :         }
     458           0 :         out.printf(")");
     459             :     }
     460           0 :     if (max_exponent_ < IncludesInfinity && IsExponentInteresting(this))
     461           0 :         out.printf(" (< pow(2, %d+1))", max_exponent_);
     462           0 : }
     463             : 
     464             : void
     465           0 : Range::dump() const
     466             : {
     467           0 :     Fprinter out(stderr);
     468           0 :     dump(out);
     469           0 :     out.printf("\n");
     470           0 :     out.finish();
     471           0 : }
     472             : 
     473             : Range*
     474          50 : Range::intersect(TempAllocator& alloc, const Range* lhs, const Range* rhs, bool* emptyRange)
     475             : {
     476          50 :     *emptyRange = false;
     477             : 
     478          50 :     if (!lhs && !rhs)
     479           0 :         return nullptr;
     480             : 
     481          50 :     if (!lhs)
     482           0 :         return new(alloc) Range(*rhs);
     483          50 :     if (!rhs)
     484           0 :         return new(alloc) Range(*lhs);
     485             : 
     486          50 :     int32_t newLower = Max(lhs->lower_, rhs->lower_);
     487          50 :     int32_t newUpper = Min(lhs->upper_, rhs->upper_);
     488             : 
     489             :     // If upper < lower, then we have conflicting constraints. Consider:
     490             :     //
     491             :     // if (x < 0) {
     492             :     //   if (x > 0) {
     493             :     //     [Some code.]
     494             :     //   }
     495             :     // }
     496             :     //
     497             :     // In this case, the block is unreachable.
     498          50 :     if (newUpper < newLower) {
     499             :         // If both ranges can be NaN, the result can still be NaN.
     500           0 :         if (!lhs->canBeNaN() || !rhs->canBeNaN())
     501           0 :             *emptyRange = true;
     502           0 :         return nullptr;
     503             :     }
     504             : 
     505          50 :     bool newHasInt32LowerBound = lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_;
     506          50 :     bool newHasInt32UpperBound = lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_;
     507             : 
     508          50 :     FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(lhs->canHaveFractionalPart_ &&
     509         100 :                                                                      rhs->canHaveFractionalPart_);
     510          50 :     NegativeZeroFlag newMayIncludeNegativeZero = NegativeZeroFlag(lhs->canBeNegativeZero_ &&
     511         100 :                                                                   rhs->canBeNegativeZero_);
     512             : 
     513          50 :     uint16_t newExponent = Min(lhs->max_exponent_, rhs->max_exponent_);
     514             : 
      515             :     // NaN is a special value which is neither greater than infinity nor less than
     516             :     // negative infinity. When we intersect two ranges like [?, 0] and [0, ?], we
     517             :     // can end up thinking we have both a lower and upper bound, even though NaN
     518             :     // is still possible. In this case, just be conservative, since any case where
     519             :     // we can have NaN is not especially interesting.
     520          50 :     if (newHasInt32LowerBound && newHasInt32UpperBound && newExponent == IncludesInfinityAndNaN)
     521           0 :         return nullptr;
     522             : 
     523             :     // If one of the ranges has a fractional part and the other doesn't, it's
     524             :     // possible that we will have computed a newExponent that's more precise
     525             :     // than our newLower and newUpper. This is unusual, so we handle it here
     526             :     // instead of in optimize().
     527             :     //
     528             :     // For example, consider the range F[0,1.5]. Range analysis represents the
     529             :     // lower and upper bound as integers, so we'd actually have
     530             :     // F[0,2] (< pow(2, 0+1)). In this case, the exponent gives us a slightly
     531             :     // more precise upper bound than the integer upper bound.
     532             :     //
     533             :     // When intersecting such a range with an integer range, the fractional part
     534             :     // of the range is dropped. The max exponent of 0 remains valid, so the
     535             :     // upper bound needs to be adjusted to 1.
     536             :     //
     537             :     // When intersecting F[0,2] (< pow(2, 0+1)) with a range like F[2,4],
     538             :     // the naive intersection is I[2,2], but since the max exponent tells us
     539             :     // that the value is always less than 2, the intersection is actually empty.
     540         100 :     if (lhs->canHaveFractionalPart() != rhs->canHaveFractionalPart() ||
     541          45 :         (lhs->canHaveFractionalPart() &&
     542           0 :          newHasInt32LowerBound && newHasInt32UpperBound &&
     543           0 :          newLower == newUpper))
     544             :     {
     545           5 :         refineInt32BoundsByExponent(newExponent,
     546             :                                     &newLower, &newHasInt32LowerBound,
     547           5 :                                     &newUpper, &newHasInt32UpperBound);
     548             : 
     549             :         // If we're intersecting two ranges that don't overlap, this could also
     550             :         // push the bounds past each other, since the actual intersection is
     551             :         // the empty set.
     552           5 :         if (newLower > newUpper) {
     553           0 :             *emptyRange = true;
     554           0 :             return nullptr;
     555             :         }
     556             :     }
     557             : 
     558             :     return new(alloc) Range(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound,
     559             :                             newCanHaveFractionalPart,
     560             :                             newMayIncludeNegativeZero,
     561          50 :                             newExponent);
     562             : }
     563             : 
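A minimal sketch, not the actual refineInt32BoundsByExponent helper called
above, of the kind of refinement the exponent allows: max_exponent_ == e means
the magnitude is below pow(2, e+1) (see Range::dump), so finite int32 bounds
can be clamped to +/-(2^(e+1) - 1). The helper name below is illustrative.

    #include <algorithm>
    #include <cstdint>

    // Clamp int32 bounds to the limit implied by the exponent; sketch only.
    static void clampBoundsByExponent(uint16_t e, int32_t* lower, int32_t* upper) {
        if (e >= 31)                   // limit would not fit in int32; nothing to refine
            return;
        int32_t limit = int32_t((uint32_t(1) << (e + 1)) - 1);
        *upper = std::min(*upper, limit);
        *lower = std::max(*lower, -limit);
    }
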
     564             : void
     565           2 : Range::unionWith(const Range* other)
     566             : {
     567           2 :     int32_t newLower = Min(lower_, other->lower_);
     568           2 :     int32_t newUpper = Max(upper_, other->upper_);
     569             : 
     570           2 :     bool newHasInt32LowerBound = hasInt32LowerBound_ && other->hasInt32LowerBound_;
     571           2 :     bool newHasInt32UpperBound = hasInt32UpperBound_ && other->hasInt32UpperBound_;
     572             : 
     573             :     FractionalPartFlag newCanHaveFractionalPart =
     574           4 :         FractionalPartFlag(canHaveFractionalPart_ ||
     575           6 :                            other->canHaveFractionalPart_);
     576           4 :     NegativeZeroFlag newMayIncludeNegativeZero = NegativeZeroFlag(canBeNegativeZero_ ||
     577           6 :                                                                   other->canBeNegativeZero_);
     578             : 
     579           2 :     uint16_t newExponent = Max(max_exponent_, other->max_exponent_);
     580             : 
     581           2 :     rawInitialize(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound,
     582             :                   newCanHaveFractionalPart,
     583             :                   newMayIncludeNegativeZero,
     584           2 :                   newExponent);
     585           2 : }
     586             : 
     587         212 : Range::Range(const MDefinition* def)
     588             :   : symbolicLower_(nullptr),
     589         212 :     symbolicUpper_(nullptr)
     590             : {
     591         212 :     if (const Range* other = def->range()) {
     592             :         // The instruction has range information; use it.
     593         167 :         *this = *other;
     594             : 
     595             :         // Simulate the effect of converting the value to its type.
     596             :         // Note: we cannot clamp here, since ranges aren't allowed to shrink
      597             :         // and truncation can increase the range again. So we do a wrapAround to
      598             :         // mimic a possible truncation.
     599         167 :         switch (def->type()) {
     600             :           case MIRType::Int32:
     601             :             // MToInt32 cannot truncate. So we can safely clamp.
     602         167 :             if (def->isToInt32())
     603           2 :                 clampToInt32();
     604             :             else
     605         165 :                 wrapAroundToInt32();
     606         167 :             break;
     607             :           case MIRType::Boolean:
     608           0 :             wrapAroundToBoolean();
     609           0 :             break;
     610             :           case MIRType::None:
     611           0 :             MOZ_CRASH("Asking for the range of an instruction with no value");
     612             :           default:
     613           0 :             break;
     614             :         }
     615             :     } else {
     616             :         // Otherwise just use type information. We can trust the type here
     617             :         // because we don't care what value the instruction actually produces,
     618             :         // but what value we might get after we get past the bailouts.
     619          45 :         switch (def->type()) {
     620             :           case MIRType::Int32:
     621          29 :             setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
     622          29 :             break;
     623             :           case MIRType::Boolean:
     624          10 :             setInt32(0, 1);
     625          10 :             break;
     626             :           case MIRType::None:
     627           0 :             MOZ_CRASH("Asking for the range of an instruction with no value");
     628             :           default:
     629           6 :             setUnknown();
     630           6 :             break;
     631             :         }
     632             :     }
     633             : 
     634             :     // As a special case, MUrsh is permitted to claim a result type of
     635             :     // MIRType::Int32 while actually returning values in [0,UINT32_MAX] without
     636             :     // bailouts. If range analysis hasn't ruled out values in
     637             :     // (INT32_MAX,UINT32_MAX], set the range to be conservatively correct for
     638             :     // use as either a uint32 or an int32.
     639         430 :     if (!hasInt32UpperBound() &&
     640           6 :         def->isUrsh() &&
     641         212 :         def->toUrsh()->bailoutsDisabled() &&
     642           0 :         def->type() != MIRType::Int64)
     643             :     {
     644           0 :         lower_ = INT32_MIN;
     645             :     }
     646             : 
     647         212 :     assertInvariants();
     648         212 : }
     649             : 
     650             : static uint16_t
     651         164 : ExponentImpliedByDouble(double d)
     652             : {
     653             :     // Handle the special values.
     654         164 :     if (IsNaN(d))
     655           2 :         return Range::IncludesInfinityAndNaN;
     656         162 :     if (IsInfinite(d))
     657           2 :         return Range::IncludesInfinity;
     658             : 
     659             :     // Otherwise take the exponent part and clamp it at zero, since the Range
     660             :     // class doesn't track fractional ranges.
     661         160 :     return uint16_t(Max(int_fast16_t(0), ExponentComponent(d)));
     662             : }
     663             : 
     664             : void
     665          82 : Range::setDouble(double l, double h)
     666             : {
     667          82 :     MOZ_ASSERT(!(l > h));
     668             : 
     669             :     // Infer lower_, upper_, hasInt32LowerBound_, and hasInt32UpperBound_.
     670          82 :     if (l >= INT32_MIN && l <= INT32_MAX) {
     671          80 :         lower_ = int32_t(::floor(l));
     672          80 :         hasInt32LowerBound_ = true;
     673           2 :     } else if (l >= INT32_MAX) {
     674           0 :         lower_ = INT32_MAX;
     675           0 :         hasInt32LowerBound_ = true;
     676             :     } else {
     677           2 :         lower_ = INT32_MIN;
     678           2 :         hasInt32LowerBound_ = false;
     679             :     }
     680          82 :     if (h >= INT32_MIN && h <= INT32_MAX) {
     681          80 :         upper_ = int32_t(::ceil(h));
     682          80 :         hasInt32UpperBound_ = true;
     683           2 :     } else if (h <= INT32_MIN) {
     684           0 :         upper_ = INT32_MIN;
     685           0 :         hasInt32UpperBound_ = true;
     686             :     } else {
     687           2 :         upper_ = INT32_MAX;
     688           2 :         hasInt32UpperBound_ = false;
     689             :     }
     690             : 
     691             :     // Infer max_exponent_.
     692          82 :     uint16_t lExp = ExponentImpliedByDouble(l);
     693          82 :     uint16_t hExp = ExponentImpliedByDouble(h);
     694          82 :     max_exponent_ = Max(lExp, hExp);
     695             : 
     696          82 :     canHaveFractionalPart_ = ExcludesFractionalParts;
     697          82 :     canBeNegativeZero_ = ExcludesNegativeZero;
     698             : 
     699             :     // Infer the canHaveFractionalPart_ setting. We can have a
     700             :     // fractional part if the range crosses through the neighborhood of zero. We
     701             :     // won't have a fractional value if the value is always beyond the point at
     702             :     // which double precision can't represent fractional values.
     703          82 :     uint16_t minExp = Min(lExp, hExp);
     704          82 :     bool includesNegative = IsNaN(l) || l < 0;
     705          82 :     bool includesPositive = IsNaN(h) || h > 0;
     706          82 :     bool crossesZero = includesNegative && includesPositive;
     707          82 :     if (crossesZero || minExp < MaxTruncatableExponent)
     708          82 :         canHaveFractionalPart_ = IncludesFractionalParts;
     709             : 
     710             :     // Infer the canBeNegativeZero_ setting. We can have a negative zero if
     711             :     // either bound is zero.
     712          82 :     if (!(l > 0) && !(h < 0))
     713           8 :         canBeNegativeZero_ = IncludesNegativeZero;
     714             : 
     715          82 :     optimize();
     716          82 : }
     717             : 
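A worked instance of the inference above, matching the F[0,1.5] example in the
Range::intersect comment: setDouble(0.0, 1.5) floors the lower bound to 0,
ceils the upper bound to 2, and both endpoints imply exponent 0, so the range
dumps roughly as F[0, 2] (U -0) (< pow(2, 0+1)). The exponent preserves the
tighter "magnitude below 2" fact that the rounded integer upper bound loses,
which is exactly what intersect() exploits.
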
     718             : void
     719          49 : Range::setDoubleSingleton(double d)
     720             : {
     721          49 :     setDouble(d, d);
     722             : 
     723             :     // The above setDouble call is for comparisons, and treats negative zero
     724             :     // as equal to zero. We're aiming for a minimum range, so we can clear the
     725             :     // negative zero flag if the value isn't actually negative zero.
     726          49 :     if (!IsNegativeZero(d))
     727          49 :         canBeNegativeZero_ = ExcludesNegativeZero;
     728             : 
     729          49 :     assertInvariants();
     730          49 : }
     731             : 
     732             : static inline bool
     733           0 : MissingAnyInt32Bounds(const Range* lhs, const Range* rhs)
     734             : {
     735           0 :     return !lhs->hasInt32Bounds() || !rhs->hasInt32Bounds();
     736             : }
     737             : 
     738             : Range*
     739           8 : Range::add(TempAllocator& alloc, const Range* lhs, const Range* rhs)
     740             : {
     741           8 :     int64_t l = (int64_t) lhs->lower_ + (int64_t) rhs->lower_;
     742           8 :     if (!lhs->hasInt32LowerBound() || !rhs->hasInt32LowerBound())
     743           0 :         l = NoInt32LowerBound;
     744             : 
     745           8 :     int64_t h = (int64_t) lhs->upper_ + (int64_t) rhs->upper_;
     746           8 :     if (!lhs->hasInt32UpperBound() || !rhs->hasInt32UpperBound())
     747           0 :         h = NoInt32UpperBound;
     748             : 
     749             :     // The exponent is at most one greater than the greater of the operands'
     750             :     // exponents, except for NaN and infinity cases.
     751           8 :     uint16_t e = Max(lhs->max_exponent_, rhs->max_exponent_);
     752           8 :     if (e <= Range::MaxFiniteExponent)
     753           8 :         ++e;
     754             : 
     755             :     // Infinity + -Infinity is NaN.
     756           8 :     if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN())
     757           0 :         e = Range::IncludesInfinityAndNaN;
     758             : 
     759             :     return new(alloc) Range(l, h,
     760          16 :                             FractionalPartFlag(lhs->canHaveFractionalPart() ||
     761           8 :                                                rhs->canHaveFractionalPart()),
     762           8 :                             NegativeZeroFlag(lhs->canBeNegativeZero() &&
     763           0 :                                              rhs->canBeNegativeZero()),
     764           8 :                             e);
     765             : }
     766             : 
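Why the single increment above suffices: if |x| < pow(2, e) and |y| < pow(2, e),
then |x + y| <= |x| + |y| < pow(2, e+1), so outside the NaN and infinity special
cases the sum's exponent is at most one more than the larger operand exponent.
The same argument covers Range::sub below, since x - y = x + (-y).
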
     767             : Range*
     768           0 : Range::sub(TempAllocator& alloc, const Range* lhs, const Range* rhs)
     769             : {
     770           0 :     int64_t l = (int64_t) lhs->lower_ - (int64_t) rhs->upper_;
     771           0 :     if (!lhs->hasInt32LowerBound() || !rhs->hasInt32UpperBound())
     772           0 :         l = NoInt32LowerBound;
     773             : 
     774           0 :     int64_t h = (int64_t) lhs->upper_ - (int64_t) rhs->lower_;
     775           0 :     if (!lhs->hasInt32UpperBound() || !rhs->hasInt32LowerBound())
     776           0 :         h = NoInt32UpperBound;
     777             : 
     778             :     // The exponent is at most one greater than the greater of the operands'
     779             :     // exponents, except for NaN and infinity cases.
     780           0 :     uint16_t e = Max(lhs->max_exponent_, rhs->max_exponent_);
     781           0 :     if (e <= Range::MaxFiniteExponent)
     782           0 :         ++e;
     783             : 
     784             :     // Infinity - Infinity is NaN.
     785           0 :     if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN())
     786           0 :         e = Range::IncludesInfinityAndNaN;
     787             : 
     788             :     return new(alloc) Range(l, h,
     789           0 :                             FractionalPartFlag(lhs->canHaveFractionalPart() ||
     790           0 :                                                rhs->canHaveFractionalPart()),
     791           0 :                             NegativeZeroFlag(lhs->canBeNegativeZero() &&
     792           0 :                                              rhs->canBeZero()),
     793           0 :                             e);
     794             : }
     795             : 
     796             : Range*
     797           0 : Range::and_(TempAllocator& alloc, const Range* lhs, const Range* rhs)
     798             : {
     799           0 :     MOZ_ASSERT(lhs->isInt32());
     800           0 :     MOZ_ASSERT(rhs->isInt32());
     801             : 
      802             :     // If both numbers can be negative, the result can be negative over the whole range.
     803           0 :     if (lhs->lower() < 0 && rhs->lower() < 0)
     804           0 :         return Range::NewInt32Range(alloc, INT32_MIN, Max(lhs->upper(), rhs->upper()));
     805             : 
      806             :     // At most one of the two numbers can be negative here, so:
      807             :     // - the result can't be negative
      808             :     // - the upper bound is the minimum of both upper bounds,
     809           0 :     int32_t lower = 0;
     810           0 :     int32_t upper = Min(lhs->upper(), rhs->upper());
     811             : 
      812             :     // EXCEPT when one operand can be negative: a negative value can pass the
      813             :     // other operand through unchanged (-1 & 5 = 5), so the upper bound must
      814             :     // come from the non-negative operand alone.
     815           0 :     if (lhs->lower() < 0)
     816           0 :        upper = rhs->upper();
     817           0 :     if (rhs->lower() < 0)
     818           0 :         upper = lhs->upper();
     819             : 
     820           0 :     return Range::NewInt32Range(alloc, lower, upper);
     821             : }
     822             : 
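The exception the code above handles, as a quick standalone check (illustrative
only, not part of the analysis): a negative operand can pass the other operand
through unchanged, so the upper bound must fall back to the non-negative
operand's upper bound rather than the minimum of both.

    #include <cassert>

    static void checkAndWithNegativeOperand() {
        assert((-1 & 5) == 5);   // all-ones lhs preserves rhs, defeating Min(upper, upper)
        assert((-4 & 7) == 4);   // still bounded by the non-negative operand's upper bound
    }
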
     823             : Range*
     824           0 : Range::or_(TempAllocator& alloc, const Range* lhs, const Range* rhs)
     825             : {
     826           0 :     MOZ_ASSERT(lhs->isInt32());
     827           0 :     MOZ_ASSERT(rhs->isInt32());
     828             :     // When one operand is always 0 or always -1, it's a special case where we
     829             :     // can compute a fully precise result. Handling these up front also
     830             :     // protects the code below from calling CountLeadingZeroes32 with a zero
     831             :     // operand or from shifting an int32_t by 32.
     832           0 :     if (lhs->lower() == lhs->upper()) {
     833           0 :         if (lhs->lower() == 0)
     834           0 :             return new(alloc) Range(*rhs);
     835           0 :         if (lhs->lower() == -1)
     836           0 :             return new(alloc) Range(*lhs);
     837             :     }
     838           0 :     if (rhs->lower() == rhs->upper()) {
     839           0 :         if (rhs->lower() == 0)
     840           0 :             return new(alloc) Range(*lhs);
     841           0 :         if (rhs->lower() == -1)
     842           0 :             return new(alloc) Range(*rhs);
     843             :     }
     844             : 
     845             :     // The code below uses CountLeadingZeroes32, which has undefined behavior
     846             :     // if its operand is 0. We rely on the code above to protect it.
     847           0 :     MOZ_ASSERT_IF(lhs->lower() >= 0, lhs->upper() != 0);
     848           0 :     MOZ_ASSERT_IF(rhs->lower() >= 0, rhs->upper() != 0);
     849           0 :     MOZ_ASSERT_IF(lhs->upper() < 0, lhs->lower() != -1);
     850           0 :     MOZ_ASSERT_IF(rhs->upper() < 0, rhs->lower() != -1);
     851             : 
     852           0 :     int32_t lower = INT32_MIN;
     853           0 :     int32_t upper = INT32_MAX;
     854             : 
     855           0 :     if (lhs->lower() >= 0 && rhs->lower() >= 0) {
     856             :         // Both operands are non-negative, so the result won't be less than either.
     857           0 :         lower = Max(lhs->lower(), rhs->lower());
     858             :         // The result will have leading zeros where both operands have leading zeros.
     859             :         // CountLeadingZeroes32 of a non-negative int32 will at least be 1 to account
      860             :         // for the sign bit.
     861           0 :         upper = int32_t(UINT32_MAX >> Min(CountLeadingZeroes32(lhs->upper()),
     862           0 :                                           CountLeadingZeroes32(rhs->upper())));
     863             :     } else {
     864             :         // The result will have leading ones where either operand has leading ones.
     865           0 :         if (lhs->upper() < 0) {
     866           0 :             unsigned leadingOnes = CountLeadingZeroes32(~lhs->lower());
     867           0 :             lower = Max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
     868           0 :             upper = -1;
     869             :         }
     870           0 :         if (rhs->upper() < 0) {
     871           0 :             unsigned leadingOnes = CountLeadingZeroes32(~rhs->lower());
     872           0 :             lower = Max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
     873           0 :             upper = -1;
     874             :         }
     875             :     }
     876             : 
     877           0 :     return Range::NewInt32Range(alloc, lower, upper);
     878             : }
     879             : 
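A worked instance of the non-negative case above: for lhs in [0, 5] and rhs in
[0, 9], CountLeadingZeroes32(5) is 29 and CountLeadingZeroes32(9) is 28, so the
computed upper bound is UINT32_MAX >> 28 = 15 with lower bound Max(0, 0) = 0;
the true maximum of the OR over those ranges is 5 | 9 = 13, safely inside the
bound.
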
     880             : Range*
     881           0 : Range::xor_(TempAllocator& alloc, const Range* lhs, const Range* rhs)
     882             : {
     883           0 :     MOZ_ASSERT(lhs->isInt32());
     884           0 :     MOZ_ASSERT(rhs->isInt32());
     885           0 :     int32_t lhsLower = lhs->lower();
     886           0 :     int32_t lhsUpper = lhs->upper();
     887           0 :     int32_t rhsLower = rhs->lower();
     888           0 :     int32_t rhsUpper = rhs->upper();
     889           0 :     bool invertAfter = false;
     890             : 
     891             :     // If either operand is negative, bitwise-negate it, and arrange to negate
     892             :     // the result; ~((~x)^y) == x^y. If both are negative the negations on the
     893             :     // result cancel each other out; effectively this is (~x)^(~y) == x^y.
     894             :     // These transformations reduce the number of cases we have to handle below.
     895           0 :     if (lhsUpper < 0) {
     896           0 :         lhsLower = ~lhsLower;
     897           0 :         lhsUpper = ~lhsUpper;
     898           0 :         Swap(lhsLower, lhsUpper);
     899           0 :         invertAfter = !invertAfter;
     900             :     }
     901           0 :     if (rhsUpper < 0) {
     902           0 :         rhsLower = ~rhsLower;
     903           0 :         rhsUpper = ~rhsUpper;
     904           0 :         Swap(rhsLower, rhsUpper);
     905           0 :         invertAfter = !invertAfter;
     906             :     }
     907             : 
     908             :     // Handle cases where lhs or rhs is always zero specially, because they're
     909             :     // easy cases where we can be perfectly precise, and because it protects the
     910             :     // CountLeadingZeroes32 calls below from seeing 0 operands, which would be
     911             :     // undefined behavior.
     912           0 :     int32_t lower = INT32_MIN;
     913           0 :     int32_t upper = INT32_MAX;
     914           0 :     if (lhsLower == 0 && lhsUpper == 0) {
     915           0 :         upper = rhsUpper;
     916           0 :         lower = rhsLower;
     917           0 :     } else if (rhsLower == 0 && rhsUpper == 0) {
     918           0 :         upper = lhsUpper;
     919           0 :         lower = lhsLower;
     920           0 :     } else if (lhsLower >= 0 && rhsLower >= 0) {
     921             :         // Both operands are non-negative. The result will be non-negative.
     922           0 :         lower = 0;
     923             :         // To compute the upper value, take each operand's upper value and
     924             :         // set all bits that don't correspond to leading zero bits in the
     925             :         // other to one. For each one, this gives an upper bound for the
     926             :         // result, so we can take the minimum between the two.
     927           0 :         unsigned lhsLeadingZeros = CountLeadingZeroes32(lhsUpper);
     928           0 :         unsigned rhsLeadingZeros = CountLeadingZeroes32(rhsUpper);
     929           0 :         upper = Min(rhsUpper | int32_t(UINT32_MAX >> lhsLeadingZeros),
     930           0 :                     lhsUpper | int32_t(UINT32_MAX >> rhsLeadingZeros));
     931             :     }
     932             : 
     933             :     // If we bitwise-negated one (but not both) of the operands above, apply the
     934             :     // bitwise-negate to the result, completing ~((~x)^y) == x^y.
     935           0 :     if (invertAfter) {
     936           0 :         lower = ~lower;
     937           0 :         upper = ~upper;
     938           0 :         Swap(lower, upper);
     939             :     }
     940             : 
     941           0 :     return Range::NewInt32Range(alloc, lower, upper);
     942             : }
     943             : 
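A quick standalone check of the identities the transformation above relies on
(illustrative only):

    #include <cassert>
    #include <cstdint>

    static void checkXorNegationIdentities(int32_t x, int32_t y) {
        assert(~((~x) ^ y) == (x ^ y));     // negating one operand negates the result
        assert(((~x) ^ (~y)) == (x ^ y));   // negating both operands cancels out
    }
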
     944             : Range*
     945           0 : Range::not_(TempAllocator& alloc, const Range* op)
     946             : {
     947           0 :     MOZ_ASSERT(op->isInt32());
     948           0 :     return Range::NewInt32Range(alloc, ~op->upper(), ~op->lower());
     949             : }
     950             : 
     951             : Range*
     952           0 : Range::mul(TempAllocator& alloc, const Range* lhs, const Range* rhs)
     953             : {
     954           0 :     FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(lhs->canHaveFractionalPart_ ||
     955           0 :                                                                      rhs->canHaveFractionalPart_);
     956             : 
     957             :     NegativeZeroFlag newMayIncludeNegativeZero =
     958           0 :         NegativeZeroFlag((lhs->canHaveSignBitSet() && rhs->canBeFiniteNonNegative()) ||
     959           0 :                          (rhs->canHaveSignBitSet() && lhs->canBeFiniteNonNegative()));
     960             : 
     961             :     uint16_t exponent;
     962           0 :     if (!lhs->canBeInfiniteOrNaN() && !rhs->canBeInfiniteOrNaN()) {
     963             :         // Two finite values.
     964           0 :         exponent = lhs->numBits() + rhs->numBits() - 1;
     965           0 :         if (exponent > Range::MaxFiniteExponent)
     966           0 :             exponent = Range::IncludesInfinity;
     967           0 :     } else if (!lhs->canBeNaN() &&
     968           0 :                !rhs->canBeNaN() &&
     969           0 :                !(lhs->canBeZero() && rhs->canBeInfiniteOrNaN()) &&
     970           0 :                !(rhs->canBeZero() && lhs->canBeInfiniteOrNaN()))
     971             :     {
     972             :         // Two values that multiplied together won't produce a NaN.
     973           0 :         exponent = Range::IncludesInfinity;
     974             :     } else {
     975             :         // Could be anything.
     976           0 :         exponent = Range::IncludesInfinityAndNaN;
     977             :     }
     978             : 
     979           0 :     if (MissingAnyInt32Bounds(lhs, rhs))
     980             :         return new(alloc) Range(NoInt32LowerBound, NoInt32UpperBound,
     981             :                                 newCanHaveFractionalPart,
     982             :                                 newMayIncludeNegativeZero,
     983           0 :                                 exponent);
     984           0 :     int64_t a = (int64_t)lhs->lower() * (int64_t)rhs->lower();
     985           0 :     int64_t b = (int64_t)lhs->lower() * (int64_t)rhs->upper();
     986           0 :     int64_t c = (int64_t)lhs->upper() * (int64_t)rhs->lower();
     987           0 :     int64_t d = (int64_t)lhs->upper() * (int64_t)rhs->upper();
     988             :     return new(alloc) Range(
     989           0 :         Min( Min(a, b), Min(c, d) ),
     990           0 :         Max( Max(a, b), Max(c, d) ),
     991             :         newCanHaveFractionalPart,
     992             :         newMayIncludeNegativeZero,
     993           0 :         exponent);
     994             : }
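
When both operands have int32 bounds, the code above takes the hull of the four corner products, computed in 64 bits so the corners themselves cannot overflow. A tiny illustrative example of that interval multiplication (not SpiderMonkey code):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // lhs in [-3, 4], rhs in [-5, 2]: the product range is the hull of
        // the four corner products.
        int64_t a = int64_t(-3) * int64_t(-5);  // 15
        int64_t b = int64_t(-3) * int64_t(2);   // -6
        int64_t c = int64_t(4) * int64_t(-5);   // -20
        int64_t d = int64_t(4) * int64_t(2);    // 8
        std::printf("[%lld, %lld]\n",
                    (long long)std::min(std::min(a, b), std::min(c, d)),
                    (long long)std::max(std::max(a, b), std::max(c, d)));  // [-20, 15]
    }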
     995             : 
     996             : Range*
     997           0 : Range::lsh(TempAllocator& alloc, const Range* lhs, int32_t c)
     998             : {
     999           0 :     MOZ_ASSERT(lhs->isInt32());
    1000           0 :     int32_t shift = c & 0x1f;
    1001             : 
     1002             :     // If the shift doesn't lose bits or shift bits into the sign bit, we
    1003             :     // can simply compute the correct range by shifting.
    1004           0 :     if ((int32_t)((uint32_t)lhs->lower() << shift << 1 >> shift >> 1) == lhs->lower() &&
    1005           0 :         (int32_t)((uint32_t)lhs->upper() << shift << 1 >> shift >> 1) == lhs->upper())
    1006             :     {
    1007           0 :         return Range::NewInt32Range(alloc,
    1008           0 :             uint32_t(lhs->lower()) << shift,
    1009           0 :             uint32_t(lhs->upper()) << shift);
    1010             :     }
    1011             : 
    1012           0 :     return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
    1013             : }
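
The test above shifts left by |shift| plus one extra bit and then shifts back, so it only passes when the value round-trips, i.e. when no significant bits (including the sign bit) are disturbed. A standalone sketch with two sample inputs (illustrative only; LshKeepsValue is a hypothetical name):

    #include <cstdint>
    #include <cstdio>

    // True when (v << shift) round-trips: the shifted value still yields v
    // after shifting back, and the extra << 1 / >> 1 also rejects shifts
    // that would disturb the sign bit.
    static bool LshKeepsValue(int32_t v, int32_t shift)
    {
        return int32_t(uint32_t(v) << shift << 1 >> shift >> 1) == v;
    }

    int main()
    {
        std::printf("%d\n", LshKeepsValue(100, 8));        // 1: 100 << 8 fits
        std::printf("%d\n", LshKeepsValue(INT32_MAX, 1));  // 0: high bits lost
    }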
    1014             : 
    1015             : Range*
    1016           0 : Range::rsh(TempAllocator& alloc, const Range* lhs, int32_t c)
    1017             : {
    1018           0 :     MOZ_ASSERT(lhs->isInt32());
    1019           0 :     int32_t shift = c & 0x1f;
    1020           0 :     return Range::NewInt32Range(alloc,
    1021           0 :         lhs->lower() >> shift,
    1022           0 :         lhs->upper() >> shift);
    1023             : }
    1024             : 
    1025             : Range*
    1026           0 : Range::ursh(TempAllocator& alloc, const Range* lhs, int32_t c)
    1027             : {
    1028             :     // ursh's left operand is uint32, not int32, but for range analysis we
    1029             :     // currently approximate it as int32. We assume here that the range has
    1030             :     // already been adjusted accordingly by our callers.
    1031           0 :     MOZ_ASSERT(lhs->isInt32());
    1032             : 
    1033           0 :     int32_t shift = c & 0x1f;
    1034             : 
    1035             :     // If the value is always non-negative or always negative, we can simply
    1036             :     // compute the correct range by shifting.
    1037           0 :     if (lhs->isFiniteNonNegative() || lhs->isFiniteNegative()) {
    1038           0 :         return Range::NewUInt32Range(alloc,
    1039           0 :             uint32_t(lhs->lower()) >> shift,
    1040           0 :             uint32_t(lhs->upper()) >> shift);
    1041             :     }
    1042             : 
    1043             :     // Otherwise return the most general range after the shift.
    1044           0 :     return Range::NewUInt32Range(alloc, 0, UINT32_MAX >> shift);
    1045             : }
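
When the range is entirely negative, reinterpreting the int32 bounds as uint32 preserves their order, so the bounds can be shifted directly as the code above does. A small worked example (illustrative only), matching JavaScript's -8 >>> 1 and -2 >>> 1:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // lhs in [-8, -2], shift == 1: as uint32 the bounds are 0xFFFFFFF8
        // and 0xFFFFFFFE, and logical shifting keeps them in order.
        uint32_t lower = uint32_t(int32_t(-8)) >> 1;  // 0x7FFFFFFC == 2147483644
        uint32_t upper = uint32_t(int32_t(-2)) >> 1;  // 0x7FFFFFFF == 2147483647
        std::printf("[%u, %u]\n", lower, upper);
    }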
    1046             : 
    1047             : Range*
    1048           0 : Range::lsh(TempAllocator& alloc, const Range* lhs, const Range* rhs)
    1049             : {
    1050           0 :     MOZ_ASSERT(lhs->isInt32());
    1051           0 :     MOZ_ASSERT(rhs->isInt32());
    1052           0 :     return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
    1053             : }
    1054             : 
    1055             : Range*
    1056           0 : Range::rsh(TempAllocator& alloc, const Range* lhs, const Range* rhs)
    1057             : {
    1058           0 :     MOZ_ASSERT(lhs->isInt32());
    1059           0 :     MOZ_ASSERT(rhs->isInt32());
    1060             : 
    1061             :     // Canonicalize the shift range to 0 to 31.
    1062           0 :     int32_t shiftLower = rhs->lower();
    1063           0 :     int32_t shiftUpper = rhs->upper();
    1064           0 :     if ((int64_t(shiftUpper) - int64_t(shiftLower)) >= 31) {
    1065           0 :         shiftLower = 0;
    1066           0 :         shiftUpper = 31;
    1067             :     } else {
    1068           0 :         shiftLower &= 0x1f;
    1069           0 :         shiftUpper &= 0x1f;
    1070           0 :         if (shiftLower > shiftUpper) {
    1071           0 :             shiftLower = 0;
    1072           0 :             shiftUpper = 31;
    1073             :         }
    1074             :     }
    1075           0 :     MOZ_ASSERT(shiftLower >= 0 && shiftUpper <= 31);
    1076             : 
     1077             :     // The lhs bounds are signed, so the minimum is the lower bound shifted
     1078             :     // by the smallest shift if it is negative, or by the biggest shift
     1079             :     // otherwise. The maximum is computed the opposite way from the upper bound.
    1080           0 :     int32_t lhsLower = lhs->lower();
    1081           0 :     int32_t min = lhsLower < 0 ? lhsLower >> shiftLower : lhsLower >> shiftUpper;
    1082           0 :     int32_t lhsUpper = lhs->upper();
    1083           0 :     int32_t max = lhsUpper >= 0 ? lhsUpper >> shiftLower : lhsUpper >> shiftUpper;
    1084             : 
    1085           0 :     return Range::NewInt32Range(alloc, min, max);
    1086             : }
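
A quick brute-force check of the bound selection above (illustrative only; it assumes, as the JIT does, that >> on a negative int32 behaves as an arithmetic shift): for lhs in [-8, 20] and shifts in [1, 3], every possible result lands inside the computed bounds of [-4, 10].

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const int32_t lhsLower = -8, lhsUpper = 20;
        const int32_t shiftLower = 1, shiftUpper = 3;

        // Same selection as Range::rsh: a negative lower bound is widest under
        // the smallest shift; a non-negative upper bound is widest under the
        // smallest shift as well.
        int32_t min = lhsLower < 0 ? lhsLower >> shiftLower : lhsLower >> shiftUpper;
        int32_t max = lhsUpper >= 0 ? lhsUpper >> shiftLower : lhsUpper >> shiftUpper;

        for (int32_t v = lhsLower; v <= lhsUpper; v++) {
            for (int32_t s = shiftLower; s <= shiftUpper; s++)
                assert(v >> s >= min && v >> s <= max);
        }
        return 0;  // min == -4, max == 10
    }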
    1087             : 
    1088             : Range*
    1089           0 : Range::ursh(TempAllocator& alloc, const Range* lhs, const Range* rhs)
    1090             : {
    1091             :     // ursh's left operand is uint32, not int32, but for range analysis we
    1092             :     // currently approximate it as int32. We assume here that the range has
    1093             :     // already been adjusted accordingly by our callers.
    1094           0 :     MOZ_ASSERT(lhs->isInt32());
    1095           0 :     MOZ_ASSERT(rhs->isInt32());
    1096           0 :     return Range::NewUInt32Range(alloc, 0, lhs->isFiniteNonNegative() ? lhs->upper() : UINT32_MAX);
    1097             : }
    1098             : 
    1099             : Range*
    1100           0 : Range::abs(TempAllocator& alloc, const Range* op)
    1101             : {
    1102           0 :     int32_t l = op->lower_;
    1103           0 :     int32_t u = op->upper_;
    1104           0 :     FractionalPartFlag canHaveFractionalPart = op->canHaveFractionalPart_;
    1105             : 
    1106             :     // Abs never produces a negative zero.
    1107           0 :     NegativeZeroFlag canBeNegativeZero = ExcludesNegativeZero;
    1108             : 
    1109           0 :     return new(alloc) Range(Max(Max(int32_t(0), l), u == INT32_MIN ? INT32_MAX : -u),
    1110             :                             true,
    1111           0 :                             Max(Max(int32_t(0), u), l == INT32_MIN ? INT32_MAX : -l),
    1112           0 :                             op->hasInt32Bounds() && l != INT32_MIN,
    1113             :                             canHaveFractionalPart,
    1114             :                             canBeNegativeZero,
    1115           0 :                             op->max_exponent_);
    1116             : }
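
The constructor arguments above reduce to the interval [max(0, l, -u), max(0, u, -l)], saturating at INT32_MAX when negating INT32_MIN would overflow. Two worked instances (illustrative only; AbsRange is a hypothetical helper):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    static void AbsRange(int32_t l, int32_t u)
    {
        // Negating INT32_MIN would overflow, so saturate to INT32_MAX as
        // Range::abs does.
        int32_t negL = (l == INT32_MIN) ? INT32_MAX : -l;
        int32_t negU = (u == INT32_MIN) ? INT32_MAX : -u;
        int32_t lower = std::max(std::max(int32_t(0), l), negU);
        int32_t upper = std::max(std::max(int32_t(0), u), negL);
        std::printf("[%d, %d]\n", lower, upper);
    }

    int main()
    {
        AbsRange(-5, 3);   // [0, 5]
        AbsRange(-9, -4);  // [4, 9]
    }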
    1117             : 
    1118             : Range*
    1119           0 : Range::min(TempAllocator& alloc, const Range* lhs, const Range* rhs)
    1120             : {
    1121             :     // If either operand is NaN, the result is NaN.
    1122           0 :     if (lhs->canBeNaN() || rhs->canBeNaN())
    1123           0 :         return nullptr;
    1124             : 
    1125           0 :     FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(lhs->canHaveFractionalPart_ ||
    1126           0 :                                                                      rhs->canHaveFractionalPart_);
    1127           0 :     NegativeZeroFlag newMayIncludeNegativeZero = NegativeZeroFlag(lhs->canBeNegativeZero_ ||
    1128           0 :                                                                   rhs->canBeNegativeZero_);
    1129             : 
    1130           0 :     return new(alloc) Range(Min(lhs->lower_, rhs->lower_),
    1131           0 :                             lhs->hasInt32LowerBound_ && rhs->hasInt32LowerBound_,
    1132           0 :                             Min(lhs->upper_, rhs->upper_),
    1133           0 :                             lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_,
    1134             :                             newCanHaveFractionalPart,
    1135             :                             newMayIncludeNegativeZero,
    1136           0 :                             Max(lhs->max_exponent_, rhs->max_exponent_));
    1137             : }
    1138             : 
    1139             : Range*
    1140           5 : Range::max(TempAllocator& alloc, const Range* lhs, const Range* rhs)
    1141             : {
    1142             :     // If either operand is NaN, the result is NaN.
    1143           5 :     if (lhs->canBeNaN() || rhs->canBeNaN())
    1144           0 :         return nullptr;
    1145             : 
    1146          10 :     FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(lhs->canHaveFractionalPart_ ||
    1147          15 :                                                                      rhs->canHaveFractionalPart_);
    1148          10 :     NegativeZeroFlag newMayIncludeNegativeZero = NegativeZeroFlag(lhs->canBeNegativeZero_ ||
    1149          15 :                                                                   rhs->canBeNegativeZero_);
    1150             : 
    1151           5 :     return new(alloc) Range(Max(lhs->lower_, rhs->lower_),
    1152           5 :                             lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_,
    1153           5 :                             Max(lhs->upper_, rhs->upper_),
    1154           5 :                             lhs->hasInt32UpperBound_ && rhs->hasInt32UpperBound_,
    1155             :                             newCanHaveFractionalPart,
    1156             :                             newMayIncludeNegativeZero,
    1157          15 :                             Max(lhs->max_exponent_, rhs->max_exponent_));
    1158             : }
    1159             : 
    1160             : Range*
    1161           0 : Range::floor(TempAllocator& alloc, const Range* op)
    1162             : {
    1163           0 :     Range* copy = new(alloc) Range(*op);
     1164             :     // Decrement the lower bound of the copied range if op has a fractional
     1165             :     // part and its lower bound is Int32-defined. We also avoid decrementing
     1166             :     // when op has a fractional part but lower_ >= JSVAL_INT_MAX.
    1167           0 :     if (op->canHaveFractionalPart() && op->hasInt32LowerBound())
    1168           0 :         copy->setLowerInit(int64_t(copy->lower_) - 1);
    1169             : 
     1170             :     // Also refine max_exponent_, because floor may have decremented the int
     1171             :     // value. If we have int32-defined bounds, just deduce it from those
     1172             :     // bounds. If we don't have them, the value's max_exponent_ may have
     1173             :     // changed, and since we're looking to maintain an overestimation, we
     1174             :     // increment it if we can.
     1175           0 :     if (copy->hasInt32Bounds())
    1176           0 :         copy->max_exponent_ = copy->exponentImpliedByInt32Bounds();
     1177           0 :     else if (copy->max_exponent_ < MaxFiniteExponent)
    1178           0 :         copy->max_exponent_++;
    1179             : 
    1180           0 :     copy->canHaveFractionalPart_ = ExcludesFractionalParts;
    1181           0 :     copy->assertInvariants();
    1182           0 :     return copy;
    1183             : }
    1184             : 
    1185             : Range*
    1186           0 : Range::ceil(TempAllocator& alloc, const Range* op)
    1187             : {
    1188           0 :     Range* copy = new(alloc) Range(*op);
    1189             : 
     1190             :     // We need to refine max_exponent_ because ceil may have incremented the
     1191             :     // int value. If we have int32 bounds defined, just deduce it using those
     1192             :     // bounds. Otherwise we can just increment its value, as we are looking to
     1193             :     // maintain an overestimation.
    1194           0 :     if (copy->hasInt32Bounds())
    1195           0 :         copy->max_exponent_ = copy->exponentImpliedByInt32Bounds();
    1196           0 :     else if (copy->max_exponent_ < MaxFiniteExponent)
    1197           0 :         copy->max_exponent_++;
    1198             : 
    1199           0 :     copy->canHaveFractionalPart_ = ExcludesFractionalParts;
    1200           0 :     copy->assertInvariants();
    1201           0 :     return copy;
    1202             : }
    1203             : 
    1204             : Range*
    1205           0 : Range::sign(TempAllocator& alloc, const Range* op)
    1206             : {
    1207           0 :     if (op->canBeNaN())
    1208           0 :         return nullptr;
    1209             : 
    1210           0 :     return new(alloc) Range(Max(Min(op->lower_, 1), -1),
    1211           0 :                             Max(Min(op->upper_, 1), -1),
    1212             :                             Range::ExcludesFractionalParts,
    1213           0 :                             NegativeZeroFlag(op->canBeNegativeZero()),
    1214           0 :                             0);
    1215             : }
    1216             : 
    1217             : Range*
    1218           0 : Range::NaNToZero(TempAllocator& alloc, const Range *op)
    1219             : {
    1220           0 :     Range* copy = new(alloc) Range(*op);
    1221           0 :     if (copy->canBeNaN()) {
    1222           0 :         copy->max_exponent_ = Range::IncludesInfinity;
    1223           0 :         if (!copy->canBeZero()) {
    1224           0 :             Range zero;
    1225           0 :             zero.setDoubleSingleton(0);
    1226           0 :             copy->unionWith(&zero);
    1227             :         }
    1228             :     }
    1229           0 :     copy->refineToExcludeNegativeZero();
    1230           0 :     return copy;
    1231             : }
    1232             : 
    1233             : bool
    1234           0 : Range::negativeZeroMul(const Range* lhs, const Range* rhs)
    1235             : {
    1236             :     // The result can only be negative zero if both sides are finite and they
    1237             :     // have differing signs.
    1238           0 :     return (lhs->canHaveSignBitSet() && rhs->canBeFiniteNonNegative()) ||
    1239           0 :            (rhs->canHaveSignBitSet() && lhs->canBeFiniteNonNegative());
    1240             : }
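
The condition above matches IEEE-754 multiplication: a finite product comes out as negative zero only when a factor that can have its sign bit set meets a finite non-negative factor and the product is (or underflows to) zero. A quick demonstration with doubles (illustrative only):

    #include <cmath>
    #include <cstdio>

    int main()
    {
        std::printf("%d\n", std::signbit(-5.0 * 0.0));   // 1: result is -0.0
        std::printf("%d\n", std::signbit(0.0 * 5.0));    // 0: result is +0.0
        std::printf("%d\n", std::signbit(-0.0 * -5.0));  // 0: result is +0.0
    }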
    1241             : 
    1242             : bool
    1243           2 : Range::update(const Range* other)
    1244             : {
    1245             :     bool changed =
    1246           4 :         lower_ != other->lower_ ||
    1247           4 :         hasInt32LowerBound_ != other->hasInt32LowerBound_ ||
    1248           4 :         upper_ != other->upper_ ||
    1249           4 :         hasInt32UpperBound_ != other->hasInt32UpperBound_ ||
    1250           4 :         canHaveFractionalPart_ != other->canHaveFractionalPart_ ||
    1251           6 :         canBeNegativeZero_ != other->canBeNegativeZero_ ||
    1252           4 :         max_exponent_ != other->max_exponent_;
    1253           2 :     if (changed) {
    1254           0 :         lower_ = other->lower_;
    1255           0 :         hasInt32LowerBound_ = other->hasInt32LowerBound_;
    1256           0 :         upper_ = other->upper_;
    1257           0 :         hasInt32UpperBound_ = other->hasInt32UpperBound_;
    1258           0 :         canHaveFractionalPart_ = other->canHaveFractionalPart_;
    1259           0 :         canBeNegativeZero_ = other->canBeNegativeZero_;
    1260           0 :         max_exponent_ = other->max_exponent_;
    1261           0 :         assertInvariants();
    1262             :     }
    1263             : 
    1264           2 :     return changed;
    1265             : }
    1266             : 
    1267             : ///////////////////////////////////////////////////////////////////////////////
    1268             : // Range Computation for MIR Nodes
    1269             : ///////////////////////////////////////////////////////////////////////////////
    1270             : 
    1271             : void
    1272         175 : MPhi::computeRange(TempAllocator& alloc)
    1273             : {
    1274         175 :     if (type() != MIRType::Int32 && type() != MIRType::Double)
    1275         159 :         return;
    1276             : 
    1277          16 :     Range* range = nullptr;
    1278          26 :     for (size_t i = 0, e = numOperands(); i < e; i++) {
    1279          24 :         if (getOperand(i)->block()->unreachable()) {
    1280           0 :             JitSpew(JitSpew_Range, "Ignoring unreachable input %d", getOperand(i)->id());
    1281           0 :             continue;
    1282             :         }
    1283             : 
    1284             :         // Peek at the pre-bailout range so we can take a short-cut; if any of
    1285             :         // the operands has an unknown range, this phi has an unknown range.
    1286          24 :         if (!getOperand(i)->range())
    1287          14 :             return;
    1288             : 
    1289          10 :         Range input(getOperand(i));
    1290             : 
    1291          10 :         if (range)
    1292           2 :             range->unionWith(&input);
    1293             :         else
    1294           8 :             range = new(alloc) Range(input);
    1295             :     }
    1296             : 
    1297           2 :     setRange(range);
    1298             : }
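
The loop above unions the ranges of the phi's reachable inputs. A minimal sketch of that interval union on a simplified range type (SimpleRange and UnionWith are hypothetical stand-ins that ignore the fractional-part, negative-zero, and exponent flags the real Range tracks):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    struct SimpleRange { int32_t lower, upper; };

    // Union of two intervals: the phi's value can come from either
    // predecessor, so the result must cover both input ranges.
    static SimpleRange UnionWith(SimpleRange a, SimpleRange b)
    {
        return { std::min(a.lower, b.lower), std::max(a.upper, b.upper) };
    }

    int main()
    {
        SimpleRange phi = UnionWith({0, 10}, {-5, 3});
        std::printf("[%d, %d]\n", phi.lower, phi.upper);  // [-5, 10]
    }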
    1299             : 
    1300             : void
    1301          50 : MBeta::computeRange(TempAllocator& alloc)
    1302             : {
    1303          50 :     bool emptyRange = false;
    1304             : 
    1305          50 :     Range opRange(getOperand(0));
    1306          50 :     Range* range = Range::intersect(alloc, &opRange, comparison_, &emptyRange);
    1307          50 :     if (emptyRange) {
    1308           0 :         JitSpew(JitSpew_Range, "Marking block for inst %d unreachable", id());
    1309           0 :         block()->setUnreachableUnchecked();
    1310             :     } else {
    1311          50 :         setRange(range);
    1312             :     }
    1313          50 : }
    1314             : 
    1315             : void
    1316         187 : MConstant::computeRange(TempAllocator& alloc)
    1317             : {
    1318         187 :     if (isTypeRepresentableAsDouble()) {
    1319          49 :         double d = numberToDouble();
    1320          49 :         setRange(Range::NewDoubleSingletonRange(alloc, d));
    1321         138 :     } else if (type() == MIRType::Boolean) {
    1322          38 :         bool b = toBoolean();
    1323          38 :         setRange(Range::NewInt32Range(alloc, b, b));
    1324             :     }
    1325         187 : }
    1326             : 
    1327             : void
    1328           2 : MCharCodeAt::computeRange(TempAllocator& alloc)
    1329             : {
    1330             :     // ECMA 262 says that the integer will be non-negative and at most 65535.
    1331           2 :     setRange(Range::NewInt32Range(alloc, 0, 65535));
    1332           2 : }
    1333             : 
    1334             : void
    1335           0 : MClampToUint8::computeRange(TempAllocator& alloc)
    1336             : {
    1337           0 :     setRange(Range::NewUInt32Range(alloc, 0, 255));
    1338           0 : }
    1339             : 
    1340             : void
    1341           0 : MBitAnd::computeRange(TempAllocator& alloc)
    1342             : {
    1343           0 :     if (specialization_ == MIRType::Int64)
    1344           0 :         return;
    1345             : 
    1346           0 :     Range left(getOperand(0));
    1347           0 :     Range right(getOperand(1));
    1348           0 :     left.wrapAroundToInt32();
    1349           0 :     right.wrapAroundToInt32();
    1350             : 
    1351           0 :     setRange(Range::and_(alloc, &left, &right));
    1352             : }
    1353             : 
    1354             : void
    1355           0 : MBitOr::computeRange(TempAllocator& alloc)
    1356             : {
    1357           0 :     if (specialization_ == MIRType::Int64)
    1358           0 :         return;
    1359             : 
    1360           0 :     Range left(getOperand(0));
    1361           0 :     Range right(getOperand(1));
    1362           0 :     left.wrapAroundToInt32();
    1363           0 :     right.wrapAroundToInt32();
    1364             : 
    1365           0 :     setRange(Range::or_(alloc, &left, &right));
    1366             : }
    1367             : 
    1368             : void
    1369           0 : MBitXor::computeRange(TempAllocator& alloc)
    1370             : {
    1371           0 :     if (specialization_ == MIRType::Int64)
    1372           0 :         return;
    1373             : 
    1374           0 :     Range left(getOperand(0));
    1375           0 :     Range right(getOperand(1));
    1376           0 :     left.wrapAroundToInt32();
    1377           0 :     right.wrapAroundToInt32();
    1378             : 
    1379           0 :     setRange(Range::xor_(alloc, &left, &right));
    1380             : }
    1381             : 
    1382             : void
    1383           0 : MBitNot::computeRange(TempAllocator& alloc)
    1384             : {
    1385           0 :     Range op(getOperand(0));
    1386           0 :     op.wrapAroundToInt32();
    1387             : 
    1388           0 :     setRange(Range::not_(alloc, &op));
    1389           0 : }
    1390             : 
    1391             : void
    1392           0 : MLsh::computeRange(TempAllocator& alloc)
    1393             : {
    1394           0 :     if (specialization_ == MIRType::Int64)
    1395           0 :         return;
    1396             : 
    1397           0 :     Range left(getOperand(0));
    1398           0 :     Range right(getOperand(1));
    1399           0 :     left.wrapAroundToInt32();
    1400             : 
    1401           0 :     MConstant* rhsConst = getOperand(1)->maybeConstantValue();
    1402           0 :     if (rhsConst && rhsConst->type() == MIRType::Int32) {
    1403           0 :         int32_t c = rhsConst->toInt32();
    1404           0 :         setRange(Range::lsh(alloc, &left, c));
    1405           0 :         return;
    1406             :     }
    1407             : 
    1408           0 :     right.wrapAroundToShiftCount();
    1409           0 :     setRange(Range::lsh(alloc, &left, &right));
    1410             : }
    1411             : 
    1412             : void
    1413           0 : MRsh::computeRange(TempAllocator& alloc)
    1414             : {
    1415           0 :     if (specialization_ == MIRType::Int64)
    1416           0 :         return;
    1417             : 
    1418           0 :     Range left(getOperand(0));
    1419           0 :     Range right(getOperand(1));
    1420           0 :     left.wrapAroundToInt32();
    1421             : 
    1422           0 :     MConstant* rhsConst = getOperand(1)->maybeConstantValue();
    1423           0 :     if (rhsConst && rhsConst->type() == MIRType::Int32) {
    1424           0 :         int32_t c = rhsConst->toInt32();
    1425           0 :         setRange(Range::rsh(alloc, &left, c));
    1426           0 :         return;
    1427             :     }
    1428             : 
    1429           0 :     right.wrapAroundToShiftCount();
    1430           0 :     setRange(Range::rsh(alloc, &left, &right));
    1431             : }
    1432             : 
    1433             : void
    1434           0 : MUrsh::computeRange(TempAllocator& alloc)
    1435             : {
    1436           0 :     if (specialization_ == MIRType::Int64)
    1437           0 :         return;
    1438             : 
    1439           0 :     Range left(getOperand(0));
    1440           0 :     Range right(getOperand(1));
    1441             : 
    1442             :     // ursh can be thought of as converting its left operand to uint32, or it
    1443             :     // can be thought of as converting its left operand to int32, and then
    1444             :     // reinterpreting the int32 bits as a uint32 value. Both approaches yield
    1445             :     // the same result. Since we lack support for full uint32 ranges, we use
    1446             :     // the second interpretation, though it does cause us to be conservative.
    1447           0 :     left.wrapAroundToInt32();
    1448           0 :     right.wrapAroundToShiftCount();
    1449             : 
    1450           0 :     MConstant* rhsConst = getOperand(1)->maybeConstantValue();
    1451           0 :     if (rhsConst && rhsConst->type() == MIRType::Int32) {
    1452           0 :         int32_t c = rhsConst->toInt32();
    1453           0 :         setRange(Range::ursh(alloc, &left, c));
    1454             :     } else {
    1455           0 :         setRange(Range::ursh(alloc, &left, &right));
    1456             :     }
    1457             : 
    1458           0 :     MOZ_ASSERT(range()->lower() >= 0);
    1459             : }
    1460             : 
    1461             : void
    1462           0 : MAbs::computeRange(TempAllocator& alloc)
    1463             : {
    1464           0 :     if (specialization_ != MIRType::Int32 && specialization_ != MIRType::Double)
    1465           0 :         return;
    1466             : 
    1467           0 :     Range other(getOperand(0));
    1468           0 :     Range* next = Range::abs(alloc, &other);
    1469           0 :     if (implicitTruncate_)
    1470           0 :         next->wrapAroundToInt32();
    1471           0 :     setRange(next);
    1472             : }
    1473             : 
    1474             : void
    1475           0 : MFloor::computeRange(TempAllocator& alloc)
    1476             : {
    1477           0 :     Range other(getOperand(0));
    1478           0 :     setRange(Range::floor(alloc, &other));
    1479           0 : }
    1480             : 
    1481             : void
    1482           0 : MCeil::computeRange(TempAllocator& alloc)
    1483             : {
    1484           0 :     Range other(getOperand(0));
    1485           0 :     setRange(Range::ceil(alloc, &other));
    1486           0 : }
    1487             : 
    1488             : void
    1489           0 : MClz::computeRange(TempAllocator& alloc)
    1490             : {
    1491           0 :     if (type() != MIRType::Int32)
    1492           0 :         return;
    1493           0 :     setRange(Range::NewUInt32Range(alloc, 0, 32));
    1494             : }
    1495             : 
    1496             : void
    1497           0 : MCtz::computeRange(TempAllocator& alloc)
    1498             : {
    1499           0 :     if (type() != MIRType::Int32)
    1500           0 :         return;
    1501           0 :     setRange(Range::NewUInt32Range(alloc, 0, 32));
    1502             : }
    1503             : 
    1504             : void
    1505           0 : MPopcnt::computeRange(TempAllocator& alloc)
    1506             : {
    1507           0 :     if (type() != MIRType::Int32)
    1508           0 :         return;
    1509           0 :     setRange(Range::NewUInt32Range(alloc, 0, 32));
    1510             : }
    1511             : 
    1512             : void
    1513           5 : MMinMax::computeRange(TempAllocator& alloc)
    1514             : {
    1515           5 :     if (specialization_ != MIRType::Int32 && specialization_ != MIRType::Double)
    1516           0 :         return;
    1517             : 
    1518           5 :     Range left(getOperand(0));
    1519           5 :     Range right(getOperand(1));
    1520           5 :     setRange(isMax() ? Range::max(alloc, &left, &right) : Range::min(alloc, &left, &right));
    1521             : }
    1522             : 
    1523             : void
    1524           8 : MAdd::computeRange(TempAllocator& alloc)
    1525             : {
    1526           8 :     if (specialization() != MIRType::Int32 && specialization() != MIRType::Double)
    1527           0 :         return;
    1528           8 :     Range left(getOperand(0));
    1529           8 :     Range right(getOperand(1));
    1530           8 :     Range* next = Range::add(alloc, &left, &right);
    1531           8 :     if (isTruncated())
    1532           0 :         next->wrapAroundToInt32();
    1533           8 :     setRange(next);
    1534             : }
    1535             : 
    1536             : void
    1537           0 : MSub::computeRange(TempAllocator& alloc)
    1538             : {
    1539           0 :     if (specialization() != MIRType::Int32 && specialization() != MIRType::Double)
    1540           0 :         return;
    1541           0 :     Range left(getOperand(0));
    1542           0 :     Range right(getOperand(1));
    1543           0 :     Range* next = Range::sub(alloc, &left, &right);
    1544           0 :     if (isTruncated())
    1545           0 :         next->wrapAroundToInt32();
    1546           0 :     setRange(next);
    1547             : }
    1548             : 
    1549             : void
    1550           0 : MMul::computeRange(TempAllocator& alloc)
    1551             : {
    1552           0 :     if (specialization() != MIRType::Int32 && specialization() != MIRType::Double)
    1553           0 :         return;
    1554           0 :     Range left(getOperand(0));
    1555           0 :     Range right(getOperand(1));
    1556           0 :     if (canBeNegativeZero())
    1557           0 :         canBeNegativeZero_ = Range::negativeZeroMul(&left, &right);
    1558           0 :     Range* next = Range::mul(alloc, &left, &right);
    1559           0 :     if (!next->canBeNegativeZero())
    1560           0 :         canBeNegativeZero_ = false;
    1561             :     // Truncated multiplications could overflow in both directions
    1562           0 :     if (isTruncated())
    1563           0 :         next->wrapAroundToInt32();
    1564           0 :     setRange(next);
    1565             : }
    1566             : 
    1567             : void
    1568           0 : MMod::computeRange(TempAllocator& alloc)
    1569             : {
    1570           0 :     if (specialization() != MIRType::Int32 && specialization() != MIRType::Double)
    1571           0 :         return;
    1572           0 :     Range lhs(getOperand(0));
    1573           0 :     Range rhs(getOperand(1));
    1574             : 
    1575             :     // If either operand is a NaN, the result is NaN. This also conservatively
    1576             :     // handles Infinity cases.
    1577           0 :     if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds())
    1578           0 :         return;
    1579             : 
    1580             :     // If RHS can be zero, the result can be NaN.
    1581           0 :     if (rhs.lower() <= 0 && rhs.upper() >= 0)
    1582           0 :         return;
    1583             : 
    1584             :     // If both operands are non-negative integers, we can optimize this to an
    1585             :     // unsigned mod.
    1586           0 :     if (specialization() == MIRType::Int32 && rhs.lower() > 0) {
    1587           0 :         bool hasDoubles = lhs.lower() < 0 || lhs.canHaveFractionalPart() ||
    1588           0 :             rhs.canHaveFractionalPart();
    1589             :         // It is not possible to check that lhs.lower() >= 0, since the range
    1590             :         // of a ursh with rhs a 0 constant is wrapped around the int32 range in
    1591             :         // Range::Range(). However, IsUint32Type() will only return true for
    1592             :         // nodes that lie in the range [0, UINT32_MAX].
    1593           0 :         bool hasUint32s = IsUint32Type(getOperand(0)) &&
    1594           0 :             getOperand(1)->type() == MIRType::Int32 &&
    1595           0 :             (IsUint32Type(getOperand(1)) || getOperand(1)->isConstant());
    1596           0 :         if (!hasDoubles || hasUint32s)
    1597           0 :             unsigned_ = true;
    1598             :     }
    1599             : 
    1600             :     // For unsigned mod, we have to convert both operands to unsigned.
    1601             :     // Note that we handled the case of a zero rhs above.
    1602           0 :     if (unsigned_) {
    1603             :         // The result of an unsigned mod will never be unsigned-greater than
    1604             :         // either operand.
    1605           0 :         uint32_t lhsBound = Max<uint32_t>(lhs.lower(), lhs.upper());
    1606           0 :         uint32_t rhsBound = Max<uint32_t>(rhs.lower(), rhs.upper());
    1607             : 
    1608             :         // If either range crosses through -1 as a signed value, it could be
    1609             :         // the maximum unsigned value when interpreted as unsigned. If the range
    1610             :         // doesn't include -1, then the simple max value we computed above is
    1611             :         // correct.
    1612           0 :         if (lhs.lower() <= -1 && lhs.upper() >= -1)
    1613           0 :             lhsBound = UINT32_MAX;
    1614           0 :         if (rhs.lower() <= -1 && rhs.upper() >= -1)
    1615           0 :             rhsBound = UINT32_MAX;
    1616             : 
    1617             :         // The result will never be equal to the rhs, and we shouldn't have
    1618             :         // any rounding to worry about.
    1619           0 :         MOZ_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
    1620           0 :         --rhsBound;
    1621             : 
    1622             :         // This gives us two upper bounds, so we can take the best one.
    1623           0 :         setRange(Range::NewUInt32Range(alloc, 0, Min(lhsBound, rhsBound)));
    1624           0 :         return;
    1625             :     }
    1626             : 
    1627             :     // Math.abs(lhs % rhs) == Math.abs(lhs) % Math.abs(rhs).
    1628             :     // First, the absolute value of the result will always be less than the
    1629             :     // absolute value of rhs. (And if rhs is zero, the result is NaN).
    1630           0 :     int64_t a = Abs<int64_t>(rhs.lower());
    1631           0 :     int64_t b = Abs<int64_t>(rhs.upper());
    1632           0 :     if (a == 0 && b == 0)
    1633           0 :         return;
    1634           0 :     int64_t rhsAbsBound = Max(a, b);
    1635             : 
    1636             :     // If the value is known to be integer, less-than abs(rhs) is equivalent
    1637             :     // to less-than-or-equal abs(rhs)-1. This is important for being able to
    1638             :     // say that the result of x%256 is an 8-bit unsigned number.
    1639           0 :     if (!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart())
    1640           0 :         --rhsAbsBound;
    1641             : 
    1642             :     // Next, the absolute value of the result will never be greater than the
    1643             :     // absolute value of lhs.
    1644           0 :     int64_t lhsAbsBound = Max(Abs<int64_t>(lhs.lower()), Abs<int64_t>(lhs.upper()));
    1645             : 
    1646             :     // This gives us two upper bounds, so we can take the best one.
    1647           0 :     int64_t absBound = Min(lhsAbsBound, rhsAbsBound);
    1648             : 
    1649             :     // Now consider the sign of the result.
    1650             :     // If lhs is non-negative, the result will be non-negative.
    1651             :     // If lhs is non-positive, the result will be non-positive.
    1652           0 :     int64_t lower = lhs.lower() >= 0 ? 0 : -absBound;
    1653           0 :     int64_t upper = lhs.upper() <= 0 ? 0 : absBound;
    1654             : 
    1655             :     Range::FractionalPartFlag newCanHaveFractionalPart =
    1656           0 :         Range::FractionalPartFlag(lhs.canHaveFractionalPart() ||
    1657           0 :                                   rhs.canHaveFractionalPart());
    1658             : 
    1659             :     // If the lhs can have the sign bit set and we can return a zero, it'll be a
    1660             :     // negative zero.
    1661             :     Range::NegativeZeroFlag newMayIncludeNegativeZero =
    1662           0 :         Range::NegativeZeroFlag(lhs.canHaveSignBitSet());
    1663             : 
    1664           0 :     setRange(new(alloc) Range(lower, upper,
    1665             :                               newCanHaveFractionalPart,
    1666             :                               newMayIncludeNegativeZero,
    1667           0 :                               Min(lhs.exponent(), rhs.exponent())));
    1668             : }
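
As a concrete instance of the integer case above, and of the x % 256 example in the comments: with lhs in [0, 1000] and rhs == 256, rhsAbsBound becomes 255 and the computed range is [0, 255]. A brute-force verification (illustrative only):

    #include <cassert>

    int main()
    {
        for (int x = 0; x <= 1000; x++)
            assert(x % 256 >= 0 && x % 256 <= 255);
        return 0;
    }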
    1669             : 
    1670             : void
    1671           0 : MDiv::computeRange(TempAllocator& alloc)
    1672             : {
    1673           0 :     if (specialization() != MIRType::Int32 && specialization() != MIRType::Double)
    1674           0 :         return;
    1675           0 :     Range lhs(getOperand(0));
    1676           0 :     Range rhs(getOperand(1));
    1677             : 
    1678             :     // If either operand is a NaN, the result is NaN. This also conservatively
    1679             :     // handles Infinity cases.
    1680           0 :     if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds())
    1681           0 :         return;
    1682             : 
    1683             :     // Something simple for now: When dividing by a positive rhs, the result
    1684             :     // won't be further from zero than lhs.
    1685           0 :     if (lhs.lower() >= 0 && rhs.lower() >= 1) {
    1686           0 :         setRange(new(alloc) Range(0, lhs.upper(),
    1687             :                                   Range::IncludesFractionalParts,
    1688             :                                   Range::IncludesNegativeZero,
    1689           0 :                                   lhs.exponent()));
    1690           0 :     } else if (unsigned_ && rhs.lower() >= 1) {
    1691             :         // We shouldn't set the unsigned flag if the inputs can have
    1692             :         // fractional parts.
    1693           0 :         MOZ_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
    1694             :         // We shouldn't set the unsigned flag if the inputs can be
    1695             :         // negative zero.
    1696           0 :         MOZ_ASSERT(!lhs.canBeNegativeZero() && !rhs.canBeNegativeZero());
    1697             :         // Unsigned division by a non-zero rhs will return a uint32 value.
    1698           0 :         setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX));
    1699             :     }
    1700             : }
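
The first branch above relies on the fact that dividing a non-negative lhs by an rhs of at least 1 cannot move the result further from zero than lhs, although it may introduce a fractional part. A brute-force illustration over a small integer grid (illustrative only):

    #include <cassert>

    int main()
    {
        // lhs in [0, 100], rhs >= 1: every quotient stays within [0, 100].
        for (int lhs = 0; lhs <= 100; lhs++) {
            for (int rhs = 1; rhs <= 16; rhs++)
                assert(lhs / rhs >= 0 && lhs / rhs <= 100);
        }
        return 0;
    }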
    1701             : 
    1702             : void
    1703           0 : MSqrt::computeRange(TempAllocator& alloc)
    1704             : {
    1705           0 :     Range input(getOperand(0));
    1706             : 
    1707             :     // If either operand is a NaN, the result is NaN. This also conservatively
    1708             :     // handles Infinity cases.
    1709           0 :     if (!input.hasInt32Bounds())
    1710           0 :         return;
    1711             : 
    1712             :     // Sqrt of a negative non-zero value is NaN.
    1713           0 :     if (input.lower() < 0)
    1714           0 :         return;
    1715             : 
    1716             :     // Something simple for now: When taking the sqrt of a positive value, the
    1717             :     // result won't be further from zero than the input.
    1718             :     // And, sqrt of an integer may have a fractional part.
    1719           0 :     setRange(new(alloc) Range(0, input.upper(),
    1720             :                               Range::IncludesFractionalParts,
    1721           0 :                               input.canBeNegativeZero(),
    1722           0 :                               input.exponent()));
    1723             : }
    1724             : 
    1725             : void
    1726           0 : MToDouble::computeRange(TempAllocator& alloc)
    1727             : {
    1728           0 :     setRange(new(alloc) Range(getOperand(0)));
    1729           0 : }
    1730             : 
    1731             : void
    1732           0 : MToFloat32::computeRange(TempAllocator& alloc)
    1733             : {
    1734           0 : }
    1735             : 
    1736             : void
    1737           2 : MTruncateToInt32::computeRange(TempAllocator& alloc)
    1738             : {
    1739           2 :     Range* output = new(alloc) Range(getOperand(0));
    1740           2 :     output->wrapAroundToInt32();
    1741           2 :     setRange(output);
    1742           2 : }
    1743             : 
    1744             : void
    1745           1 : MToInt32::computeRange(TempAllocator& alloc)
    1746             : {
    1747             :     // No clamping since this computes the range *before* bailouts.
    1748           1 :     setRange(new(alloc) Range(getOperand(0)));
    1749           1 : }
    1750             : 
    1751             : void
    1752           5 : MLimitedTruncate::computeRange(TempAllocator& alloc)
    1753             : {
    1754           5 :     Range* output = new(alloc) Range(input());
    1755           5 :     setRange(output);
    1756           5 : }
    1757             : 
    1758             : void
    1759          10 : MFilterTypeSet::computeRange(TempAllocator& alloc)
    1760             : {
    1761          10 :     setRange(new(alloc) Range(getOperand(0)));
    1762          10 : }
    1763             : 
    1764             : static Range*
    1765           0 : GetTypedArrayRange(TempAllocator& alloc, Scalar::Type type)
    1766             : {
    1767           0 :     switch (type) {
    1768             :       case Scalar::Uint8Clamped:
    1769             :       case Scalar::Uint8:
    1770           0 :         return Range::NewUInt32Range(alloc, 0, UINT8_MAX);
    1771             :       case Scalar::Uint16:
    1772           0 :         return Range::NewUInt32Range(alloc, 0, UINT16_MAX);
    1773             :       case Scalar::Uint32:
    1774           0 :         return Range::NewUInt32Range(alloc, 0, UINT32_MAX);
    1775             : 
    1776             :       case Scalar::Int8:
    1777           0 :         return Range::NewInt32Range(alloc, INT8_MIN, INT8_MAX);
    1778             :       case Scalar::Int16:
    1779           0 :         return Range::NewInt32Range(alloc, INT16_MIN, INT16_MAX);
    1780             :       case Scalar::Int32:
    1781           0 :         return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
    1782             : 
    1783             :       case Scalar::Int64:
    1784             :       case Scalar::Float32:
    1785             :       case Scalar::Float64:
    1786             :       case Scalar::Float32x4:
    1787             :       case Scalar::Int8x16:
    1788             :       case Scalar::Int16x8:
    1789             :       case Scalar::Int32x4:
    1790             :       case Scalar::MaxTypedArrayViewType:
    1791           0 :         break;
    1792             :     }
    1793           0 :     return nullptr;
    1794             : }
    1795             : 
    1796             : void
    1797           0 : MLoadUnboxedScalar::computeRange(TempAllocator& alloc)
    1798             : {
    1799             :     // We have an Int32 type and if this is a UInt32 load it may produce a value
    1800             :     // outside of our range, but we have a bailout to handle those cases.
    1801           0 :     setRange(GetTypedArrayRange(alloc, readType()));
    1802           0 : }
    1803             : 
    1804             : void
    1805           0 : MLoadTypedArrayElementStatic::computeRange(TempAllocator& alloc)
    1806             : {
    1807             :     // We don't currently use MLoadTypedArrayElementStatic for uint32, so we
    1808             :     // don't have to worry about it returning a value outside our type.
    1809           0 :     MOZ_ASSERT(someTypedArray_->as<TypedArrayObject>().type() != Scalar::Uint32);
    1810             : 
    1811           0 :     setRange(GetTypedArrayRange(alloc, someTypedArray_->as<TypedArrayObject>().type()));
    1812           0 : }
    1813             : 
    1814             : void
    1815           2 : MArrayLength::computeRange(TempAllocator& alloc)
    1816             : {
    1817             :     // Array lengths can go up to UINT32_MAX, but we only create MArrayLength
    1818             :     // nodes when the value is known to be int32 (see the
    1819             :     // OBJECT_FLAG_LENGTH_OVERFLOW flag).
    1820           2 :     setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
    1821           2 : }
    1822             : 
    1823             : void
    1824           1 : MInitializedLength::computeRange(TempAllocator& alloc)
    1825             : {
    1826           1 :     setRange(Range::NewUInt32Range(alloc, 0, NativeObject::MAX_DENSE_ELEMENTS_COUNT));
    1827           1 : }
    1828             : 
    1829             : void
    1830           0 : MTypedArrayLength::computeRange(TempAllocator& alloc)
    1831             : {
    1832           0 :     setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
    1833           0 : }
    1834             : 
    1835             : void
    1836           8 : MStringLength::computeRange(TempAllocator& alloc)
    1837             : {
    1838             :     static_assert(JSString::MAX_LENGTH <= UINT32_MAX,
    1839             :                   "NewUInt32Range requires a uint32 value");
    1840           8 :     setRange(Range::NewUInt32Range(alloc, 0, JSString::MAX_LENGTH));
    1841           8 : }
    1842             : 
    1843             : void
    1844           2 : MArgumentsLength::computeRange(TempAllocator& alloc)
    1845             : {
     1846             :     // This is a conservative upper bound on what |TooManyActualArguments|
    1847             :     // checks.  If exceeded, Ion will not be entered in the first place.
    1848             :     MOZ_ASSERT(JitOptions.maxStackArgs <= UINT32_MAX,
    1849             :                "NewUInt32Range requires a uint32 value");
    1850           2 :     setRange(Range::NewUInt32Range(alloc, 0, JitOptions.maxStackArgs));
    1851           2 : }
    1852             : 
    1853             : void
    1854           2 : MBoundsCheck::computeRange(TempAllocator& alloc)
    1855             : {
    1856             :     // Just transfer the incoming index range to the output. The length() is
    1857             :     // also interesting, but it is handled as a bailout check, and we're
    1858             :     // computing a pre-bailout range here.
    1859           2 :     setRange(new(alloc) Range(index()));
    1860           2 : }
    1861             : 
    1862             : void
    1863           1 : MArrayPush::computeRange(TempAllocator& alloc)
    1864             : {
    1865             :     // MArrayPush returns the new array length.
    1866           1 :     setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX));
    1867           1 : }
    1868             : 
    1869             : void
    1870           0 : MMathFunction::computeRange(TempAllocator& alloc)
    1871             : {
    1872           0 :     Range opRange(getOperand(0));
    1873           0 :     switch (function()) {
    1874             :       case Sin:
    1875             :       case Cos:
    1876           0 :         if (!opRange.canBeInfiniteOrNaN())
    1877           0 :             setRange(Range::NewDoubleRange(alloc, -1.0, 1.0));
    1878           0 :         break;
    1879             :       case Sign:
    1880           0 :         setRange(Range::sign(alloc, &opRange));
    1881           0 :         break;
     1882             :       default:
    1883           0 :         break;
    1884             :     }
    1885           0 : }
    1886             : 
    1887             : void
    1888           0 : MRandom::computeRange(TempAllocator& alloc)
    1889             : {
    1890           0 :     Range* r = Range::NewDoubleRange(alloc, 0.0, 1.0);
    1891             : 
    1892             :     // Random never returns negative zero.
    1893           0 :     r->refineToExcludeNegativeZero();
    1894             : 
    1895           0 :     setRange(r);
    1896           0 : }
    1897             : 
    1898             : void
    1899           0 : MNaNToZero::computeRange(TempAllocator& alloc)
    1900             : {
    1901           0 :     Range other(input());
    1902           0 :     setRange(Range::NaNToZero(alloc, &other));
    1903           0 : }
    1904             : 
    1905             : ///////////////////////////////////////////////////////////////////////////////
    1906             : // Range Analysis
    1907             : ///////////////////////////////////////////////////////////////////////////////
    1908             : 
    1909             : bool
    1910           5 : RangeAnalysis::analyzeLoop(MBasicBlock* header)
    1911             : {
    1912           5 :     MOZ_ASSERT(header->hasUniqueBackedge());
    1913             : 
    1914             :     // Try to compute an upper bound on the number of times the loop backedge
    1915             :     // will be taken. Look for tests that dominate the backedge and which have
    1916             :     // an edge leaving the loop body.
    1917           5 :     MBasicBlock* backedge = header->backedge();
    1918             : 
    1919             :     // Ignore trivial infinite loops.
    1920           5 :     if (backedge == header)
    1921           0 :         return true;
    1922             : 
    1923             :     bool canOsr;
    1924           5 :     size_t numBlocks = MarkLoopBlocks(graph_, header, &canOsr);
    1925             : 
    1926             :     // Ignore broken loops.
    1927           5 :     if (numBlocks == 0)
    1928           0 :         return true;
    1929             : 
    1930           5 :     LoopIterationBound* iterationBound = nullptr;
    1931             : 
    1932           5 :     MBasicBlock* block = backedge;
    1933          21 :     do {
    1934             :         BranchDirection direction;
    1935          26 :         MTest* branch = block->immediateDominatorBranch(&direction);
    1936             : 
    1937          26 :         if (block == block->immediateDominator())
    1938           3 :             break;
    1939             : 
    1940          26 :         block = block->immediateDominator();
    1941             : 
    1942          26 :         if (branch) {
    1943          11 :             direction = NegateBranchDirection(direction);
    1944          11 :             MBasicBlock* otherBlock = branch->branchSuccessor(direction);
    1945          11 :             if (!otherBlock->isMarked()) {
    1946          11 :                 if (!alloc().ensureBallast())
    1947           0 :                     return false;
    1948          11 :                 iterationBound =
    1949          11 :                     analyzeLoopIterationCount(header, branch, direction);
    1950          11 :                 if (iterationBound)
    1951           3 :                     break;
    1952             :             }
    1953             :         }
    1954          23 :     } while (block != header);
    1955             : 
    1956           5 :     if (!iterationBound) {
    1957           2 :         UnmarkLoopBlocks(graph_, header);
    1958           2 :         return true;
    1959             :     }
    1960             : 
    1961           3 :     if (!loopIterationBounds.append(iterationBound))
    1962           0 :         return false;
    1963             : 
    1964             : #ifdef DEBUG
    1965           3 :     if (JitSpewEnabled(JitSpew_Range)) {
    1966           0 :         Sprinter sp(GetJitContext()->cx);
    1967           0 :         if (!sp.init())
    1968           0 :             return false;
    1969           0 :         iterationBound->boundSum.dump(sp);
    1970           0 :         JitSpew(JitSpew_Range, "computed symbolic bound on backedges: %s",
    1971           0 :                 sp.string());
    1972             :     }
    1973             : #endif
    1974             : 
    1975             :     // Try to compute symbolic bounds for the phi nodes at the head of this
    1976             :     // loop, expressed in terms of the iteration bound just computed.
    1977             : 
    1978          23 :     for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++)
    1979          20 :         analyzeLoopPhi(header, iterationBound, *iter);
    1980             : 
    1981           3 :     if (!mir->compilingWasm()) {
    1982             :         // Try to hoist any bounds checks from the loop using symbolic bounds.
    1983             : 
    1984           6 :         Vector<MBoundsCheck*, 0, JitAllocPolicy> hoistedChecks(alloc());
    1985             : 
    1986         274 :         for (ReversePostorderIterator iter(graph_.rpoBegin(header)); iter != graph_.rpoEnd(); iter++) {
    1987         271 :             MBasicBlock* block = *iter;
    1988         271 :             if (!block->isMarked())
    1989          42 :                 continue;
    1990             : 
    1991         678 :             for (MDefinitionIterator iter(block); iter; iter++) {
    1992         449 :                 MDefinition* def = *iter;
    1993         449 :                 if (def->isBoundsCheck() && def->isMovable()) {
    1994           2 :                     if (!alloc().ensureBallast())
    1995           0 :                         return false;
    1996           2 :                     if (tryHoistBoundsCheck(header, def->toBoundsCheck())) {
    1997           2 :                         if (!hoistedChecks.append(def->toBoundsCheck()))
    1998           0 :                             return false;
    1999             :                     }
    2000             :                 }
    2001             :             }
    2002             :         }
    2003             : 
    2004             :         // Note: replace all uses of the original bounds check with the
    2005             :         // actual index. This is usually done during bounds check elimination,
    2006             :         // but in this case it's safe to do it here since the load/store is
    2007             :         // definitely not loop-invariant, so we will never move it before
    2008             :         // one of the bounds checks we just added.
    2009           5 :         for (size_t i = 0; i < hoistedChecks.length(); i++) {
    2010           2 :             MBoundsCheck* ins = hoistedChecks[i];
    2011           2 :             ins->replaceAllUsesWith(ins->index());
    2012           2 :             ins->block()->discard(ins);
    2013             :         }
    2014             :     }
    2015             : 
    2016           3 :     UnmarkLoopBlocks(graph_, header);
    2017           3 :     return true;
    2018             : }
    2019             : 
    2020             : // Unbox beta nodes so that instructions can be hoisted properly, without being
    2021             : // limited by the beta nodes which are added after each branch.
    2022             : static inline MDefinition*
    2023           7 : DefinitionOrBetaInputDefinition(MDefinition* ins)
    2024             : {
    2025           9 :     while (ins->isBeta())
    2026           2 :         ins = ins->toBeta()->input();
    2027           5 :     return ins;
    2028             : }
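// Illustrative note (hypothetical example, not from the original source): after
// a branch on `i < n`, uses of `i` in the dominated block typically go through
// an MBeta node carrying the refined range. Peeling the beta chain here lets
// later comparisons (e.g. against a bounds check's length) see the underlying
// definition rather than the branch-local wrapper.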
    2029             : 
    2030             : LoopIterationBound*
    2031          11 : RangeAnalysis::analyzeLoopIterationCount(MBasicBlock* header,
    2032             :                                          MTest* test, BranchDirection direction)
    2033             : {
    2034          11 :     SimpleLinearSum lhs(nullptr, 0);
    2035             :     MDefinition* rhs;
    2036             :     bool lessEqual;
    2037          11 :     if (!ExtractLinearInequality(test, direction, &lhs, &rhs, &lessEqual))
    2038           7 :         return nullptr;
    2039             : 
    2040             :     // Ensure the rhs is a loop invariant term.
    2041           4 :     if (rhs && rhs->block()->isMarked()) {
    2042           1 :         if (lhs.term && lhs.term->block()->isMarked())
    2043           1 :             return nullptr;
    2044           0 :         MDefinition* temp = lhs.term;
    2045           0 :         lhs.term = rhs;
    2046           0 :         rhs = temp;
    2047           0 :         if (!SafeSub(0, lhs.constant, &lhs.constant))
    2048           0 :             return nullptr;
    2049           0 :         lessEqual = !lessEqual;
    2050             :     }
    2051             : 
    2052           3 :     MOZ_ASSERT_IF(rhs, !rhs->block()->isMarked());
    2053             : 
    2054             :     // Ensure the lhs is a phi node from the start of the loop body.
    2055           3 :     if (!lhs.term || !lhs.term->isPhi() || lhs.term->block() != header)
    2056           0 :         return nullptr;
    2057             : 
    2058             :     // Check that the value of the lhs changes by a constant amount with each
    2059             :     // loop iteration. This requires that the lhs be written in every loop
    2060             :     // iteration with a value that is a constant difference from its value at
    2061             :     // the start of the iteration.
    2062             : 
    2063           3 :     if (lhs.term->toPhi()->numOperands() != 2)
    2064           0 :         return nullptr;
    2065             : 
    2066             :     // The first operand of the phi should be the lhs' value at the start of
    2067             :     // the first executed iteration, and not a value written which could
    2068             :     // replace the second operand below during the middle of execution.
    2069           3 :     MDefinition* lhsInitial = lhs.term->toPhi()->getLoopPredecessorOperand();
    2070           3 :     if (lhsInitial->block()->isMarked())
    2071           0 :         return nullptr;
    2072             : 
    2073             :     // The second operand of the phi should be a value written by an add/sub
    2074             :     // in every loop iteration, i.e. in a block which dominates the backedge.
    2075             :     MDefinition* lhsWrite =
    2076           3 :         DefinitionOrBetaInputDefinition(lhs.term->toPhi()->getLoopBackedgeOperand());
    2077           3 :     if (!lhsWrite->isAdd() && !lhsWrite->isSub())
    2078           0 :         return nullptr;
    2079           3 :     if (!lhsWrite->block()->isMarked())
    2080           0 :         return nullptr;
    2081           3 :     MBasicBlock* bb = header->backedge();
    2082           3 :     for (; bb != lhsWrite->block() && bb != header; bb = bb->immediateDominator()) {}
    2083           3 :     if (bb != lhsWrite->block())
    2084           0 :         return nullptr;
    2085             : 
    2086           3 :     SimpleLinearSum lhsModified = ExtractLinearSum(lhsWrite);
    2087             : 
    2088             :     // Check that the value of the lhs at the backedge is of the form
    2089             :     // 'old(lhs) + N'. We can be sure that old(lhs) is the value at the start
    2090             :     // of the iteration, and not that written to lhs in a previous iteration,
    2091             :     // as such a previous value could not appear directly in the addition:
    2092             :     // it could not be stored in lhs as the lhs add/sub executes in every
    2093             :     // iteration, and if it were stored in another variable its use here would
    2094             :     // be as an operand to a phi node for that variable.
    2095           3 :     if (lhsModified.term != lhs.term)
    2096           0 :         return nullptr;
    2097             : 
    2098           6 :     LinearSum iterationBound(alloc());
    2099           6 :     LinearSum currentIteration(alloc());
    2100             : 
    2101           3 :     if (lhsModified.constant == 1 && !lessEqual) {
    2102             :         // The value of lhs is 'initial(lhs) + iterCount' and this will end
    2103             :         // execution of the loop if 'lhs + lhsN >= rhs'. Thus, an upper bound
    2104             :         // on the number of backedges executed is:
    2105             :         //
    2106             :         // initial(lhs) + iterCount + lhsN == rhs
    2107             :         // iterCount == rhs - initial(lhs) - lhsN
    2108             : 
    2109           3 :         if (rhs) {
    2110           3 :             if (!iterationBound.add(rhs, 1))
    2111           0 :                 return nullptr;
    2112             :         }
    2113           3 :         if (!iterationBound.add(lhsInitial, -1))
    2114           0 :             return nullptr;
    2115             : 
    2116             :         int32_t lhsConstant;
    2117           3 :         if (!SafeSub(0, lhs.constant, &lhsConstant))
    2118           0 :             return nullptr;
    2119           3 :         if (!iterationBound.add(lhsConstant))
    2120           0 :             return nullptr;
    2121             : 
    2122           3 :         if (!currentIteration.add(lhs.term, 1))
    2123           0 :             return nullptr;
    2124           3 :         if (!currentIteration.add(lhsInitial, -1))
    2125           0 :             return nullptr;
    2126           0 :     } else if (lhsModified.constant == -1 && lessEqual) {
    2127             :         // The value of lhs is 'initial(lhs) - iterCount'. Similar to the above
    2128             :         // case, an upper bound on the number of backedges executed is:
    2129             :         //
    2130             :         // initial(lhs) - iterCount + lhsN == rhs
    2131             :         // iterCount == initial(lhs) - rhs + lhsN
    2132             : 
    2133           0 :         if (!iterationBound.add(lhsInitial, 1))
    2134           0 :             return nullptr;
    2135           0 :         if (rhs) {
    2136           0 :             if (!iterationBound.add(rhs, -1))
    2137           0 :                 return nullptr;
    2138             :         }
    2139           0 :         if (!iterationBound.add(lhs.constant))
    2140           0 :             return nullptr;
    2141             : 
    2142           0 :         if (!currentIteration.add(lhsInitial, 1))
    2143           0 :             return nullptr;
    2144           0 :         if (!currentIteration.add(lhs.term, -1))
    2145           0 :             return nullptr;
    2146             :     } else {
    2147           0 :         return nullptr;
    2148             :     }
    2149             : 
    2150           3 :     return new(alloc()) LoopIterationBound(header, test, iterationBound, currentIteration);
    2151             : }
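// Illustrative sketch of the first case above, using plain integers instead of
// the MDefinition-based LinearSums. The helper name and parameters are
// hypothetical, and the SafeSub/SafeAdd overflow guards are elided.
//
// For a loop shaped like `for (i = init; i + lhsN < rhs; i++)`, the number of
// backedges taken is bounded by rhs - init - lhsN:
static int
SketchBackedgeBound(int init, int rhs, int lhsN)
{
    // iterCount == rhs - initial(lhs) - lhsN
    return rhs - init - lhsN;
}
// e.g. `for (i = 0; i < 10; i++)` gives init = 0, rhs = 10, lhsN = 0, so at
// most 10 backedges execute.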
    2152             : 
    2153             : void
    2154          20 : RangeAnalysis::analyzeLoopPhi(MBasicBlock* header, LoopIterationBound* loopBound, MPhi* phi)
    2155             : {
    2156             :     // Given a bound on the number of backedges taken, compute an upper and
    2157             :     // lower bound for a phi node that may change by a constant amount each
    2158             :     // iteration. Unlike for the case when computing the iteration bound
    2159             :     // itself, the phi does not need to change by the same amount every
    2160             :     // iteration, but is required to change by at most N and to be either
    2161             :     // nondecreasing or nonincreasing.
    2162             : 
    2163          20 :     MOZ_ASSERT(phi->numOperands() == 2);
    2164             : 
    2165          20 :     MDefinition* initial = phi->getLoopPredecessorOperand();
    2166          20 :     if (initial->block()->isMarked())
    2167          17 :         return;
    2168             : 
    2169          20 :     SimpleLinearSum modified = ExtractLinearSum(phi->getLoopBackedgeOperand());
    2170             : 
    2171          20 :     if (modified.term != phi || modified.constant == 0)
    2172          17 :         return;
    2173             : 
    2174           3 :     if (!phi->range())
    2175           3 :         phi->setRange(new(alloc()) Range(phi));
    2176             : 
    2177           6 :     LinearSum initialSum(alloc());
    2178           3 :     if (!initialSum.add(initial, 1))
    2179           0 :         return;
    2180             : 
    2181             :     // The phi may change by N each iteration, and is either nondecreasing or
    2182             :     // nonincreasing. initial(phi) is either a lower or upper bound for the
    2183             :     // phi, and initial(phi) + loopBound * N is either an upper or lower bound,
    2184             :     // at all points within the loop, provided that loopBound >= 0.
    2185             :     //
    2186             :     // We are more interested, however, in the bound for phi at points
    2187             :     // dominated by the loop bound's test; if the test dominates e.g. a bounds
    2188             :     // check we want to hoist from the loop, using the value of the phi at the
    2189             :     // head of the loop for this will usually be too imprecise to hoist the
    2190             :     // check. These points will execute only if the backedge executes at least
    2191             :     // one more time (as the test passed and the test dominates the backedge),
    2192             :     // so we know both that loopBound >= 1 and that the phi's value has changed
    2193             :     // at most loopBound - 1 times. Thus, another upper or lower bound for the
    2194             :     // phi is initial(phi) + (loopBound - 1) * N, without requiring us to
    2195             :     // ensure that loopBound >= 0.
    2196             : 
    2197           6 :     LinearSum limitSum(loopBound->boundSum);
    2198           3 :     if (!limitSum.multiply(modified.constant) || !limitSum.add(initialSum))
    2199           0 :         return;
    2200             : 
    2201             :     int32_t negativeConstant;
    2202           3 :     if (!SafeSub(0, modified.constant, &negativeConstant) || !limitSum.add(negativeConstant))
    2203           0 :         return;
    2204             : 
    2205           3 :     Range* initRange = initial->range();
    2206           3 :     if (modified.constant > 0) {
    2207           3 :         if (initRange && initRange->hasInt32LowerBound())
    2208           0 :             phi->range()->refineLower(initRange->lower());
    2209           3 :         phi->range()->setSymbolicLower(SymbolicBound::New(alloc(), nullptr, initialSum));
    2210           3 :         phi->range()->setSymbolicUpper(SymbolicBound::New(alloc(), loopBound, limitSum));
    2211             :     } else {
    2212           0 :         if (initRange && initRange->hasInt32UpperBound())
    2213           0 :             phi->range()->refineUpper(initRange->upper());
    2214           0 :         phi->range()->setSymbolicUpper(SymbolicBound::New(alloc(), nullptr, initialSum));
    2215           0 :         phi->range()->setSymbolicLower(SymbolicBound::New(alloc(), loopBound, limitSum));
    2216             :     }
    2217             : 
    2218           3 :     JitSpew(JitSpew_Range, "added symbolic range on %d", phi->id());
    2219           3 :     SpewRange(phi);
    2220             : }
    2221             : 
    2222             : // Whether the bound is valid at the specified bounds check instruction in a
    2223             : // loop, and may be used to hoist ins.
    2224             : static inline bool
    2225           4 : SymbolicBoundIsValid(MBasicBlock* header, MBoundsCheck* ins, const SymbolicBound* bound)
    2226             : {
    2227           4 :     if (!bound->loop)
    2228           2 :         return true;
    2229           2 :     if (ins->block() == header)
    2230           0 :         return false;
    2231           2 :     MBasicBlock* bb = ins->block()->immediateDominator();
    2232          10 :     while (bb != header && bb != bound->loop->test->block())
    2233           4 :         bb = bb->immediateDominator();
    2234           2 :     return bb == bound->loop->test->block();
    2235             : }
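// For example (hypothetical): in `for (i = 0; i < n; i++) { if (f()) a[i] = 0; }`
// the bounds check on a[i] sits in a block dominated by the `i < n` test, so a
// symbolic bound tied to that test is usable there; a check placed in the loop
// header itself would not be, which is what the dominator walk above verifies.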
    2236             : 
    2237             : bool
    2238           2 : RangeAnalysis::tryHoistBoundsCheck(MBasicBlock* header, MBoundsCheck* ins)
    2239             : {
    2240             :     // The bounds check's length must be loop invariant.
    2241           2 :     MDefinition *length = DefinitionOrBetaInputDefinition(ins->length());
    2242           2 :     if (length->block()->isMarked())
    2243           0 :         return false;
    2244             : 
    2245             :     // The bounds check's index should not be loop invariant (else we would
    2246             :     // already have hoisted it during LICM).
    2247           2 :     SimpleLinearSum index = ExtractLinearSum(ins->index());
    2248           2 :     if (!index.term || !index.term->block()->isMarked())
    2249           0 :         return false;
    2250             : 
    2251             :     // Check for a symbolic lower and upper bound on the index. If either
    2252             :     // condition depends on an iteration bound for the loop, only hoist if
    2253             :     // the bounds check is dominated by the iteration bound's test.
    2254           2 :     if (!index.term->range())
    2255           0 :         return false;
    2256           2 :     const SymbolicBound* lower = index.term->range()->symbolicLower();
    2257           2 :     if (!lower || !SymbolicBoundIsValid(header, ins, lower))
    2258           0 :         return false;
    2259           2 :     const SymbolicBound* upper = index.term->range()->symbolicUpper();
    2260           2 :     if (!upper || !SymbolicBoundIsValid(header, ins, upper))
    2261           0 :         return false;
    2262             : 
    2263           2 :     MBasicBlock* preLoop = header->loopPredecessor();
    2264           2 :     MOZ_ASSERT(!preLoop->isMarked());
    2265             : 
    2266           2 :     MDefinition* lowerTerm = ConvertLinearSum(alloc(), preLoop, lower->sum);
    2267           2 :     if (!lowerTerm)
    2268           0 :         return false;
    2269             : 
    2270           2 :     MDefinition* upperTerm = ConvertLinearSum(alloc(), preLoop, upper->sum);
    2271           2 :     if (!upperTerm)
    2272           0 :         return false;
    2273             : 
    2274             :     // We are checking that index + indexConstant >= 0, and know that
    2275             :     // index >= lowerTerm + lowerConstant. Thus, check that:
    2276             :     //
    2277             :     // lowerTerm + lowerConstant + indexConstant >= 0
    2278             :     // lowerTerm >= -lowerConstant - indexConstant
    2279             : 
    2280           2 :     int32_t lowerConstant = 0;
    2281           2 :     if (!SafeSub(lowerConstant, index.constant, &lowerConstant))
    2282           0 :         return false;
    2283           2 :     if (!SafeSub(lowerConstant, lower->sum.constant(), &lowerConstant))
    2284           0 :         return false;
    2285             : 
    2286             :     // We are checking that index < boundsLength, and know that
    2287             :     // index <= upperTerm + upperConstant. Thus, check that:
    2288             :     //
    2289             :     // upperTerm + upperConstant < boundsLength
    2290             : 
    2291           2 :     int32_t upperConstant = index.constant;
    2292           2 :     if (!SafeAdd(upper->sum.constant(), upperConstant, &upperConstant))
    2293           0 :         return false;
    2294             : 
    2295             :     // Hoist the loop invariant lower bounds checks.
    2296           2 :     MBoundsCheckLower* lowerCheck = MBoundsCheckLower::New(alloc(), lowerTerm);
    2297           2 :     lowerCheck->setMinimum(lowerConstant);
    2298           2 :     lowerCheck->computeRange(alloc());
    2299           2 :     lowerCheck->collectRangeInfoPreTrunc();
    2300           2 :     preLoop->insertBefore(preLoop->lastIns(), lowerCheck);
    2301             : 
    2302             :     // Hoist the loop invariant upper bounds checks.
    2303           2 :     if (upperTerm != length || upperConstant >= 0) {
    2304           0 :         MBoundsCheck* upperCheck = MBoundsCheck::New(alloc(), upperTerm, length);
    2305           0 :         upperCheck->setMinimum(upperConstant);
    2306           0 :         upperCheck->setMaximum(upperConstant);
    2307           0 :         upperCheck->computeRange(alloc());
    2308           0 :         upperCheck->collectRangeInfoPreTrunc();
    2309           0 :         preLoop->insertBefore(preLoop->lastIns(), upperCheck);
    2310             :     }
    2311             : 
    2312           2 :     return true;
    2313             : }
    2314             : 
    2315             : bool
    2316           8 : RangeAnalysis::analyze()
    2317             : {
    2318           8 :     JitSpew(JitSpew_Range, "Doing range propagation");
    2319             : 
    2320         411 :     for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) {
    2321         403 :         MBasicBlock* block = *iter;
    2322             :         // No blocks are supposed to be unreachable, except when we have an OSR
    2323             :         // block, in which case the Value Numbering phase adds fixup blocks which
    2324             :         // are unreachable.
    2325         403 :         MOZ_ASSERT(!block->unreachable() || graph_.osrBlock());
    2326             : 
    2327             :         // If the block's immediate dominator is unreachable, the block is
    2328             :         // unreachable. Iterating in RPO, we'll always see the immediate
    2329             :         // dominator before the block.
    2330         403 :         if (block->immediateDominator()->unreachable()) {
    2331           0 :             block->setUnreachableUnchecked();
    2332           0 :             continue;
    2333             :         }
    2334             : 
    2335        1695 :         for (MDefinitionIterator iter(block); iter; iter++) {
    2336        1292 :             MDefinition* def = *iter;
    2337        1292 :             if (!alloc().ensureBallast())
    2338           0 :                 return false;
    2339             : 
    2340        1292 :             def->computeRange(alloc());
    2341        1292 :             JitSpew(JitSpew_Range, "computing range on %d", def->id());
    2342        1292 :             SpewRange(def);
    2343             :         }
    2344             : 
    2345             :         // Beta node range analysis may have marked this block unreachable. If
    2346             :         // so, it's no longer interesting to continue processing it.
    2347         403 :         if (block->unreachable())
    2348           0 :             continue;
    2349             : 
    2350         403 :         if (block->isLoopHeader()) {
    2351           5 :             if (!analyzeLoop(block))
    2352           0 :                 return false;
    2353             :         }
    2354             : 
    2355             :         // First pass at collecting range info - while the beta nodes are still
    2356             :         // around and before truncation.
    2357        1923 :         for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++)
    2358        1520 :             iter->collectRangeInfoPreTrunc();
    2359             :     }
    2360             : 
    2361           8 :     return true;
    2362             : }
    2363             : 
    2364             : bool
    2365           8 : RangeAnalysis::addRangeAssertions()
    2366             : {
    2367           8 :     if (!JitOptions.checkRangeAnalysis)
    2368           8 :         return true;
    2369             : 
    2370             :     // Check the computed range for this instruction, if the option is set. Note
    2371             :     // that this code is quite invasive; it adds numerous additional
    2372             :     // instructions for each MInstruction with a computed range, and it uses
    2373             :     // registers, so it also affects register allocation.
    2374           0 :     for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) {
    2375           0 :         MBasicBlock* block = *iter;
    2376             : 
    2377             :         // Do not add assertions in unreachable blocks.
    2378           0 :         if (block->unreachable())
    2379           0 :             continue;
    2380             : 
    2381           0 :         for (MDefinitionIterator iter(block); iter; iter++) {
    2382           0 :             MDefinition* ins = *iter;
    2383             : 
    2384             :             // Perform range checking for all numeric and numeric-like types.
    2385           0 :             if (!IsNumberType(ins->type()) &&
    2386           0 :                 ins->type() != MIRType::Boolean &&
    2387           0 :                 ins->type() != MIRType::Value)
    2388             :             {
    2389           0 :                 continue;
    2390             :             }
    2391             : 
    2392             :             // MIsNoIter is fused with the MTest that follows it and emitted as
    2393             :             // LIsNoIterAndBranch. Skip it to avoid complicating MIsNoIter
    2394             :             // lowering.
    2395           0 :             if (ins->isIsNoIter())
    2396           0 :                 continue;
    2397             : 
    2398           0 :             Range r(ins);
    2399             : 
    2400           0 :             MOZ_ASSERT_IF(ins->type() == MIRType::Int64, r.isUnknown());
    2401             : 
    2402             :             // Don't insert assertions if there's nothing interesting to assert.
    2403           0 :             if (r.isUnknown() || (ins->type() == MIRType::Int32 && r.isUnknownInt32()))
    2404           0 :                 continue;
    2405             : 
    2406             :             // Don't add a use to an instruction that is recovered on bailout.
    2407           0 :             if (ins->isRecoveredOnBailout())
    2408           0 :                 continue;
    2409             : 
    2410           0 :             if (!alloc().ensureBallast())
    2411           0 :                 return false;
    2412           0 :             MAssertRange* guard = MAssertRange::New(alloc(), ins, new(alloc()) Range(r));
    2413             : 
    2414             :             // Beta nodes and interrupt checks are required to be located at the
    2415             :             // beginnings of basic blocks, so we must insert range assertions
    2416             :             // after any such instructions.
    2417           0 :             MInstruction* insertAt = nullptr;
    2418           0 :             if (block->graph().osrBlock() == block)
    2419           0 :                 insertAt = ins->toInstruction();
    2420             :             else
    2421           0 :                 insertAt = block->safeInsertTop(ins);
    2422             : 
    2423           0 :             if (insertAt == *iter)
    2424           0 :                 block->insertAfter(insertAt, guard);
    2425             :             else
    2426           0 :                 block->insertBefore(insertAt, guard);
    2427             :         }
    2428             :     }
    2429             : 
    2430           0 :     return true;
    2431             : }
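// Usage note: JitOptions.checkRangeAnalysis is off by default (consistent with
// the zero hit counts above); in a SpiderMonkey shell build it can typically be
// enabled with the --ion-check-range-analysis flag so that the MAssertRange
// guards inserted here verify the computed ranges at runtime.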
    2432             : 
    2433             : ///////////////////////////////////////////////////////////////////////////////
    2434             : // Range based Truncation
    2435             : ///////////////////////////////////////////////////////////////////////////////
    2436             : 
    2437             : void
    2438           2 : Range::clampToInt32()
    2439             : {
    2440           2 :     if (isInt32())
    2441           0 :         return;
    2442           2 :     int32_t l = hasInt32LowerBound() ? lower() : JSVAL_INT_MIN;
    2443           2 :     int32_t h = hasInt32UpperBound() ? upper() : JSVAL_INT_MAX;
    2444           2 :     setInt32(l, h);
    2445             : }
    2446             : 
    2447             : void
    2448         169 : Range::wrapAroundToInt32()
    2449             : {
    2450         169 :     if (!hasInt32Bounds()) {
    2451           2 :         setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
    2452         167 :     } else if (canHaveFractionalPart()) {
    2453             :         // Clearing the fractional field may provide an opportunity to refine
    2454             :         // lower_ or upper_.
    2455           0 :         canHaveFractionalPart_ = ExcludesFractionalParts;
    2456           0 :         canBeNegativeZero_ = ExcludesNegativeZero;
    2457           0 :         refineInt32BoundsByExponent(max_exponent_,
    2458             :                                     &lower_, &hasInt32LowerBound_,
    2459           0 :                                     &upper_, &hasInt32UpperBound_);
    2460             : 
    2461           0 :         assertInvariants();
    2462             :     } else {
    2463             :         // If nothing else, we can clear the negative zero flag.
    2464         167 :         canBeNegativeZero_ = ExcludesNegativeZero;
    2465             :     }
    2466         169 :     MOZ_ASSERT(isInt32());
    2467         169 : }
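// Worked examples (hypothetical ranges): a range whose upper bound exceeds
// INT32_MAX, such as [0, 2^31], has no int32 upper bound and is widened to the
// full [INT32_MIN, INT32_MAX] range; a fractional range such as one computed
// for `x + 0.5` keeps its int32 bounds but drops the fractional-part and
// negative-zero flags, possibly tightening the bounds via the exponent.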
    2468             : 
    2469             : void
    2470           0 : Range::wrapAroundToShiftCount()
    2471             : {
    2472           0 :     wrapAroundToInt32();
    2473           0 :     if (lower() < 0 || upper() >= 32)
    2474           0 :         setInt32(0, 31);
    2475           0 : }
    2476             : 
    2477             : void
    2478           0 : Range::wrapAroundToBoolean()
    2479             : {
    2480           0 :     wrapAroundToInt32();
    2481           0 :     if (!isBoolean())
    2482           0 :         setInt32(0, 1);
    2483           0 :     MOZ_ASSERT(isBoolean());
    2484           0 : }
    2485             : 
    2486             : bool
    2487           5 : MDefinition::needTruncation(TruncateKind kind)
    2488             : {
    2489             :     // No procedure defined for truncating this instruction.
    2490           5 :     return false;
    2491             : }
    2492             : 
    2493             : void
    2494           0 : MDefinition::truncate()
    2495             : {
    2496           0 :     MOZ_CRASH("No procedure defined for truncating this instruction.");
    2497             : }
    2498             : 
    2499             : bool
    2500           1 : MConstant::needTruncation(TruncateKind kind)
    2501             : {
    2502           1 :     return IsFloatingPointType(type());
    2503             : }
    2504             : 
    2505             : void
    2506           0 : MConstant::truncate()
    2507             : {
    2508           0 :     MOZ_ASSERT(needTruncation(Truncate));
    2509             : 
    2510             :     // Truncate the double to int, since all uses truncate it.
    2511           0 :     int32_t res = ToInt32(numberToDouble());
    2512           0 :     payload_.asBits = 0;
    2513           0 :     payload_.i32 = res;
    2514           0 :     setResultType(MIRType::Int32);
    2515           0 :     if (range())
    2516           0 :         range()->setInt32(res, res);
    2517           0 : }
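// Worked examples of the ToInt32 conversion above (ECMAScript ToInt32: wrap
// modulo 2^32, then reinterpret as a signed 32-bit value):
//
//   ToInt32(2.9)          == 2
//   ToInt32(-0.0)         == 0
//   ToInt32(4000000000.0) == -294967296   // 4e9 - 2^32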
    2518             : 
    2519             : bool
    2520           1 : MPhi::needTruncation(TruncateKind kind)
    2521             : {
    2522           1 :     if (type() == MIRType::Double || type() == MIRType::Int32) {
    2523           1 :         truncateKind_ = kind;
    2524           1 :         return true;
    2525             :     }
    2526             : 
    2527           0 :     return false;
    2528             : }
    2529             : 
    2530             : void
    2531           1 : MPhi::truncate()
    2532             : {
    2533           1 :     setResultType(MIRType::Int32);
    2534           1 :     if (truncateKind_ >= IndirectTruncate && range())
    2535           1 :         range()->wrapAroundToInt32();
    2536           1 : }
    2537             : 
    2538             : bool
    2539           2 : MAdd::needTruncation(TruncateKind kind)
    2540             : {
    2541             :     // Remember analysis, needed for fallible checks.
    2542           2 :     setTruncateKind(kind);
    2543             : 
    2544           2 :     return type() == MIRType::Double || type() == MIRType::Int32;
    2545             : }
    2546             : 
    2547             : void
    2548           1 : MAdd::truncate()
    2549             : {
    2550           1 :     MOZ_ASSERT(needTruncation(truncateKind()));
    2551           1 :     specialization_ = MIRType::Int32;
    2552           1 :     setResultType(MIRType::Int32);
    2553           1 :     if (truncateKind() >= IndirectTruncate && range())
    2554           1 :         range()->wrapAroundToInt32();
    2555           1 : }
    2556             : 
    2557             : bool
    2558           0 : MSub::needTruncation(TruncateKind kind)
    2559             : {
    2560             :     // Remember analysis, needed for fallible checks.
    2561           0 :     setTruncateKind(kind);
    2562             : 
    2563           0 :     return type() == MIRType::Double || type() == MIRType::Int32;
    2564             : }
    2565             : 
    2566             : void
    2567           0 : MSub::truncate()
    2568             : {
    2569           0 :     MOZ_ASSERT(needTruncation(truncateKind()));
    2570           0 :     specialization_ = MIRType::Int32;
    2571           0 :     setResultType(MIRType::Int32);
    2572           0 :     if (truncateKind() >= IndirectTruncate && range())
    2573           0 :         range()->wrapAroundToInt32();
    2574           0 : }
    2575             : 
    2576             : bool
    2577           0 : MMul::needTruncation(TruncateKind kind)
    2578             : {
    2579             :     // Remember analysis, needed for fallible checks.
    2580           0 :     setTruncateKind(kind);
    2581             : 
    2582           0 :     return type() == MIRType::Double || type() == MIRType::Int32;
    2583             : }
    2584             : 
    2585             : void
    2586           0 : MMul::truncate()
    2587             : {
    2588           0 :     MOZ_ASSERT(needTruncation(truncateKind()));
    2589           0 :     specialization_ = MIRType::Int32;
    2590           0 :     setResultType(MIRType::Int32);
    2591           0 :     if (truncateKind() >= IndirectTruncate) {
    2592           0 :         setCanBeNegativeZero(false);
    2593           0 :         if (range())
    2594           0 :             range()->wrapAroundToInt32();
    2595             :     }
    2596           0 : }
    2597             : 
    2598             : bool
    2599           0 : MDiv::needTruncation(TruncateKind kind)
    2600             : {
    2601             :     // Remember analysis, needed for fallible checks.
    2602           0 :     setTruncateKind(kind);
    2603             : 
    2604           0 :     return type() == MIRType::Double || type() == MIRType::Int32;
    2605             : }
    2606             : 
    2607             : void
    2608           0 : MDiv::truncate()
    2609             : {
    2610           0 :     MOZ_ASSERT(needTruncation(truncateKind()));
    2611           0 :     specialization_ = MIRType::Int32;
    2612           0 :     setResultType(MIRType::Int32);
    2613             : 
    2614             :     // Divisions where the lhs and rhs are unsigned and the result is
    2615             :     // truncated can be lowered more efficiently.
    2616           0 :     if (unsignedOperands()) {
    2617           0 :         replaceWithUnsignedOperands();
    2618           0 :         unsigned_ = true;
    2619             :     }
    2620           0 : }
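// For instance (hypothetical JS source), `((a >>> 0) / (b >>> 0)) | 0` has
// unsigned operands and a truncated result, so it can be compiled as an
// unsigned integer division rather than a double division followed by a
// conversion, which is what the replaceWithUnsignedOperands() path enables.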
    2621             : 
    2622             : bool
    2623           0 : MMod::needTruncation(TruncateKind kind)
    2624             : {
    2625             :     // Remember analysis, needed for fallible checks.
    2626           0 :     setTruncateKind(kind);
    2627             : 
    2628           0 :     return type() == MIRType::Double || type() == MIRType::Int32;
    2629             : }
    2630             : 
    2631             : void
    2632           0 : MMod::truncate()
    2633             : {
    2634             :     // As for division, handle unsigned modulus with a truncated result.
    2635           0 :     MOZ_ASSERT(needTruncation(truncateKind()));
    2636           0 :     specialization_ = MIRType::Int32;
    2637           0 :     setResultType(MIRType::Int32);
    2638             : 
    2639           0 :     if (unsignedOperands()) {
    2640           0 :         replaceWithUnsignedOperands();
    2641           0 :         unsigned_ = true;
    2642             :     }
    2643           0 : }
    2644             : 
    2645             : bool
    2646           0 : MToDouble::needTruncation(TruncateKind kind)
    2647             : {
    2648           0 :     MOZ_ASSERT(type() == MIRType::Double);
    2649           0 :     setTruncateKind(kind);
    2650             : 
    2651           0 :     return true;
    2652             : }
    2653             : 
    2654             : void
    2655           0 : MToDouble::truncate()
    2656             : {
    2657           0 :     MOZ_ASSERT(needTruncation(truncateKind()));
    2658             : 
    2659             :     // We use the return type to flag that this MToDouble should be replaced by
    2660             :     // a MTruncateToInt32 when modifying the graph.
    2661           0 :     setResultType(MIRType::Int32);
    2662           0 :     if (truncateKind() >= IndirectTruncate) {
    2663           0 :         if (range())
    2664           0 :             range()->wrapAroundToInt32();
    2665             :     }
    2666           0 : }
    2667             : 
    2668             : bool
    2669           0 : MLoadTypedArrayElementStatic::needTruncation(TruncateKind kind)
    2670             : {
    2671             :     // IndirectTruncate is not possible, since the load returns 'undefined'
    2672             :     // upon an out-of-bounds read. Doing arithmetic on 'undefined' gives wrong
    2673             :     // results. So only set infallible if explicitly truncated.
    2674           0 :     if (kind == Truncate)
    2675           0 :         setInfallible();
    2676             : 
    2677           0 :     return false;
    2678             : }
    2679             : 
    2680             : bool
    2681           0 : MLimitedTruncate::needTruncation(TruncateKind kind)
    2682             : {
    2683           0 :     setTruncateKind(kind);
    2684           0 :     setResultType(MIRType::Int32);
    2685           0 :     if (kind >= IndirectTruncate && range())
    2686           0 :         range()->wrapAroundToInt32();
    2687           0 :     return false;
    2688             : }
    2689             : 
    2690             : bool
    2691          45 : MCompare::needTruncation(TruncateKind kind)
    2692             : {
    2693             :     // If we're compiling wasm, don't try to optimize the comparison type, as
    2694             :     // the code presumably is already using the type it wants. Also, wasm
    2695             :     // doesn't support bailouts, so we wouldn't be able to rely on
    2696             :     // TruncateAfterBailouts to convert our inputs.
    2697          45 :     if (block()->info().compilingWasm())
    2698           0 :        return false;
    2699             : 
    2700          45 :     if (!isDoubleComparison())
    2701          45 :         return false;
    2702             : 
    2703             :     // If both operands are naturally in the int32 range, we can convert from
    2704             :     // a double comparison to an int32 comparison.
    2705           0 :     if (!Range(lhs()).isInt32() || !Range(rhs()).isInt32())
    2706           0 :         return false;
    2707             : 
    2708           0 :     return true;
    2709             : }
    2710             : 
    2711             : void
    2712           0 : MCompare::truncate()
    2713             : {
    2714           0 :     compareType_ = Compare_Int32;
    2715             : 
    2716             :     // Truncating the operands won't change their value because we don't force a
    2717             :     // truncation, but it will change their type, which we need because we
    2718             :     // now expect integer inputs.
    2719           0 :     truncateOperands_ = true;
    2720           0 : }
    2721             : 
    2722             : MDefinition::TruncateKind
    2723          44 : MDefinition::operandTruncateKind(size_t index) const
    2724             : {
    2725             :     // Generic routine: We don't know anything.
    2726          44 :     return NoTruncate;
    2727             : }
    2728             : 
    2729             : MDefinition::TruncateKind
    2730          39 : MPhi::operandTruncateKind(size_t index) const
    2731             : {
    2732             :     // The truncation applied to a phi is effectively applied to the phi's
    2733             :     // operands.
    2734          39 :     return truncateKind_;
    2735             : }
    2736             : 
    2737             : MDefinition::TruncateKind
    2738           0 : MTruncateToInt32::operandTruncateKind(size_t index) const
    2739             : {
    2740             :     // This operator is an explicit truncate to int32.
    2741           0 :     return Truncate;
    2742             : }
    2743             : 
    2744             : MDefinition::TruncateKind
    2745           0 : MBinaryBitwiseInstruction::operandTruncateKind(size_t index) const
    2746             : {
    2747             :     // The bitwise operators truncate to int32.
    2748           0 :     return Truncate;
    2749             : }
    2750             : 
    2751             : MDefinition::TruncateKind
    2752           5 : MLimitedTruncate::operandTruncateKind(size_t index) const
    2753             : {
    2754           5 :     return Min(truncateKind(), truncateLimit_);
    2755             : }
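// Note on the Min() calls in these hooks: TruncateKind is ordered from weakest
// to strongest,
//
//   NoTruncate < TruncateAfterBailouts < IndirectTruncate < Truncate
//
// (ordering as defined by the enum in MIR.h), so taking the minimum of the
// instruction's own kind and a per-operand limit yields the strongest
// truncation that is still safe for that operand.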
    2756             : 
    2757             : MDefinition::TruncateKind
    2758           8 : MAdd::operandTruncateKind(size_t index) const
    2759             : {
    2760             :     // This operator is doing some arithmetic. If its result is truncated,
    2761             :     // it's an indirect truncate for its operands.
    2762           8 :     return Min(truncateKind(), IndirectTruncate);
    2763             : }
    2764             : 
    2765             : MDefinition::TruncateKind
    2766           0 : MSub::operandTruncateKind(size_t index) const
    2767             : {
    2768             :     // See the comment in MAdd::operandTruncateKind.
    2769           0 :     return Min(truncateKind(), IndirectTruncate);
    2770             : }
    2771             : 
    2772             : MDefinition::TruncateKind
    2773           0 : MMul::operandTruncateKind(size_t index) const
    2774             : {
    2775             :     // See the comment in MAdd::operandTruncateKind.
    2776           0 :     return Min(truncateKind(), IndirectTruncate);
    2777             : }
    2778             : 
    2779             : MDefinition::TruncateKind
    2780           0 : MToDouble::operandTruncateKind(size_t index) const
    2781             : {
    2782             :     // MToDouble propagates its truncate kind to its operand.
    2783           0 :     return truncateKind();
    2784             : }
    2785             : 
    2786             : MDefinition::TruncateKind
    2787           0 : MStoreUnboxedScalar::operandTruncateKind(size_t index) const
    2788             : {
    2789             :     // Some receiver objects, such as typed arrays, will truncate out-of-range integer inputs.
    2790           0 :     return (truncateInput() && index == 2 && isIntegerWrite()) ? Truncate : NoTruncate;
    2791             : }
    2792             : 
    2793             : MDefinition::TruncateKind
    2794           0 : MStoreTypedArrayElementHole::operandTruncateKind(size_t index) const
    2795             : {
    2796             :     // An integer store truncates the stored value.
    2797           0 :     return index == 3 && isIntegerWrite() ? Truncate : NoTruncate;
    2798             : }
    2799             : 
    2800             : MDefinition::TruncateKind
    2801           0 : MStoreTypedArrayElementStatic::operandTruncateKind(size_t index) const
    2802             : {
    2803             :     // An integer store truncates the stored value.
    2804           0 :     return index == 1 && isIntegerWrite() ? Truncate : NoTruncate;
    2805             : }
    2806             : 
    2807             : MDefinition::TruncateKind
    2808           0 : MDiv::operandTruncateKind(size_t index) const
    2809             : {
    2810           0 :     return Min(truncateKind(), TruncateAfterBailouts);
    2811             : }
    2812             : 
    2813             : MDefinition::TruncateKind
    2814           0 : MMod::operandTruncateKind(size_t index) const
    2815             : {
    2816           0 :     return Min(truncateKind(), TruncateAfterBailouts);
    2817             : }
    2818             : 
    2819             : MDefinition::TruncateKind
    2820          43 : MCompare::operandTruncateKind(size_t index) const
    2821             : {
    2822             :     // If we're doing an int32 comparison on operands which were previously
    2823             :     // floating-point, convert them!
    2824          43 :     MOZ_ASSERT_IF(truncateOperands_, isInt32Comparison());
    2825          43 :     return truncateOperands_ ? TruncateAfterBailouts : NoTruncate;
    2826             : }
    2827             : 
    2828             : static bool
    2829         108 : TruncateTest(TempAllocator& alloc, MTest* test)
    2830             : {
    2831             :     // If all possible inputs to the test are either int32 or boolean,
    2832             :     // convert those inputs to int32 so that an int32 test can be performed.
    2833             : 
    2834         108 :     if (test->input()->type() != MIRType::Value)
    2835          78 :         return true;
    2836             : 
    2837          30 :     if (!test->input()->isPhi() || !test->input()->hasOneDefUse() || test->input()->isImplicitlyUsed())
    2838          24 :         return true;
    2839             : 
    2840           6 :     MPhi* phi = test->input()->toPhi();
    2841           8 :     for (size_t i = 0; i < phi->numOperands(); i++) {
    2842           8 :         MDefinition* def = phi->getOperand(i);
    2843           8 :         if (!def->isBox())
    2844           3 :             return true;
    2845           5 :         MDefinition* inner = def->getOperand(0);
    2846           5 :         if (inner->type() != MIRType::Boolean && inner->type() != MIRType::Int32)
    2847           3 :             return true;
    2848             :     }
    2849             : 
    2850           0 :     for (size_t i = 0; i < phi->numOperands(); i++) {
    2851           0 :         MDefinition* inner = phi->getOperand(i)->getOperand(0);
    2852           0 :         if (inner->type() != MIRType::Int32) {
    2853           0 :             if (!alloc.ensureBallast())
    2854           0 :                 return false;
    2855           0 :             MBasicBlock* block = inner->block();
    2856           0 :             inner = MToInt32::New(alloc, inner);
    2857           0 :             block->insertBefore(block->lastIns(), inner->toInstruction());
    2858             :         }
    2859           0 :         MOZ_ASSERT(inner->type() == MIRType::Int32);
    2860           0 :         phi->replaceOperand(i, inner);
    2861             :     }
    2862             : 
    2863           0 :     phi->setResultType(MIRType::Int32);
    2864           0 :     return true;
    2865             : }
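// Hypothetical example of the rewrite above: for
//
//   var x = cond ? 1 : true;
//   if (x) { ... }
//
// every input of the value-typed phi feeding the test is a boxed int32 or
// boolean, so each operand is unwrapped (inserting MToInt32 where the unboxed
// input is not already Int32) and the phi's result type becomes Int32,
// allowing an int32 test instead of a test on a boxed Value.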
    2866             : 
    2867             : // Truncating an instruction's result is an optimization which requires
    2868             : // knowing all uses of the instruction.  This implies that if one of the
    2869             : // uses got removed, then Range Analysis is not allowed to make any
    2870             : // modification which can change the result, especially if the result can
    2871             : // be observed.
    2872             : //
    2873             : // This corner case can easily be understood with UCE examples, but it
    2874             : // might also happen with type-inference assumptions.  Note: type-inference
    2875             : // assumptions behave like implicit branches into which other types might
    2876             : // be flowing.
    2877             : static bool
    2878           0 : CloneForDeadBranches(TempAllocator& alloc, MInstruction* candidate)
    2879             : {
    2880             :     // Compare returns a boolean so it doesn't have to be recovered on bailout
    2881             :     // because the output would remain correct.
    2882           0 :     if (candidate->isCompare())
    2883           0 :         return true;
    2884             : 
    2885           0 :     MOZ_ASSERT(candidate->canClone());
    2886           0 :     if (!alloc.ensureBallast())
    2887           0 :         return false;
    2888             : 
    2889           0 :     MDefinitionVector operands(alloc);
    2890           0 :     size_t end = candidate->numOperands();
    2891           0 :     if (!operands.reserve(end))
    2892           0 :         return false;
    2893           0 :     for (size_t i = 0; i < end; ++i)
    2894           0 :         operands.infallibleAppend(candidate->getOperand(i));
    2895             : 
    2896           0 :     MInstruction* clone = candidate->clone(alloc, operands);
    2897           0 :     clone->setRange(nullptr);
    2898             : 
    2899             :     // Set the UseRemoved flag on the cloned instruction in order to chain
    2900             :     // recover instructions for the bailout path.
    2901           0 :     clone->setUseRemovedUnchecked();
    2902             : 
    2903           0 :     candidate->block()->insertBefore(candidate, clone);
    2904             : 
    2905           0 :     if (!candidate->maybeConstantValue()) {
    2906           0 :         MOZ_ASSERT(clone->canRecoverOnBailout());
    2907           0 :         clone->setRecoveredOnBailout();
    2908             :     }
    2909             : 
    2910             :     // Replace the candidate by its recovered on bailout clone within recovered
    2911             :     // instructions and resume points operands.
    2912           0 :     for (MUseIterator i(candidate->usesBegin()); i != candidate->usesEnd(); ) {
    2913           0 :         MUse* use = *i++;
    2914           0 :         MNode* ins = use->consumer();
    2915           0 :         if (ins->isDefinition() && !ins->toDefinition()->isRecoveredOnBailout())
    2916           0 :             continue;
    2917             : 
    2918           0 :         use->replaceProducer(clone);
    2919             :     }
    2920             : 
    2921           0 :     return true;
    2922             : }
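// Rough example of why the clone is needed (hypothetical scenario): if `a + b`
// feeds both a truncated arithmetic use and a resume point, truncating the
// original would let a bailout observe the wrapped int32 value. Cloning the
// add, marking the clone recovered-on-bailout, and pointing the resume point
// at the clone lets the bailout path recompute the untruncated double while
// the original is safely truncated.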
    2923             : 
    2924             : // Examine all the users of |candidate| and determine the most aggressive
    2925             : // truncate kind that satisfies all of them.
    2926             : static MDefinition::TruncateKind
    2927         140 : ComputeRequestedTruncateKind(MDefinition* candidate, bool* shouldClone)
    2928             : {
    2929         140 :     bool isCapturedResult = false;   // Check if used by a recovered instruction or a resume point.
    2930         140 :     bool isObservableResult = false; // Check if it can be read from another frame.
    2931         140 :     bool isRecoverableResult = true; // Check if it can safely be reconstructed.
    2932         140 :     bool hasUseRemoved = candidate->isUseRemoved();
    2933             : 
    2934         140 :     MDefinition::TruncateKind kind = MDefinition::Truncate;
    2935         337 :     for (MUseIterator use(candidate->usesBegin()); use != candidate->usesEnd(); use++) {
    2936         329 :         if (use->consumer()->isResumePoint()) {
    2937             :             // Truncation is a destructive optimization; as such, we need to
    2938             :             // pay attention to removed branches and avoid destructive
    2939             :             // optimizations if we have no alternative. (See the UseRemoved
    2940             :             // flag.)
    2941         177 :             isCapturedResult = true;
    2942         354 :             isObservableResult = isObservableResult ||
    2943         177 :                 use->consumer()->toResumePoint()->isObservableOperand(*use);
    2944         354 :             isRecoverableResult = isRecoverableResult &&
    2945         177 :                 use->consumer()->toResumePoint()->isRecoverableOperand(*use);
    2946         177 :             continue;
    2947             :         }
    2948             : 
    2949         152 :         MDefinition* consumer = use->consumer()->toDefinition();
    2950         152 :         if (consumer->isRecoveredOnBailout()) {
    2951          17 :             isCapturedResult = true;
    2952          17 :             hasUseRemoved = hasUseRemoved || consumer->isUseRemoved();
    2953          17 :             continue;
    2954             :         }
    2955             : 
    2956         135 :         MDefinition::TruncateKind consumerKind = consumer->operandTruncateKind(consumer->indexOf(*use));
    2957         135 :         kind = Min(kind, consumerKind);
    2958         135 :         if (kind == MDefinition::NoTruncate)
    2959         132 :             break;
    2960             :     }
    2961             : 
    2962             :     // We cannot do full truncation on guarded instructions.
    2963         140 :     if (candidate->isGuard() || candidate->isGuardRangeBailouts())
    2964           2 :         kind = Min(kind, MDefinition::TruncateAfterBailouts);
    2965             : 
    2966             :     // If the value naturally produces an int32 value (before bailout checks)
    2967             :     // that needs no conversion, we don't have to worry about resume points
    2968             :     // seeing truncated values.
    2969         140 :     bool needsConversion = !candidate->range() || !candidate->range()->isInt32();
    2970             : 
    2971             :     // If the instruction is explicitly truncated (not indirectly) by all its
    2972             :     // uses and if it has no removed uses, then we can safely encode its
    2973             :     // truncated result as part of the resume point operands.  This is safe,
    2974             :     // because even if we resume with a truncated double, the next baseline
    2975             :     // instruction operating on this instruction is going to be a no-op.
    2976             :     //
    2977             :     // Note, that if the result can be observed from another frame, then this
    2978             :     // optimization is not safe.
    2979         140 :     bool safeToConvert = kind == MDefinition::Truncate && !hasUseRemoved && !isObservableResult;
    2980             : 
    2981             :     // If the candidate instruction appears as operand of a resume point or a
    2982             :     // recover instruction, and we have to truncate its result, then we might
    2983             :     // have to either recover the result during the bailout, or avoid the
    2984             :     // truncation.
    2985         140 :     if (isCapturedResult && needsConversion && !safeToConvert) {
    2986             : 
    2987             :         // If the result can be recovered from all the resume points (not needed
    2988             :         // for iterating over the inlined frames), and this instruction can be
    2989             :         // recovered on bailout, then we can clone it and use the cloned
    2990             :         // instruction to encode the recover instruction.  Otherwise, we should
    2991             :         // keep the original result and bailout if the value is not in the int32
    2992             :         // range.
    2993           0 :         if (!JitOptions.disableRecoverIns && isRecoverableResult && candidate->canRecoverOnBailout())
    2994           0 :             *shouldClone = true;
    2995             :         else
    2996           0 :             kind = Min(kind, MDefinition::TruncateAfterBailouts);
    2997             :     }
    2998             : 
    2999         140 :     return kind;
    3000             : }
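// Hypothetical walk-through: an MAdd whose only true uses are bitwise-or
// operands starts at Truncate (every consumer reports Truncate); if the add
// also appears in a resume point and its range is not already int32, the
// result is "captured" and "needs conversion", so unless it can be recovered
// by a clone (or safely encoded truncated) the kind is weakened to
// TruncateAfterBailouts, keeping the bailout-visible value exact.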
    3001             : 
    3002             : static MDefinition::TruncateKind
    3003        1027 : ComputeTruncateKind(MDefinition* candidate, bool* shouldClone)
    3004             : {
    3005             :     // Compare operations might coerce their inputs to int32 if the ranges are
    3006             :     // correct.  So we do not need to check if all uses are coerced.
    3007        1027 :     if (candidate->isCompare())
    3008          45 :         return MDefinition::TruncateAfterBailouts;
    3009             : 
    3010             :     // Set the truncated flag if range analysis ensures that the value has no
    3011             :     // rounding errors and no fractional part. Note that we can't use
    3012             :     // the MDefinition Range constructor, because we need to know if
    3013             :     // the value will have rounding errors before any bailout checks.
    3014         982 :     const Range* r = candidate->range();
    3015         982 :     bool canHaveRoundingErrors = !r || r->canHaveRoundingErrors();
    3016             : 
    3017             :     // Special case integer division and modulo: a/b can be infinite, and a%b
    3018             :     // can be NaN but cannot actually have rounding errors induced by truncation.
    3019         982 :     if ((candidate->isDiv() || candidate->isMod()) &&
    3020           0 :         static_cast<const MBinaryArithInstruction *>(candidate)->specialization() == MIRType::Int32)
    3021             :     {
    3022           0 :         canHaveRoundingErrors = false;
    3023             :     }
    3024             : 
    3025         982 :     if (canHaveRoundingErrors)
    3026         842 :         return MDefinition::NoTruncate;
    3027             : 
    3028             :     // Ensure all observable uses are truncated.
    3029         140 :     return ComputeRequestedTruncateKind(candidate, shouldClone);
    3030             : }
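                     :
                     : // Illustrative sketch, not part of RangeAnalysis.cpp: why an Int32-specialized
                     : // division cannot pick up rounding errors from truncation. For int32 operands
                     : // (excluding b == 0 and INT32_MIN / -1), C++ integer division truncates toward
                     : // zero, which is exactly what truncating the double quotient produces. The
                     : // helper name is hypothetical and assumes the standard headers below.
                     : #include <cassert>
                     : #include <cmath>
                     : #include <cstdint>
                     :
                     : static void
                     : ExampleCheckTruncatedDivision(int32_t a, int32_t b)
                     : {
                     :     assert(b != 0 && !(a == INT32_MIN && b == -1));
                     :     // int32 values are exact as doubles, and the rounded double quotient can
                     :     // never cross an integer boundary, so both paths agree.
                     :     assert(std::trunc(double(a) / double(b)) == double(a / b));
                     : }
                     : // For instance, ExampleCheckTruncatedDivision(7, -2) passes: trunc(-3.5) == -3
                     : // matches C++'s 7 / -2 == -3.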
    3031             : 
    3032             : static void
    3033           2 : RemoveTruncatesOnOutput(MDefinition* truncated)
    3034             : {
    3035             :     // Compare returns a boolean so it doesn't have any output truncates.
    3036           2 :     if (truncated->isCompare())
    3037           0 :         return;
    3038             : 
    3039           2 :     MOZ_ASSERT(truncated->type() == MIRType::Int32);
    3040           2 :     MOZ_ASSERT(Range(truncated).isInt32());
    3041             : 
    3042           7 :     for (MUseDefIterator use(truncated); use; use++) {
    3043           5 :         MDefinition* def = use.def();
    3044           5 :         if (!def->isTruncateToInt32() && !def->isToInt32())
    3045           5 :             continue;
    3046             : 
    3047           0 :         def->replaceAllUsesWith(truncated);
    3048             :     }
    3049             : }
    3050             : 
    3051             : static void
    3052           2 : AdjustTruncatedInputs(TempAllocator& alloc, MDefinition* truncated)
    3053             : {
    3054           2 :     MBasicBlock* block = truncated->block();
    3055           6 :     for (size_t i = 0, e = truncated->numOperands(); i < e; i++) {
    3056           4 :         MDefinition::TruncateKind kind = truncated->operandTruncateKind(i);
    3057           4 :         if (kind == MDefinition::NoTruncate)
    3058           0 :             continue;
    3059             : 
    3060           4 :         MDefinition* input = truncated->getOperand(i);
    3061           4 :         if (input->type() == MIRType::Int32)
    3062           4 :             continue;
    3063             : 
    3064           0 :         if (input->isToDouble() && input->getOperand(0)->type() == MIRType::Int32) {
    3065           0 :             truncated->replaceOperand(i, input->getOperand(0));
    3066             :         } else {
    3067             :             MInstruction* op;
    3068           0 :             if (kind == MDefinition::TruncateAfterBailouts)
    3069           0 :                 op = MToInt32::New(alloc, truncated->getOperand(i));
    3070             :             else
    3071           0 :                 op = MTruncateToInt32::New(alloc, truncated->getOperand(i));
    3072             : 
    3073           0 :             if (truncated->isPhi()) {
    3074           0 :                 MBasicBlock* pred = block->getPredecessor(i);
    3075           0 :                 pred->insertBefore(pred->lastIns(), op);
    3076             :             } else {
    3077           0 :                 block->insertBefore(truncated->toInstruction(), op);
    3078             :             }
    3079           0 :             truncated->replaceOperand(i, op);
    3080             :         }
    3081             :     }
    3082             : 
    3083           2 :     if (truncated->isToDouble()) {
    3084           0 :         truncated->replaceAllUsesWith(truncated->toToDouble()->getOperand(0));
    3085           0 :         block->discard(truncated->toToDouble());
    3086             :     }
    3087           2 : }
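                     :
                     : // Illustrative sketch, not part of RangeAnalysis.cpp: the two conversions chosen
                     : // above have different semantics. The MToInt32 path (used for
                     : // TruncateAfterBailouts) is only valid when the double is already an exact int32
                     : // and otherwise has to bail out; the MTruncateToInt32 path follows ECMAScript
                     : // ToInt32 and wraps modulo 2^32 unconditionally. Both helpers below are
                     : // hypothetical stand-ins (negative-zero handling is simplified away) and assume
                     : // the standard <cmath>/<cstdint> headers.
                     : #include <cmath>
                     : #include <cstdint>
                     :
                     : // Checked conversion: returns false where the compiled code would bail out.
                     : static bool
                     : ExampleCheckedToInt32(double d, int32_t* out)
                     : {
                     :     if (std::isnan(d) || std::trunc(d) != d || d < -2147483648.0 || d > 2147483647.0)
                     :         return false;
                     :     *out = int32_t(d);
                     :     return true;
                     : }
                     :
                     : // Unconditional truncation in the style of ECMAScript ToInt32: NaN and the
                     : // infinities map to 0, everything else is truncated and wrapped into
                     : // [-2^31, 2^31).
                     : static int32_t
                     : ExampleModularToInt32(double d)
                     : {
                     :     if (std::isnan(d) || std::isinf(d))
                     :         return 0;
                     :     double wrapped = std::fmod(std::trunc(d), 4294967296.0);  // modulo 2^32
                     :     if (wrapped < 0)
                     :         wrapped += 4294967296.0;
                     :     uint32_t bits = uint32_t(uint64_t(wrapped));
                     :     if (bits >= 2147483648u)
                     :         return int32_t(int64_t(bits) - 4294967296LL);
                     :     return int32_t(bits);
                     : }
                     : // ExampleCheckedToInt32(3.5, &i) is false (a bailout), while
                     : // ExampleModularToInt32(3.5) is 3 and ExampleModularToInt32(4294967299.0) is 3.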
    3088             : 
    3089             : // Iterate backward over all instructions and attempt to truncate every
    3090             : // instruction which satisfies the following predicates: it has been analyzed
    3091             : // by range analysis, its range has no rounding errors, and all of its uses
    3092             : // truncate the result.
    3093             : //
    3094             : // If the truncation of the operation is successful, then the instruction is
    3095             : // queued so that the graph can later be updated to restore type correctness
    3096             : // by converting the operands that need to be truncated.
    3097             : //
    3098             : // We iterate backward because it is likely that a truncated operation truncates
    3099             : // some of its operands.
    3100             : bool
    3101           8 : RangeAnalysis::truncate()
    3102             : {
    3103           8 :     JitSpew(JitSpew_Range, "Do range-based truncation (backward loop)");
    3104             : 
    3105             :     // Automatic truncation is disabled for wasm because the truncation logic
    3106             :     // is based on IonMonkey, which assumes that we can bail out if the truncation
    3107             :     // logic fails. As wasm code has no bailout mechanism, it is safer to avoid
    3108             :     // any automatic truncations.
    3109           8 :     MOZ_ASSERT(!mir->compilingWasm());
    3110             : 
    3111          16 :     Vector<MDefinition*, 16, SystemAllocPolicy> worklist;
    3112             : 
    3113         411 :     for (PostorderIterator block(graph_.poBegin()); block != graph_.poEnd(); block++) {
    3114        1875 :         for (MInstructionReverseIterator iter(block->rbegin()); iter != block->rend(); iter++) {
    3115        1472 :             if (iter->isRecoveredOnBailout())
    3116        1529 :                 continue;
    3117             : 
    3118        1414 :             if (iter->type() == MIRType::None) {
    3119         562 :                 if (iter->isTest()) {
    3120         108 :                     if (!TruncateTest(alloc(), iter->toTest()))
    3121           0 :                         return false;
    3122             :                 }
    3123         562 :                 continue;
    3124             :             }
    3125             : 
    3126             :             // Remember all bitop instructions for folding after range analysis.
    3127         852 :             switch (iter->op()) {
    3128             :               case MDefinition::Op_BitAnd:
    3129             :               case MDefinition::Op_BitOr:
    3130             :               case MDefinition::Op_BitXor:
    3131             :               case MDefinition::Op_Lsh:
    3132             :               case MDefinition::Op_Rsh:
    3133             :               case MDefinition::Op_Ursh:
    3134           0 :                 if (!bitops.append(static_cast<MBinaryBitwiseInstruction*>(*iter)))
    3135           0 :                     return false;
    3136           0 :                 break;
    3137             :               default:;
    3138             :             }
    3139             : 
    3140         852 :             bool shouldClone = false;
    3141         852 :             MDefinition::TruncateKind kind = ComputeTruncateKind(*iter, &shouldClone);
    3142         852 :             if (kind == MDefinition::NoTruncate)
    3143         800 :                 continue;
    3144             : 
    3145             :             // Range Analysis is sometimes eager to do optimizations, even if we
    3146             :             // are not able to truncate an instruction. In such cases, we
    3147             :             // speculatively compile the instruction to an int32 instruction
    3148             :             // while adding a guard. This is what is implied by
    3149             :             // TruncateAfterBailouts.
    3150             :             //
    3151             :             // If we already experienced an overflow bailout while executing
    3152             :             // code within the current JSScript, we no longer attempt this kind
    3153             :             // of eager optimization.
    3154          52 :             if (kind <= MDefinition::TruncateAfterBailouts && block->info().hadOverflowBailout())
    3155           0 :                 continue;
    3156             : 
    3157             :             // Truncate this instruction if possible.
    3158          52 :             if (!iter->needTruncation(kind))
    3159          51 :                 continue;
    3160             : 
    3161           1 :             SpewTruncate(*iter, kind, shouldClone);
    3162             : 
    3163             :             // If needed, clone the current instruction in order to keep it for
    3164             :             // the bailout path.  This gives us the ability to truncate
    3165             :             // instructions even after the removal of branches.
    3166           1 :             if (shouldClone && !CloneForDeadBranches(alloc(), *iter))
    3167           0 :                 return false;
    3168             : 
    3169           1 :             iter->truncate();
    3170             : 
    3171             :             // Delay updates of inputs/outputs to avoid creating nodes which
    3172             :             // would be removed by the truncation of subsequent operations.
    3173           1 :             iter->setInWorklist();
    3174           1 :             if (!worklist.append(*iter))
    3175           0 :                 return false;
    3176             :         }
    3177         578 :         for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ++iter) {
    3178         175 :             bool shouldClone = false;
    3179         175 :             MDefinition::TruncateKind kind = ComputeTruncateKind(*iter, &shouldClone);
    3180         175 :             if (kind == MDefinition::NoTruncate)
    3181         348 :                 continue;
    3182             : 
    3183             :             // Truncate this phi if possible.
    3184           1 :             if (shouldClone || !iter->needTruncation(kind))
    3185           0 :                 continue;
    3186             : 
    3187           1 :             SpewTruncate(*iter, kind, shouldClone);
    3188             : 
    3189           1 :             iter->truncate();
    3190             : 
    3191             :             // Delay updates of inputs/outputs to avoid creating nodes which
    3192             :             // would be removed by the truncation of subsequent operations.
    3193           1 :             iter->setInWorklist();
    3194           1 :             if (!worklist.append(*iter))
    3195           0 :                 return false;
    3196             :         }
    3197             :     }
    3198             : 
    3199             :     // Update inputs/outputs of truncated instructions.
    3200           8 :     JitSpew(JitSpew_Range, "Do graph type fixup (dequeue)");
    3201           2 :     while (!worklist.empty()) {
    3202           2 :         if (!alloc().ensureBallast())
    3203           0 :             return false;
    3204           2 :         MDefinition* def = worklist.popCopy();
    3205           2 :         def->setNotInWorklist();
    3206           2 :         RemoveTruncatesOnOutput(def);
    3207           2 :         AdjustTruncatedInputs(alloc(), def);
    3208             :     }
    3209             : 
    3210           8 :     return true;
    3211             : }
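                     :
                     : // Illustrative sketch, not part of RangeAnalysis.cpp: the mark-and-queue
                     : // pattern used above, in miniature. Decisions are made during the scan, but
                     : // the graph rewrite is delayed until the whole graph has been visited, and an
                     : // in-worklist flag keeps nodes from being queued twice. The toy node type and
                     : // field names are hypothetical; assumes <vector>.
                     : #include <vector>
                     :
                     : struct ExampleNode {
                     :     bool truncatable = false;
                     :     bool inWorklist = false;
                     :     bool truncated = false;
                     : };
                     :
                     : static void
                     : ExampleTwoPhaseTruncate(std::vector<ExampleNode*>& nodes)
                     : {
                     :     std::vector<ExampleNode*> worklist;
                     :
                     :     // Phase 1: decide what to truncate; delay any rewriting.
                     :     for (ExampleNode* node : nodes) {
                     :         if (!node->truncatable || node->inWorklist)
                     :             continue;
                     :         node->inWorklist = true;
                     :         worklist.push_back(node);
                     :     }
                     :
                     :     // Phase 2: rewrite only after every decision has been made, so a fixup
                     :     // never creates a node that a later truncation would remove.
                     :     while (!worklist.empty()) {
                     :         ExampleNode* node = worklist.back();
                     :         worklist.pop_back();
                     :         node->inWorklist = false;
                     :         node->truncated = true;  // stands in for the real input/output fixup
                     :     }
                     : }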
    3212             : 
    3213             : bool
    3214           8 : RangeAnalysis::removeUnnecessaryBitops()
    3215             : {
    3216             :     // Note: This operation changes the semantics of the program in a way which
    3217             :     // only works with Int32. Recover instructions added by the Sink phase
    3218             :     // expect the MIR graph to still have valid flow as if these were double
    3219             :     // operations instead of Int32 operations. Thus, this phase should be
    3220             :     // executed after the Sink phase, and before DCE.
    3221             : 
    3222             :     // Fold any unnecessary bitops in the graph, such as (x | 0) on an integer
    3223             :     // input. This is done after range analysis rather than during GVN as the
    3224             :     // presence of the bitop can change which instructions are truncated.
    3225           8 :     for (size_t i = 0; i < bitops.length(); i++) {
    3226           0 :         MBinaryBitwiseInstruction* ins = bitops[i];
    3227           0 :         if (ins->isRecoveredOnBailout())
    3228           0 :             continue;
    3229             : 
    3230           0 :         MDefinition* folded = ins->foldUnnecessaryBitop();
    3231           0 :         if (folded != ins) {
    3232           0 :             ins->replaceAllLiveUsesWith(folded);
    3233           0 :             ins->setRecoveredOnBailout();
    3234             :         }
    3235             :     }
    3236             : 
    3237           8 :     bitops.clear();
    3238           8 :     return true;
    3239             : }
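                     :
                     : // Illustrative sketch, not part of RangeAnalysis.cpp: why (x | 0) is only
                     : // removable once range analysis has proven the input is an int32. The helper
                     : // name is hypothetical; assumes <cassert>/<cstdint>.
                     : #include <cassert>
                     : #include <cstdint>
                     :
                     : static void
                     : ExampleFoldBitOrZero()
                     : {
                     :     int32_t alreadyInt32 = -42;
                     :     // On an int32 input the bitop is the identity, so it can be folded away.
                     :     assert((alreadyInt32 | 0) == alreadyInt32);
                     :
                     :     // For a double input such as 3.5, JS applies ToInt32 first, so `3.5 | 0`
                     :     // evaluates to 3: the bitop is observable and must be kept unless the
                     :     // range proves the input has no fractional part.
                     : }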
    3240             : 
    3241             : 
    3242             : ///////////////////////////////////////////////////////////////////////////////
    3243             : // Collect Range information of operands
    3244             : ///////////////////////////////////////////////////////////////////////////////
    3245             : 
    3246             : void
    3247           0 : MInArray::collectRangeInfoPreTrunc()
    3248             : {
    3249           0 :     Range indexRange(index());
    3250           0 :     if (indexRange.isFiniteNonNegative())
    3251           0 :         needsNegativeIntCheck_ = false;
    3252           0 : }
    3253             : 
    3254             : void
    3255           0 : MLoadElementHole::collectRangeInfoPreTrunc()
    3256             : {
    3257           0 :     Range indexRange(index());
    3258           0 :     if (indexRange.isFiniteNonNegative()) {
    3259           0 :         needsNegativeIntCheck_ = false;
    3260           0 :         setNotGuard();
    3261             :     }
    3262           0 : }
    3263             : 
    3264             : void
    3265           0 : MLoadTypedArrayElementStatic::collectRangeInfoPreTrunc()
    3266             : {
    3267           0 :     Range range(ptr());
    3268             : 
    3269           0 :     if (range.hasInt32LowerBound() && range.hasInt32UpperBound()) {
    3270           0 :         int64_t offset = this->offset();
    3271           0 :         int64_t lower = range.lower() + offset;
    3272           0 :         int64_t upper = range.upper() + offset;
    3273           0 :         int64_t length = this->length();
    3274           0 :         if (lower >= 0 && upper < length)
    3275           0 :             setNeedsBoundsCheck(false);
    3276             :     }
    3277           0 : }
    3278             : 
    3279             : void
    3280           0 : MStoreTypedArrayElementStatic::collectRangeInfoPreTrunc()
    3281             : {
    3282           0 :     Range range(ptr());
    3283             : 
    3284           0 :     if (range.hasInt32LowerBound() && range.hasInt32UpperBound()) {
    3285           0 :         int64_t offset = this->offset();
    3286           0 :         int64_t lower = range.lower() + offset;
    3287           0 :         int64_t upper = range.upper() + offset;
    3288           0 :         int64_t length = this->length();
    3289           0 :         if (lower >= 0 && upper < length)
    3290           0 :             setNeedsBoundsCheck(false);
    3291             :     }
    3292           0 : }
    3293             : 
    3294             : void
    3295           0 : MClz::collectRangeInfoPreTrunc()
    3296             : {
    3297           0 :     Range inputRange(input());
    3298           0 :     if (!inputRange.canBeZero())
    3299           0 :         operandIsNeverZero_ = true;
    3300           0 : }
    3301             : 
    3302             : void
    3303           0 : MCtz::collectRangeInfoPreTrunc()
    3304             : {
    3305           0 :     Range inputRange(input());
    3306           0 :     if (!inputRange.canBeZero())
    3307           0 :         operandIsNeverZero_ = true;
    3308           0 : }
    3309             : 
    3310             : void
    3311           0 : MDiv::collectRangeInfoPreTrunc()
    3312             : {
    3313           0 :     Range lhsRange(lhs());
    3314           0 :     Range rhsRange(rhs());
    3315             : 
    3316             :     // Test if the dividend is non-negative.
    3317           0 :     if (lhsRange.isFiniteNonNegative())
    3318           0 :         canBeNegativeDividend_ = false;
    3319             : 
    3320             :     // Try removing divide by zero check.
    3321           0 :     if (!rhsRange.canBeZero())
    3322           0 :         canBeDivideByZero_ = false;
    3323             : 
    3324             :     // If lhsRange does not contain INT32_MIN, the negative
    3325             :     // overflow check can be skipped.
    3326           0 :     if (!lhsRange.contains(INT32_MIN))
    3327           0 :         canBeNegativeOverflow_ = false;
    3328             : 
    3329             :     // Likewise, if rhsRange does not contain -1.
    3330           0 :     if (!rhsRange.contains(-1))
    3331           0 :         canBeNegativeOverflow_ = false;
    3332             : 
    3333             :     // If lhsRange cannot be zero, the
    3334             :     // negative zero check can be skipped.
    3335           0 :     if (!lhsRange.canBeZero())
    3336           0 :         canBeNegativeZero_ = false;
    3337             : 
    3338             :     // If rhsRange >= 0, the negative zero check can be skipped.
    3339           0 :     if (rhsRange.isFiniteNonNegative())
    3340           0 :         canBeNegativeZero_ = false;
    3341           0 : }
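                     :
                     : // Illustrative sketch, not part of RangeAnalysis.cpp: the three hazards that
                     : // the range tests above try to rule out for an Int32 division, expressed over
                     : // a plain [lower, upper] interval. The interval type and helper are
                     : // hypothetical and assume ranges already known to fit in int32; <cstdint> is
                     : // assumed for INT32_MIN.
                     : #include <cstdint>
                     :
                     : struct ExampleInterval {
                     :     int32_t lower;
                     :     int32_t upper;
                     :     bool contains(int32_t v) const { return lower <= v && v <= upper; }
                     : };
                     :
                     : struct ExampleDivChecks {
                     :     bool divideByZero;
                     :     bool negativeOverflow;
                     :     bool negativeZero;
                     : };
                     :
                     : static ExampleDivChecks
                     : ExampleNeededDivChecks(const ExampleInterval& lhs, const ExampleInterval& rhs)
                     : {
                     :     ExampleDivChecks checks;
                     :     // The divisor may be 0, so a divide-by-zero check stays.
                     :     checks.divideByZero = rhs.contains(0);
                     :     // INT32_MIN / -1 overflows int32; both operands must admit those values.
                     :     checks.negativeOverflow = lhs.contains(INT32_MIN) && rhs.contains(-1);
                     :     // 0 divided by a negative number is -0.0 in double arithmetic, which an
                     :     // int32 result cannot represent.
                     :     checks.negativeZero = lhs.contains(0) && rhs.lower < 0;
                     :     return checks;
                     : }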
    3342             : 
    3343             : void
    3344           0 : MMul::collectRangeInfoPreTrunc()
    3345             : {
    3346           0 :     Range lhsRange(lhs());
    3347           0 :     Range rhsRange(rhs());
    3348             : 
    3349             :     // If lhsRange contains only positive values, then we can skip the negative zero check.
    3350           0 :     if (lhsRange.isFiniteNonNegative() && !lhsRange.canBeZero())
    3351           0 :         setCanBeNegativeZero(false);
    3352             : 
    3353             :     // Likewise for rhsRange.
    3354           0 :     if (rhsRange.isFiniteNonNegative() && !rhsRange.canBeZero())
    3355           0 :         setCanBeNegativeZero(false);
    3356             : 
    3357             :     // If rhsRange and lhsRange contain only non-negative integers,
    3358             :     // we skip the negative zero check.
    3359           0 :     if (rhsRange.isFiniteNonNegative() && lhsRange.isFiniteNonNegative())
    3360           0 :         setCanBeNegativeZero(false);
    3361             : 
    3362             :     // If both rhsRange and lhsRange are negative, then we skip the negative zero check.
    3363           0 :     if (rhsRange.isFiniteNegative() && lhsRange.isFiniteNegative())
    3364           0 :         setCanBeNegativeZero(false);
    3365           0 : }
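                     :
                     : // Illustrative sketch, not part of RangeAnalysis.cpp: why multiplication needs
                     : // a negative-zero check in the first place. In double arithmetic 0.0 * -5.0 is
                     : // -0.0, which compares equal to 0 but carries a sign bit that an Int32 result
                     : // cannot represent; the check is removable only when the ranges exclude every
                     : // zero-times-negative pairing. Assumes <cassert>/<cmath>.
                     : #include <cassert>
                     : #include <cmath>
                     :
                     : static void
                     : ExampleNegativeZeroProduct()
                     : {
                     :     double product = 0.0 * -5.0;
                     :     assert(product == 0.0);         // equal to +0.0 under ==
                     :     assert(std::signbit(product));  // but the sign bit is set: this is -0.0
                     : }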
    3366             : 
    3367             : void
    3368           0 : MMod::collectRangeInfoPreTrunc()
    3369             : {
    3370           0 :     Range lhsRange(lhs());
    3371           0 :     Range rhsRange(rhs());
    3372           0 :     if (lhsRange.isFiniteNonNegative())
    3373           0 :         canBeNegativeDividend_ = false;
    3374           0 :     if (!rhsRange.canBeZero())
    3375           0 :         canBeDivideByZero_ = false;
    3376             : 
    3377           0 : }
    3378             : 
    3379             : void
    3380           1 : MToInt32::collectRangeInfoPreTrunc()
    3381             : {
    3382           1 :     Range inputRange(input());
    3383           1 :     if (!inputRange.canBeNegativeZero())
    3384           0 :         canBeNegativeZero_ = false;
    3385           1 : }
    3386             : 
    3387             : void
    3388           2 : MBoundsCheck::collectRangeInfoPreTrunc()
    3389             : {
    3390           2 :     Range indexRange(index());
    3391           2 :     Range lengthRange(length());
    3392           2 :     if (!indexRange.hasInt32LowerBound() || !indexRange.hasInt32UpperBound())
    3393           0 :         return;
    3394           2 :     if (!lengthRange.hasInt32LowerBound() || lengthRange.canBeNaN())
    3395           0 :         return;
    3396             : 
    3397           2 :     int64_t indexLower = indexRange.lower();
    3398           2 :     int64_t indexUpper = indexRange.upper();
    3399           2 :     int64_t lengthLower = lengthRange.lower();
    3400           2 :     int64_t min = minimum();
    3401           2 :     int64_t max = maximum();
    3402             : 
    3403           2 :     if (indexLower + min >= 0 && indexUpper + max < lengthLower)
    3404           1 :         fallible_ = false;
    3405             : }
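                     :
                     : // Illustrative sketch, not part of RangeAnalysis.cpp: the test above in plain
                     : // 64-bit form. A bounds check covering index offsets [min, max] is infallible
                     : // when even the smallest offset stays non-negative and even the largest stays
                     : // below every possible length; the 64-bit sums avoid int32 overflow for bounds
                     : // near INT32_MAX. The helper name is hypothetical; assumes <cstdint>.
                     : #include <cstdint>
                     :
                     : static bool
                     : ExampleBoundsCheckIsInfallible(int32_t indexLower, int32_t indexUpper,
                     :                                int32_t lengthLower, int32_t min, int32_t max)
                     : {
                     :     return int64_t(indexLower) + int64_t(min) >= 0 &&
                     :            int64_t(indexUpper) + int64_t(max) < int64_t(lengthLower);
                     : }
                     : // For example, an index in [0, 10] with offsets [0, 3] against a length of at
                     : // least 16 is always in bounds, so the check never fails.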
    3406             : 
    3407             : void
    3408           2 : MBoundsCheckLower::collectRangeInfoPreTrunc()
    3409             : {
    3410           2 :     Range indexRange(index());
    3411           2 :     if (indexRange.hasInt32LowerBound() && indexRange.lower() >= minimum_)
    3412           0 :         fallible_ = false;
    3413           2 : }
    3414             : 
    3415             : void
    3416          45 : MCompare::collectRangeInfoPreTrunc()
    3417             : {
    3418          45 :     if (!Range(lhs()).canBeNaN() && !Range(rhs()).canBeNaN())
    3419          45 :         operandsAreNeverNaN_ = true;
    3420          45 : }
    3421             : 
    3422             : void
    3423           2 : MNot::collectRangeInfoPreTrunc()
    3424             : {
    3425           2 :     if (!Range(input()).canBeNaN())
    3426           0 :         operandIsNeverNaN_ = true;
    3427           2 : }
    3428             : 
    3429             : void
    3430           0 : MPowHalf::collectRangeInfoPreTrunc()
    3431             : {
    3432           0 :     Range inputRange(input());
    3433           0 :     if (!inputRange.canBeInfiniteOrNaN() || inputRange.hasInt32LowerBound())
    3434           0 :         operandIsNeverNegativeInfinity_ = true;
    3435           0 :     if (!inputRange.canBeNegativeZero())
    3436           0 :         operandIsNeverNegativeZero_ = true;
    3437           0 :     if (!inputRange.canBeNaN())
    3438           0 :         operandIsNeverNaN_ = true;
    3439           0 : }
    3440             : 
    3441             : void
    3442           0 : MUrsh::collectRangeInfoPreTrunc()
    3443             : {
    3444           0 :     if (specialization_ == MIRType::Int64)
    3445           0 :         return;
    3446             : 
    3447           0 :     Range lhsRange(lhs()), rhsRange(rhs());
    3448             : 
    3449             :     // As in MUrsh::computeRange(), convert the inputs.
    3450           0 :     lhsRange.wrapAroundToInt32();
    3451           0 :     rhsRange.wrapAroundToShiftCount();
    3452             : 
    3453             :     // If the most significant bit of our result is always going to be zero,
    3454             :     // we can optimize by disabling bailout checks for enforcing an int32 range.
    3455           0 :     if (lhsRange.lower() >= 0 || rhsRange.lower() >= 1)
    3456           0 :         bailoutsDisabled_ = true;
    3457             : }
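                     :
                     : // Illustrative sketch, not part of RangeAnalysis.cpp: why those two conditions
                     : // make the bailout check unnecessary. A JS >>> produces a uint32, which only
                     : // fails to fit in int32 when its most significant bit is set; if the left-hand
                     : // side is non-negative as an int32 (its uint32 form is at most INT32_MAX), or
                     : // the shift count is at least 1 (the result is at most 2^31 - 1), that bit is
                     : // always clear. The helper name is hypothetical; assumes <cassert>/<cstdint>.
                     : #include <cassert>
                     : #include <cstdint>
                     :
                     : static void
                     : ExampleUnsignedShiftFitsInt32(int32_t lhs, uint32_t shift)
                     : {
                     :     shift &= 31;  // JS keeps only the low five bits of the shift count
                     :     uint32_t result = uint32_t(lhs) >> shift;
                     :     if (lhs >= 0 || shift >= 1)
                     :         assert(result <= uint32_t(INT32_MAX));  // representable as an int32
                     : }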
    3458             : 
    3459             : static bool
    3460           0 : DoesMaskMatchRange(int32_t mask, Range& range)
    3461             : {
    3462             :     // Check that the range is non-negative, because a bitand such as `(-3) & 0xff`
    3463             :     // can't be eliminated.
    3464           0 :     if (range.lower() >= 0) {
    3465           0 :         MOZ_ASSERT(range.isInt32());
    3466             :         // Check that the mask value has all bits set given the range upper bound. Note that the
    3467             :         // upper bound does not have to be exactly the mask value. For example, consider `x &
    3468             :         // 0xfff` where `x` is a uint8. That expression can still be optimized to `x`.
    3469           0 :         int bits = 1 + FloorLog2(range.upper());
    3470           0 :         uint32_t maskNeeded = (bits == 32) ? 0xffffffff : (uint32_t(1) << bits) - 1;
    3471           0 :         if ((mask & maskNeeded) == maskNeeded)
    3472           0 :             return true;
    3473             :     }
    3474             : 
    3475           0 :     return false;
    3476             : }
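                     :
                     : // Illustrative sketch, not part of RangeAnalysis.cpp: the same mask test with
                     : // a plain loop standing in for FloorLog2. The bitand is removable when the
                     : // mask has a one in every bit position the (non-negative) value can occupy.
                     : // The helper name is hypothetical; assumes <cstdint>.
                     : #include <cstdint>
                     :
                     : static bool
                     : ExampleMaskCoversRange(int32_t mask, int32_t rangeLower, int32_t rangeUpper)
                     : {
                     :     if (rangeLower < 0)
                     :         return false;  // e.g. (-3) & 0xff really does change the value
                     :
                     :     // Count the bits needed to represent every value up to rangeUpper.
                     :     int bits = 0;
                     :     for (uint32_t u = uint32_t(rangeUpper); u != 0; u >>= 1)
                     :         bits++;
                     :     uint32_t maskNeeded = (bits == 32) ? 0xffffffffu : (uint32_t(1) << bits) - 1;
                     :     return (uint32_t(mask) & maskNeeded) == maskNeeded;
                     : }
                     : // ExampleMaskCoversRange(0xfff, 0, 255) is true, so `x & 0xfff` on a
                     : // uint8-ranged x simplifies to x; ExampleMaskCoversRange(0xff, 0, 300) is
                     : // false because bit 8 of the value may be set but is cleared by the mask.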
    3477             : 
    3478             : void
    3479           0 : MBinaryBitwiseInstruction::collectRangeInfoPreTrunc()
    3480             : {
    3481           0 :     Range lhsRange(lhs());
    3482           0 :     Range rhsRange(rhs());
    3483             : 
    3484           0 :     if (lhs()->isConstant() && lhs()->type() == MIRType::Int32 &&
    3485           0 :         DoesMaskMatchRange(lhs()->toConstant()->toInt32(), rhsRange))
    3486             :     {
    3487           0 :         maskMatchesRightRange = true;
    3488             :     }
    3489             : 
    3490           0 :     if (rhs()->isConstant() && rhs()->type() == MIRType::Int32 &&
    3491           0 :         DoesMaskMatchRange(rhs()->toConstant()->toInt32(), lhsRange))
    3492             :     {
    3493           0 :         maskMatchesLeftRange = true;
    3494             :     }
    3495           0 : }
    3496             : 
    3497             : void
    3498           0 : MNaNToZero::collectRangeInfoPreTrunc()
    3499             : {
    3500           0 :     Range inputRange(input());
    3501             : 
    3502           0 :     if (!inputRange.canBeNaN())
    3503           0 :         operandIsNeverNaN_ = true;
    3504           0 :     if (!inputRange.canBeNegativeZero())
    3505           0 :         operandIsNeverNegativeZero_ = true;
    3506           0 : }
    3507             : 
    3508             : bool
    3509           8 : RangeAnalysis::prepareForUCE(bool* shouldRemoveDeadCode)
    3510             : {
    3511           8 :     *shouldRemoveDeadCode = false;
    3512             : 
    3513         411 :     for (ReversePostorderIterator iter(graph_.rpoBegin()); iter != graph_.rpoEnd(); iter++) {
    3514         403 :         MBasicBlock* block = *iter;
    3515             : 
    3516         403 :         if (!block->unreachable())
    3517         403 :             continue;
    3518             : 
    3519             :         // Filter out unreachable fake entries.
    3520           0 :         if (block->numPredecessors() == 0) {
    3521             :             // Ignore fixup blocks added by the Value Numbering phase, in order
    3522             :             // to keep the dominator tree as-is when we have OSR blocks which are
    3523             :             // no longer reachable from the main entry point of the graph.
    3524           0 :             MOZ_ASSERT(graph_.osrBlock());
    3525           0 :             continue;
    3526             :         }
    3527             : 
    3528           0 :         MControlInstruction* cond = block->getPredecessor(0)->lastIns();
    3529           0 :         if (!cond->isTest())
    3530           0 :             continue;
    3531             : 
    3532             :         // Replace the condition of the test control instruction with a constant
    3533             :         // chosen based on which of the successors has the unreachable flag, which
    3534             :         // is added by MBeta::computeRange on its own block.
    3535           0 :         MTest* test = cond->toTest();
    3536           0 :         MDefinition* condition = test->input();
    3537             : 
    3538             :         // If the false-branch is unreachable, then the test condition must be true.
    3539             :         // If the true-branch is unreachable, then the test condition must be false.
    3540           0 :         MOZ_ASSERT(block == test->ifTrue() || block == test->ifFalse());
    3541           0 :         bool value = block == test->ifFalse();
    3542           0 :         MConstant* constant = MConstant::New(alloc().fallible(), BooleanValue(value));
    3543           0 :         if (!constant)
    3544           0 :             return false;
    3545             : 
    3546           0 :         condition->setGuardRangeBailoutsUnchecked();
    3547             : 
    3548           0 :         test->block()->insertBefore(test, constant);
    3549             : 
    3550           0 :         test->replaceOperand(0, constant);
    3551           0 :         JitSpew(JitSpew_Range, "Update condition of %d to reflect unreachable branches.",
    3552           0 :                 test->id());
    3553             : 
    3554           0 :         *shouldRemoveDeadCode = true;
    3555             :     }
    3556             : 
    3557           8 :     return tryRemovingGuards();
    3558             : }
    3559             : 
    3560           8 : bool RangeAnalysis::tryRemovingGuards()
    3561             : {
    3562          16 :     MDefinitionVector guards(alloc());
    3563             : 
    3564         411 :     for (ReversePostorderIterator block = graph_.rpoBegin(); block != graph_.rpoEnd(); block++) {
    3565        1647 :         for (MDefinitionIterator iter(*block); iter; iter++) {
    3566        1244 :             if (!iter->isGuardRangeBailouts())
    3567        1242 :                 continue;
    3568             : 
    3569           2 :             iter->setInWorklist();
    3570           2 :             if (!guards.append(*iter))
    3571           0 :                 return false;
    3572             :         }
    3573             :     }
    3574             : 
    3575             :     // Flag all fallible instructions which were indirectly used in the
    3576             :     // computation of the condition, such that we do not ignore
    3577             :     // bailout paths which are used to shrink the input range of the
    3578             :     // operands of the condition.
    3579          10 :     for (size_t i = 0; i < guards.length(); i++) {
    3580           2 :         MDefinition* guard = guards[i];
    3581             : 
    3582             :         // If this instruction is a guard even without guardRangeBailouts,
    3583             :         // there is no reason to try to hoist the guardRangeBailouts check.
    3584           2 :         guard->setNotGuardRangeBailouts();
    3585           2 :         if (!DeadIfUnused(guard)) {
    3586           0 :             guard->setGuardRangeBailouts();
    3587           0 :             continue;
    3588             :         }
    3589           2 :         guard->setGuardRangeBailouts();
    3590             : 
    3591           2 :         if (!guard->isPhi()) {
    3592           2 :             if (!guard->range())
    3593           0 :                 continue;
    3594             : 
    3595             :             // Filter the range of the instruction based on its MIRType.
    3596           2 :             Range typeFilteredRange(guard);
    3597             : 
    3598             :             // If the output range is updated by adding the inner range,
    3599             :             // then the MIRType acts as an effective filter. As we do not know
    3600             :             // whether this filtered Range might change the result of the
    3601             :             // previous comparison, we have to keep this instruction as a guard
    3602             :             // because it has to bail out in order to restrict the Range to its
    3603             :             // MIRType.
    3604           2 :             if (typeFilteredRange.update(guard->range()))
    3605           0 :                 continue;
    3606             :         }
    3607             : 
    3608           2 :         guard->setNotGuardRangeBailouts();
    3609             : 
    3610             :         // Propagate the guard to its operands.
    3611           2 :         for (size_t op = 0, e = guard->numOperands(); op < e; op++) {
    3612           0 :             MDefinition* operand = guard->getOperand(op);
    3613             : 
    3614             :             // Already marked.
    3615           0 :             if (operand->isInWorklist())
    3616           0 :                 continue;
    3617             : 
    3618           0 :             MOZ_ASSERT(!operand->isGuardRangeBailouts());
    3619             : 
    3620           0 :             operand->setInWorklist();
    3621           0 :             operand->setGuardRangeBailouts();
    3622           0 :             if (!guards.append(operand))
    3623           0 :                 return false;
    3624             :         }
    3625             :     }
    3626             : 
    3627          10 :     for (size_t i = 0; i < guards.length(); i++) {
    3628           2 :         MDefinition* guard = guards[i];
    3629           2 :         guard->setNotInWorklist();
    3630             :     }
    3631             : 
    3632           8 :     return true;
    3633             : }

Generated by: LCOV version 1.13