LCOV - code coverage report
Current view: top level - js/src/jit - BacktrackingAllocator.cpp (source / functions)
Test:         output.info
Date:         2017-07-14 16:53:18
Coverage:     Lines:     1296 / 1607  (80.6 %)
              Functions: 78 / 84      (92.9 %)

          Line data    Source code
       1             : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
       2             :  * vim: set ts=8 sts=4 et sw=4 tw=99:
       3             :  * This Source Code Form is subject to the terms of the Mozilla Public
       4             :  * License, v. 2.0. If a copy of the MPL was not distributed with this
       5             :  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
       6             : 
       7             : #include "jit/BacktrackingAllocator.h"
       8             : 
       9             : #include "jsprf.h"
      10             : 
      11             : #include "jit/BitSet.h"
      12             : 
      13             : using namespace js;
      14             : using namespace js::jit;
      15             : 
      16             : using mozilla::DebugOnly;
      17             : 
      18             : /////////////////////////////////////////////////////////////////////
      19             : // Utility
      20             : /////////////////////////////////////////////////////////////////////
      21             : 
      22             : static inline bool
      23       19832 : SortBefore(UsePosition* a, UsePosition* b)
      24             : {
      25       19832 :     return a->pos <= b->pos;
      26             : }
      27             : 
      28             : static inline bool
      29        7161 : SortBefore(LiveRange::BundleLink* a, LiveRange::BundleLink* b)
      30             : {
      31        7161 :     LiveRange* rangea = LiveRange::get(a);
      32        7161 :     LiveRange* rangeb = LiveRange::get(b);
      33        7161 :     MOZ_ASSERT(!rangea->intersects(rangeb));
      34        7161 :     return rangea->from() < rangeb->from();
      35             : }
      36             : 
      37             : static inline bool
      38        3478 : SortBefore(LiveRange::RegisterLink* a, LiveRange::RegisterLink* b)
      39             : {
      40        3478 :     return LiveRange::get(a)->from() <= LiveRange::get(b)->from();
      41             : }
      42             : 
      43             : template <typename T>
      44             : static inline void
      45       26048 : InsertSortedList(InlineForwardList<T> &list, T* value)
      46             : {
      47       26048 :     if (list.empty()) {
      48        5182 :         list.pushFront(value);
      49        5182 :         return;
      50             :     }
      51             : 
      52       20866 :     if (SortBefore(list.back(), value)) {
      53       15097 :         list.pushBack(value);
      54       15097 :         return;
      55             :     }
      56             : 
      57        5769 :     T* prev = nullptr;
      58        9605 :     for (InlineForwardListIterator<T> iter = list.begin(); iter; iter++) {
      59        9605 :         if (SortBefore(value, *iter))
      60        5769 :             break;
      61        3836 :         prev = *iter;
      62             :     }
      63             : 
      64        5769 :     if (prev)
      65         900 :         list.insertAfter(prev, value);
      66             :     else
      67        4869 :         list.pushFront(value);
      68             : }
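
A note on the helper above: InsertSortedList is a single forward walk that keeps each
intrusive list ordered under SortBefore, with O(1) fast paths for an empty list and for
appending at the back; the <= comparators let an element that ties with the back still
take the append fast path. The following is a minimal standalone sketch of the same
insertion pattern, assuming plain ints in a std::forward_list stand in for the intrusive
list types (illustrative only, not part of this file):

    #include <forward_list>

    // Insert |value| into |list|, which is kept sorted ascending. Mirrors the
    // slow path above: remember the last node that sorts before |value|, then
    // splice in after it (inserting after before_begin() is the pushFront case).
    static void InsertSorted(std::forward_list<int>& list, int value)
    {
        auto prev = list.before_begin();
        for (auto it = list.begin(); it != list.end(); ++it) {
            if (value <= *it)   // SortBefore(value, *iter): stop at first successor
                break;
            prev = it;
        }
        list.insert_after(prev, value);
    }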
      69             : 
      70             : /////////////////////////////////////////////////////////////////////
      71             : // LiveRange
      72             : /////////////////////////////////////////////////////////////////////
      73             : 
      74             : void
      75       17932 : LiveRange::addUse(UsePosition* use)
      76             : {
      77       17932 :     MOZ_ASSERT(covers(use->pos));
      78       17932 :     InsertSortedList(uses_, use);
      79       17932 : }
      80             : 
      81             : void
      82        1288 : LiveRange::distributeUses(LiveRange* other)
      83             : {
      84        1288 :     MOZ_ASSERT(other->vreg() == vreg());
      85        1288 :     MOZ_ASSERT(this != other);
      86             : 
       87             :     // Move over all uses that fall within |other|'s boundaries.
      88        9894 :     for (UsePositionIterator iter = usesBegin(); iter; ) {
      89        8606 :         UsePosition* use = *iter;
      90        8606 :         if (other->covers(use->pos)) {
      91        8548 :             uses_.removeAndIncrement(iter);
      92        8548 :             other->addUse(use);
      93             :         } else {
      94          58 :             iter++;
      95             :         }
      96             :     }
      97             : 
      98             :     // Distribute the definition to |other| as well, if possible.
      99        1288 :     if (hasDefinition() && from() == other->from())
     100         195 :         other->setHasDefinition();
     101        1288 : }
     102             : 
     103             : bool
     104         466 : LiveRange::contains(LiveRange* other) const
     105             : {
     106         466 :     return from() <= other->from() && to() >= other->to();
     107             : }
     108             : 
     109             : void
     110        8043 : LiveRange::intersect(LiveRange* other, Range* pre, Range* inside, Range* post) const
     111             : {
     112        8043 :     MOZ_ASSERT(pre->empty() && inside->empty() && post->empty());
     113             : 
     114        8043 :     CodePosition innerFrom = from();
     115        8043 :     if (from() < other->from()) {
     116        4509 :         if (to() < other->from()) {
     117        3625 :             *pre = range_;
     118       10518 :             return;
     119             :         }
     120         884 :         *pre = Range(from(), other->from());
     121         884 :         innerFrom = other->from();
     122             :     }
     123             : 
     124        4418 :     CodePosition innerTo = to();
     125        4418 :     if (to() > other->to()) {
     126        3284 :         if (from() >= other->to()) {
     127        3268 :             *post = range_;
     128        3268 :             return;
     129             :         }
     130          16 :         *post = Range(other->to(), to());
     131          16 :         innerTo = other->to();
     132             :     }
     133             : 
     134        1150 :     if (innerFrom != innerTo)
     135         304 :         *inside = Range(innerFrom, innerTo);
     136             : }
     137             : 
     138             : bool
     139        7161 : LiveRange::intersects(LiveRange* other) const
     140             : {
     141        7161 :     Range pre, inside, post;
     142        7161 :     intersect(other, &pre, &inside, &post);
     143        7161 :     return !inside.empty();
     144             : }
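
intersect() partitions this range against |other| into the pieces that fall before,
inside, and after |other|, and intersects() then only needs to check that the inside
piece is non-empty. For example, [4,12) intersected with [8,20) yields pre = [4,8),
inside = [8,12), and an empty post. A minimal sketch of the same partition, assuming a
hypothetical Range struct with integer endpoints in place of CodePosition:

    #include <algorithm>

    struct Range {
        int from = 0, to = 0;                            // half-open [from, to)
        bool empty() const { return from >= to; }
    };

    // Split |a| against |b| into the parts before, within, and after |b|.
    // The out-parameters are assumed empty on entry, as asserted above.
    static void Intersect(Range a, Range b, Range* pre, Range* inside, Range* post)
    {
        if (a.from < b.from)
            *pre = { a.from, std::min(a.to, b.from) };   // part of |a| before |b|
        if (a.to > b.to)
            *post = { std::max(a.from, b.to), a.to };    // part of |a| after |b|
        int from = std::max(a.from, b.from);
        int to = std::min(a.to, b.to);
        if (from < to)
            *inside = { from, to };                      // the overlap itself
    }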
     145             : 
     146             : /////////////////////////////////////////////////////////////////////
     147             : // SpillSet
     148             : /////////////////////////////////////////////////////////////////////
     149             : 
     150             : void
     151         105 : SpillSet::setAllocation(LAllocation alloc)
     152             : {
     153         299 :     for (size_t i = 0; i < numSpilledBundles(); i++)
     154         194 :         spilledBundle(i)->setAllocation(alloc);
     155         105 : }
     156             : 
     157             : /////////////////////////////////////////////////////////////////////
     158             : // LiveBundle
     159             : /////////////////////////////////////////////////////////////////////
     160             : 
     161             : #ifdef DEBUG
     162             : size_t
     163          78 : LiveBundle::numRanges() const
     164             : {
     165          78 :     size_t count = 0;
     166         382 :     for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++)
     167         304 :         count++;
     168          78 :     return count;
     169             : }
     170             : #endif // DEBUG
     171             : 
     172             : LiveRange*
     173        3042 : LiveBundle::rangeFor(CodePosition pos) const
     174             : {
     175        9407 :     for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
     176        9407 :         LiveRange* range = LiveRange::get(*iter);
     177        9407 :         if (range->covers(pos))
     178        3042 :             return range;
     179             :     }
     180           0 :     return nullptr;
     181             : }
     182             : 
     183             : void
     184        6157 : LiveBundle::addRange(LiveRange* range)
     185             : {
     186        6157 :     MOZ_ASSERT(!range->bundle());
     187        6157 :     range->setBundle(this);
     188        6157 :     InsertSortedList(ranges_, &range->bundleLink);
     189        6157 : }
     190             : 
     191             : bool
     192         621 : LiveBundle::addRange(TempAllocator& alloc, uint32_t vreg, CodePosition from, CodePosition to)
     193             : {
     194         621 :     LiveRange* range = LiveRange::FallibleNew(alloc, vreg, from, to);
     195         621 :     if (!range)
     196           0 :         return false;
     197         621 :     addRange(range);
     198         621 :     return true;
     199             : }
     200             : 
     201             : bool
     202         936 : LiveBundle::addRangeAndDistributeUses(TempAllocator& alloc, LiveRange* oldRange,
     203             :                                       CodePosition from, CodePosition to)
     204             : {
     205         936 :     LiveRange* range = LiveRange::FallibleNew(alloc, oldRange->vreg(), from, to);
     206         936 :     if (!range)
     207           0 :         return false;
     208         936 :     addRange(range);
     209         936 :     oldRange->distributeUses(range);
     210         936 :     return true;
     211             : }
     212             : 
     213             : LiveRange*
     214        2565 : LiveBundle::popFirstRange()
     215             : {
     216        2565 :     LiveRange::BundleLinkIterator iter = rangesBegin();
     217        2565 :     if (!iter)
     218         342 :         return nullptr;
     219             : 
     220        2223 :     LiveRange* range = LiveRange::get(*iter);
     221        2223 :     ranges_.removeAt(iter);
     222             : 
     223        2223 :     range->setBundle(nullptr);
     224        2223 :     return range;
     225             : }
     226             : 
     227             : void
     228           0 : LiveBundle::removeRange(LiveRange* range)
     229             : {
     230           0 :     for (LiveRange::BundleLinkIterator iter = rangesBegin(); iter; iter++) {
     231           0 :         LiveRange* existing = LiveRange::get(*iter);
     232           0 :         if (existing == range) {
     233           0 :             ranges_.removeAt(iter);
     234           0 :             return;
     235             :         }
     236             :     }
     237           0 :     MOZ_CRASH();
     238             : }
     239             : 
     240             : /////////////////////////////////////////////////////////////////////
     241             : // VirtualRegister
     242             : /////////////////////////////////////////////////////////////////////
     243             : 
     244             : bool
     245       15743 : VirtualRegister::addInitialRange(TempAllocator& alloc, CodePosition from, CodePosition to)
     246             : {
     247       15743 :     MOZ_ASSERT(from < to);
     248             : 
     249             :     // Mark [from,to) as a live range for this register during the initial
     250             :     // liveness analysis, coalescing with any existing overlapping ranges.
     251             : 
     252       15743 :     LiveRange* prev = nullptr;
     253       15743 :     LiveRange* merged = nullptr;
     254       42622 :     for (LiveRange::RegisterLinkIterator iter(rangesBegin()); iter; ) {
     255       38642 :         LiveRange* existing = LiveRange::get(*iter);
     256             : 
     257       38642 :         if (from > existing->to()) {
     258             :             // The new range should go after this one.
     259       12786 :             prev = existing;
     260       12786 :             iter++;
     261       12786 :             continue;
     262             :         }
     263             : 
     264       25856 :         if (to.next() < existing->from()) {
     265             :             // The new range should go before this one.
     266       11763 :             break;
     267             :         }
     268             : 
     269       14093 :         if (!merged) {
     270             :             // This is the first old range we've found that overlaps the new
     271             :             // range. Extend this one to cover its union with the new range.
     272       13741 :             merged = existing;
     273             : 
     274       13741 :             if (from < existing->from())
     275        5573 :                 existing->setFrom(from);
     276       13741 :             if (to > existing->to())
     277         399 :                 existing->setTo(to);
     278             : 
     279             :             // Continue searching to see if any other old ranges can be
     280             :             // coalesced with the new merged range.
     281       13741 :             iter++;
     282       13741 :             continue;
     283             :         }
     284             : 
     285             :         // Coalesce this range into the previous range we merged into.
     286         352 :         MOZ_ASSERT(existing->from() >= merged->from());
     287         352 :         if (existing->to() > merged->to())
     288         352 :             merged->setTo(existing->to());
     289             : 
     290         352 :         MOZ_ASSERT(!existing->hasDefinition());
     291         352 :         existing->distributeUses(merged);
     292         352 :         MOZ_ASSERT(!existing->hasUses());
     293             : 
     294         352 :         ranges_.removeAndIncrement(iter);
     295             :     }
     296             : 
     297       15743 :     if (!merged) {
     298             :         // The new range does not overlap any existing range for the vreg.
     299        2002 :         LiveRange* range = LiveRange::FallibleNew(alloc, vreg(), from, to);
     300        2002 :         if (!range)
     301           0 :             return false;
     302             : 
     303        2002 :         if (prev)
     304         112 :             ranges_.insertAfter(&prev->registerLink, &range->registerLink);
     305             :         else
     306        1890 :             ranges_.pushFront(&range->registerLink);
     307             :     }
     308             : 
     309       15743 :     return true;
     310             : }
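
addInitialRange keeps ranges_ sorted by start position and coalesces the new [from,to)
with every range it overlaps or abuts (the to.next() test above treats ranges separated
by a single code position as mergeable). For example, with existing ranges [2,4) and
[6,9), adding [3,7) first widens [2,4) to [2,7) and then absorbs [6,9), leaving [2,9).
A compact sketch of just the coalescing step over a sorted vector, assuming integer
positions and omitting the use/definition migration the real code also performs:

    #include <algorithm>
    #include <vector>

    struct Interval { int from, to; };                   // half-open [from, to)

    // Merge [from, to) into |ranges| (sorted and pairwise disjoint), absorbing
    // intervals that overlap or abut it; the +1 stands in for to.next() above.
    static void AddCoalesced(std::vector<Interval>& ranges, int from, int to)
    {
        std::vector<Interval> out;
        Interval merged{ from, to };
        for (const Interval& r : ranges) {
            if (r.to + 1 < merged.from || merged.to + 1 < r.from) {
                out.push_back(r);                        // disjoint: keep as-is
            } else {
                merged.from = std::min(merged.from, r.from);
                merged.to = std::max(merged.to, r.to);   // absorb into the union
            }
        }
        out.push_back(merged);
        std::sort(out.begin(), out.end(),
                  [](const Interval& a, const Interval& b) { return a.from < b.from; });
        ranges = std::move(out);
    }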
     311             : 
     312             : void
     313        5970 : VirtualRegister::addInitialUse(UsePosition* use)
     314             : {
     315        5970 :     LiveRange::get(*rangesBegin())->addUse(use);
     316        5970 : }
     317             : 
     318             : void
     319         887 : VirtualRegister::setInitialDefinition(CodePosition from)
     320             : {
     321         887 :     LiveRange* first = LiveRange::get(*rangesBegin());
     322         887 :     MOZ_ASSERT(from >= first->from());
     323         887 :     first->setFrom(from);
     324         887 :     first->setHasDefinition();
     325         887 : }
     326             : 
     327             : LiveRange*
     328        1843 : VirtualRegister::rangeFor(CodePosition pos, bool preferRegister /* = false */) const
     329             : {
     330        1843 :     LiveRange* found = nullptr;
     331       13436 :     for (LiveRange::RegisterLinkIterator iter = rangesBegin(); iter; iter++) {
     332       12241 :         LiveRange* range = LiveRange::get(*iter);
     333       12241 :         if (range->covers(pos)) {
     334        1910 :             if (!preferRegister || range->bundle()->allocation().isRegister())
     335         648 :                 return range;
     336        1262 :             if (!found)
     337        1262 :                 found = range;
     338             :         }
     339             :     }
     340        1195 :     return found;
     341             : }
     342             : 
     343             : void
     344        1959 : VirtualRegister::addRange(LiveRange* range)
     345             : {
     346        1959 :     InsertSortedList(ranges_, &range->registerLink);
     347        1959 : }
     348             : 
     349             : void
     350        1514 : VirtualRegister::removeRange(LiveRange* range)
     351             : {
     352        2053 :     for (LiveRange::RegisterLinkIterator iter = rangesBegin(); iter; iter++) {
     353        2053 :         LiveRange* existing = LiveRange::get(*iter);
     354        2053 :         if (existing == range) {
     355        1514 :             ranges_.removeAt(iter);
     356        1514 :             return;
     357             :         }
     358             :     }
     359           0 :     MOZ_CRASH();
     360             : }
     361             : 
     362             : /////////////////////////////////////////////////////////////////////
     363             : // BacktrackingAllocator
     364             : /////////////////////////////////////////////////////////////////////
     365             : 
     366             : // This function pre-allocates and initializes as much global state as possible
     367             : // to avoid littering the algorithms with memory management cruft.
     368             : bool
     369           8 : BacktrackingAllocator::init()
     370             : {
     371           8 :     if (!RegisterAllocator::init())
     372           0 :         return false;
     373             : 
     374           8 :     liveIn = mir->allocate<BitSet>(graph.numBlockIds());
     375           8 :     if (!liveIn)
     376           0 :         return false;
     377             : 
     378           8 :     size_t numVregs = graph.numVirtualRegisters();
     379           8 :     if (!vregs.init(mir->alloc(), numVregs))
     380           0 :         return false;
     381           8 :     memset(&vregs[0], 0, sizeof(VirtualRegister) * numVregs);
     382        1078 :     for (uint32_t i = 0; i < numVregs; i++)
     383        1070 :         new(&vregs[i]) VirtualRegister();
     384             : 
     385             :     // Build virtual register objects.
     386         411 :     for (size_t i = 0; i < graph.numBlocks(); i++) {
     387         403 :         if (mir->shouldCancel("Create data structures (main loop)"))
     388           0 :             return false;
     389             : 
     390         403 :         LBlock* block = graph.getBlock(i);
     391        1762 :         for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
     392        1359 :             if (mir->shouldCancel("Create data structures (inner loop 1)"))
     393           0 :                 return false;
     394             : 
     395        1932 :             for (size_t j = 0; j < ins->numDefs(); j++) {
     396         573 :                 LDefinition* def = ins->getDef(j);
     397         573 :                 if (def->isBogusTemp())
     398           0 :                     continue;
     399         573 :                 vreg(def).init(*ins, def, /* isTemp = */ false);
     400             :             }
     401             : 
     402        1746 :             for (size_t j = 0; j < ins->numTemps(); j++) {
     403         387 :                 LDefinition* def = ins->getTemp(j);
     404         387 :                 if (def->isBogusTemp())
     405          73 :                     continue;
     406         314 :                 vreg(def).init(*ins, def, /* isTemp = */ true);
     407             :             }
     408             :         }
     409         578 :         for (size_t j = 0; j < block->numPhis(); j++) {
     410         175 :             LPhi* phi = block->getPhi(j);
     411         175 :             LDefinition* def = phi->getDef(0);
     412         175 :             vreg(def).init(phi, def, /* isTemp = */ false);
     413             :         }
     414             :     }
     415             : 
     416           8 :     LiveRegisterSet remainingRegisters(allRegisters_.asLiveSet());
     417         232 :     while (!remainingRegisters.emptyGeneral()) {
     418         112 :         AnyRegister reg = AnyRegister(remainingRegisters.takeAnyGeneral());
     419         112 :         registers[reg.code()].allocatable = true;
     420             :     }
     421         728 :     while (!remainingRegisters.emptyFloat()) {
     422         360 :         AnyRegister reg = AnyRegister(remainingRegisters.takeAnyFloat<RegTypeName::Any>());
     423         360 :         registers[reg.code()].allocatable = true;
     424             :     }
     425             : 
     426           8 :     LifoAlloc* lifoAlloc = mir->alloc().lifoAlloc();
     427         520 :     for (size_t i = 0; i < AnyRegister::Total; i++) {
     428         512 :         registers[i].reg = AnyRegister::FromCode(i);
     429         512 :         registers[i].allocations.setAllocator(lifoAlloc);
     430             :     }
     431             : 
     432           8 :     hotcode.setAllocator(lifoAlloc);
     433           8 :     callRanges.setAllocator(lifoAlloc);
     434             : 
      435             :     // Partition the graph into hot and cold sections to help make splitting
      436             :     // decisions. Since we don't have any profiling data this is a crapshoot,
      437             :     // so just mark the bodies of inner loops as hot and everything else as
      438             :     // cold.
     439             : 
     440           8 :     LBlock* backedge = nullptr;
     441         411 :     for (size_t i = 0; i < graph.numBlocks(); i++) {
     442         403 :         LBlock* block = graph.getBlock(i);
     443             : 
     444             :         // If we see a loop header, mark the backedge so we know when we have
     445             :         // hit the end of the loop. Don't process the loop immediately, so that
     446             :         // if there is an inner loop we will ignore the outer backedge.
     447         403 :         if (block->mir()->isLoopHeader())
     448           5 :             backedge = block->mir()->backedge()->lir();
     449             : 
     450         403 :         if (block == backedge) {
     451           5 :             LBlock* header = block->mir()->loopHeaderOfBackedge()->lir();
     452           5 :             LiveRange* range = LiveRange::FallibleNew(alloc(), 0, entryOf(header),
     453          10 :                                                       exitOf(block).next());
     454           5 :             if (!range || !hotcode.insert(range))
     455           0 :                 return false;
     456             :         }
     457             :     }
     458             : 
     459           8 :     return true;
     460             : }
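
The backedge bookkeeping in init() marks only innermost loop bodies as hot: an inner
loop header overwrites |backedge| before the outer backedge is reached, so the outer
loop never matches. A standalone sketch of that heuristic, using a hypothetical Block
type (the real code records the result as a LiveRange in the |hotcode| set):

    #include <cstdio>
    #include <vector>

    struct Block {
        int id;
        bool isLoopHeader;
        int backedgeId;                       // meaningful only for loop headers
    };

    int main()
    {
        // An outer loop over blocks 0..4 containing an inner loop over 1..3.
        std::vector<Block> blocks = {
            { 0, true, 4 }, { 1, true, 3 }, { 2, false, 0 },
            { 3, false, 0 }, { 4, false, 0 },
        };
        int backedge = -1;
        int header = -1;
        for (const Block& b : blocks) {
            if (b.isLoopHeader) {
                backedge = b.backedgeId;      // an inner header overwrites the outer
                header = b.id;
            }
            if (b.id == backedge)
                std::printf("hot: blocks %d..%d\n", header, b.id);  // prints 1..3
        }
        return 0;
    }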
     461             : 
     462             : bool
     463        2205 : BacktrackingAllocator::addInitialFixedRange(AnyRegister reg, CodePosition from, CodePosition to)
     464             : {
     465        2205 :     LiveRange* range = LiveRange::FallibleNew(alloc(), 0, from, to);
     466        2205 :     return range && registers[reg.code()].allocations.insert(range);
     467             : }
     468             : 
     469             : #ifdef DEBUG
     470             : // Returns true iff ins has a def/temp reusing the input allocation.
     471             : static bool
     472         168 : IsInputReused(LInstruction* ins, LUse* use)
     473             : {
     474         327 :     for (size_t i = 0; i < ins->numDefs(); i++) {
     475         159 :         if (ins->getDef(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
     476           0 :             ins->getOperand(ins->getDef(i)->getReusedInput())->toUse() == use)
     477             :         {
     478           0 :             return true;
     479             :         }
     480             :     }
     481             : 
     482         168 :     for (size_t i = 0; i < ins->numTemps(); i++) {
     483           0 :         if (ins->getTemp(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
     484           0 :             ins->getOperand(ins->getTemp(i)->getReusedInput())->toUse() == use)
     485             :         {
     486           0 :             return true;
     487             :         }
     488             :     }
     489             : 
     490         168 :     return false;
     491             : }
     492             : #endif
     493             : 
     494             : /*
     495             :  * This function builds up liveness ranges for all virtual registers
     496             :  * defined in the function.
     497             :  *
     498             :  * The algorithm is based on the one published in:
     499             :  *
     500             :  * Wimmer, Christian, and Michael Franz. "Linear Scan Register Allocation on
     501             :  *     SSA Form." Proceedings of the International Symposium on Code Generation
     502             :  *     and Optimization. Toronto, Ontario, Canada, ACM. 2010. 170-79. PDF.
     503             :  *
     504             :  * The algorithm operates on blocks ordered such that dominators of a block
     505             :  * are before the block itself, and such that all blocks of a loop are
     506             :  * contiguous. It proceeds backwards over the instructions in this order,
     507             :  * marking registers live at their uses, ending their live ranges at
     508             :  * definitions, and recording which registers are live at the top of every
     509             :  * block. To deal with loop backedges, registers live at the beginning of
     510             :  * a loop gain a range covering the entire loop.
     511             :  */
     512             : bool
     513           8 : BacktrackingAllocator::buildLivenessInfo()
     514             : {
     515           8 :     JitSpew(JitSpew_RegAlloc, "Beginning liveness analysis");
     516             : 
     517          16 :     Vector<MBasicBlock*, 1, SystemAllocPolicy> loopWorkList;
     518           8 :     BitSet loopDone(graph.numBlockIds());
     519           8 :     if (!loopDone.init(alloc()))
     520           0 :         return false;
     521             : 
     522         411 :     for (size_t i = graph.numBlocks(); i > 0; i--) {
     523         403 :         if (mir->shouldCancel("Build Liveness Info (main loop)"))
     524           0 :             return false;
     525             : 
     526         403 :         LBlock* block = graph.getBlock(i - 1);
     527         403 :         MBasicBlock* mblock = block->mir();
     528             : 
     529         403 :         BitSet& live = liveIn[mblock->id()];
     530         403 :         new (&live) BitSet(graph.numVirtualRegisters());
     531         403 :         if (!live.init(alloc()))
     532           0 :             return false;
     533             : 
     534             :         // Propagate liveIn from our successors to us.
     535         882 :         for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
     536         479 :             MBasicBlock* successor = mblock->lastIns()->getSuccessor(i);
     537             :             // Skip backedges, as we fix them up at the loop header.
     538         479 :             if (mblock->id() < successor->id())
     539         474 :                 live.insertAll(liveIn[successor->id()]);
     540             :         }
     541             : 
     542             :         // Add successor phis.
     543         403 :         if (mblock->successorWithPhis()) {
     544         134 :             LBlock* phiSuccessor = mblock->successorWithPhis()->lir();
     545         564 :             for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
     546         430 :                 LPhi* phi = phiSuccessor->getPhi(j);
     547         430 :                 LAllocation* use = phi->getOperand(mblock->positionInPhiSuccessor());
     548         430 :                 uint32_t reg = use->toUse()->virtualRegister();
     549         430 :                 live.insert(reg);
     550         430 :                 vreg(use).setUsedByPhi();
     551             :             }
     552             :         }
     553             : 
      554             :         // Registers are assumed alive for the entire block; a definition
      555             :         // shortens the range to the point of definition.
     556        6148 :         for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
     557        5745 :             if (!vregs[*liveRegId].addInitialRange(alloc(), entryOf(block), exitOf(block).next()))
     558           0 :                 return false;
     559             :         }
     560             : 
     561             :         // Shorten the front end of ranges for live variables to their point of
     562             :         // definition, if found.
     563        1762 :         for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
     564             :             // Calls may clobber registers, so force a spill and reload around the callsite.
     565        1359 :             if (ins->isCall()) {
     566        2280 :                 for (AnyRegisterIterator iter(allRegisters_.asLiveSet()); iter.more(); ++iter) {
     567        2242 :                     bool found = false;
     568        4388 :                     for (size_t i = 0; i < ins->numDefs(); i++) {
     569        8732 :                         if (ins->getDef(i)->isFixed() &&
     570        8732 :                             ins->getDef(i)->output()->aliases(LAllocation(*iter))) {
     571          37 :                             found = true;
     572          37 :                             break;
     573             :                         }
     574             :                     }
     575             :                     // If this register doesn't have an explicit def above, mark
     576             :                     // it as clobbered by the call unless it is actually
     577             :                     // call-preserved.
     578        2242 :                     if (!found && !ins->isCallPreserved(*iter)) {
     579        2205 :                         if (!addInitialFixedRange(*iter, outputOf(*ins), outputOf(*ins).next()))
     580           0 :                             return false;
     581             :                     }
     582             :                 }
     583             : 
     584             :                 CallRange* callRange =
     585          38 :                     new(alloc().fallible()) CallRange(outputOf(*ins), outputOf(*ins).next());
     586          38 :                 if (!callRange)
     587           0 :                     return false;
     588             : 
     589          38 :                 callRangesList.pushFront(callRange);
     590          38 :                 if (!callRanges.insert(callRange))
     591           0 :                     return false;
     592             :             }
     593             : 
     594        1932 :             for (size_t i = 0; i < ins->numDefs(); i++) {
     595         573 :                 LDefinition* def = ins->getDef(i);
     596         573 :                 if (def->isBogusTemp())
     597           0 :                     continue;
     598             : 
     599         573 :                 CodePosition from = outputOf(*ins);
     600             : 
     601         573 :                 if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
     602             :                     // MUST_REUSE_INPUT is implemented by allocating an output
     603             :                     // register and moving the input to it. Register hints are
     604             :                     // used to avoid unnecessary moves. We give the input an
     605             :                     // LUse::ANY policy to avoid allocating a register for the
     606             :                     // input.
     607          13 :                     LUse* inputUse = ins->getOperand(def->getReusedInput())->toUse();
     608          13 :                     MOZ_ASSERT(inputUse->policy() == LUse::REGISTER);
     609          13 :                     MOZ_ASSERT(inputUse->usedAtStart());
     610          13 :                     *inputUse = LUse(inputUse->virtualRegister(), LUse::ANY, /* usedAtStart = */ true);
     611             :                 }
     612             : 
     613         573 :                 if (!vreg(def).addInitialRange(alloc(), from, from.next()))
     614           0 :                     return false;
     615         573 :                 vreg(def).setInitialDefinition(from);
     616         573 :                 live.remove(def->virtualRegister());
     617             :             }
     618             : 
     619        1746 :             for (size_t i = 0; i < ins->numTemps(); i++) {
     620         387 :                 LDefinition* temp = ins->getTemp(i);
     621         387 :                 if (temp->isBogusTemp())
     622          73 :                     continue;
     623             : 
      624             :                 // Normally temps are considered to cover both the input
      625             :                 // and output of the associated instruction. In some cases,
      626             :                 // though, we want to use a fixed register as both an input
      627             :                 // and a clobbered register in the instruction, so watch for
      628             :                 // this and shorten the temp to cover only the output.
     629         314 :                 CodePosition from = inputOf(*ins);
     630         314 :                 if (temp->policy() == LDefinition::FIXED) {
     631         126 :                     AnyRegister reg = temp->output()->toRegister();
     632         276 :                     for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) {
     633         150 :                         if (alloc->isUse()) {
     634         150 :                             LUse* use = alloc->toUse();
     635         150 :                             if (use->isFixedRegister()) {
     636         150 :                                 if (GetFixedRegister(vreg(use).def(), use) == reg)
     637          24 :                                     from = outputOf(*ins);
     638             :                             }
     639             :                         }
     640             :                     }
     641             :                 }
     642             : 
     643         314 :                 CodePosition to = ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();
     644             : 
     645         314 :                 if (!vreg(temp).addInitialRange(alloc(), from, to))
     646           0 :                     return false;
     647         314 :                 vreg(temp).setInitialDefinition(from);
     648             :             }
     649             : 
     650        2718 :             DebugOnly<bool> hasUseRegister = false;
     651        2718 :             DebugOnly<bool> hasUseRegisterAtStart = false;
     652             : 
     653        9259 :             for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more(); inputAlloc.next()) {
     654        7900 :                 if (inputAlloc->isUse()) {
     655        5971 :                     LUse* use = inputAlloc->toUse();
     656             : 
     657             :                     // Call uses should always be at-start, since calls use all
     658             :                     // registers.
     659        5971 :                     MOZ_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(),
     660             :                                   use->usedAtStart());
     661             : 
     662             : #ifdef DEBUG
      663             :                     // Don't allow at-start call uses if there are temps of the same kind,
      664             :                     // lest we assign the same register to both; the exception is when both
      665             :                     // the use and the temp are fixed registers, as those can't alias.
     666        5971 :                     if (ins->isCall() && use->usedAtStart()) {
     667          63 :                         for (size_t i = 0; i < ins->numTemps(); i++) {
     668          30 :                             MOZ_ASSERT_IF(!ins->getTemp(i)->isBogusTemp(),
     669             :                                           vreg(ins->getTemp(i)).type() != vreg(use).type() ||
     670             :                                           (use->isFixedRegister() && ins->getTemp(i)->isFixed()));
     671             :                         }
     672             :                     }
     673             : 
     674             :                     // If there are both useRegisterAtStart(x) and useRegister(y)
     675             :                     // uses, we may assign the same register to both operands
     676             :                     // (bug 772830). Don't allow this for now.
     677        5971 :                     if (use->policy() == LUse::REGISTER) {
     678         656 :                         if (use->usedAtStart()) {
     679         168 :                             if (!IsInputReused(*ins, use))
     680         168 :                                 hasUseRegisterAtStart = true;
     681             :                         } else {
     682         488 :                             hasUseRegister = true;
     683             :                         }
     684             :                     }
     685        5971 :                     MOZ_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
     686             : #endif
     687             : 
     688             :                     // Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
     689        5971 :                     if (use->policy() == LUse::RECOVERED_INPUT)
     690           1 :                         continue;
     691             : 
     692        5970 :                     CodePosition to = use->usedAtStart() ? inputOf(*ins) : outputOf(*ins);
     693        5970 :                     if (use->isFixedRegister()) {
     694          60 :                         LAllocation reg(AnyRegister::FromCode(use->registerCode()));
     695         103 :                         for (size_t i = 0; i < ins->numDefs(); i++) {
     696          43 :                             LDefinition* def = ins->getDef(i);
     697          43 :                             if (def->policy() == LDefinition::FIXED && *def->output() == reg)
     698           1 :                                 to = inputOf(*ins);
     699             :                         }
     700             :                     }
     701             : 
     702        5970 :                     if (!vreg(use).addInitialRange(alloc(), entryOf(block), to.next()))
     703           0 :                         return false;
     704        5970 :                     UsePosition* usePosition = new(alloc().fallible()) UsePosition(use, to);
     705        5970 :                     if (!usePosition)
     706           0 :                         return false;
     707        5970 :                     vreg(use).addInitialUse(usePosition);
     708        5970 :                     live.insert(use->virtualRegister());
     709             :                 }
     710             :             }
     711             :         }
     712             : 
     713             :         // Phis have simultaneous assignment semantics at block begin, so at
     714             :         // the beginning of the block we can be sure that liveIn does not
     715             :         // contain any phi outputs.
     716         578 :         for (unsigned int i = 0; i < block->numPhis(); i++) {
     717         175 :             LDefinition* def = block->getPhi(i)->getDef(0);
     718         175 :             if (live.contains(def->virtualRegister())) {
     719         175 :                 live.remove(def->virtualRegister());
     720             :             } else {
     721             :                 // This is a dead phi, so add a dummy range over all phis. This
     722             :                 // can go away if we have an earlier dead code elimination pass.
     723           0 :                 CodePosition entryPos = entryOf(block);
     724           0 :                 if (!vreg(def).addInitialRange(alloc(), entryPos, entryPos.next()))
     725           0 :                     return false;
     726             :             }
     727             :         }
     728             : 
     729         403 :         if (mblock->isLoopHeader()) {
     730             :             // A divergence from the published algorithm is required here, as
     731             :             // our block order does not guarantee that blocks of a loop are
     732             :             // contiguous. As a result, a single live range spanning the
     733             :             // loop is not possible. Additionally, we require liveIn in a later
     734             :             // pass for resolution, so that must also be fixed up here.
     735           5 :             MBasicBlock* loopBlock = mblock->backedge();
     736             :             while (true) {
     737             :                 // Blocks must already have been visited to have a liveIn set.
     738         293 :                 MOZ_ASSERT(loopBlock->id() >= mblock->id());
     739             : 
      740             :                 // Add a range for this entire loop block.
     741         293 :                 CodePosition from = entryOf(loopBlock->lir());
     742         293 :                 CodePosition to = exitOf(loopBlock->lir()).next();
     743             : 
     744        3434 :                 for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
     745        3141 :                     if (!vregs[*liveRegId].addInitialRange(alloc(), from, to))
     746           0 :                         return false;
     747             :                 }
     748             : 
     749             :                 // Fix up the liveIn set.
     750         293 :                 liveIn[loopBlock->id()].insertAll(live);
     751             : 
      752             :                 // Make sure we don't visit this node again.
     753         293 :                 loopDone.insert(loopBlock->id());
     754             : 
     755             :                 // If this is the loop header, any predecessors are either the
     756             :                 // backedge or out of the loop, so skip any predecessors of
      757             :                 // this block.
     758         293 :                 if (loopBlock != mblock) {
     759         642 :                     for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
     760         354 :                         MBasicBlock* pred = loopBlock->getPredecessor(i);
     761         354 :                         if (loopDone.contains(pred->id()))
     762          66 :                             continue;
     763         288 :                         if (!loopWorkList.append(pred))
     764           0 :                             return false;
     765             :                     }
     766             :                 }
     767             : 
     768             :                 // Terminate loop if out of work.
     769         293 :                 if (loopWorkList.empty())
     770          10 :                     break;
     771             : 
     772             :                 // Grab the next block off the work list, skipping any OSR block.
     773         288 :                 MBasicBlock* osrBlock = graph.mir().osrBlock();
     774         288 :                 while (!loopWorkList.empty()) {
     775         288 :                     loopBlock = loopWorkList.popCopy();
     776         288 :                     if (loopBlock != osrBlock)
     777         288 :                         break;
     778             :                 }
     779             : 
      780             :                 // If we ran out of work items without finding a non-OSR block, we are done.
     781         288 :                 if (loopBlock == osrBlock) {
     782           0 :                     MOZ_ASSERT(loopWorkList.empty());
     783           0 :                     break;
     784             :                 }
     785         288 :             }
     786             : 
      787             :             // Clear the done set for other loops.
     788           5 :             loopDone.clear();
     789             :         }
     790             : 
     791         403 :         MOZ_ASSERT_IF(!mblock->numPredecessors(), live.empty());
     792             :     }
     793             : 
     794           8 :     JitSpew(JitSpew_RegAlloc, "Liveness analysis complete");
     795             : 
     796           8 :     if (JitSpewEnabled(JitSpew_RegAlloc))
     797           0 :         dumpInstructions();
     798             : 
     799           8 :     return true;
     800             : }
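
Stripped of loops, phis, calls, and fixed registers, the walk above reduces to the
classic backward dataflow core: a block's live-out is the union of its successors'
live-ins, definitions kill, and uses gen. A much-simplified sketch over a toy IR with
hypothetical Ins/Block types:

    #include <set>
    #include <vector>

    struct Ins { std::vector<int> defs, uses; };              // vreg ids
    struct Block { std::vector<Ins> code; std::vector<int> succs; };

    // Returns liveIn[b], the set of vregs live at the top of block |b|, assuming
    // blocks are ordered so successors follow predecessors (no backedges).
    static std::vector<std::set<int>> ComputeLiveIn(const std::vector<Block>& blocks)
    {
        std::vector<std::set<int>> liveIn(blocks.size());
        for (size_t i = blocks.size(); i > 0; i--) {
            const Block& b = blocks[i - 1];
            std::set<int> live;
            for (int s : b.succs)                             // union of successor live-ins
                live.insert(liveIn[s].begin(), liveIn[s].end());
            for (auto it = b.code.rbegin(); it != b.code.rend(); ++it) {
                for (int d : it->defs)                        // a definition ends liveness
                    live.erase(d);
                for (int u : it->uses)                        // a use makes the vreg live
                    live.insert(u);
            }
            liveIn[i - 1] = std::move(live);
        }
        return liveIn;
    }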
     801             : 
     802             : bool
     803           8 : BacktrackingAllocator::go()
     804             : {
     805           8 :     JitSpew(JitSpew_RegAlloc, "Beginning register allocation");
     806             : 
     807           8 :     if (!init())
     808           0 :         return false;
     809             : 
     810           8 :     if (!buildLivenessInfo())
     811           0 :         return false;
     812             : 
     813           8 :     if (!allocationQueue.reserve(graph.numVirtualRegisters() * 3 / 2))
     814           0 :         return false;
     815             : 
     816           8 :     JitSpew(JitSpew_RegAlloc, "Beginning grouping and queueing registers");
     817           8 :     if (!mergeAndQueueRegisters())
     818           0 :         return false;
     819             : 
     820           8 :     if (JitSpewEnabled(JitSpew_RegAlloc))
     821           0 :         dumpVregs();
     822             : 
     823           8 :     JitSpew(JitSpew_RegAlloc, "Beginning main allocation loop");
     824             : 
     825             :     // Allocate, spill and split bundles until finished.
     826        2884 :     while (!allocationQueue.empty()) {
     827        1438 :         if (mir->shouldCancel("Backtracking Allocation"))
     828           0 :             return false;
     829             : 
     830        1438 :         QueueItem item = allocationQueue.removeHighest();
     831        1438 :         if (!processBundle(mir, item.bundle))
     832           0 :             return false;
     833             :     }
     834             : 
     835           8 :     JitSpew(JitSpew_RegAlloc, "Main allocation loop complete");
     836             : 
     837           8 :     if (!tryAllocatingRegistersForSpillBundles())
     838           0 :         return false;
     839             : 
     840           8 :     if (!pickStackSlots())
     841           0 :         return false;
     842             : 
     843           8 :     if (JitSpewEnabled(JitSpew_RegAlloc))
     844           0 :         dumpAllocations();
     845             : 
     846           8 :     if (!resolveControlFlow())
     847           0 :         return false;
     848             : 
     849           8 :     if (!reifyAllocations())
     850           0 :         return false;
     851             : 
     852           8 :     if (!populateSafepoints())
     853           0 :         return false;
     854             : 
     855           8 :     if (!annotateMoveGroups())
     856           0 :         return false;
     857             : 
     858           8 :     return true;
     859             : }
     860             : 
     861             : static bool
     862        1372 : IsArgumentSlotDefinition(LDefinition* def)
     863             : {
     864        1372 :     return def->policy() == LDefinition::FIXED && def->output()->isArgument();
     865             : }
     866             : 
     867             : static bool
     868         691 : IsThisSlotDefinition(LDefinition* def)
     869             : {
     870         713 :     return IsArgumentSlotDefinition(def) &&
     871         713 :         def->output()->toArgument()->index() < THIS_FRAME_ARGSLOT + sizeof(Value);
     872             : }
     873             : 
     874             : bool
     875         451 : BacktrackingAllocator::tryMergeBundles(LiveBundle* bundle0, LiveBundle* bundle1)
     876             : {
      877             :     // See if bundle0 and bundle1 can be merged.
     878         451 :     if (bundle0 == bundle1)
     879         104 :         return true;
     880             : 
     881             :     // Get a representative virtual register from each bundle.
     882         347 :     VirtualRegister& reg0 = vregs[bundle0->firstRange()->vreg()];
     883         347 :     VirtualRegister& reg1 = vregs[bundle1->firstRange()->vreg()];
     884             : 
     885         347 :     if (!reg0.isCompatible(reg1))
     886           0 :         return true;
     887             : 
     888             :     // Registers which might spill to the frame's |this| slot can only be
     889             :     // grouped with other such registers. The frame's |this| slot must always
     890             :     // hold the |this| value, as required by JitFrame tracing and by the Ion
     891             :     // constructor calling convention.
     892         347 :     if (IsThisSlotDefinition(reg0.def()) || IsThisSlotDefinition(reg1.def())) {
     893           3 :         if (*reg0.def()->output() != *reg1.def()->output())
     894           0 :             return true;
     895             :     }
     896             : 
     897             :     // Registers which might spill to the frame's argument slots can only be
     898             :     // grouped with other such registers if the frame might access those
     899             :     // arguments through a lazy arguments object or rest parameter.
     900         347 :     if (IsArgumentSlotDefinition(reg0.def()) || IsArgumentSlotDefinition(reg1.def())) {
     901          13 :         if (graph.mir().entryBlock()->info().mayReadFrameArgsDirectly()) {
     902           3 :             if (*reg0.def()->output() != *reg1.def()->output())
     903           1 :                 return true;
     904             :         }
     905             :     }
     906             : 
     907             :     // Limit the number of times we compare ranges if there are many ranges in
     908             :     // one of the bundles, to avoid quadratic behavior.
     909             :     static const size_t MAX_RANGES = 200;
     910             : 
     911             :     // Make sure that ranges in the bundles do not overlap.
     912         346 :     LiveRange::BundleLinkIterator iter0 = bundle0->rangesBegin(), iter1 = bundle1->rangesBegin();
     913         346 :     size_t count = 0;
     914        3042 :     while (iter0 && iter1) {
     915        1352 :         if (++count >= MAX_RANGES)
     916           0 :             return true;
     917             : 
     918        1352 :         LiveRange* range0 = LiveRange::get(*iter0);
     919        1352 :         LiveRange* range1 = LiveRange::get(*iter1);
     920             : 
     921        1352 :         if (range0->from() >= range1->to())
     922         622 :             iter1++;
     923         730 :         else if (range1->from() >= range0->to())
     924         726 :             iter0++;
     925             :         else
     926           4 :             return true;
     927             :     }
     928             : 
     929             :     // Move all ranges from bundle1 into bundle0.
     930        2565 :     while (LiveRange* range = bundle1->popFirstRange())
     931        2223 :         bundle0->addRange(range);
     932             : 
     933         342 :     return true;
     934             : }
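
The overlap check in tryMergeBundles is a standard two-pointer walk over two sorted,
internally disjoint range lists: whichever current range lies wholly before the other
can be skipped, and anything left must overlap. A minimal sketch with a hypothetical
integer-endpoint range type:

    #include <vector>

    struct R { int from, to; };                // half-open [from, to)

    // True iff no range in |a| overlaps a range in |b|; both lists must be
    // sorted by |from| and contain no internal overlaps.
    static bool RangesDisjoint(const std::vector<R>& a, const std::vector<R>& b)
    {
        size_t i = 0, j = 0;
        while (i < a.size() && j < b.size()) {
            if (a[i].from >= b[j].to)
                j++;                           // b[j] lies wholly before a[i]
            else if (b[j].from >= a[i].to)
                i++;                           // a[i] lies wholly before b[j]
            else
                return false;                  // overlap: bundles cannot merge
        }
        return true;
    }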
     935             : 
     936             : static inline LDefinition*
     937        6011 : FindReusingDefOrTemp(LNode* ins, LAllocation* alloc)
     938             : {
     939        7828 :     for (size_t i = 0; i < ins->numDefs(); i++) {
     940        1838 :         LDefinition* def = ins->getDef(i);
     941        1867 :         if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
     942          29 :             ins->getOperand(def->getReusedInput()) == alloc)
     943          21 :             return def;
     944             :     }
     945        7798 :     for (size_t i = 0; i < ins->numTemps(); i++) {
     946        1808 :         LDefinition* def = ins->getTemp(i);
     947        1808 :         if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
     948           0 :             ins->getOperand(def->getReusedInput()) == alloc)
     949           0 :             return def;
     950             :     }
     951        5990 :     return nullptr;
     952             : }
     953             : 
     954             : static inline size_t
     955           4 : NumReusingDefs(LNode* ins)
     956             : {
     957           4 :     size_t num = 0;
     958           8 :     for (size_t i = 0; i < ins->numDefs(); i++) {
     959           4 :         LDefinition* def = ins->getDef(i);
     960           4 :         if (def->policy() == LDefinition::MUST_REUSE_INPUT)
     961           4 :             num++;
     962             :     }
     963           4 :     return num;
     964             : }
     965             : 
     966             : bool
     967          13 : BacktrackingAllocator::tryMergeReusedRegister(VirtualRegister& def, VirtualRegister& input)
     968             : {
     969             :     // def is a vreg which reuses input for its output physical register. Try
     970             :     // to merge ranges for def with those of input if possible, as avoiding
     971             :     // copies before def's instruction is crucial for generated code quality
     972             :     // (MUST_REUSE_INPUT is used for all arithmetic on x86/x64).
     973             : 
     974          13 :     if (def.rangeFor(inputOf(def.ins()))) {
     975           0 :         MOZ_ASSERT(def.isTemp());
     976           0 :         def.setMustCopyInput();
     977           0 :         return true;
     978             :     }
     979             : 
     980          13 :     LiveRange* inputRange = input.rangeFor(outputOf(def.ins()));
     981          13 :     if (!inputRange) {
     982             :         // The input is not live after the instruction, either in a safepoint
     983             :         // for the instruction or in subsequent code. The input and output
     984             :         // can thus be in the same group.
     985           9 :         return tryMergeBundles(def.firstBundle(), input.firstBundle());
     986             :     }
     987             : 
     988             :     // The input is live afterwards, either in future instructions or in a
     989             :     // safepoint for the reusing instruction. This is impossible to satisfy
     990             :     // without copying the input.
     991             :     //
     992             :     // It may or may not be better to split the input into two bundles at the
     993             :     // point of the definition, which may permit merging. One case where it is
     994             :     // definitely better to split is if the input never has any register uses
     995             :     // after the instruction. Handle this splitting eagerly.
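                       :     //
                       :     // Sketch of the eager split performed below (positions illustrative):
                       :     //
                       :     //   inputRange:  [from --------------------------- to]
                       :     //   preRange:    [from ---- outputOf(def)]
                       :     //   postRange:          [inputOf(def) ------------ to]
                       :     //
                       :     // preRange keeps the input's bundle; postRange goes to a new spill
                       :     // bundle. The two overlap by one position at def, which is exactly
                       :     // where the copy of the input is inserted.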
     996             : 
     997           4 :     LBlock* block = def.ins()->block();
     998             : 
     999             :     // The input's lifetime must end within the same block as the definition,
    1000             :     // otherwise it could live on in phis elsewhere.
    1001           4 :     if (inputRange != input.lastRange() || inputRange->to() > exitOf(block)) {
    1002           3 :         def.setMustCopyInput();
    1003           3 :         return true;
    1004             :     }
    1005             : 
    1006             :     // If we already split the input for some other register, don't make a
    1007             :     // third bundle.
    1008           1 :     if (inputRange->bundle() != input.firstRange()->bundle()) {
    1009           0 :         def.setMustCopyInput();
    1010           0 :         return true;
    1011             :     }
    1012             : 
    1013             :     // If the input will start out in memory then adding a separate bundle for
    1014             :     // memory uses after the def won't help.
    1015           1 :     if (input.def()->isFixed() && !input.def()->output()->isRegister()) {
    1016           0 :         def.setMustCopyInput();
    1017           0 :         return true;
    1018             :     }
    1019             : 
    1020             :     // The input cannot have register or reused uses after the definition.
    1021           8 :     for (UsePositionIterator iter = inputRange->usesBegin(); iter; iter++) {
    1022           8 :         if (iter->pos <= inputOf(def.ins()))
    1023           7 :             continue;
    1024             : 
    1025           1 :         LUse* use = iter->use();
    1026           1 :         if (FindReusingDefOrTemp(insData[iter->pos], use)) {
    1027           0 :             def.setMustCopyInput();
    1028           1 :             return true;
    1029             :         }
    1030           1 :         if (iter->usePolicy() != LUse::ANY && iter->usePolicy() != LUse::KEEPALIVE) {
    1031           1 :             def.setMustCopyInput();
    1032           1 :             return true;
    1033             :         }
    1034             :     }
    1035             : 
    1036           0 :     LiveRange* preRange = LiveRange::FallibleNew(alloc(), input.vreg(),
    1037           0 :                                                  inputRange->from(), outputOf(def.ins()));
    1038           0 :     if (!preRange)
    1039           0 :         return false;
    1040             : 
     1041             :     // The new range starts at def's input position, which means it overlaps
    1042             :     // with the old range at one position. This is what we want, because we
    1043             :     // need to copy the input before the instruction.
    1044           0 :     LiveRange* postRange = LiveRange::FallibleNew(alloc(), input.vreg(),
    1045           0 :                                                   inputOf(def.ins()), inputRange->to());
    1046           0 :     if (!postRange)
    1047           0 :         return false;
    1048             : 
    1049           0 :     inputRange->distributeUses(preRange);
    1050           0 :     inputRange->distributeUses(postRange);
    1051           0 :     MOZ_ASSERT(!inputRange->hasUses());
    1052             : 
    1053           0 :     JitSpew(JitSpew_RegAlloc, "  splitting reused input at %u to try to help grouping",
    1054           0 :             inputOf(def.ins()).bits());
    1055             : 
    1056           0 :     LiveBundle* firstBundle = inputRange->bundle();
    1057           0 :     input.removeRange(inputRange);
    1058           0 :     input.addRange(preRange);
    1059           0 :     input.addRange(postRange);
    1060             : 
    1061           0 :     firstBundle->removeRange(inputRange);
    1062           0 :     firstBundle->addRange(preRange);
    1063             : 
    1064             :     // The new range goes in a separate bundle, where it will be spilled during
    1065             :     // allocation.
    1066           0 :     LiveBundle* secondBundle = LiveBundle::FallibleNew(alloc(), nullptr, nullptr);
    1067           0 :     if (!secondBundle)
    1068           0 :         return false;
    1069           0 :     secondBundle->addRange(postRange);
    1070             : 
    1071           0 :     return tryMergeBundles(def.firstBundle(), input.firstBundle());
    1072             : }
    1073             : 
    1074             : bool
    1075           8 : BacktrackingAllocator::mergeAndQueueRegisters()
    1076             : {
    1077           8 :     MOZ_ASSERT(!vregs[0u].hasRanges());
    1078             : 
    1079             :     // Create a bundle for each register containing all its ranges.
    1080        1070 :     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
    1081        1062 :         VirtualRegister& reg = vregs[i];
    1082        1062 :         if (!reg.hasRanges())
    1083           0 :             continue;
    1084             : 
    1085        1062 :         LiveBundle* bundle = LiveBundle::FallibleNew(alloc(), nullptr, nullptr);
    1086        1062 :         if (!bundle)
    1087           0 :             return false;
    1088        2712 :         for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
    1089        1650 :             LiveRange* range = LiveRange::get(*iter);
    1090        1650 :             bundle->addRange(range);
    1091             :         }
    1092             :     }
    1093             : 
    1094             :     // If there is an OSR block, merge parameters in that block with the
    1095             :     // corresponding parameters in the initial block.
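                       :     // Sharing a bundle encourages both entry paths to assign each
                       :     // parameter the same location, avoiding fixup moves at the OSR
                       :     // entry (rationale inferred here; tryMergeBundles may still
                       :     // decline the merge).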
    1096           8 :     if (MBasicBlock* osr = graph.mir().osrBlock()) {
    1097           3 :         size_t original = 1;
    1098         128 :         for (LInstructionIterator iter = osr->lir()->begin(); iter != osr->lir()->end(); iter++) {
    1099         125 :             if (iter->isParameter()) {
    1100          24 :                 for (size_t i = 0; i < iter->numDefs(); i++) {
    1101          24 :                     DebugOnly<bool> found = false;
    1102          12 :                     VirtualRegister &paramVreg = vreg(iter->getDef(i));
    1103          30 :                     for (; original < paramVreg.vreg(); original++) {
    1104          21 :                         VirtualRegister &originalVreg = vregs[original];
    1105          21 :                         if (*originalVreg.def()->output() == *iter->getDef(i)->output()) {
    1106          12 :                             MOZ_ASSERT(originalVreg.ins()->isParameter());
    1107          12 :                             if (!tryMergeBundles(originalVreg.firstBundle(), paramVreg.firstBundle()))
    1108           0 :                                 return false;
    1109          12 :                             found = true;
    1110          12 :                             break;
    1111             :                         }
    1112             :                     }
    1113          12 :                     MOZ_ASSERT(found);
    1114             :                 }
    1115             :             }
    1116             :         }
    1117             :     }
    1118             : 
    1119             :     // Try to merge registers with their reused inputs.
    1120        1070 :     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
    1121        1062 :         VirtualRegister& reg = vregs[i];
    1122        1062 :         if (!reg.hasRanges())
    1123           0 :             continue;
    1124             : 
    1125        1062 :         if (reg.def()->policy() == LDefinition::MUST_REUSE_INPUT) {
    1126          13 :             LUse* use = reg.ins()->getOperand(reg.def()->getReusedInput())->toUse();
    1127          13 :             if (!tryMergeReusedRegister(reg, vreg(use)))
    1128           0 :                 return false;
    1129             :         }
    1130             :     }
    1131             : 
    1132             :     // Try to merge phis with their inputs.
    1133         411 :     for (size_t i = 0; i < graph.numBlocks(); i++) {
    1134         403 :         LBlock* block = graph.getBlock(i);
    1135         578 :         for (size_t j = 0; j < block->numPhis(); j++) {
    1136         175 :             LPhi* phi = block->getPhi(j);
    1137         175 :             VirtualRegister &outputVreg = vreg(phi->getDef(0));
    1138         605 :             for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
    1139         430 :                 VirtualRegister& inputVreg = vreg(phi->getOperand(k)->toUse());
    1140         430 :                 if (!tryMergeBundles(inputVreg.firstBundle(), outputVreg.firstBundle()))
    1141           0 :                     return false;
    1142             :             }
    1143             :         }
    1144             :     }
    1145             : 
    1146             :     // Add all bundles to the allocation queue, and create spill sets for them.
    1147        1070 :     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
    1148        1062 :         VirtualRegister& reg = vregs[i];
    1149        2712 :         for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
    1150        1650 :             LiveRange* range = LiveRange::get(*iter);
    1151        1650 :             LiveBundle* bundle = range->bundle();
    1152        1650 :             if (range == bundle->firstRange()) {
    1153         720 :                 if (!alloc().ensureBallast())
    1154           0 :                     return false;
    1155             : 
    1156         720 :                 SpillSet* spill = SpillSet::New(alloc());
    1157         720 :                 if (!spill)
    1158           0 :                     return false;
    1159         720 :                 bundle->setSpillSet(spill);
    1160             : 
    1161         720 :                 size_t priority = computePriority(bundle);
    1162         720 :                 if (!allocationQueue.insert(QueueItem(bundle, priority)))
    1163           0 :                     return false;
    1164             :             }
    1165             :         }
    1166             :     }
    1167             : 
    1168           8 :     return true;
    1169             : }
    1170             : 
    1171             : static const size_t MAX_ATTEMPTS = 2;
    1172             : 
    1173             : bool
    1174         332 : BacktrackingAllocator::tryAllocateFixed(LiveBundle* bundle, Requirement requirement,
    1175             :                                         bool* success, bool* pfixed,
    1176             :                                         LiveBundleVector& conflicting)
    1177             : {
    1178             :     // Spill bundles which are required to be in a certain stack slot.
    1179         332 :     if (!requirement.allocation().isRegister()) {
    1180          24 :         JitSpew(JitSpew_RegAlloc, "  stack allocation requirement");
    1181          24 :         bundle->setAllocation(requirement.allocation());
    1182          24 :         *success = true;
    1183          24 :         return true;
    1184             :     }
    1185             : 
    1186         308 :     AnyRegister reg = requirement.allocation().toRegister();
    1187         308 :     return tryAllocateRegister(registers[reg.code()], bundle, success, pfixed, conflicting);
    1188             : }
    1189             : 
    1190             : bool
    1191        1144 : BacktrackingAllocator::tryAllocateNonFixed(LiveBundle* bundle,
    1192             :                                            Requirement requirement, Requirement hint,
    1193             :                                            bool* success, bool* pfixed,
    1194             :                                            LiveBundleVector& conflicting)
    1195             : {
     1196             :     // If we want, but do not require, a bundle to be in a specific register,
    1197             :     // only look at that register for allocating and evict or spill if it is
    1198             :     // not available. Picking a separate register may be even worse than
    1199             :     // spilling, as it will still necessitate moves and will tie up more
    1200             :     // registers than if we spilled.
    1201        1144 :     if (hint.kind() == Requirement::FIXED) {
    1202           0 :         AnyRegister reg = hint.allocation().toRegister();
    1203           0 :         if (!tryAllocateRegister(registers[reg.code()], bundle, success, pfixed, conflicting))
    1204           0 :             return false;
    1205           0 :         if (*success)
    1206           0 :             return true;
    1207             :     }
    1208             : 
    1209             :     // Spill bundles which have no hint or register requirement.
    1210        1144 :     if (requirement.kind() == Requirement::NONE && hint.kind() != Requirement::REGISTER) {
    1211         199 :         JitSpew(JitSpew_RegAlloc, "  postponed spill (no hint or register requirement)");
    1212         199 :         if (!spilledBundles.append(bundle))
    1213           0 :             return false;
    1214         199 :         *success = true;
    1215         199 :         return true;
    1216             :     }
    1217             : 
    1218         945 :     if (conflicting.empty() || minimalBundle(bundle)) {
    1219             :         // Search for any available register which the bundle can be
    1220             :         // allocated to.
    1221       18334 :         for (size_t i = 0; i < AnyRegister::Total; i++) {
    1222       18105 :             if (!tryAllocateRegister(registers[i], bundle, success, pfixed, conflicting))
    1223           0 :                 return false;
    1224       18105 :             if (*success)
    1225         716 :                 return true;
    1226             :         }
    1227             :     }
    1228             : 
    1229             :     // Spill bundles which have no register requirement if they didn't get
    1230             :     // allocated.
    1231         229 :     if (requirement.kind() == Requirement::NONE) {
    1232          15 :         JitSpew(JitSpew_RegAlloc, "  postponed spill (no register requirement)");
    1233          15 :         if (!spilledBundles.append(bundle))
    1234           0 :             return false;
    1235          15 :         *success = true;
    1236          15 :         return true;
    1237             :     }
    1238             : 
    1239             :     // We failed to allocate this bundle.
    1240         214 :     MOZ_ASSERT(!*success);
    1241         214 :     return true;
    1242             : }
    1243             : 
    1244             : bool
    1245        1438 : BacktrackingAllocator::processBundle(MIRGenerator* mir, LiveBundle* bundle)
    1246             : {
    1247        1438 :     if (JitSpewEnabled(JitSpew_RegAlloc)) {
    1248           0 :         JitSpew(JitSpew_RegAlloc, "Allocating %s [priority %" PRIuSIZE "] [weight %" PRIuSIZE "]",
    1249           0 :                 bundle->toString().get(), computePriority(bundle), computeSpillWeight(bundle));
    1250             :     }
    1251             : 
    1252             :     // A bundle can be processed by doing any of the following:
    1253             :     //
    1254             :     // - Assigning the bundle a register. The bundle cannot overlap any other
    1255             :     //   bundle allocated for that physical register.
    1256             :     //
    1257             :     // - Spilling the bundle, provided it has no register uses.
    1258             :     //
    1259             :     // - Splitting the bundle into two or more bundles which cover the original
    1260             :     //   one. The new bundles are placed back onto the priority queue for later
    1261             :     //   processing.
    1262             :     //
    1263             :     // - Evicting one or more existing allocated bundles, and then doing one
    1264             :     //   of the above operations. Evicted bundles are placed back on the
    1265             :     //   priority queue. Any evicted bundles must have a lower spill weight
    1266             :     //   than the bundle being processed.
    1267             :     //
    1268             :     // As long as this structure is followed, termination is guaranteed.
    1269             :     // In general, we want to minimize the amount of bundle splitting (which
    1270             :     // generally necessitates spills), so allocate longer lived, lower weight
    1271             :     // bundles first and evict and split them later if they prevent allocation
    1272             :     // for higher weight bundles.
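                       :     //
                       :     // A worked example of the eviction rule (numbers illustrative): if
                       :     // this bundle has spill weight 10 and every register it can use is
                       :     // held by bundles of weight at most 9, the cheapest conflicting set
                       :     // is evicted and requeued and allocation is retried; after
                       :     // MAX_ATTEMPTS (two) eviction rounds the bundle is split instead,
                       :     // unless it is minimal, in which case it must be allocatable.
                       :     // Because an evicted bundle always weighs strictly less than its
                       :     // evictor, evictions cannot cycle.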
    1273             : 
    1274        1438 :     Requirement requirement, hint;
    1275        1438 :     bool canAllocate = computeRequirement(bundle, &requirement, &hint);
    1276             : 
    1277             :     bool fixed;
    1278        2876 :     LiveBundleVector conflicting;
    1279        1494 :     for (size_t attempt = 0;; attempt++) {
    1280        1494 :         if (mir->shouldCancel("Backtracking Allocation (processBundle loop)"))
    1281           0 :             return false;
    1282             : 
    1283        1494 :         if (canAllocate) {
    1284        1476 :             bool success = false;
    1285        1476 :             fixed = false;
    1286        1476 :             conflicting.clear();
    1287             : 
    1288             :             // Ok, let's try allocating for this bundle.
    1289        1476 :             if (requirement.kind() == Requirement::FIXED) {
    1290         332 :                 if (!tryAllocateFixed(bundle, requirement, &success, &fixed, conflicting))
    1291        1194 :                     return false;
    1292             :             } else {
    1293        1144 :                 if (!tryAllocateNonFixed(bundle, requirement, hint, &success, &fixed, conflicting))
    1294           0 :                     return false;
    1295             :             }
    1296             : 
    1297             :             // If that worked, we're done!
    1298        1476 :             if (success)
    1299        1194 :                 return true;
    1300             : 
    1301             :             // If that didn't work, but we have one or more non-fixed bundles
    1302             :             // known to be conflicting, maybe we can evict them and try again.
    1303         564 :             if ((attempt < MAX_ATTEMPTS || minimalBundle(bundle)) &&
    1304         378 :                 !fixed &&
    1305         474 :                 !conflicting.empty() &&
    1306          96 :                 maximumSpillWeight(conflicting) < computeSpillWeight(bundle))
    1307             :                 {
    1308         112 :                     for (size_t i = 0; i < conflicting.length(); i++) {
    1309          56 :                         if (!evictBundle(conflicting[i]))
    1310           0 :                             return false;
    1311             :                     }
    1312         112 :                     continue;
    1313             :                 }
    1314             :         }
    1315             : 
    1316             :         // A minimal bundle cannot be split any further. If we try to split it
     1317             :     // at this point we will just end up with the same bundle and will
    1318             :         // enter an infinite loop. Weights and the initial live ranges must
    1319             :         // be constructed so that any minimal bundle is allocatable.
    1320         244 :         MOZ_ASSERT(!minimalBundle(bundle));
    1321             : 
    1322         244 :         LiveBundle* conflict = conflicting.empty() ? nullptr : conflicting[0];
    1323         244 :         return chooseBundleSplit(bundle, canAllocate && fixed, conflict);
    1324          56 :     }
    1325             : }
    1326             : 
    1327             : bool
    1328        1438 : BacktrackingAllocator::computeRequirement(LiveBundle* bundle,
    1329             :                                           Requirement *requirement, Requirement *hint)
    1330             : {
    1331             :     // Set any requirement or hint on bundle according to its definition and
    1332             :     // uses. Return false if there are conflicting requirements which will
    1333             :     // require the bundle to be split.
    1334             : 
    1335        5196 :     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
    1336        3776 :         LiveRange* range = LiveRange::get(*iter);
    1337        3776 :         VirtualRegister &reg = vregs[range->vreg()];
    1338             : 
    1339        3776 :         if (range->hasDefinition()) {
    1340             :             // Deal with any definition constraints/hints.
    1341        1357 :             LDefinition::Policy policy = reg.def()->policy();
    1342        1357 :             if (policy == LDefinition::FIXED) {
    1343             :                 // Fixed policies get a FIXED requirement.
    1344         247 :                 JitSpew(JitSpew_RegAlloc, "  Requirement %s, fixed by definition",
    1345         494 :                         reg.def()->output()->toString().get());
    1346         247 :                 if (!requirement->merge(Requirement(*reg.def()->output())))
    1347          18 :                     return false;
    1348        1110 :             } else if (reg.ins()->isPhi()) {
    1349             :                 // Phis don't have any requirements, but they should prefer their
    1350             :                 // input allocations. This is captured by the group hints above.
    1351             :             } else {
    1352             :                 // Non-phis get a REGISTER requirement.
    1353        1110 :                 if (!requirement->merge(Requirement(Requirement::REGISTER)))
    1354           0 :                     return false;
    1355             :             }
    1356             :         }
    1357             : 
    1358             :         // Search uses for requirements.
    1359       18236 :         for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
    1360       14478 :             LUse::Policy policy = iter->usePolicy();
    1361       14478 :             if (policy == LUse::FIXED) {
    1362         115 :                 AnyRegister required = GetFixedRegister(reg.def(), iter->use());
    1363             : 
    1364         115 :                 JitSpew(JitSpew_RegAlloc, "  Requirement %s, due to use at %u",
    1365         230 :                         required.name(), iter->pos.bits());
    1366             : 
    1367             :                 // If there are multiple fixed registers which the bundle is
    1368             :                 // required to use, fail. The bundle will need to be split before
    1369             :                 // it can be allocated.
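                       :                 // E.g. one use fixed to rax and a later use fixed
                       :                 // to rcx can never be satisfied by a single bundle
                       :                 // (register names illustrative).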
    1370         115 :                 if (!requirement->merge(Requirement(LAllocation(required))))
    1371           8 :                     return false;
    1372       14363 :             } else if (policy == LUse::REGISTER) {
    1373        1396 :                 if (!requirement->merge(Requirement(Requirement::REGISTER)))
    1374          10 :                     return false;
    1375       12967 :             } else if (policy == LUse::ANY) {
    1376             :                 // ANY differs from KEEPALIVE by actively preferring a register.
    1377         102 :                 if (!hint->merge(Requirement(Requirement::REGISTER)))
    1378           0 :                     return false;
    1379             :             }
    1380             :         }
    1381             :     }
    1382             : 
    1383        1420 :     return true;
    1384             : }
    1385             : 
    1386             : bool
    1387       30958 : BacktrackingAllocator::tryAllocateRegister(PhysicalRegister& r, LiveBundle* bundle,
    1388             :                                            bool* success, bool* pfixed, LiveBundleVector& conflicting)
    1389             : {
    1390       30958 :     *success = false;
    1391             : 
    1392       30958 :     if (!r.allocatable)
    1393        2381 :         return true;
    1394             : 
    1395       57154 :     LiveBundleVector aliasedConflicting;
    1396             : 
    1397       47523 :     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
    1398       43113 :         LiveRange* range = LiveRange::get(*iter);
    1399       43113 :         VirtualRegister &reg = vregs[range->vreg()];
    1400             : 
    1401       43113 :         if (!reg.isCompatible(r.reg))
    1402       44391 :             return true;
    1403             : 
    1404       41917 :         for (size_t a = 0; a < r.reg.numAliased(); a++) {
    1405       22971 :             PhysicalRegister& rAlias = registers[r.reg.aliased(a).code()];
    1406             :             LiveRange* existing;
    1407       22971 :             if (!rAlias.allocations.contains(range, &existing))
    1408       11529 :                 continue;
    1409       11442 :             if (existing->hasVreg()) {
    1410        7499 :                 MOZ_ASSERT(existing->bundle()->allocation().toRegister() == rAlias.reg);
    1411        7499 :                 bool duplicate = false;
    1412        9574 :                 for (size_t i = 0; i < aliasedConflicting.length(); i++) {
    1413        4803 :                     if (aliasedConflicting[i] == existing->bundle()) {
    1414        2728 :                         duplicate = true;
    1415        2728 :                         break;
    1416             :                     }
    1417             :                 }
    1418        7499 :                 if (!duplicate && !aliasedConflicting.append(existing->bundle()))
    1419        3943 :                     return false;
    1420             :             } else {
    1421        3943 :                 JitSpew(JitSpew_RegAlloc, "  %s collides with fixed use %s",
    1422        7886 :                         rAlias.reg.name(), existing->toString().get());
    1423        3943 :                 *pfixed = true;
    1424        3943 :                 return true;
    1425             :             }
    1426             :         }
    1427             :     }
    1428             : 
    1429        4410 :     if (!aliasedConflicting.empty()) {
     1430             :         // One or more aliased registers are allocated to another bundle
    1431             :         // overlapping this one. Keep track of the conflicting set, and in the
    1432             :         // case of multiple conflicting sets keep track of the set with the
    1433             :         // lowest maximum spill weight.
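                       :         // E.g. if rax's conflicts top out at weight 12 and rbx's at
                       :         // weight 4, remember rbx's set: it is the cheaper one to
                       :         // evict should eviction become necessary (illustrative).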
    1434             : 
    1435             :         // The #ifdef guards against "unused variable 'existing'" bustage.
    1436             : #ifdef JS_JITSPEW
    1437        3434 :         if (JitSpewEnabled(JitSpew_RegAlloc)) {
    1438           0 :             if (aliasedConflicting.length() == 1) {
    1439           0 :                 LiveBundle* existing = aliasedConflicting[0];
    1440           0 :                 JitSpew(JitSpew_RegAlloc, "  %s collides with %s [weight %" PRIuSIZE "]",
    1441           0 :                         r.reg.name(), existing->toString().get(), computeSpillWeight(existing));
    1442             :             } else {
    1443           0 :                 JitSpew(JitSpew_RegAlloc, "  %s collides with the following", r.reg.name());
    1444           0 :                 for (size_t i = 0; i < aliasedConflicting.length(); i++) {
    1445           0 :                     LiveBundle* existing = aliasedConflicting[i];
    1446           0 :                     JitSpew(JitSpew_RegAlloc, "      %s [weight %" PRIuSIZE "]",
    1447           0 :                             existing->toString().get(), computeSpillWeight(existing));
    1448             :                 }
    1449             :             }
    1450             :         }
    1451             : #endif
    1452             : 
    1453        3434 :         if (conflicting.empty()) {
    1454         926 :             if (!conflicting.appendAll(aliasedConflicting))
    1455           0 :                 return false;
    1456             :         } else {
    1457        2508 :             if (maximumSpillWeight(aliasedConflicting) < maximumSpillWeight(conflicting)) {
    1458         442 :                 conflicting.clear();
    1459         442 :                 if (!conflicting.appendAll(aliasedConflicting))
    1460           0 :                     return false;
    1461             :             }
    1462             :         }
    1463        3434 :         return true;
    1464             :     }
    1465             : 
    1466         976 :     JitSpew(JitSpew_RegAlloc, "  allocated to %s", r.reg.name());
    1467             : 
    1468        2311 :     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
    1469        1335 :         LiveRange* range = LiveRange::get(*iter);
    1470        1335 :         if (!alloc().ensureBallast())
    1471           0 :             return false;
    1472        1335 :         if (!r.allocations.insert(range))
    1473           0 :             return false;
    1474             :     }
    1475             : 
    1476         976 :     bundle->setAllocation(LAllocation(r.reg));
    1477         976 :     *success = true;
    1478         976 :     return true;
    1479             : }
    1480             : 
    1481             : bool
    1482          56 : BacktrackingAllocator::evictBundle(LiveBundle* bundle)
    1483             : {
    1484          56 :     if (JitSpewEnabled(JitSpew_RegAlloc)) {
    1485           0 :         JitSpew(JitSpew_RegAlloc, "  Evicting %s [priority %" PRIuSIZE "] [weight %" PRIuSIZE "]",
    1486           0 :                 bundle->toString().get(), computePriority(bundle), computeSpillWeight(bundle));
    1487             :     }
    1488             : 
    1489          56 :     AnyRegister reg(bundle->allocation().toRegister());
    1490          56 :     PhysicalRegister& physical = registers[reg.code()];
    1491          56 :     MOZ_ASSERT(physical.reg == reg && physical.allocatable);
    1492             : 
    1493         227 :     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
    1494         171 :         LiveRange* range = LiveRange::get(*iter);
    1495         171 :         physical.allocations.remove(range);
    1496             :     }
    1497             : 
    1498          56 :     bundle->setAllocation(LAllocation());
    1499             : 
    1500          56 :     size_t priority = computePriority(bundle);
    1501          56 :     return allocationQueue.insert(QueueItem(bundle, priority));
    1502             : }
    1503             : 
    1504             : bool
    1505         244 : BacktrackingAllocator::splitAndRequeueBundles(LiveBundle* bundle,
    1506             :                                               const LiveBundleVector& newBundles)
    1507             : {
    1508         244 :     if (JitSpewEnabled(JitSpew_RegAlloc)) {
    1509           0 :         JitSpew(JitSpew_RegAlloc, "    splitting bundle %s into:", bundle->toString().get());
    1510           0 :         for (size_t i = 0; i < newBundles.length(); i++)
    1511           0 :             JitSpew(JitSpew_RegAlloc, "      %s", newBundles[i]->toString().get());
    1512             :     }
    1513             : 
    1514             :     // Remove all ranges in the old bundle from their register's list.
    1515        1758 :     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
    1516        1514 :         LiveRange* range = LiveRange::get(*iter);
    1517        1514 :         vregs[range->vreg()].removeRange(range);
    1518             :     }
    1519             : 
    1520             :     // Add all ranges in the new bundles to their register's list.
    1521         906 :     for (size_t i = 0; i < newBundles.length(); i++) {
    1522         662 :         LiveBundle* newBundle = newBundles[i];
    1523        2621 :         for (LiveRange::BundleLinkIterator iter = newBundle->rangesBegin(); iter; iter++) {
    1524        1959 :             LiveRange* range = LiveRange::get(*iter);
    1525        1959 :             vregs[range->vreg()].addRange(range);
    1526             :         }
    1527             :     }
    1528             : 
    1529             :     // Queue the new bundles for register assignment.
    1530         906 :     for (size_t i = 0; i < newBundles.length(); i++) {
    1531         662 :         LiveBundle* newBundle = newBundles[i];
    1532         662 :         size_t priority = computePriority(newBundle);
    1533         662 :         if (!allocationQueue.insert(QueueItem(newBundle, priority)))
    1534           0 :             return false;
    1535             :     }
    1536             : 
    1537         244 :     return true;
    1538             : }
    1539             : 
    1540             : bool
    1541         194 : BacktrackingAllocator::spill(LiveBundle* bundle)
    1542             : {
    1543         194 :     JitSpew(JitSpew_RegAlloc, "  Spilling bundle");
    1544         194 :     MOZ_ASSERT(bundle->allocation().isBogus());
    1545             : 
    1546         194 :     if (LiveBundle* spillParent = bundle->spillParent()) {
    1547           0 :         JitSpew(JitSpew_RegAlloc, "    Using existing spill bundle");
    1548           0 :         for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
    1549           0 :             LiveRange* range = LiveRange::get(*iter);
    1550           0 :             LiveRange* parentRange = spillParent->rangeFor(range->from());
    1551           0 :             MOZ_ASSERT(parentRange->contains(range));
    1552           0 :             MOZ_ASSERT(range->vreg() == parentRange->vreg());
    1553           0 :             range->distributeUses(parentRange);
    1554           0 :             MOZ_ASSERT(!range->hasUses());
    1555           0 :             vregs[range->vreg()].removeRange(range);
    1556             :         }
    1557           0 :         return true;
    1558             :     }
    1559             : 
    1560         194 :     return bundle->spillSet()->addSpilledBundle(bundle);
    1561             : }
    1562             : 
    1563             : bool
    1564           8 : BacktrackingAllocator::tryAllocatingRegistersForSpillBundles()
    1565             : {
    1566         222 :     for (auto it = spilledBundles.begin(); it != spilledBundles.end(); it++) {
    1567         214 :         LiveBundle* bundle = *it;
    1568         428 :         LiveBundleVector conflicting;
    1569         214 :         bool fixed = false;
    1570         214 :         bool success = false;
    1571             : 
    1572         214 :         if (mir->shouldCancel("Backtracking Try Allocating Spilled Bundles"))
    1573           0 :             return false;
    1574             : 
    1575         214 :         if (JitSpewEnabled(JitSpew_RegAlloc))
    1576           0 :             JitSpew(JitSpew_RegAlloc, "Spill or allocate %s", bundle->toString().get());
    1577             : 
    1578             :         // Search for any available register which the bundle can be
    1579             :         // allocated to.
    1580       12739 :         for (size_t i = 0; i < AnyRegister::Total; i++) {
    1581       12545 :             if (!tryAllocateRegister(registers[i], bundle, &success, &fixed, conflicting))
    1582           0 :                 return false;
    1583       12545 :             if (success)
    1584          20 :                 break;
    1585             :         }
    1586             : 
    1587             :         // If the bundle still has no register, spill the bundle.
    1588         214 :         if (!success && !spill(bundle))
    1589           0 :             return false;
    1590             :     }
    1591             : 
    1592           8 :     return true;
    1593             : }
    1594             : 
    1595             : bool
    1596           8 : BacktrackingAllocator::pickStackSlots()
    1597             : {
    1598        1070 :     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
    1599        1062 :         VirtualRegister& reg = vregs[i];
    1600             : 
    1601        1062 :         if (mir->shouldCancel("Backtracking Pick Stack Slots"))
    1602           0 :             return false;
    1603             : 
    1604        3157 :         for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
    1605        2095 :             LiveRange* range = LiveRange::get(*iter);
    1606        2095 :             LiveBundle* bundle = range->bundle();
    1607             : 
    1608        2095 :             if (bundle->allocation().isBogus()) {
    1609         105 :                 if (!pickStackSlot(bundle->spillSet()))
    1610           0 :                     return false;
    1611         105 :                 MOZ_ASSERT(!bundle->allocation().isBogus());
    1612             :             }
    1613             :         }
    1614             :     }
    1615             : 
    1616           8 :     return true;
    1617             : }
    1618             : 
    1619             : bool
    1620         105 : BacktrackingAllocator::pickStackSlot(SpillSet* spillSet)
    1621             : {
    1622             :     // Look through all ranges that have been spilled in this set for a
    1623             :     // register definition which is fixed to a stack or argument slot. If we
    1624             :     // find one, use it for all bundles that have been spilled. tryMergeBundles
    1625             :     // makes sure this reuse is possible when an initial bundle contains ranges
    1626             :     // from multiple virtual registers.
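                       :     // E.g. a formal argument whose definition is FIXED to its argument
                       :     // slot can be "spilled" back to that slot for free, so the whole
                       :     // spill set reuses it rather than allocating a fresh stack slot.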
    1627         299 :     for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
    1628         194 :         LiveBundle* bundle = spillSet->spilledBundle(i);
    1629        1087 :         for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
    1630         893 :             LiveRange* range = LiveRange::get(*iter);
    1631         893 :             if (range->hasDefinition()) {
    1632           0 :                 LDefinition* def = vregs[range->vreg()].def();
    1633           0 :                 if (def->policy() == LDefinition::FIXED) {
    1634           0 :                     MOZ_ASSERT(!def->output()->isRegister());
    1635           0 :                     MOZ_ASSERT(!def->output()->isStackSlot());
    1636           0 :                     spillSet->setAllocation(*def->output());
    1637           0 :                     return true;
    1638             :                 }
    1639             :             }
    1640             :         }
    1641             :     }
    1642             : 
    1643         105 :     LDefinition::Type type = vregs[spillSet->spilledBundle(0)->firstRange()->vreg()].type();
    1644             : 
    1645             :     SpillSlotList* slotList;
    1646         105 :     switch (StackSlotAllocator::width(type)) {
    1647          24 :       case 4:  slotList = &normalSlots; break;
    1648          81 :       case 8:  slotList = &doubleSlots; break;
    1649           0 :       case 16: slotList = &quadSlots;   break;
    1650             :       default:
    1651           0 :         MOZ_CRASH("Bad width");
    1652             :     }
    1653             : 
    1654             :     // Maximum number of existing spill slots we will look at before giving up
    1655             :     // and allocating a new slot.
    1656             :     static const size_t MAX_SEARCH_COUNT = 10;
    1657             : 
    1658         105 :     size_t searches = 0;
    1659         105 :     SpillSlot* stop = nullptr;
    1660         985 :     while (!slotList->empty()) {
    1661         533 :         SpillSlot* spillSlot = *slotList->begin();
    1662         533 :         if (!stop) {
    1663          93 :             stop = spillSlot;
    1664         440 :         } else if (stop == spillSlot) {
    1665             :             // We looked through every slot in the list.
    1666          53 :             break;
    1667             :         }
    1668             : 
    1669         480 :         bool success = true;
    1670         551 :         for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
    1671         530 :             LiveBundle* bundle = spillSet->spilledBundle(i);
    1672         950 :             for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
    1673         879 :                 LiveRange* range = LiveRange::get(*iter);
    1674             :                 LiveRange* existing;
    1675         879 :                 if (spillSlot->allocated.contains(range, &existing)) {
    1676         459 :                     success = false;
    1677         459 :                     break;
    1678             :                 }
    1679             :             }
    1680         530 :             if (!success)
    1681         459 :                 break;
    1682             :         }
    1683         480 :         if (success) {
    1684             :             // We can reuse this physical stack slot for the new bundles.
    1685             :             // Update the allocated ranges for the slot.
    1686          48 :             for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
    1687          27 :                 LiveBundle* bundle = spillSet->spilledBundle(i);
    1688          27 :                 if (!insertAllRanges(spillSlot->allocated, bundle))
    1689           0 :                     return false;
    1690             :             }
    1691          21 :             spillSet->setAllocation(spillSlot->alloc);
    1692          21 :             return true;
    1693             :         }
    1694             : 
     1695             :         // On a miss, move the slot to the end of the list. This will cause us
    1696             :         // to make fewer attempts to allocate from slots with a large and
    1697             :         // highly contended range.
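                       :         // E.g. a failed probe of the front slot in [S1, S2, S3] leaves
                       :         // [S2, S3, S1], so persistently contended slots drift to the
                       :         // back and are probed last in later searches (illustrative).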
    1698         459 :         slotList->popFront();
    1699         459 :         slotList->pushBack(spillSlot);
    1700             : 
    1701         459 :         if (++searches == MAX_SEARCH_COUNT)
    1702          19 :             break;
    1703             :     }
    1704             : 
    1705             :     // We need a new physical stack slot.
    1706          84 :     uint32_t stackSlot = stackSlotAllocator.allocateSlot(type);
    1707             : 
    1708          84 :     SpillSlot* spillSlot = new(alloc().fallible()) SpillSlot(stackSlot, alloc().lifoAlloc());
    1709          84 :     if (!spillSlot)
    1710           0 :         return false;
    1711             : 
    1712         251 :     for (size_t i = 0; i < spillSet->numSpilledBundles(); i++) {
    1713         167 :         LiveBundle* bundle = spillSet->spilledBundle(i);
    1714         167 :         if (!insertAllRanges(spillSlot->allocated, bundle))
    1715           0 :             return false;
    1716             :     }
    1717             : 
    1718          84 :     spillSet->setAllocation(spillSlot->alloc);
    1719             : 
    1720          84 :     slotList->pushFront(spillSlot);
    1721          84 :     return true;
    1722             : }
    1723             : 
    1724             : bool
    1725         194 : BacktrackingAllocator::insertAllRanges(LiveRangeSet& set, LiveBundle* bundle)
    1726             : {
    1727        1087 :     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
    1728         893 :         LiveRange* range = LiveRange::get(*iter);
    1729         893 :         if (!alloc().ensureBallast())
    1730           0 :             return false;
    1731         893 :         if (!set.insert(range))
    1732           0 :             return false;
    1733             :     }
    1734         194 :     return true;
    1735             : }
    1736             : 
    1737             : bool
    1738        2095 : BacktrackingAllocator::deadRange(LiveRange* range)
    1739             : {
    1740             :     // Check for direct uses of this range.
    1741        2095 :     if (range->hasUses() || range->hasDefinition())
    1742        1866 :         return false;
    1743             : 
    1744         229 :     CodePosition start = range->from();
    1745         229 :     LNode* ins = insData[start];
    1746         229 :     if (start == entryOf(ins->block()))
    1747          95 :         return false;
    1748             : 
    1749         134 :     VirtualRegister& reg = vregs[range->vreg()];
    1750             : 
    1751             :     // Check if there are later ranges for this vreg.
    1752         134 :     LiveRange::RegisterLinkIterator iter = reg.rangesBegin(range);
    1753         134 :     for (iter++; iter; iter++) {
    1754          19 :         LiveRange* laterRange = LiveRange::get(*iter);
    1755          19 :         if (laterRange->from() > range->from())
    1756          19 :             return false;
    1757             :     }
    1758             : 
    1759             :     // Check if this range ends at a loop backedge.
    1760         115 :     LNode* last = insData[range->to().previous()];
    1761         115 :     if (last->isGoto() && last->toGoto()->target()->id() < last->block()->mir()->id())
    1762           4 :         return false;
    1763             : 
    1764             :     // Check if there are phis which this vreg flows to.
    1765         111 :     if (reg.usedByPhi())
    1766         111 :         return false;
    1767             : 
    1768           0 :     return true;
    1769             : }
    1770             : 
    1771             : bool
    1772           8 : BacktrackingAllocator::resolveControlFlow()
    1773             : {
    1774             :     // Add moves to handle changing assignments for vregs over their lifetime.
    1775           8 :     JitSpew(JitSpew_RegAlloc, "Resolving control flow (vreg loop)");
    1776             : 
    1777             :     // Look for places where a register's assignment changes in the middle of a
    1778             :     // basic block.
    1779           8 :     MOZ_ASSERT(!vregs[0u].hasRanges());
    1780        1070 :     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
    1781        1062 :         VirtualRegister& reg = vregs[i];
    1782             : 
    1783        1062 :         if (mir->shouldCancel("Backtracking Resolve Control Flow (vreg outer loop)"))
    1784           0 :             return false;
    1785             : 
    1786        3157 :         for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; ) {
    1787        2095 :             LiveRange* range = LiveRange::get(*iter);
    1788             : 
    1789        2095 :             if (mir->shouldCancel("Backtracking Resolve Control Flow (vreg inner loop)"))
    1790           0 :                 return false;
    1791             : 
    1792             :             // Remove ranges which will never be used.
    1793        2095 :             if (deadRange(range)) {
    1794           0 :                 reg.removeRangeAndIncrement(iter);
    1795           0 :                 continue;
    1796             :             }
    1797             : 
    1798             :             // The range which defines the register does not have a predecessor
    1799             :             // to add moves from.
    1800        2095 :             if (range->hasDefinition()) {
    1801         887 :                 iter++;
    1802         887 :                 continue;
    1803             :             }
    1804             : 
    1805             :             // Ignore ranges that start at block boundaries. We will handle
    1806             :             // these in the next phase.
    1807        1208 :             CodePosition start = range->from();
    1808        1208 :             LNode* ins = insData[start];
    1809        1208 :             if (start == entryOf(ins->block())) {
    1810         915 :                 iter++;
    1811         915 :                 continue;
    1812             :             }
    1813             : 
    1814             :             // If we already saw a range which covers the start of this range
    1815             :             // and has the same allocation, we don't need an explicit move at
    1816             :             // the start of this range.
    1817         293 :             bool skip = false;
    1818         909 :             for (LiveRange::RegisterLinkIterator prevIter = reg.rangesBegin();
    1819             :                  prevIter != iter;
    1820             :                  prevIter++)
    1821             :             {
    1822         616 :                 LiveRange* prevRange = LiveRange::get(*prevIter);
    1823        2134 :                 if (prevRange->covers(start) &&
    1824        1188 :                     prevRange->bundle()->allocation() == range->bundle()->allocation())
    1825             :                 {
    1826           0 :                     skip = true;
    1827           0 :                     break;
    1828             :                 }
    1829             :             }
    1830         293 :             if (skip) {
    1831           0 :                 iter++;
    1832           0 :                 continue;
    1833             :             }
    1834             : 
    1835         293 :             if (!alloc().ensureBallast())
    1836           0 :                 return false;
    1837             : 
    1838         293 :             LiveRange* predecessorRange = reg.rangeFor(start.previous(), /* preferRegister = */ true);
    1839         293 :             if (start.subpos() == CodePosition::INPUT) {
    1840         293 :                 if (!moveInput(ins->toInstruction(), predecessorRange, range, reg.type()))
    1841           0 :                     return false;
    1842             :             } else {
    1843           0 :                 if (!moveAfter(ins->toInstruction(), predecessorRange, range, reg.type()))
    1844           0 :                     return false;
    1845             :             }
    1846             : 
    1847         293 :             iter++;
    1848             :         }
    1849             :     }
    1850             : 
    1851           8 :     JitSpew(JitSpew_RegAlloc, "Resolving control flow (block loop)");
    1852             : 
    1853         411 :     for (size_t i = 0; i < graph.numBlocks(); i++) {
    1854         403 :         if (mir->shouldCancel("Backtracking Resolve Control Flow (block loop)"))
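                       :         // E.g. for v5 = phi(v3, v4) at a join block, a move is added
                       :         // at the exit of each predecessor copying v3's (resp. v4's)
                       :         // allocation into v5's allocation at the successor's entry.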
    1855           0 :             return false;
    1856             : 
    1857         403 :         LBlock* successor = graph.getBlock(i);
    1858         403 :         MBasicBlock* mSuccessor = successor->mir();
    1859         403 :         if (mSuccessor->numPredecessors() < 1)
    1860          11 :             continue;
    1861             : 
    1862             :         // Resolve phis to moves.
    1863         567 :         for (size_t j = 0; j < successor->numPhis(); j++) {
    1864         175 :             LPhi* phi = successor->getPhi(j);
    1865         175 :             MOZ_ASSERT(phi->numDefs() == 1);
    1866         175 :             LDefinition* def = phi->getDef(0);
    1867         175 :             VirtualRegister& reg = vreg(def);
    1868         175 :             LiveRange* to = reg.rangeFor(entryOf(successor));
    1869         175 :             MOZ_ASSERT(to);
    1870             : 
    1871         605 :             for (size_t k = 0; k < mSuccessor->numPredecessors(); k++) {
    1872         430 :                 LBlock* predecessor = mSuccessor->getPredecessor(k)->lir();
    1873         430 :                 MOZ_ASSERT(predecessor->mir()->numSuccessors() == 1);
    1874             : 
    1875         430 :                 LAllocation* input = phi->getOperand(k);
    1876         430 :                 LiveRange* from = vreg(input).rangeFor(exitOf(predecessor), /* preferRegister = */ true);
    1877         430 :                 MOZ_ASSERT(from);
    1878             : 
    1879         430 :                 if (!alloc().ensureBallast())
    1880           0 :                     return false;
    1881         430 :                 if (!moveAtExit(predecessor, from, to, def->type()))
    1882           0 :                     return false;
    1883             :             }
    1884             :         }
    1885             :     }
    1886             : 
    1887             :     // Add moves to resolve graph edges with different allocations at their
    1888             :     // source and target.
    1889        1070 :     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
    1890        1062 :         VirtualRegister& reg = vregs[i];
    1891        3157 :         for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
    1892        2095 :             LiveRange* targetRange = LiveRange::get(*iter);
    1893             : 
    1894        2095 :             size_t firstBlockId = insData[targetRange->from()]->block()->mir()->id();
    1895        2095 :             if (!targetRange->covers(entryOf(graph.getBlock(firstBlockId))))
    1896        1125 :                 firstBlockId++;
    1897        9580 :             for (size_t id = firstBlockId; id < graph.numBlocks(); id++) {
    1898        9521 :                 LBlock* successor = graph.getBlock(id);
    1899        9521 :                 if (!targetRange->covers(entryOf(successor)))
    1900        2036 :                     break;
    1901             : 
    1902        7485 :                 BitSet& live = liveIn[id];
    1903        7485 :                 if (!live.contains(i))
    1904         230 :                     continue;
    1905             : 
    1906       16026 :                 for (size_t j = 0; j < successor->mir()->numPredecessors(); j++) {
    1907        8771 :                     LBlock* predecessor = successor->mir()->getPredecessor(j)->lir();
    1908        8771 :                     if (targetRange->covers(exitOf(predecessor)))
    1909        7865 :                         continue;
    1910             : 
    1911         906 :                     if (!alloc().ensureBallast())
    1912           0 :                         return false;
    1913         906 :                     LiveRange* from = reg.rangeFor(exitOf(predecessor), true);
    1914         906 :                     if (successor->mir()->numPredecessors() > 1) {
    1915         172 :                         MOZ_ASSERT(predecessor->mir()->numSuccessors() == 1);
    1916         172 :                         if (!moveAtExit(predecessor, from, targetRange, reg.type()))
    1917           0 :                             return false;
    1918             :                     } else {
    1919         734 :                         if (!moveAtEntry(successor, from, targetRange, reg.type()))
    1920           0 :                             return false;
    1921             :                     }
    1922             :                 }
    1923             :             }
    1924             :         }
    1925             :     }
    1926             : 
    1927           8 :     return true;
    1928             : }
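                     : 
                     : // Illustrative sketch of the placement rule used above when resolving an
                     : // edge predecessor -> successor (the variables here are hypothetical):
                     : //
                     : //   if (successor->mir()->numPredecessors() > 1) {
                     : //       // The successor is a join point, so the move must happen on the
                     : //       // edge itself. That is only safe at the predecessor's exit, which
                     : //       // requires the predecessor to have a single successor (i.e. the
                     : //       // edge is not critical).
                     : //       ok = moveAtExit(predecessor, from, to, type);
                     : //   } else {
                     : //       // The successor has a unique predecessor, so the move can simply
                     : //       // be placed at the successor's entry.
                     : //       ok = moveAtEntry(successor, from, to, type);
                     : //   }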
    1929             : 
    1930             : bool
    1931          40 : BacktrackingAllocator::isReusedInput(LUse* use, LNode* ins, bool considerCopy)
    1932             : {
    1933          40 :     if (LDefinition* def = FindReusingDefOrTemp(ins, use))
    1934           8 :         return considerCopy || !vregs[def->virtualRegister()].mustCopyInput();
    1935          32 :     return false;
    1936             : }
    1937             : 
    1938             : bool
    1939        3554 : BacktrackingAllocator::isRegisterUse(UsePosition* use, LNode* ins, bool considerCopy)
    1940             : {
    1941        3554 :     switch (use->usePolicy()) {
    1942             :       case LUse::ANY:
    1943          40 :         return isReusedInput(use->use(), ins, considerCopy);
    1944             : 
    1945             :       case LUse::REGISTER:
    1946             :       case LUse::FIXED:
    1947         377 :         return true;
    1948             : 
    1949             :       default:
    1950        3137 :         return false;
    1951             :     }
    1952             : }
    1953             : 
    1954             : bool
    1955        5058 : BacktrackingAllocator::isRegisterDefinition(LiveRange* range)
    1956             : {
    1957        5058 :     if (!range->hasDefinition())
    1958        3348 :         return false;
    1959             : 
    1960        1710 :     VirtualRegister& reg = vregs[range->vreg()];
    1961        1710 :     if (reg.ins()->isPhi())
    1962           0 :         return false;
    1963             : 
    1964        1710 :     if (reg.def()->policy() == LDefinition::FIXED && !reg.def()->output()->isRegister())
    1965         369 :         return false;
    1966             : 
    1967        1341 :     return true;
    1968             : }
    1969             : 
    1970             : bool
    1971           8 : BacktrackingAllocator::reifyAllocations()
    1972             : {
    1973           8 :     JitSpew(JitSpew_RegAlloc, "Reifying Allocations");
    1974             : 
    1975           8 :     MOZ_ASSERT(!vregs[0u].hasRanges());
    1976        1070 :     for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
    1977        1062 :         VirtualRegister& reg = vregs[i];
    1978             : 
    1979        1062 :         if (mir->shouldCancel("Backtracking Reify Allocations (main loop)"))
    1980           0 :             return false;
    1981             : 
    1982        3157 :         for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
    1983        2095 :             LiveRange* range = LiveRange::get(*iter);
    1984             : 
    1985        2095 :             if (range->hasDefinition()) {
    1986         887 :                 reg.def()->setOutput(range->bundle()->allocation());
    1987         887 :                 if (reg.ins()->recoversInput()) {
    1988           1 :                     LSnapshot* snapshot = reg.ins()->toInstruction()->snapshot();
    1989          16 :                     for (size_t i = 0; i < snapshot->numEntries(); i++) {
    1990          15 :                         LAllocation* entry = snapshot->getEntry(i);
    1991          15 :                         if (entry->isUse() && entry->toUse()->policy() == LUse::RECOVERED_INPUT)
    1992           1 :                             *entry = *reg.def()->output();
    1993             :                     }
    1994             :                 }
    1995             :             }
    1996             : 
    1997        8065 :             for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
    1998        5970 :                 LAllocation* alloc = iter->use();
    1999        5970 :                 *alloc = range->bundle()->allocation();
    2000             : 
    2001             :                 // For any uses which feed into MUST_REUSE_INPUT definitions,
    2002             :                 // add copies if the use and def have different allocations.
    2003        5970 :                 LNode* ins = insData[iter->pos];
    2004        5970 :                 if (LDefinition* def = FindReusingDefOrTemp(ins, alloc)) {
    2005          13 :                     LiveRange* outputRange = vreg(def).rangeFor(outputOf(ins));
    2006          13 :                     LAllocation res = outputRange->bundle()->allocation();
    2007          13 :                     LAllocation sourceAlloc = range->bundle()->allocation();
    2008             : 
    2009          13 :                     if (res != *alloc) {
    2010           4 :                         if (!this->alloc().ensureBallast())
    2011           0 :                             return false;
    2012           4 :                         if (NumReusingDefs(ins) <= 1) {
    2013           4 :                             LMoveGroup* group = getInputMoveGroup(ins->toInstruction());
    2014           4 :                             if (!group->addAfter(sourceAlloc, res, reg.type()))
    2015           0 :                                 return false;
    2016             :                         } else {
    2017           0 :                             LMoveGroup* group = getFixReuseMoveGroup(ins->toInstruction());
    2018           0 :                             if (!group->add(sourceAlloc, res, reg.type()))
    2019           0 :                                 return false;
    2020             :                         }
    2021           4 :                         *alloc = res;
    2022             :                     }
    2023             :                 }
    2024             :             }
    2025             : 
    2026        2095 :             addLiveRegistersForRange(reg, range);
    2027             :         }
    2028             :     }
    2029             : 
    2030           8 :     graph.setLocalSlotCount(stackSlotAllocator.stackHeight());
    2031           8 :     return true;
    2032             : }
    2033             : 
    2034             : size_t
    2035        1164 : BacktrackingAllocator::findFirstNonCallSafepoint(CodePosition from)
    2036             : {
    2037        1164 :     size_t i = 0;
    2038       25744 :     for (; i < graph.numNonCallSafepoints(); i++) {
    2039       13391 :         const LInstruction* ins = graph.getNonCallSafepoint(i);
    2040       13391 :         if (from <= inputOf(ins))
    2041        1101 :             break;
    2042             :     }
    2043        1164 :     return i;
    2044             : }
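                     : 
                     : // The scan above relies on non-call safepoints being sorted by code
                     : // position. Worked example (hypothetical positions): with safepoints at
                     : // inputOf positions {12, 40, 64} and |from| == 41, the loop returns
                     : // index 2, the first safepoint at or after |from|.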
    2045             : 
    2046             : void
    2047        2095 : BacktrackingAllocator::addLiveRegistersForRange(VirtualRegister& reg, LiveRange* range)
    2048             : {
    2049             :     // Fill in the live register sets for all non-call safepoints.
    2050        2095 :     LAllocation a = range->bundle()->allocation();
    2051        2095 :     if (!a.isRegister())
    2052         931 :         return;
    2053             : 
    2054             :     // Don't add output registers to the safepoint.
    2055        1164 :     CodePosition start = range->from();
    2056        1164 :     if (range->hasDefinition() && !reg.isTemp()) {
    2057             : #ifdef CHECK_OSIPOINT_REGISTERS
    2058             :         // We don't add the output register to the safepoint,
    2059             :         // but it might still get added as one of the inputs,
    2060             :         // so eagerly add this register to the safepoint's clobbered registers.
    2061         537 :         if (reg.ins()->isInstruction()) {
    2062         537 :             if (LSafepoint* safepoint = reg.ins()->toInstruction()->safepoint())
    2063         105 :                 safepoint->addClobberedRegister(a.toRegister());
    2064             :         }
    2065             : #endif
    2066         537 :         start = start.next();
    2067             :     }
    2068             : 
    2069        1164 :     size_t i = findFirstNonCallSafepoint(start);
    2070        1806 :     for (; i < graph.numNonCallSafepoints(); i++) {
    2071        1392 :         LInstruction* ins = graph.getNonCallSafepoint(i);
    2072        1392 :         CodePosition pos = inputOf(ins);
    2073             : 
    2074             :         // Safepoints are sorted, so we can shortcut out of this loop
    2075             :         // if we go out of range.
    2076        1392 :         if (range->to() <= pos)
    2077        1071 :             break;
    2078             : 
    2079         321 :         MOZ_ASSERT(range->covers(pos));
    2080             : 
    2081         321 :         LSafepoint* safepoint = ins->safepoint();
    2082         321 :         safepoint->addLiveRegister(a.toRegister());
    2083             : 
    2084             : #ifdef CHECK_OSIPOINT_REGISTERS
    2085         321 :         if (reg.isTemp())
    2086         101 :             safepoint->addClobberedRegister(a.toRegister());
    2087             : #endif
    2088             :     }
    2089             : }
    2090             : 
    2091             : static inline bool
    2092         534 : IsNunbox(VirtualRegister& reg)
    2093             : {
    2094             : #ifdef JS_NUNBOX32
    2095             :     return reg.type() == LDefinition::TYPE ||
    2096             :            reg.type() == LDefinition::PAYLOAD;
    2097             : #else
    2098         534 :     return false;
    2099             : #endif
    2100             : }
    2101             : 
    2102             : static inline bool
    2103         548 : IsSlotsOrElements(VirtualRegister& reg)
    2104             : {
    2105         548 :     return reg.type() == LDefinition::SLOTS;
    2106             : }
    2107             : 
    2108             : static inline bool
    2109        1060 : IsTraceable(VirtualRegister& reg)
    2110             : {
    2111        1060 :     if (reg.type() == LDefinition::OBJECT)
    2112         174 :         return true;
    2113             : #ifdef JS_PUNBOX64
    2114         886 :     if (reg.type() == LDefinition::BOX)
    2115         338 :         return true;
    2116             : #endif
    2117         548 :     return false;
    2118             : }
    2119             : 
    2120             : size_t
    2121         526 : BacktrackingAllocator::findFirstSafepoint(CodePosition pos, size_t startFrom)
    2122             : {
    2123         526 :     size_t i = startFrom;
    2124         788 :     for (; i < graph.numSafepoints(); i++) {
    2125         652 :         LInstruction* ins = graph.getSafepoint(i);
    2126         652 :         if (pos <= inputOf(ins))
    2127         521 :             break;
    2128             :     }
    2129         526 :     return i;
    2130             : }
    2131             : 
    2132             : bool
    2133           8 : BacktrackingAllocator::populateSafepoints()
    2134             : {
    2135           8 :     JitSpew(JitSpew_RegAlloc, "Populating Safepoints");
    2136             : 
    2137           8 :     size_t firstSafepoint = 0;
    2138             : 
    2139           8 :     MOZ_ASSERT(!vregs[0u].def());
    2140        1063 :     for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
    2141        1060 :         VirtualRegister& reg = vregs[i];
    2142             : 
    2143        1060 :         if (!reg.def() || (!IsTraceable(reg) && !IsSlotsOrElements(reg) && !IsNunbox(reg)))
    2144         534 :             continue;
    2145             : 
    2146         526 :         firstSafepoint = findFirstSafepoint(inputOf(reg.ins()), firstSafepoint);
    2147         526 :         if (firstSafepoint >= graph.numSafepoints())
    2148           5 :             break;
    2149             : 
    2150        1808 :         for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
    2151        1287 :             LiveRange* range = LiveRange::get(*iter);
    2152             : 
    2153        9606 :             for (size_t j = firstSafepoint; j < graph.numSafepoints(); j++) {
    2154        9530 :                 LInstruction* ins = graph.getSafepoint(j);
    2155             : 
    2156        9530 :                 if (!range->covers(inputOf(ins))) {
    2157        7927 :                     if (inputOf(ins) >= range->to())
    2158        1211 :                         break;
    2159       13462 :                     continue;
    2160             :                 }
    2161             : 
    2162             :                 // Include temps but not instruction outputs. Also make sure
    2163             :                 // MUST_REUSE_INPUT is not used with gcthings or nunboxes, or
    2164             :                 // we would have to add the input reg to this safepoint.
    2165        1603 :                 if (ins == reg.ins() && !reg.isTemp()) {
    2166           0 :                     DebugOnly<LDefinition*> def = reg.def();
    2167           0 :                     MOZ_ASSERT_IF(def->policy() == LDefinition::MUST_REUSE_INPUT,
    2168             :                                   def->type() == LDefinition::GENERAL ||
    2169             :                                   def->type() == LDefinition::INT32 ||
    2170             :                                   def->type() == LDefinition::FLOAT32 ||
    2171             :                                   def->type() == LDefinition::DOUBLE);
    2172           0 :                     continue;
    2173             :                 }
    2174             : 
    2175        1603 :                 LSafepoint* safepoint = ins->safepoint();
    2176             : 
    2177        1603 :                 LAllocation a = range->bundle()->allocation();
    2178        1603 :                 if (a.isGeneralReg() && ins->isCall())
    2179          30 :                     continue;
    2180             : 
    2181        1573 :                 switch (reg.type()) {
    2182             :                   case LDefinition::OBJECT:
    2183        1006 :                     if (!safepoint->addGcPointer(a))
    2184           0 :                         return false;
    2185        1006 :                     break;
    2186             :                   case LDefinition::SLOTS:
    2187           1 :                     if (!safepoint->addSlotsOrElementsPointer(a))
    2188           0 :                         return false;
    2189           1 :                     break;
    2190             : #ifdef JS_NUNBOX32
    2191             :                   case LDefinition::TYPE:
    2192             :                     if (!safepoint->addNunboxType(i, a))
    2193             :                         return false;
    2194             :                     break;
    2195             :                   case LDefinition::PAYLOAD:
    2196             :                     if (!safepoint->addNunboxPayload(i, a))
    2197             :                         return false;
    2198             :                     break;
    2199             : #else
    2200             :                   case LDefinition::BOX:
    2201         566 :                     if (!safepoint->addBoxedValue(a))
    2202           0 :                         return false;
    2203         566 :                     break;
    2204             : #endif
    2205             :                   default:
    2206           0 :                     MOZ_CRASH("Bad register type");
    2207             :                 }
    2208             :             }
    2209             :         }
    2210             :     }
    2211             : 
    2212           8 :     return true;
    2213             : }
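                     : 
                     : // Summary of the dispatch above: an allocation is recorded in the
                     : // safepoint according to the virtual register's type -- OBJECT as a GC
                     : // pointer, SLOTS as a slots/elements pointer, BOX as a boxed value on
                     : // 64-bit (punbox) builds, and TYPE/PAYLOAD as nunbox halves on 32-bit
                     : // builds. Allocations held in general registers are skipped at call
                     : // instructions, since calls clobber those registers.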
    2214             : 
    2215             : bool
    2216           8 : BacktrackingAllocator::annotateMoveGroups()
    2217             : {
    2218             :     // Annotate move groups in the LIR graph with any register that is not
    2219             :     // allocated at that point and can be used as a scratch register. This is
    2220             :     // only required for x86, as other platforms always have scratch registers
    2221             :     // available for use.
    2222             : #ifdef JS_CODEGEN_X86
    2223             :     LiveRange* range = LiveRange::FallibleNew(alloc(), 0, CodePosition(), CodePosition().next());
    2224             :     if (!range)
    2225             :         return false;
    2226             : 
    2227             :     for (size_t i = 0; i < graph.numBlocks(); i++) {
    2228             :         if (mir->shouldCancel("Backtracking Annotate Move Groups"))
    2229             :             return false;
    2230             : 
    2231             :         LBlock* block = graph.getBlock(i);
    2232             :         LInstruction* last = nullptr;
    2233             :         for (LInstructionIterator iter = block->begin(); iter != block->end(); ++iter) {
    2234             :             if (iter->isMoveGroup()) {
    2235             :                 CodePosition from = last ? outputOf(last) : entryOf(block);
    2236             :                 range->setTo(from.next());
    2237             :                 range->setFrom(from);
    2238             : 
    2239             :                 for (size_t i = 0; i < AnyRegister::Total; i++) {
    2240             :                     PhysicalRegister& reg = registers[i];
    2241             :                     if (reg.reg.isFloat() || !reg.allocatable)
    2242             :                         continue;
    2243             : 
    2244             :                     // This register is unavailable for use if (a) it is in use
    2245             :                     // by some live range immediately before the move group,
    2246             :                     // or (b) it is an operand in one of the group's moves. The
    2247             :                     // latter case handles live ranges which end immediately
    2248             :                     // before the move group or start immediately after.
    2249             :                     // For (b) we need to consider move groups immediately
    2250             :                     // preceding or following this one.
    2251             : 
    2252             :                     if (iter->toMoveGroup()->uses(reg.reg.gpr()))
    2253             :                         continue;
    2254             :                     bool found = false;
    2255             :                     LInstructionIterator niter(iter);
    2256             :                     for (niter++; niter != block->end(); niter++) {
    2257             :                         if (niter->isMoveGroup()) {
    2258             :                             if (niter->toMoveGroup()->uses(reg.reg.gpr())) {
    2259             :                                 found = true;
    2260             :                                 break;
    2261             :                             }
    2262             :                         } else {
    2263             :                             break;
    2264             :                         }
    2265             :                     }
    2266             :                     if (iter != block->begin()) {
    2267             :                         LInstructionIterator riter(iter);
    2268             :                         do {
    2269             :                             riter--;
    2270             :                             if (riter->isMoveGroup()) {
    2271             :                                 if (riter->toMoveGroup()->uses(reg.reg.gpr())) {
    2272             :                                     found = true;
    2273             :                                     break;
    2274             :                                 }
    2275             :                             } else {
    2276             :                                 break;
    2277             :                             }
    2278             :                         } while (riter != block->begin());
    2279             :                     }
    2280             : 
    2281             :                     LiveRange* existing;
    2282             :                     if (found || reg.allocations.contains(range, &existing))
    2283             :                         continue;
    2284             : 
    2285             :                     iter->toMoveGroup()->setScratchRegister(reg.reg.gpr());
    2286             :                     break;
    2287             :                 }
    2288             :             } else {
    2289             :                 last = *iter;
    2290             :             }
    2291             :         }
    2292             :     }
    2293             : #endif
    2294             : 
    2295           8 :     return true;
    2296             : }
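                     : 
                     : // Why this pass exists (illustrative, AT&T syntax): x86 has no
                     : // memory-to-memory mov encoding, so a stack-slot-to-stack-slot move
                     : // needs a scratch GPR, e.g.:
                     : //
                     : //   movl 8(%esp), %eax    # scratch <- source slot
                     : //   movl %eax, 16(%esp)   # destination slot <- scratch
                     : //
                     : // Annotating each move group with a register known to be free lets the
                     : // move resolver emit such sequences without spilling.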
    2297             : 
    2298             : /////////////////////////////////////////////////////////////////////
    2299             : // Debugging methods
    2300             : /////////////////////////////////////////////////////////////////////
    2301             : 
    2302             : #ifdef JS_JITSPEW
    2303             : 
    2304             : UniqueChars
    2305        4021 : LiveRange::toString() const
    2306             : {
    2307        8042 :     AutoEnterOOMUnsafeRegion oomUnsafe;
    2308             : 
    2309        4021 :     UniqueChars buf = JS_smprintf("v%u [%u,%u)", hasVreg() ? vreg() : 0, from().bits(), to().bits());
    2310             : 
    2311        4021 :     if (buf && bundle() && !bundle()->allocation().isBogus())
    2312           0 :         buf = JS_sprintf_append(Move(buf), " %s", bundle()->allocation().toString().get());
    2313             : 
    2314        4021 :     if (buf && hasDefinition())
    2315           0 :         buf = JS_sprintf_append(Move(buf), " (def)");
    2316             : 
    2317        4021 :     for (UsePositionIterator iter = usesBegin(); buf && iter; iter++)
    2318           0 :         buf = JS_sprintf_append(Move(buf), " %s@%u", iter->use()->toString().get(), iter->pos.bits());
    2319             : 
    2320        4021 :     if (!buf)
    2321           0 :         oomUnsafe.crash("LiveRange::toString()");
    2322             : 
    2323        8042 :     return buf;
    2324             : }
    2325             : 
    2326             : UniqueChars
    2327           0 : LiveBundle::toString() const
    2328             : {
    2329           0 :     AutoEnterOOMUnsafeRegion oomUnsafe;
    2330             : 
    2331             :     // Suppress -Wformat warning.
    2332           0 :     UniqueChars buf = JS_smprintf("%s", "");
    2333             : 
    2334           0 :     for (LiveRange::BundleLinkIterator iter = rangesBegin(); buf && iter; iter++) {
    2335           0 :         buf = JS_sprintf_append(Move(buf), "%s %s",
    2336           0 :                                 (iter == rangesBegin()) ? "" : " ##",
    2337           0 :                                 LiveRange::get(*iter)->toString().get());
    2338             :     }
    2339             : 
    2340           0 :     if (!buf)
    2341           0 :         oomUnsafe.crash("LiveBundle::toString()");
    2342             : 
    2343           0 :     return buf;
    2344             : }
    2345             : 
    2346             : #endif // JS_JITSPEW
    2347             : 
    2348             : void
    2349           0 : BacktrackingAllocator::dumpVregs()
    2350             : {
    2351             : #ifdef JS_JITSPEW
    2352           0 :     MOZ_ASSERT(!vregs[0u].hasRanges());
    2353             : 
    2354           0 :     fprintf(stderr, "Live ranges by virtual register:\n");
    2355             : 
    2356           0 :     for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
    2357           0 :         fprintf(stderr, "  ");
    2358           0 :         VirtualRegister& reg = vregs[i];
    2359           0 :         for (LiveRange::RegisterLinkIterator iter = reg.rangesBegin(); iter; iter++) {
    2360           0 :             if (iter != reg.rangesBegin())
    2361           0 :                 fprintf(stderr, " ## ");
    2362           0 :             fprintf(stderr, "%s", LiveRange::get(*iter)->toString().get());
    2363             :         }
    2364           0 :         fprintf(stderr, "\n");
    2365             :     }
    2366             : 
    2367           0 :     fprintf(stderr, "\nLive ranges by bundle:\n");
    2368             : 
    2369           0 :     for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
    2370           0 :         VirtualRegister& reg = vregs[i];
    2371           0 :         for (LiveRange::RegisterLinkIterator baseIter = reg.rangesBegin(); baseIter; baseIter++) {
    2372           0 :             LiveRange* range = LiveRange::get(*baseIter);
    2373           0 :             LiveBundle* bundle = range->bundle();
    2374           0 :             if (range == bundle->firstRange()) {
    2375           0 :                 fprintf(stderr, "  ");
    2376           0 :                 for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
    2377           0 :                     if (iter != bundle->rangesBegin())
    2378           0 :                         fprintf(stderr, " ## ");
    2379           0 :                     fprintf(stderr, "%s", LiveRange::get(*iter)->toString().get());
    2380             :                 }
    2381           0 :                 fprintf(stderr, "\n");
    2382             :             }
    2383             :         }
    2384             :     }
    2385             : #endif
    2386           0 : }
    2387             : 
    2388             : #ifdef JS_JITSPEW
    2389             : struct BacktrackingAllocator::PrintLiveRange
    2390             : {
    2391             :     bool& first_;
    2392             : 
    2393           0 :     explicit PrintLiveRange(bool& first) : first_(first) {}
    2394             : 
    2395           0 :     void operator()(const LiveRange* range)
    2396             :     {
    2397           0 :         if (first_)
    2398           0 :             first_ = false;
    2399             :         else
    2400           0 :             fprintf(stderr, " /");
    2401           0 :         fprintf(stderr, " %s", range->toString().get());
    2402           0 :     }
    2403             : };
    2404             : #endif
    2405             : 
    2406             : void
    2407           0 : BacktrackingAllocator::dumpAllocations()
    2408             : {
    2409             : #ifdef JS_JITSPEW
    2410           0 :     fprintf(stderr, "Allocations:\n");
    2411             : 
    2412           0 :     dumpVregs();
    2413             : 
    2414           0 :     fprintf(stderr, "Allocations by physical register:\n");
    2415             : 
    2416           0 :     for (size_t i = 0; i < AnyRegister::Total; i++) {
    2417           0 :         if (registers[i].allocatable && !registers[i].allocations.empty()) {
    2418           0 :             fprintf(stderr, "  %s:", AnyRegister::FromCode(i).name());
    2419           0 :             bool first = true;
    2420           0 :             registers[i].allocations.forEach(PrintLiveRange(first));
    2421           0 :             fprintf(stderr, "\n");
    2422             :         }
    2423             :     }
    2424             : 
    2425           0 :     fprintf(stderr, "\n");
    2426             : #endif // JS_JITSPEW
    2427           0 : }
    2428             : 
    2429             : ///////////////////////////////////////////////////////////////////////////////
    2430             : // Heuristic Methods
    2431             : ///////////////////////////////////////////////////////////////////////////////
    2432             : 
    2433             : size_t
    2434        6713 : BacktrackingAllocator::computePriority(LiveBundle* bundle)
    2435             : {
    2436             :     // The priority of a bundle is its total length, so that longer-lived
    2437             :     // bundles will be processed before shorter ones (even if the longer ones
    2438             :     // have a low spill weight). See processBundle().
    2439        6713 :     size_t lifetimeTotal = 0;
    2440             : 
    2441       23832 :     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
    2442       17119 :         LiveRange* range = LiveRange::get(*iter);
    2443       17119 :         lifetimeTotal += range->to() - range->from();
    2444             :     }
    2445             : 
    2446        6713 :     return lifetimeTotal;
    2447             : }
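                     : 
                     : // Worked example (hypothetical ranges): a bundle with ranges [10,20) and
                     : // [30,35) has priority (20-10) + (35-30) == 15, so it is processed before
                     : // a single-range bundle covering [40,44) (priority 4), regardless of
                     : // their relative spill weights.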
    2448             : 
    2449             : bool
    2450        2886 : BacktrackingAllocator::minimalDef(LiveRange* range, LNode* ins)
    2451             : {
    2452             :     // Whether this is a minimal range capturing a definition at ins.
    2453       10518 :     return (range->to() <= minimalDefEnd(ins).next()) &&
    2454        7632 :            ((!ins->isPhi() && range->from() == inputOf(ins)) || range->from() == outputOf(ins));
    2455             : }
    2456             : 
    2457             : bool
    2458        1486 : BacktrackingAllocator::minimalUse(LiveRange* range, UsePosition* use)
    2459             : {
    2460             :     // Whether this is a minimal range capturing |use|.
    2461        1486 :     LNode* ins = insData[use->pos];
    2462        5578 :     return (range->from() == inputOf(ins)) &&
    2463        4092 :            (range->to() == (use->use()->usedAtStart() ? outputOf(ins) : outputOf(ins).next()));
    2464             : }
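                     : 
                     : // Example (hypothetical positions): for a use at an instruction |ins|
                     : // with inputOf(ins) == 10 and outputOf(ins) == 11, the range is minimal
                     : // iff it is exactly [10,11) for a used-at-start use, or [10,12)
                     : // otherwise.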
    2465             : 
    2466             : bool
    2467        6736 : BacktrackingAllocator::minimalBundle(LiveBundle* bundle, bool* pfixed)
    2468             : {
    2469        6736 :     LiveRange::BundleLinkIterator iter = bundle->rangesBegin();
    2470        6736 :     LiveRange* range = LiveRange::get(*iter);
    2471             : 
    2472        6736 :     if (!range->hasVreg()) {
    2473           0 :         *pfixed = true;
    2474           0 :         return true;
    2475             :     }
    2476             : 
    2477             :     // If a bundle contains multiple ranges, splitAtAllRegisterUses will split
    2478             :     // each range into a separate bundle.
    2479        6736 :     if (++iter)
    2480        3225 :         return false;
    2481             : 
    2482        3511 :     if (range->hasDefinition()) {
    2483        2886 :         VirtualRegister& reg = vregs[range->vreg()];
    2484        2886 :         if (pfixed)
    2485        2853 :             *pfixed = reg.def()->policy() == LDefinition::FIXED && reg.def()->output()->isRegister();
    2486        2886 :         return minimalDef(range, reg.ins());
    2487             :     }
    2488             : 
    2489         625 :     bool fixed = false, minimal = false, multiple = false;
    2490             : 
    2491        4042 :     for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
    2492        3418 :         if (iter != range->usesBegin())
    2493        2793 :             multiple = true;
    2494             : 
    2495        3418 :         switch (iter->usePolicy()) {
    2496             :           case LUse::FIXED:
    2497         101 :             if (fixed)
    2498           1 :                 return false;
    2499         100 :             fixed = true;
    2500         100 :             if (minimalUse(range, *iter))
    2501          61 :                 minimal = true;
    2502         100 :             break;
    2503             : 
    2504             :           case LUse::REGISTER:
    2505        1386 :             if (minimalUse(range, *iter))
    2506         226 :                 minimal = true;
    2507        1386 :             break;
    2508             : 
    2509             :           default:
    2510        1931 :             break;
    2511             :         }
    2512             :     }
    2513             : 
    2514             :     // If a range contains a fixed use and at least one other use,
    2515             :     // splitAtAllRegisterUses will split each use into a different bundle.
    2516         624 :     if (multiple && fixed)
    2517          36 :         minimal = false;
    2518             : 
    2519         624 :     if (pfixed)
    2520         599 :         *pfixed = fixed;
    2521         624 :     return minimal;
    2522             : }
    2523             : 
    2524             : size_t
    2525        6492 : BacktrackingAllocator::computeSpillWeight(LiveBundle* bundle)
    2526             : {
    2527             :     // Minimal bundles have an extremely high spill weight, to ensure they
    2528             :     // can evict any other bundles and be allocated to a register.
    2529             :     bool fixed;
    2530        6492 :     if (minimalBundle(bundle, &fixed))
    2531        1217 :         return fixed ? 2000000 : 1000000;
    2532             : 
    2533        5275 :     size_t usesTotal = 0;
    2534        5275 :     fixed = false;
    2535             : 
    2536       18614 :     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
    2537       13339 :         LiveRange* range = LiveRange::get(*iter);
    2538             : 
    2539       13339 :         if (range->hasDefinition()) {
    2540        7125 :             VirtualRegister& reg = vregs[range->vreg()];
    2541        7125 :             if (reg.def()->policy() == LDefinition::FIXED && reg.def()->output()->isRegister()) {
    2542         121 :                 usesTotal += 2000;
    2543         121 :                 fixed = true;
    2544        7004 :             } else if (!reg.ins()->isPhi()) {
    2545        7004 :                 usesTotal += 2000;
    2546             :             }
    2547             :         }
    2548             : 
    2549       92425 :         for (UsePositionIterator iter = range->usesBegin(); iter; iter++) {
    2550       79086 :             switch (iter->usePolicy()) {
    2551             :               case LUse::ANY:
    2552         131 :                 usesTotal += 1000;
    2553         131 :                 break;
    2554             : 
    2555             :               case LUse::FIXED:
    2556         200 :                 fixed = true;
    2557             :                 MOZ_FALLTHROUGH;
    2558             :               case LUse::REGISTER:
    2559       10134 :                 usesTotal += 2000;
    2560       10134 :                 break;
    2561             : 
    2562             :               case LUse::KEEPALIVE:
    2563       68821 :                 break;
    2564             : 
    2565             :               default:
    2566             :                 // Note: RECOVERED_INPUT will not appear in UsePositionIterator.
    2567           0 :                 MOZ_CRASH("Bad use");
    2568             :             }
    2569             :         }
    2570             :     }
    2571             : 
    2572             :     // Bundles with fixed uses are given a higher spill weight, since they must
    2573             :     // be allocated to a specific register.
    2574        5275 :     if (testbed && fixed)
    2575           0 :         usesTotal *= 2;
    2576             : 
    2577             :     // Compute spill weight as a use density, lowering the weight for
    2578             :     // long-lived bundles with relatively few uses.
    2579        5275 :     size_t lifetimeTotal = computePriority(bundle);
    2580        5275 :     return lifetimeTotal ? usesTotal / lifetimeTotal : 0;
    2581             : }
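                     : 
                     : // Worked example (hypothetical numbers): a non-minimal bundle with one
                     : // non-phi definition (2000) and three REGISTER uses (3 * 2000 == 6000)
                     : // whose ranges span 40 code positions gets weight 8000 / 40 == 200; any
                     : // minimal bundle (weight 1000000 or 2000000) can therefore evict it.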
    2582             : 
    2583             : size_t
    2584        5112 : BacktrackingAllocator::maximumSpillWeight(const LiveBundleVector& bundles)
    2585             : {
    2586        5112 :     size_t maxWeight = 0;
    2587       11508 :     for (size_t i = 0; i < bundles.length(); i++)
    2588        6396 :         maxWeight = Max(maxWeight, computeSpillWeight(bundles[i]));
    2589        5112 :     return maxWeight;
    2590             : }
    2591             : 
    2592             : bool
    2593         244 : BacktrackingAllocator::trySplitAcrossHotcode(LiveBundle* bundle, bool* success)
    2594             : {
    2595             :     // If this bundle has portions that are hot and portions that are cold,
    2596             :     // split it at the boundaries between hot and cold code.
    2597             : 
    2598         244 :     LiveRange* hotRange = nullptr;
    2599             : 
    2600         721 :     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
    2601         625 :         LiveRange* range = LiveRange::get(*iter);
    2602         625 :         if (hotcode.contains(range, &hotRange))
    2603         148 :             break;
    2604             :     }
    2605             : 
    2606             :     // Don't split if there is no hot code in the bundle.
    2607         244 :     if (!hotRange) {
    2608          96 :         JitSpew(JitSpew_RegAlloc, "  bundle does not contain hot code");
    2609          96 :         return true;
    2610             :     }
    2611             : 
    2612             :     // Don't split if there is no cold code in the bundle.
    2613         148 :     bool coldCode = false;
    2614         536 :     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
    2615         466 :         LiveRange* range = LiveRange::get(*iter);
    2616         466 :         if (!hotRange->contains(range)) {
    2617          78 :             coldCode = true;
    2618          78 :             break;
    2619             :         }
    2620             :     }
    2621         148 :     if (!coldCode) {
    2622          70 :         JitSpew(JitSpew_RegAlloc, "  bundle does not contain cold code");
    2623          70 :         return true;
    2624             :     }
    2625             : 
    2626          78 :     JitSpew(JitSpew_RegAlloc, "  split across hot range %s", hotRange->toString().get());
    2627             : 
    2628             :     // Tweak the splitting method when compiling wasm code to look at actual
    2629             :     // uses within the hot/cold code. This heuristic is in place because the
    2630             :     // mechanism below regresses several asm.js tests. Hopefully that will be
    2631             :     // fixed soon and this special case removed. See bug 948838.
    2632          78 :     if (compilingWasm()) {
    2633           0 :         SplitPositionVector splitPositions;
    2634           0 :         if (!splitPositions.append(hotRange->from()) || !splitPositions.append(hotRange->to()))
    2635           0 :             return false;
    2636           0 :         *success = true;
    2637           0 :         return splitAt(bundle, splitPositions);
    2638             :     }
    2639             : 
    2640          78 :     LiveBundle* hotBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
    2641          78 :                                                     bundle->spillParent());
    2642          78 :     if (!hotBundle)
    2643           0 :         return false;
    2644          78 :     LiveBundle* preBundle = nullptr;
    2645          78 :     LiveBundle* postBundle = nullptr;
    2646          78 :     LiveBundle* coldBundle = nullptr;
    2647             : 
    2648          78 :     if (testbed) {
    2649           0 :         coldBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), bundle->spillParent());
    2650           0 :         if (!coldBundle)
    2651           0 :             return false;
    2652             :     }
    2653             : 
    2654             :     // Accumulate the ranges of hot and cold code in the bundle. Note that
    2655             :     // we are only comparing with the single hot range found, so the cold code
    2656             :     // may contain separate hot ranges.
    2657         960 :     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
    2658         882 :         LiveRange* range = LiveRange::get(*iter);
    2659         882 :         LiveRange::Range hot, coldPre, coldPost;
    2660         882 :         range->intersect(hotRange, &coldPre, &hot, &coldPost);
    2661             : 
    2662         882 :         if (!hot.empty()) {
    2663         304 :             if (!hotBundle->addRangeAndDistributeUses(alloc(), range, hot.from, hot.to))
    2664           0 :                 return false;
    2665             :         }
    2666             : 
    2667         882 :         if (!coldPre.empty()) {
    2668         183 :             if (testbed) {
    2669           0 :                 if (!coldBundle->addRangeAndDistributeUses(alloc(), range, coldPre.from, coldPre.to))
    2670           0 :                     return false;
    2671             :             } else {
    2672         183 :                 if (!preBundle) {
    2673          62 :                     preBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
    2674             :                                                         bundle->spillParent());
    2675          62 :                     if (!preBundle)
    2676           0 :                         return false;
    2677             :                 }
    2678         183 :                 if (!preBundle->addRangeAndDistributeUses(alloc(), range, coldPre.from, coldPre.to))
    2679           0 :                     return false;
    2680             :             }
    2681             :         }
    2682             : 
    2683         882 :         if (!coldPost.empty()) {
    2684         449 :             if (testbed) {
    2685           0 :                 if (!coldBundle->addRangeAndDistributeUses(alloc(), range, coldPost.from, coldPost.to))
    2686           0 :                     return false;
    2687             :             } else {
    2688         449 :                 if (!postBundle) {
    2689          68 :                     postBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
    2690             :                                                          bundle->spillParent());
    2691          68 :                     if (!postBundle)
    2692           0 :                         return false;
    2693             :                 }
    2694         449 :                 if (!postBundle->addRangeAndDistributeUses(alloc(), range, coldPost.from, coldPost.to))
    2695           0 :                     return false;
    2696             :             }
    2697             :         }
    2698             :     }
    2699             : 
    2700          78 :     MOZ_ASSERT(hotBundle->numRanges() != 0);
    2701             : 
    2702         156 :     LiveBundleVector newBundles;
    2703          78 :     if (!newBundles.append(hotBundle))
    2704           0 :         return false;
    2705             : 
    2706          78 :     if (testbed) {
    2707           0 :         MOZ_ASSERT(coldBundle->numRanges() != 0);
    2708           0 :         if (!newBundles.append(coldBundle))
    2709           0 :             return false;
    2710             :     } else {
    2711          78 :         MOZ_ASSERT(preBundle || postBundle);
    2712          78 :         if (preBundle && !newBundles.append(preBundle))
    2713           0 :             return false;
    2714          78 :         if (postBundle && !newBundles.append(postBundle))
    2715           0 :             return false;
    2716             :     }
    2717             : 
    2718          78 :     *success = true;
    2719          78 :     return splitAndRequeueBundles(bundle, newBundles);
    2720             : }
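                     : 
                     : // Example (hypothetical ranges): for a bundle range [5,50) and hot range
                     : // [20,30), intersect() above yields coldPre [5,20), hot [20,30) and
                     : // coldPost [30,50); each piece's uses are distributed to the pre, hot
                     : // and post bundles respectively.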
    2721             : 
    2722             : bool
    2723          44 : BacktrackingAllocator::trySplitAfterLastRegisterUse(LiveBundle* bundle, LiveBundle* conflict,
    2724             :                                                     bool* success)
    2725             : {
    2726             :     // If this bundle's later uses do not require it to be in a register,
    2727             :     // split it after the last use which does require a register. If conflict
    2728             :     // is specified, only consider register uses before the conflict starts.
    2729             : 
    2730          44 :     CodePosition lastRegisterFrom, lastRegisterTo, lastUse;
    2731             : 
    2732         150 :     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
    2733         106 :         LiveRange* range = LiveRange::get(*iter);
    2734             : 
    2735             :         // If the range defines a register, consider that a register use for
    2736             :         // our purposes here.
    2737         106 :         if (isRegisterDefinition(range)) {
    2738          56 :             CodePosition spillStart = minimalDefEnd(insData[range->from()]).next();
    2739          56 :             if (!conflict || spillStart < conflict->firstRange()->from()) {
    2740          24 :                 lastUse = lastRegisterFrom = range->from();
    2741          24 :                 lastRegisterTo = spillStart;
    2742             :             }
    2743             :         }
    2744             : 
    2745         659 :         for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
    2746         553 :             LNode* ins = insData[iter->pos];
    2747             : 
    2748             :             // Uses in the bundle should be sorted.
    2749         553 :             MOZ_ASSERT(iter->pos >= lastUse);
    2750         553 :             lastUse = inputOf(ins);
    2751             : 
    2752         553 :             if (!conflict || outputOf(ins) < conflict->firstRange()->from()) {
    2753          79 :                 if (isRegisterUse(*iter, ins, /* considerCopy = */ true)) {
    2754          12 :                     lastRegisterFrom = inputOf(ins);
    2755          12 :                     lastRegisterTo = iter->pos.next();
    2756             :                 }
    2757             :             }
    2758             :         }
    2759             :     }
    2760             : 
    2761             :     // Can't trim non-register uses off the end by splitting.
    2762          44 :     if (!lastRegisterFrom.bits()) {
    2763          17 :         JitSpew(JitSpew_RegAlloc, "  bundle has no register uses");
    2764          17 :         return true;
    2765             :     }
    2766          27 :     if (lastUse < lastRegisterTo) {
    2767           5 :         JitSpew(JitSpew_RegAlloc, "  bundle's last use is a register use");
    2768           5 :         return true;
    2769             :     }
    2770             : 
    2771          22 :     JitSpew(JitSpew_RegAlloc, "  split after last register use at %u",
    2772          22 :             lastRegisterTo.bits());
    2773             : 
    2774          44 :     SplitPositionVector splitPositions;
    2775          22 :     if (!splitPositions.append(lastRegisterTo))
    2776           0 :         return false;
    2777          22 :     *success = true;
    2778          22 :     return splitAt(bundle, splitPositions);
    2779             : }
    2780             : 
    2781             : bool
    2782          55 : BacktrackingAllocator::trySplitBeforeFirstRegisterUse(LiveBundle* bundle, LiveBundle* conflict, bool* success)
    2783             : {
    2784             :     // If this bundle's earlier uses do not require it to be in a register,
    2785             :     // split it before the first use which does require a register. If conflict
    2786             :     // is specified, only consider register uses after the conflict ends.
    2787             : 
    2788          55 :     if (isRegisterDefinition(bundle->firstRange())) {
    2789          36 :         JitSpew(JitSpew_RegAlloc, "  bundle is defined by a register");
    2790          36 :         return true;
    2791             :     }
    2792          19 :     if (!bundle->firstRange()->hasDefinition()) {
    2793           8 :         JitSpew(JitSpew_RegAlloc, "  bundle does not have a definition");
    2794           8 :         return true;
    2795             :     }
    2796             : 
    2797          11 :     CodePosition firstRegisterFrom;
    2798             : 
    2799          11 :     CodePosition conflictEnd;
    2800          11 :     if (conflict) {
    2801           0 :         for (LiveRange::BundleLinkIterator iter = conflict->rangesBegin(); iter; iter++) {
    2802           0 :             LiveRange* range = LiveRange::get(*iter);
    2803           0 :             if (range->to() > conflictEnd)
    2804           0 :                 conflictEnd = range->to();
    2805             :         }
    2806             :     }
    2807             : 
    2808          20 :     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
    2809          20 :         LiveRange* range = LiveRange::get(*iter);
    2810             : 
    2811          20 :         if (!conflict || range->from() > conflictEnd) {
    2812          20 :             if (range->hasDefinition() && isRegisterDefinition(range)) {
    2813           0 :                 firstRegisterFrom = range->from();
    2814           0 :                 break;
    2815             :             }
    2816             :         }
    2817             : 
    2818          84 :         for (UsePositionIterator iter(range->usesBegin()); iter; iter++) {
    2819          75 :             LNode* ins = insData[iter->pos];
    2820             : 
    2821          75 :             if (!conflict || outputOf(ins) >= conflictEnd) {
    2822          75 :                 if (isRegisterUse(*iter, ins, /* considerCopy = */ true)) {
    2823          11 :                     firstRegisterFrom = inputOf(ins);
    2824          11 :                     break;
    2825             :                 }
    2826             :             }
    2827             :         }
    2828          20 :         if (firstRegisterFrom.bits())
    2829          11 :             break;
    2830             :     }
    2831             : 
    2832          11 :     if (!firstRegisterFrom.bits()) {
    2833             :         // Can't trim non-register uses off the beginning by splitting.
    2834           0 :         JitSpew(JitSpew_RegAlloc, "  bundle has no register uses");
    2835           0 :         return true;
    2836             :     }
    2837             : 
    2838          11 :     JitSpew(JitSpew_RegAlloc, "  split before first register use at %u",
    2839          11 :             firstRegisterFrom.bits());
    2840             : 
    2841          22 :     SplitPositionVector splitPositions;
    2842          11 :     if (!splitPositions.append(firstRegisterFrom))
    2843           0 :         return false;
    2844          11 :     *success = true;
    2845          11 :     return splitAt(bundle, splitPositions);
    2846             : }
    2847             : 
    2848             : // When splitting a bundle according to a list of split positions, return
    2849             : // whether a use or range at |pos| should go in a different bundle than the
    2850             : // one used at the last position this was called for.
    2851             : static bool
    2852         990 : UseNewBundle(const SplitPositionVector& splitPositions, CodePosition pos,
    2853             :              size_t* activeSplitPosition)
    2854             : {
    2855         990 :     if (splitPositions.empty()) {
    2856             :         // When the split positions are empty we are splitting at all uses.
    2857          69 :         return true;
    2858             :     }
    2859             : 
    2860         921 :     if (*activeSplitPosition == splitPositions.length()) {
    2861             :         // We've advanced past all split positions.
    2862         124 :         return false;
    2863             :     }
    2864             : 
    2865         797 :     if (splitPositions[*activeSplitPosition] > pos) {
    2866             :         // We haven't gotten to the next split position yet.
    2867         607 :         return false;
    2868             :     }
    2869             : 
    2870             :     // We've advanced past the next split position; find the next one at
    2871             :     // which we should split.
    2872        1087 :     while (*activeSplitPosition < splitPositions.length() &&
    2873         365 :            splitPositions[*activeSplitPosition] <= pos)
    2874             :     {
    2875         266 :         (*activeSplitPosition)++;
    2876             :     }
    2877         190 :     return true;
    2878             : }
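                     : 
                     : // Usage example (hypothetical positions): with splitPositions == {10, 30}
                     : // and *activeSplitPosition starting at 0, successive calls at positions
                     : // 5, 12, 20 and 35 return false, true, false and true: a new bundle is
                     : // started each time the scan crosses a split position.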
    2879             : 
    2880             : static bool
    2881         536 : HasPrecedingRangeSharingVreg(LiveBundle* bundle, LiveRange* range)
    2882             : {
    2883         536 :     MOZ_ASSERT(range->bundle() == bundle);
    2884             : 
    2885         728 :     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
    2886         728 :         LiveRange* prevRange = LiveRange::get(*iter);
    2887         728 :         if (prevRange == range)
    2888        1012 :             return false;
    2889         252 :         if (prevRange->vreg() == range->vreg())
    2890          60 :             return true;
    2891             :     }
    2892             : 
    2893           0 :     MOZ_CRASH();
    2894             : }
    2895             : 
    2896             : static bool
    2897         431 : HasFollowingRangeSharingVreg(LiveBundle* bundle, LiveRange* range)
    2898             : {
    2899         431 :     MOZ_ASSERT(range->bundle() == bundle);
    2900             : 
    2901         431 :     bool foundRange = false;
    2902        1374 :     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
    2903        1003 :         LiveRange* prevRange = LiveRange::get(*iter);
    2904        1003 :         if (foundRange && prevRange->vreg() == range->vreg())
    2905          60 :             return true;
    2906         943 :         if (prevRange == range)
    2907         431 :             foundRange = true;
    2908             :     }
    2909             : 
    2910             :     // Split the bundle at the given split positions. Register uses with no
    2911             :     // intervening split positions are consolidated into the same bundle. If
    2912             :     // the list of split positions is empty, then all register uses are placed
    2913             :     // in minimal bundles.
    2914             : bool
    2915         166 : BacktrackingAllocator::splitAt(LiveBundle* bundle, const SplitPositionVector& splitPositions)
    2916             : {
    2917             :     // Split the bundle at the given split points. Register uses which have no
    2918             :     // intervening split points are consolidated into the same bundle. If the
    2919             :     // list of split points is empty, then all register uses are placed in
    2920             :     // minimal bundles.
    2921             : 
    2922             :     // splitPositions should be sorted.
    2923         365 :     for (size_t i = 1; i < splitPositions.length(); ++i)
    2924         199 :         MOZ_ASSERT(splitPositions[i-1] < splitPositions[i]);
    2925             : 
    2926             :     // We don't need to create a new spill bundle if there already is one.
    2927         166 :     bool spillBundleIsNew = false;
    2928         166 :     LiveBundle* spillBundle = bundle->spillParent();
    2929         166 :     if (!spillBundle) {
    2930         158 :         spillBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), nullptr);
    2931         158 :         if (!spillBundle)
    2932           0 :             return false;
    2933         158 :         spillBundleIsNew = true;
    2934             : 
    2935         779 :         for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
    2936         621 :             LiveRange* range = LiveRange::get(*iter);
    2937             : 
    2938         621 :             CodePosition from = range->from();
    2939         621 :             if (isRegisterDefinition(range))
    2940         189 :                 from = minimalDefEnd(insData[from]).next();
    2941             : 
    2942         621 :             if (from < range->to()) {
    2943         621 :                 if (!spillBundle->addRange(alloc(), range->vreg(), from, range->to()))
    2944           0 :                     return false;
    2945             : 
    2946         621 :                 if (range->hasDefinition() && !isRegisterDefinition(range))
    2947          21 :                     spillBundle->lastRange()->setHasDefinition();
    2948             :             }
    2949             :         }
    2950             :     }
    2951             : 
    2952         332 :     LiveBundleVector newBundles;
    2953             : 
    2954             :     // The bundle to which ranges are currently being added.
    2955         166 :     LiveBundle* activeBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), spillBundle);
    2956         166 :     if (!activeBundle || !newBundles.append(activeBundle))
    2957           0 :         return false;
    2958             : 
    2959             :     // State for use by UseNewBundle.
    2960         166 :     size_t activeSplitPosition = 0;
    2961             : 
    2962             :     // Make new bundles according to the split positions, and distribute ranges
    2963             :     // and uses to them.
    2964         798 :     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
    2965         632 :         LiveRange* range = LiveRange::get(*iter);
    2966             : 
    2967         632 :         if (UseNewBundle(splitPositions, range->from(), &activeSplitPosition)) {
    2968         164 :             activeBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(), spillBundle);
    2969         164 :             if (!activeBundle || !newBundles.append(activeBundle))
    2970           0 :                 return false;
    2971             :         }
    2972             : 
    2973         632 :         LiveRange* activeRange = LiveRange::FallibleNew(alloc(), range->vreg(),
    2974         632 :                                                         range->from(), range->to());
    2975         632 :         if (!activeRange)
    2976           0 :             return false;
    2977         632 :         activeBundle->addRange(activeRange);
    2978             : 
    2979         632 :         if (isRegisterDefinition(range))
    2980         191 :             activeRange->setHasDefinition();
    2981             : 
    2982        7460 :         while (range->hasUses()) {
    2983        3414 :             UsePosition* use = range->popUse();
    2984        3414 :             LNode* ins = insData[use->pos];
    2985             : 
    2986             :             // Any uses of a register that appear before its definition has
    2987             :             // finished must be associated with the range for that definition.
    2988        3414 :             if (isRegisterDefinition(range) && use->pos <= minimalDefEnd(insData[range->from()])) {
    2989          14 :                 activeRange->addUse(use);
    2990        3400 :             } else if (isRegisterUse(use, ins)) {
    2991             :                 // Place this register use into a different bundle from the
    2992             :                 // last one if there are any split points between the two uses.
    2993             :                 // UseNewBundle always returns true if we are splitting at all
    2994             :                 // register uses, but we can still reuse the last range and
    2995             :                 // bundle if they have uses at the same position, except when
    2996             :                 // either use is fixed (the two uses might require incompatible
    2997             :                 // registers).
    2998        1229 :                 if (UseNewBundle(splitPositions, use->pos, &activeSplitPosition) &&
    2999         165 :                     (!activeRange->hasUses() ||
    3000         463 :                      activeRange->usesBegin()->pos != use->pos ||
    3001         358 :                      activeRange->usesBegin()->usePolicy() == LUse::FIXED ||
    3002           0 :                      use->usePolicy() == LUse::FIXED))
    3003             :                 {
    3004          95 :                     activeBundle = LiveBundle::FallibleNew(alloc(), bundle->spillSet(),
    3005             :                                                            spillBundle);
    3006          95 :                     if (!activeBundle || !newBundles.append(activeBundle))
    3007           0 :                         return false;
    3008          95 :                     activeRange = LiveRange::FallibleNew(alloc(), range->vreg(),
    3009          95 :                                                          range->from(), range->to());
    3010          95 :                     if (!activeRange)
    3011           0 :                         return false;
    3012          95 :                     activeBundle->addRange(activeRange);
    3013             :                 }
    3014             : 
    3015         358 :                 activeRange->addUse(use);
    3016             :             } else {
    3017        3042 :                 MOZ_ASSERT(spillBundleIsNew);
    3018        3042 :                 spillBundle->rangeFor(use->pos)->addUse(use);
    3019             :             }
    3020             :         }
    3021             :     }
    3022             : 
    3023         332 :     LiveBundleVector filteredBundles;
    3024             : 
    3025             :     // Trim the ends of ranges in each new bundle when there are no other
    3026             :     // earlier or later ranges in the same bundle with the same vreg.
    3027        1182 :     for (size_t i = 0; i < newBundles.length(); i++) {
    3028         425 :         LiveBundle* bundle = newBundles[i];
    3029             : 
    3030        1152 :         for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; ) {
    3031         727 :             LiveRange* range = LiveRange::get(*iter);
    3032             : 
    3033         727 :             if (!range->hasDefinition()) {
    3034         536 :                 if (!HasPrecedingRangeSharingVreg(bundle, range)) {
    3035         476 :                     if (range->hasUses()) {
    3036         180 :                         UsePosition* use = *range->usesBegin();
    3037         180 :                         range->setFrom(inputOf(insData[use->pos]));
    3038             :                     } else {
    3039         296 :                         bundle->removeRangeAndIncrementIterator(iter);
    3040         296 :                         continue;
    3041             :                     }
    3042             :                 }
    3043             :             }
    3044             : 
    3045         431 :             if (!HasFollowingRangeSharingVreg(bundle, range)) {
    3046         371 :                 if (range->hasUses()) {
    3047         199 :                     UsePosition* use = range->lastUse();
    3048         199 :                     range->setTo(use->pos.next());
    3049         172 :                 } else if (range->hasDefinition()) {
    3050         143 :                     range->setTo(minimalDefEnd(insData[range->from()]).next());
    3051             :                 } else {
    3052          29 :                     bundle->removeRangeAndIncrementIterator(iter);
    3053          29 :                     continue;
    3054             :                 }
    3055             :             }
    3056             : 
    3057         402 :             iter++;
    3058             :         }
    3059             : 
    3060         425 :         if (bundle->hasRanges() && !filteredBundles.append(bundle))
    3061           0 :             return false;
    3062             :     }
    3063             : 
    3064         166 :     if (spillBundleIsNew && !filteredBundles.append(spillBundle))
    3065           0 :         return false;
    3066             : 
    3067         166 :     return splitAndRequeueBundles(bundle, filteredBundles);
    3068             : }
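
The consolidation rule stated at the top of splitAt can be sketched on its
own: register uses with no intervening split point end up in the same
bundle, and an empty split list yields one (minimal) bundle per use. This
is a simplified model with integer positions, not the allocator's types.

    #include <cstdio>
    #include <cstddef>
    #include <vector>

    // Partition sorted use positions into groups separated by split points.
    static std::vector<std::vector<unsigned>>
    PartitionUses(const std::vector<unsigned>& uses,    // sorted use positions
                  const std::vector<unsigned>& splits)  // sorted split positions
    {
        std::vector<std::vector<unsigned>> bundles;
        size_t cursor = 0;
        for (unsigned use : uses) {
            bool crossed = splits.empty();              // Empty => always split.
            while (cursor < splits.size() && splits[cursor] <= use) {
                cursor++;
                crossed = true;
            }
            if (crossed || bundles.empty())
                bundles.emplace_back();
            bundles.back().push_back(use);
        }
        return bundles;
    }

    int main()
    {
        // Uses at 5, 12, 14, 30 with splits at 10 and 20 give three groups:
        // {5}, {12, 14}, {30}.
        auto groups = PartitionUses({5, 12, 14, 30}, {10, 20});
        for (const auto& group : groups) {
            for (unsigned use : group)
                printf("%u ", use);
            printf("| ");
        }
        printf("\n");
        return 0;
    }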
    3069             : 
    3070             : bool
    3071         111 : BacktrackingAllocator::splitAcrossCalls(LiveBundle* bundle)
    3072             : {
    3073             :     // Split the bundle to separate register uses and non-register uses and
    3074             :     // allow the vreg to be spilled across its range.
    3075             : 
    3076             :     // Find the locations of all calls in the bundle's range.
    3077         222 :     SplitPositionVector callPositions;
    3078         614 :     for (LiveRange::BundleLinkIterator iter = bundle->rangesBegin(); iter; iter++) {
    3079         503 :         LiveRange* range = LiveRange::get(*iter);
    3080         503 :         CallRange searchRange(range->from(), range->to());
    3081             :         CallRange* callRange;
    3082         503 :         if (!callRanges.contains(&searchRange, &callRange)) {
    3083             :             // There are no calls inside this range.
    3084         320 :             continue;
    3085             :         }
    3086         183 :         MOZ_ASSERT(range->covers(callRange->range.from));
    3087             : 
    3088             :         // The search above returns an arbitrary call within the range. Walk
    3089             :         // backwards to find the first call in the range.
    3090        1146 :         for (CallRangeList::reverse_iterator riter = callRangesList.rbegin(callRange);
    3091         764 :              riter != callRangesList.rend();
    3092             :              ++riter)
    3093             :         {
    3094         348 :             CodePosition pos = riter->range.from;
    3095         348 :             if (range->covers(pos))
    3096         199 :                 callRange = *riter;
    3097             :             else
    3098         149 :                 break;
    3099             :         }
    3100             : 
    3101             :         // Add all call positions within the range, by walking forwards.
    3102        1479 :         for (CallRangeList::iterator iter = callRangesList.begin(callRange);
    3103         986 :              iter != callRangesList.end();
    3104             :              ++iter)
    3105             :         {
    3106         468 :             CodePosition pos = iter->range.from;
    3107         468 :             if (!range->covers(pos))
    3108         158 :                 break;
    3109             : 
    3110             :             // Calls at the beginning of the range are ignored; there is no splitting to do.
    3111         310 :             if (range->covers(pos.previous())) {
    3112         310 :                 MOZ_ASSERT_IF(callPositions.length(), pos > callPositions.back());
    3113         310 :                 if (!callPositions.append(pos))
    3114           0 :                     return false;
    3115             :             }
    3116             :         }
    3117             :     }
    3118         111 :     MOZ_ASSERT(callPositions.length());
    3119             : 
    3120             : #ifdef JS_JITSPEW
    3121         111 :     JitSpewStart(JitSpew_RegAlloc, "  split across calls at ");
    3122         421 :     for (size_t i = 0; i < callPositions.length(); ++i)
    3123         310 :         JitSpewCont(JitSpew_RegAlloc, "%s%u", i != 0 ? ", " : "", callPositions[i].bits());
    3124         111 :     JitSpewFin(JitSpew_RegAlloc);
    3125             : #endif
    3126             : 
    3127         111 :     return splitAt(bundle, callPositions);
    3128             : }
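
The harvest in splitAcrossCalls amounts to collecting the sorted call
positions a range covers while ignoring a call at the very start of the
range. A minimal sketch with integer positions and a half-open range
[from, to) follows; the real code instead seeds a bidirectional walk from a
lookup in the callRanges tree rather than scanning every call.

    #include <cassert>
    #include <vector>

    static std::vector<unsigned>
    CallPositionsInRange(const std::vector<unsigned>& calls,  // sorted call positions
                         unsigned from, unsigned to)
    {
        std::vector<unsigned> positions;
        for (unsigned pos : calls) {
            if (pos < from || pos >= to)
                continue;          // Call is outside the range.
            if (pos == from)
                continue;          // Call at the range start: nothing to split.
            positions.push_back(pos);
        }
        return positions;
    }

    int main()
    {
        std::vector<unsigned> calls{3, 8, 15, 22};
        auto inRange = CallPositionsInRange(calls, 8, 20);
        assert(inRange.size() == 1 && inRange[0] == 15);  // 8 sits at the start.
        return 0;
    }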
    3129             : 
    3130             : bool
    3131         244 : BacktrackingAllocator::chooseBundleSplit(LiveBundle* bundle, bool fixed, LiveBundle* conflict)
    3132             : {
    3133         244 :     bool success = false;
    3134             : 
    3135         244 :     if (!trySplitAcrossHotcode(bundle, &success))
    3136           0 :         return false;
    3137         244 :     if (success)
    3138          78 :         return true;
    3139             : 
    3140         166 :     if (fixed)
    3141         111 :         return splitAcrossCalls(bundle);
    3142             : 
    3143          55 :     if (!trySplitBeforeFirstRegisterUse(bundle, conflict, &success))
    3144           0 :         return false;
    3145          55 :     if (success)
    3146          11 :         return true;
    3147             : 
    3148          44 :     if (!trySplitAfterLastRegisterUse(bundle, conflict, &success))
    3149           0 :         return false;
    3150          44 :     if (success)
    3151          22 :         return true;
    3152             : 
    3153             :     // Split at all register uses.
    3154          44 :     SplitPositionVector emptyPositions;
    3155          22 :     return splitAt(bundle, emptyPositions);
    3156             : }

Generated by: LCOV version 1.13