LCOV - code coverage report
Current view: top level - js/src/jit/shared - CodeGenerator-shared.cpp (source / functions)
Test: output.info
Date: 2017-07-14 16:53:18
Coverage:              Hit    Total    Coverage
    Lines:             292      928      31.5 %
    Functions:          37       74      50.0 %
Legend: Lines: hit | not hit

          Line data    Source code
       1             : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
       2             :  * vim: set ts=8 sts=4 et sw=4 tw=99:
       3             :  * This Source Code Form is subject to the terms of the Mozilla Public
       4             :  * License, v. 2.0. If a copy of the MPL was not distributed with this
       5             :  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
       6             : 
       7             : #include "jit/shared/CodeGenerator-shared-inl.h"
       8             : 
       9             : #include "mozilla/DebugOnly.h"
      10             : #include "mozilla/SizePrintfMacros.h"
      11             : 
      12             : #include "jit/CompactBuffer.h"
      13             : #include "jit/IonCaches.h"
      14             : #include "jit/JitcodeMap.h"
      15             : #include "jit/JitSpewer.h"
      16             : #include "jit/MacroAssembler.h"
      17             : #include "jit/MIR.h"
      18             : #include "jit/MIRGenerator.h"
      19             : #include "jit/OptimizationTracking.h"
      20             : #include "js/Conversions.h"
      21             : #include "vm/TraceLogging.h"
      22             : 
      23             : #include "jit/JitFrames-inl.h"
      24             : #include "jit/MacroAssembler-inl.h"
      25             : 
      26             : using namespace js;
      27             : using namespace js::jit;
      28             : 
      29             : using mozilla::BitwiseCast;
      30             : using mozilla::DebugOnly;
      31             : 
      32             : namespace js {
      33             : namespace jit {
      34             : 
      35             : MacroAssembler&
      36           8 : CodeGeneratorShared::ensureMasm(MacroAssembler* masmArg)
      37             : {
      38           8 :     if (masmArg)
      39           0 :         return *masmArg;
      40           8 :     maybeMasm_.emplace();
      41           8 :     return *maybeMasm_;
      42             : }
      43             : 
      44           8 : CodeGeneratorShared::CodeGeneratorShared(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masmArg)
      45             :   : maybeMasm_(),
      46           8 :     masm(ensureMasm(masmArg)),
      47             :     gen(gen),
      48             :     graph(*graph),
      49             :     current(nullptr),
      50             :     snapshots_(),
      51             :     recovers_(),
      52             :     deoptTable_(nullptr),
      53             : #ifdef DEBUG
      54             :     pushedArgs_(0),
      55             : #endif
      56             :     lastOsiPointOffset_(0),
      57           8 :     safepoints_(graph->totalSlotCount(), (gen->info().nargs() + 1) * sizeof(Value)),
      58             :     returnLabel_(),
      59             :     stubSpace_(),
      60             :     nativeToBytecodeMap_(nullptr),
      61             :     nativeToBytecodeMapSize_(0),
      62             :     nativeToBytecodeTableOffset_(0),
      63             :     nativeToBytecodeNumRegions_(0),
      64             :     nativeToBytecodeScriptList_(nullptr),
      65             :     nativeToBytecodeScriptListLength_(0),
      66             :     trackedOptimizationsMap_(nullptr),
      67             :     trackedOptimizationsMapSize_(0),
      68             :     trackedOptimizationsRegionTableOffset_(0),
      69             :     trackedOptimizationsTypesTableOffset_(0),
      70             :     trackedOptimizationsAttemptsTableOffset_(0),
      71             :     osrEntryOffset_(0),
      72             :     skipArgCheckEntryOffset_(0),
      73             : #ifdef CHECK_OSIPOINT_REGISTERS
      74           8 :     checkOsiPointRegisters(JitOptions.checkOsiPointRegisters),
      75             : #endif
      76           8 :     frameDepth_(graph->paddedLocalSlotsSize() + graph->argumentsSize()),
      77          40 :     frameInitialAdjustment_(0)
      78             : {
      79           8 :     if (gen->isProfilerInstrumentationEnabled())
      80           0 :         masm.enableProfilingInstrumentation();
      81             : 
      82           8 :     if (gen->compilingWasm()) {
       83             :         // Since wasm uses the system ABI, which does not necessarily use a
       84             :         // regular array where all slots are sizeof(Value), it maintains the
       85             :         // maximum argument stack depth separately.
      86           0 :         MOZ_ASSERT(graph->argumentSlotCount() == 0);
      87           0 :         frameDepth_ += gen->wasmMaxStackArgBytes();
      88             : 
      89           0 :         if (gen->usesSimd()) {
      90             :             // If the function uses any SIMD then we may need to insert padding
      91             :             // so that local slots are aligned for SIMD.
      92           0 :             frameInitialAdjustment_ = ComputeByteAlignment(sizeof(wasm::Frame), WasmStackAlignment);
      93           0 :             frameDepth_ += frameInitialAdjustment_;
      94             : 
      95             :             // Keep the stack aligned. Some SIMD sequences build values on the
      96             :             // stack and need the stack aligned.
      97           0 :             frameDepth_ += ComputeByteAlignment(sizeof(wasm::Frame) + frameDepth_,
      98           0 :                                                 WasmStackAlignment);
      99           0 :         } else if (gen->needsStaticStackAlignment()) {
      100             :             // An MWasmCall does not align the stack pointer at call sites but
     101             :             // instead relies on the a priori stack adjustment. This must be the
     102             :             // last adjustment of frameDepth_.
     103           0 :             frameDepth_ += ComputeByteAlignment(sizeof(wasm::Frame) + frameDepth_,
     104           0 :                                                 WasmStackAlignment);
     105             :         }
     106             : 
     107             :         // FrameSizeClass is only used for bailing, which cannot happen in
     108             :         // wasm code.
     109           0 :         frameClass_ = FrameSizeClass::None();
     110             :     } else {
     111           8 :         frameClass_ = FrameSizeClass::FromDepth(frameDepth_);
     112             :     }
     113           8 : }
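
A note on the alignment math in the constructor above: ComputeByteAlignment
returns the padding needed to round a byte count up to the next multiple of a
power-of-two alignment. A minimal sketch of an equivalent helper, with a
worked example using illustrative sizes (the concrete values of
sizeof(wasm::Frame) and WasmStackAlignment depend on the target):

    static uint32_t
    PaddingToAlign(uint32_t bytes, uint32_t alignment)
    {
        // alignment must be a power of two.
        return (alignment - (bytes % alignment)) % alignment;
    }

    // Example: if the frame header is 16 bytes, frameDepth_ is 40 and the
    // required alignment is 16, then 16 + 40 = 56, so 8 bytes of padding
    // bring the total to 64 and keep the stack pointer 16-byte aligned.
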
     114             : 
     115             : bool
     116          16 : CodeGeneratorShared::generatePrologue()
     117             : {
     118          16 :     MOZ_ASSERT(masm.framePushed() == 0);
     119          16 :     MOZ_ASSERT(!gen->compilingWasm());
     120             : 
     121             : #ifdef JS_USE_LINK_REGISTER
     122             :     masm.pushReturnAddress();
     123             : #endif
     124             : 
     125             :     // If profiling, save the current frame pointer to a per-thread global field.
     126          16 :     if (isProfilerInstrumentationEnabled())
     127           0 :         masm.profilerEnterFrame(masm.getStackPointer(), CallTempReg0);
     128             : 
     129             :     // Ensure that the Ion frame is properly aligned.
     130          16 :     masm.assertStackAlignment(JitStackAlignment, 0);
     131             : 
     132             :     // Note that this automatically sets MacroAssembler::framePushed().
     133          16 :     masm.reserveStack(frameSize());
     134          16 :     masm.checkStackAlignment();
     135             : 
     136          16 :     emitTracelogIonStart();
     137          16 :     return true;
     138             : }
     139             : 
     140             : bool
     141           8 : CodeGeneratorShared::generateEpilogue()
     142             : {
     143           8 :     MOZ_ASSERT(!gen->compilingWasm());
     144           8 :     masm.bind(&returnLabel_);
     145             : 
     146           8 :     emitTracelogIonStop();
     147             : 
     148           8 :     masm.freeStack(frameSize());
     149           8 :     MOZ_ASSERT(masm.framePushed() == 0);
     150             : 
     151             :     // If profiling, reset the per-thread global lastJitFrame to point to
     152             :     // the previous frame.
     153           8 :     if (isProfilerInstrumentationEnabled())
     154           0 :         masm.profilerExitFrame();
     155             : 
     156           8 :     masm.ret();
     157             : 
     158             :     // On systems that use a constant pool, this is a good time to emit.
     159           8 :     masm.flushBuffer();
     160           8 :     return true;
     161             : }
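
The prologue and epilogue are symmetric around frameSize(): reserveStack()
bumps MacroAssembler::framePushed() by exactly the amount that freeStack()
later removes, which is why both functions can assert framePushed() == 0 at
the frame boundary.
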
     162             : 
     163             : bool
     164           8 : CodeGeneratorShared::generateOutOfLineCode()
     165             : {
     166         473 :     for (size_t i = 0; i < outOfLineCode_.length(); i++) {
     167             :         // Add native => bytecode mapping entries for OOL sites.
     168             :         // Not enabled on wasm yet since it doesn't contain bytecode mappings.
     169         465 :         if (!gen->compilingWasm()) {
     170         465 :             if (!addNativeToBytecodeEntry(outOfLineCode_[i]->bytecodeSite()))
     171           0 :                 return false;
     172             :         }
     173             : 
     174         465 :         if (!gen->alloc().ensureBallast())
     175           0 :             return false;
     176             : 
     177         465 :         JitSpew(JitSpew_Codegen, "# Emitting out of line code");
     178             : 
     179         465 :         masm.setFramePushed(outOfLineCode_[i]->framePushed());
     180         465 :         lastPC_ = outOfLineCode_[i]->pc();
     181         465 :         outOfLineCode_[i]->bind(&masm);
     182             : 
     183         465 :         outOfLineCode_[i]->generate(this);
     184             :     }
     185             : 
     186           8 :     return !masm.oom();
     187             : }
     188             : 
     189             : void
     190          95 : CodeGeneratorShared::addOutOfLineCode(OutOfLineCode* code, const MInstruction* mir)
     191             : {
     192          95 :     MOZ_ASSERT(mir);
     193          95 :     addOutOfLineCode(code, mir->trackedSite());
     194          95 : }
     195             : 
     196             : void
     197         465 : CodeGeneratorShared::addOutOfLineCode(OutOfLineCode* code, const BytecodeSite* site)
     198             : {
     199         465 :     code->setFramePushed(masm.framePushed());
     200         465 :     code->setBytecodeSite(site);
     201         465 :     MOZ_ASSERT_IF(!gen->compilingWasm(), code->script()->containsPC(code->pc()));
     202         465 :     masm.propagateOOM(outOfLineCode_.append(code));
     203         465 : }
     204             : 
     205             : bool
     206        1320 : CodeGeneratorShared::addNativeToBytecodeEntry(const BytecodeSite* site)
     207             : {
     208             :     // Skip the table entirely if profiling is not enabled.
     209        1320 :     if (!isProfilerInstrumentationEnabled())
     210        1320 :         return true;
     211             : 
      212             :     // Fail early if the last added instruction caused the macro assembler to
      213             :     // run out of memory, as the continuity assumptions below do not hold.
     214           0 :     if (masm.oom())
     215           0 :         return false;
     216             : 
     217           0 :     MOZ_ASSERT(site);
     218           0 :     MOZ_ASSERT(site->tree());
     219           0 :     MOZ_ASSERT(site->pc());
     220             : 
     221           0 :     InlineScriptTree* tree = site->tree();
     222           0 :     jsbytecode* pc = site->pc();
     223           0 :     uint32_t nativeOffset = masm.currentOffset();
     224             : 
     225           0 :     MOZ_ASSERT_IF(nativeToBytecodeList_.empty(), nativeOffset == 0);
     226             : 
     227           0 :     if (!nativeToBytecodeList_.empty()) {
     228           0 :         size_t lastIdx = nativeToBytecodeList_.length() - 1;
     229           0 :         NativeToBytecode& lastEntry = nativeToBytecodeList_[lastIdx];
     230             : 
     231           0 :         MOZ_ASSERT(nativeOffset >= lastEntry.nativeOffset.offset());
     232             : 
     233             :         // If the new entry is for the same inlineScriptTree and same
     234             :         // bytecodeOffset, but the nativeOffset has changed, do nothing.
     235             :         // The same site just generated some more code.
     236           0 :         if (lastEntry.tree == tree && lastEntry.pc == pc) {
     237           0 :             JitSpew(JitSpew_Profiling, " => In-place update [%" PRIuSIZE "-%" PRIu32 "]",
     238           0 :                     lastEntry.nativeOffset.offset(), nativeOffset);
     239           0 :             return true;
     240             :         }
     241             : 
     242             :         // If the new entry is for the same native offset, then update the
     243             :         // previous entry with the new bytecode site, since the previous
     244             :         // bytecode site did not generate any native code.
     245           0 :         if (lastEntry.nativeOffset.offset() == nativeOffset) {
     246           0 :             lastEntry.tree = tree;
     247           0 :             lastEntry.pc = pc;
     248           0 :             JitSpew(JitSpew_Profiling, " => Overwriting zero-length native region.");
     249             : 
      250             :             // This overwrite might have made the entry mergeable with a
      251             :             // previous one. If so, merge it.
     252           0 :             if (lastIdx > 0) {
     253           0 :                 NativeToBytecode& nextToLastEntry = nativeToBytecodeList_[lastIdx - 1];
     254           0 :                 if (nextToLastEntry.tree == lastEntry.tree && nextToLastEntry.pc == lastEntry.pc) {
     255           0 :                     JitSpew(JitSpew_Profiling, " => Merging with previous region");
     256           0 :                     nativeToBytecodeList_.erase(&lastEntry);
     257             :                 }
     258             :             }
     259             : 
     260           0 :             dumpNativeToBytecodeEntry(nativeToBytecodeList_.length() - 1);
     261           0 :             return true;
     262             :         }
     263             :     }
     264             : 
     265             :     // Otherwise, some native code was generated for the previous bytecode site.
     266             :     // Add a new entry for code that is about to be generated.
     267           0 :     NativeToBytecode entry;
     268           0 :     entry.nativeOffset = CodeOffset(nativeOffset);
     269           0 :     entry.tree = tree;
     270           0 :     entry.pc = pc;
     271           0 :     if (!nativeToBytecodeList_.append(entry))
     272           0 :         return false;
     273             : 
     274           0 :     JitSpew(JitSpew_Profiling, " => Push new entry.");
     275           0 :     dumpNativeToBytecodeEntry(nativeToBytecodeList_.length() - 1);
     276           0 :     return true;
     277             : }
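
The three cases handled above can be seen in isolation: an entry grows in
place while the same site keeps emitting code, a zero-length entry is
retargeted to the new site (and merged away if that makes it redundant), and
otherwise a fresh entry is appended. A minimal self-contained sketch of the
same control flow, with a hypothetical Entry type standing in for
NativeToBytecode:

    #include <cstdint>
    #include <vector>

    struct Entry { uint32_t nativeOffset; const void* tree; const void* pc; };

    static void
    AddEntry(std::vector<Entry>& list, uint32_t nativeOffset,
             const void* tree, const void* pc)
    {
        if (!list.empty()) {
            Entry& last = list.back();
            // Same site, more native code: nothing new to record.
            if (last.tree == tree && last.pc == pc)
                return;
            // Previous site emitted no code: retarget its entry, then merge
            // it into the entry before it if they now describe the same site.
            if (last.nativeOffset == nativeOffset) {
                last.tree = tree;
                last.pc = pc;
                size_t n = list.size();
                if (n > 1 && list[n - 2].tree == tree && list[n - 2].pc == pc)
                    list.pop_back();
                return;
            }
        }
        // Native code was emitted for the previous site: start a new entry.
        list.push_back(Entry{nativeOffset, tree, pc});
    }
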
     278             : 
     279             : void
     280           8 : CodeGeneratorShared::dumpNativeToBytecodeEntries()
     281             : {
     282             : #ifdef JS_JITSPEW
     283           8 :     InlineScriptTree* topTree = gen->info().inlineScriptTree();
     284           8 :     JitSpewStart(JitSpew_Profiling, "Native To Bytecode Entries for %s:%" PRIuSIZE "\n",
     285           8 :                  topTree->script()->filename(), topTree->script()->lineno());
     286           8 :     for (unsigned i = 0; i < nativeToBytecodeList_.length(); i++)
     287           0 :         dumpNativeToBytecodeEntry(i);
     288             : #endif
     289           8 : }
     290             : 
     291             : void
     292           0 : CodeGeneratorShared::dumpNativeToBytecodeEntry(uint32_t idx)
     293             : {
     294             : #ifdef JS_JITSPEW
     295           0 :     NativeToBytecode& ref = nativeToBytecodeList_[idx];
     296           0 :     InlineScriptTree* tree = ref.tree;
     297           0 :     JSScript* script = tree->script();
     298           0 :     uint32_t nativeOffset = ref.nativeOffset.offset();
     299           0 :     unsigned nativeDelta = 0;
     300           0 :     unsigned pcDelta = 0;
     301           0 :     if (idx + 1 < nativeToBytecodeList_.length()) {
     302           0 :         NativeToBytecode* nextRef = &ref + 1;
     303           0 :         nativeDelta = nextRef->nativeOffset.offset() - nativeOffset;
     304           0 :         if (nextRef->tree == ref.tree)
     305           0 :             pcDelta = nextRef->pc - ref.pc;
     306             :     }
     307           0 :     JitSpewStart(JitSpew_Profiling, "    %08" PRIxSIZE " [+%-6d] => %-6ld [%-4d] {%-10s} (%s:%" PRIuSIZE,
     308             :                  ref.nativeOffset.offset(),
     309             :                  nativeDelta,
     310           0 :                  (long) (ref.pc - script->code()),
     311             :                  pcDelta,
     312           0 :                  CodeName[JSOp(*ref.pc)],
     313           0 :                  script->filename(), script->lineno());
     314             : 
     315           0 :     for (tree = tree->caller(); tree; tree = tree->caller()) {
     316           0 :         JitSpewCont(JitSpew_Profiling, " <= %s:%" PRIuSIZE, tree->script()->filename(),
     317           0 :                                                     tree->script()->lineno());
     318             :     }
     319           0 :     JitSpewCont(JitSpew_Profiling, ")");
     320           0 :     JitSpewFin(JitSpew_Profiling);
     321             : #endif
     322           0 : }
     323             : 
     324             : bool
     325           0 : CodeGeneratorShared::addTrackedOptimizationsEntry(const TrackedOptimizations* optimizations)
     326             : {
     327           0 :     if (!isOptimizationTrackingEnabled())
     328           0 :         return true;
     329             : 
     330           0 :     MOZ_ASSERT(optimizations);
     331             : 
     332           0 :     uint32_t nativeOffset = masm.currentOffset();
     333             : 
     334           0 :     if (!trackedOptimizations_.empty()) {
     335           0 :         NativeToTrackedOptimizations& lastEntry = trackedOptimizations_.back();
     336           0 :         MOZ_ASSERT_IF(!masm.oom(), nativeOffset >= lastEntry.endOffset.offset());
     337             : 
     338             :         // If we're still generating code for the same set of optimizations,
     339             :         // we are done.
     340           0 :         if (lastEntry.optimizations == optimizations)
     341           0 :             return true;
     342             :     }
     343             : 
     344             :     // If we're generating code for a new set of optimizations, add a new
     345             :     // entry.
     346           0 :     NativeToTrackedOptimizations entry;
     347           0 :     entry.startOffset = CodeOffset(nativeOffset);
     348           0 :     entry.endOffset = CodeOffset(nativeOffset);
     349           0 :     entry.optimizations = optimizations;
     350           0 :     return trackedOptimizations_.append(entry);
     351             : }
     352             : 
     353             : void
     354           0 : CodeGeneratorShared::extendTrackedOptimizationsEntry(const TrackedOptimizations* optimizations)
     355             : {
     356           0 :     if (!isOptimizationTrackingEnabled())
     357           0 :         return;
     358             : 
     359           0 :     uint32_t nativeOffset = masm.currentOffset();
     360           0 :     NativeToTrackedOptimizations& entry = trackedOptimizations_.back();
     361           0 :     MOZ_ASSERT(entry.optimizations == optimizations);
     362           0 :     MOZ_ASSERT_IF(!masm.oom(), nativeOffset >= entry.endOffset.offset());
     363             : 
     364           0 :     entry.endOffset = CodeOffset(nativeOffset);
     365             : 
     366             :     // If we generated no code, remove the last entry.
     367           0 :     if (nativeOffset == entry.startOffset.offset())
     368           0 :         trackedOptimizations_.popBack();
     369             : }
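
Taken together, addTrackedOptimizationsEntry and
extendTrackedOptimizationsEntry run-length encode the native code into
[startOffset, endOffset) ranges keyed by the set of optimizations in effect.
A minimal sketch of the same pattern, with a hypothetical Range type:

    #include <cstdint>
    #include <vector>

    struct Range { uint32_t start, end; const void* key; };

    // Open a range for `key` at `offset`, unless that run is already open.
    static void
    OpenRange(std::vector<Range>& runs, const void* key, uint32_t offset)
    {
        if (!runs.empty() && runs.back().key == key)
            return;
        runs.push_back(Range{offset, offset, key});
    }

    // Close the current run at `offset`; drop it if it covers no code.
    static void
    CloseRange(std::vector<Range>& runs, uint32_t offset)
    {
        Range& r = runs.back();
        r.end = offset;
        if (r.start == r.end)
            runs.pop_back();
    }
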
     370             : 
     371             : // see OffsetOfFrameSlot
     372             : static inline int32_t
     373        4368 : ToStackIndex(LAllocation* a)
     374             : {
     375        4368 :     if (a->isStackSlot()) {
     376        4044 :         MOZ_ASSERT(a->toStackSlot()->slot() >= 1);
     377        4044 :         return a->toStackSlot()->slot();
     378             :     }
     379         324 :     return -int32_t(sizeof(JitFrameLayout) + a->toArgument()->index());
     380             : }
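
For example (illustrative values only), a local in stack slot 3 encodes as 3,
while argument 8 encodes as -(sizeof(JitFrameLayout) + 8): positive indices
select local stack slots, negative indices select arguments relative to the
JitFrameLayout header.
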
     381             : 
     382             : void
     383        7260 : CodeGeneratorShared::encodeAllocation(LSnapshot* snapshot, MDefinition* mir,
     384             :                                       uint32_t* allocIndex)
     385             : {
     386        7260 :     if (mir->isBox())
     387           2 :         mir = mir->toBox()->getOperand(0);
     388             : 
     389             :     MIRType type =
     390       14212 :         mir->isRecoveredOnBailout() ? MIRType::None :
     391        6952 :         mir->isUnused() ? MIRType::MagicOptimizedOut :
     392        7260 :         mir->type();
     393             : 
     394        7260 :     RValueAllocation alloc;
     395             : 
     396        7260 :     switch (type) {
     397             :       case MIRType::None:
     398             :       {
     399         308 :         MOZ_ASSERT(mir->isRecoveredOnBailout());
     400         308 :         uint32_t index = 0;
     401         308 :         LRecoverInfo* recoverInfo = snapshot->recoverInfo();
     402         308 :         MNode** it = recoverInfo->begin();
     403         308 :         MNode** end = recoverInfo->end();
     404        2172 :         while (it != end && mir != *it) {
     405         932 :             ++it;
     406         932 :             ++index;
     407             :         }
     408             : 
     409             :         // This MDefinition is recovered, thus it should be listed in the
     410             :         // LRecoverInfo.
     411         308 :         MOZ_ASSERT(it != end && mir == *it);
     412             : 
      413             :         // Lambdas should have a default value readable when iterating over the
     414             :         // inner frames.
     415         308 :         if (mir->isLambda() || mir->isLambdaArrow()) {
     416           0 :             MConstant* constant = mir->isLambda() ? mir->toLambda()->functionOperand()
     417           0 :                                                   : mir->toLambdaArrow()->functionOperand();
     418             :             uint32_t cstIndex;
     419           0 :             masm.propagateOOM(graph.addConstantToPool(constant->toJSValue(), &cstIndex));
     420           0 :             alloc = RValueAllocation::RecoverInstruction(index, cstIndex);
     421           0 :             break;
     422             :         }
     423             : 
     424         308 :         alloc = RValueAllocation::RecoverInstruction(index);
     425         308 :         break;
     426             :       }
     427             :       case MIRType::Undefined:
     428        1043 :         alloc = RValueAllocation::Undefined();
     429        1043 :         break;
     430             :       case MIRType::Null:
     431           0 :         alloc = RValueAllocation::Null();
     432           0 :         break;
     433             :       case MIRType::Int32:
     434             :       case MIRType::String:
     435             :       case MIRType::Symbol:
     436             :       case MIRType::Object:
     437             :       case MIRType::ObjectOrNull:
     438             :       case MIRType::Boolean:
     439             :       case MIRType::Double:
     440             :       {
     441        3221 :         LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
     442        3221 :         if (payload->isConstant()) {
     443         453 :             MConstant* constant = mir->toConstant();
     444             :             uint32_t index;
     445         453 :             masm.propagateOOM(graph.addConstantToPool(constant->toJSValue(), &index));
     446         453 :             alloc = RValueAllocation::ConstantPool(index);
     447         453 :             break;
     448             :         }
     449             : 
     450             :         JSValueType valueType =
     451        2768 :             (type == MIRType::ObjectOrNull) ? JSVAL_TYPE_OBJECT : ValueTypeFromMIRType(type);
     452             : 
     453        2768 :         MOZ_ASSERT(payload->isMemory() || payload->isRegister());
     454        2768 :         if (payload->isMemory())
     455        2636 :             alloc = RValueAllocation::Typed(valueType, ToStackIndex(payload));
     456         132 :         else if (payload->isGeneralReg())
     457         131 :             alloc = RValueAllocation::Typed(valueType, ToRegister(payload));
     458           1 :         else if (payload->isFloatReg())
     459           1 :             alloc = RValueAllocation::Double(ToFloatRegister(payload));
     460        2768 :         break;
     461             :       }
     462             :       case MIRType::Float32:
     463             :       case MIRType::Int8x16:
     464             :       case MIRType::Int16x8:
     465             :       case MIRType::Int32x4:
     466             :       case MIRType::Float32x4:
     467             :       case MIRType::Bool8x16:
     468             :       case MIRType::Bool16x8:
     469             :       case MIRType::Bool32x4:
     470             :       {
     471           0 :         LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
     472           0 :         if (payload->isConstant()) {
     473           0 :             MConstant* constant = mir->toConstant();
     474             :             uint32_t index;
     475           0 :             masm.propagateOOM(graph.addConstantToPool(constant->toJSValue(), &index));
     476           0 :             alloc = RValueAllocation::ConstantPool(index);
     477           0 :             break;
     478             :         }
     479             : 
     480           0 :         MOZ_ASSERT(payload->isMemory() || payload->isFloatReg());
     481           0 :         if (payload->isFloatReg())
     482           0 :             alloc = RValueAllocation::AnyFloat(ToFloatRegister(payload));
     483             :         else
     484           0 :             alloc = RValueAllocation::AnyFloat(ToStackIndex(payload));
     485           0 :         break;
     486             :       }
     487             :       case MIRType::MagicOptimizedArguments:
     488             :       case MIRType::MagicOptimizedOut:
     489             :       case MIRType::MagicUninitializedLexical:
     490             :       case MIRType::MagicIsConstructing:
     491             :       {
     492             :         uint32_t index;
     493         313 :         JSWhyMagic why = JS_GENERIC_MAGIC;
     494         313 :         switch (type) {
     495             :           case MIRType::MagicOptimizedArguments:
     496          12 :             why = JS_OPTIMIZED_ARGUMENTS;
     497          12 :             break;
     498             :           case MIRType::MagicOptimizedOut:
     499         245 :             why = JS_OPTIMIZED_OUT;
     500         245 :             break;
     501             :           case MIRType::MagicUninitializedLexical:
     502          50 :             why = JS_UNINITIALIZED_LEXICAL;
     503          50 :             break;
     504             :           case MIRType::MagicIsConstructing:
     505           6 :             why = JS_IS_CONSTRUCTING;
     506           6 :             break;
     507             :           default:
     508           0 :             MOZ_CRASH("Invalid Magic MIRType");
     509             :         }
     510             : 
     511         313 :         Value v = MagicValue(why);
     512         313 :         masm.propagateOOM(graph.addConstantToPool(v, &index));
     513         313 :         alloc = RValueAllocation::ConstantPool(index);
     514         313 :         break;
     515             :       }
     516             :       default:
     517             :       {
     518        2375 :         MOZ_ASSERT(mir->type() == MIRType::Value);
     519        2375 :         LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
     520             : #ifdef JS_NUNBOX32
     521             :         LAllocation* type = snapshot->typeOfSlot(*allocIndex);
     522             :         if (type->isRegister()) {
     523             :             if (payload->isRegister())
     524             :                 alloc = RValueAllocation::Untyped(ToRegister(type), ToRegister(payload));
     525             :             else
     526             :                 alloc = RValueAllocation::Untyped(ToRegister(type), ToStackIndex(payload));
     527             :         } else {
     528             :             if (payload->isRegister())
     529             :                 alloc = RValueAllocation::Untyped(ToStackIndex(type), ToRegister(payload));
     530             :             else
     531             :                 alloc = RValueAllocation::Untyped(ToStackIndex(type), ToStackIndex(payload));
     532             :         }
     533             : #elif JS_PUNBOX64
     534        2375 :         if (payload->isRegister())
     535         643 :             alloc = RValueAllocation::Untyped(ToRegister(payload));
     536             :         else
     537        1732 :             alloc = RValueAllocation::Untyped(ToStackIndex(payload));
     538             : #endif
     539        2375 :         break;
     540             :       }
     541             :     }
     542             : 
      543             :     // This sets an extra bit as part of the RValueAllocation, such that we
      544             :     // know the recover instruction has to be executed without wrapping the
     545             :     // instruction in a no-op recover instruction.
     546        7260 :     if (mir->isIncompleteObject())
     547         308 :         alloc.setNeedSideEffect();
     548             : 
     549        7260 :     masm.propagateOOM(snapshots_.add(alloc));
     550             : 
     551        7260 :     *allocIndex += mir->isRecoveredOnBailout() ? 0 : 1;
     552        7260 : }
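
As a concrete illustration of the switch above (values hypothetical), an
Int32 whose payload sits in a memory slot encodes as
RValueAllocation::Typed(JSVAL_TYPE_INT32, slot), a boxed Value held in a
register encodes as RValueAllocation::Untyped(reg), and a definition
recovered on bailout encodes only its index into the LRecoverInfo
instruction list.
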
     553             : 
     554             : void
     555         323 : CodeGeneratorShared::encode(LRecoverInfo* recover)
     556             : {
     557         323 :     if (recover->recoverOffset() != INVALID_RECOVER_OFFSET)
     558         149 :         return;
     559             : 
     560         174 :     uint32_t numInstructions = recover->numInstructions();
     561         174 :     JitSpew(JitSpew_IonSnapshots, "Encoding LRecoverInfo %p (frameCount %u, instructions %u)",
     562         174 :             (void*)recover, recover->mir()->frameCount(), numInstructions);
     563             : 
     564         174 :     MResumePoint::Mode mode = recover->mir()->mode();
     565         174 :     MOZ_ASSERT(mode != MResumePoint::Outer);
     566         174 :     bool resumeAfter = (mode == MResumePoint::ResumeAfter);
     567             : 
     568         174 :     RecoverOffset offset = recovers_.startRecover(numInstructions, resumeAfter);
     569             : 
     570         580 :     for (MNode* insn : *recover)
     571         406 :         recovers_.writeInstruction(insn);
     572             : 
     573         174 :     recovers_.endRecover();
     574         174 :     recover->setRecoverOffset(offset);
     575         174 :     masm.propagateOOM(!recovers_.oom());
     576             : }
     577             : 
     578             : void
     579         504 : CodeGeneratorShared::encode(LSnapshot* snapshot)
     580             : {
     581         504 :     if (snapshot->snapshotOffset() != INVALID_SNAPSHOT_OFFSET)
     582         181 :         return;
     583             : 
     584         323 :     LRecoverInfo* recoverInfo = snapshot->recoverInfo();
     585         323 :     encode(recoverInfo);
     586             : 
     587         323 :     RecoverOffset recoverOffset = recoverInfo->recoverOffset();
     588         323 :     MOZ_ASSERT(recoverOffset != INVALID_RECOVER_OFFSET);
     589             : 
     590             :     JitSpew(JitSpew_IonSnapshots, "Encoding LSnapshot %p (LRecover %p)",
     591         323 :             (void*)snapshot, (void*) recoverInfo);
     592             : 
     593         323 :     SnapshotOffset offset = snapshots_.startSnapshot(recoverOffset, snapshot->bailoutKind());
     594             : 
     595             : #ifdef TRACK_SNAPSHOTS
     596         323 :     uint32_t pcOpcode = 0;
     597         323 :     uint32_t lirOpcode = 0;
     598         323 :     uint32_t lirId = 0;
     599         323 :     uint32_t mirOpcode = 0;
     600         323 :     uint32_t mirId = 0;
     601             : 
     602         323 :     if (LNode* ins = instruction()) {
     603         315 :         lirOpcode = ins->op();
     604         315 :         lirId = ins->id();
     605         315 :         if (ins->mirRaw()) {
     606         181 :             mirOpcode = ins->mirRaw()->op();
     607         181 :             mirId = ins->mirRaw()->id();
     608         181 :             if (ins->mirRaw()->trackedPc())
     609         181 :                 pcOpcode = *ins->mirRaw()->trackedPc();
     610             :         }
     611             :     }
     612         323 :     snapshots_.trackSnapshot(pcOpcode, mirOpcode, mirId, lirOpcode, lirId);
     613             : #endif
     614             : 
     615         323 :     uint32_t allocIndex = 0;
     616        7583 :     for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
     617       14520 :         DebugOnly<uint32_t> allocWritten = snapshots_.allocWritten();
     618        7260 :         encodeAllocation(snapshot, *it, &allocIndex);
     619        7260 :         MOZ_ASSERT_IF(!snapshots_.oom(), allocWritten + 1 == snapshots_.allocWritten());
     620             :     }
     621             : 
     622         323 :     MOZ_ASSERT(allocIndex == snapshot->numSlots());
     623         323 :     snapshots_.endSnapshot();
     624         323 :     snapshot->setSnapshotOffset(offset);
     625         323 :     masm.propagateOOM(!snapshots_.oom());
     626             : }
     627             : 
     628             : bool
     629           0 : CodeGeneratorShared::assignBailoutId(LSnapshot* snapshot)
     630             : {
     631           0 :     MOZ_ASSERT(snapshot->snapshotOffset() != INVALID_SNAPSHOT_OFFSET);
     632             : 
      633             :     // Without a deopt table, bailout tables cannot be used at all.
     634           0 :     if (!deoptTable_)
     635           0 :         return false;
     636             : 
     637           0 :     MOZ_ASSERT(frameClass_ != FrameSizeClass::None());
     638             : 
     639           0 :     if (snapshot->bailoutId() != INVALID_BAILOUT_ID)
     640           0 :         return true;
     641             : 
     642             :     // Is the bailout table full?
     643           0 :     if (bailouts_.length() >= BAILOUT_TABLE_SIZE)
     644           0 :         return false;
     645             : 
     646           0 :     unsigned bailoutId = bailouts_.length();
     647           0 :     snapshot->setBailoutId(bailoutId);
     648           0 :     JitSpew(JitSpew_IonSnapshots, "Assigned snapshot bailout id %u", bailoutId);
     649           0 :     masm.propagateOOM(bailouts_.append(snapshot->snapshotOffset()));
     650           0 :     return true;
     651             : }
     652             : 
     653             : bool
     654           5 : CodeGeneratorShared::encodeSafepoints()
     655             : {
     656          48 :     for (SafepointIndex& index : safepointIndices_) {
     657          43 :         LSafepoint* safepoint = index.safepoint();
     658             : 
     659          43 :         if (!safepoint->encoded())
     660          39 :             safepoints_.encode(safepoint);
     661             : 
     662          43 :         index.resolve();
     663             :     }
     664             : 
     665           5 :     return !safepoints_.oom();
     666             : }
     667             : 
     668             : bool
     669           0 : CodeGeneratorShared::createNativeToBytecodeScriptList(JSContext* cx)
     670             : {
     671           0 :     js::Vector<JSScript*, 0, SystemAllocPolicy> scriptList;
     672           0 :     InlineScriptTree* tree = gen->info().inlineScriptTree();
     673             :     for (;;) {
     674             :         // Add script from current tree.
     675           0 :         bool found = false;
     676           0 :         for (uint32_t i = 0; i < scriptList.length(); i++) {
     677           0 :             if (scriptList[i] == tree->script()) {
     678           0 :                 found = true;
     679           0 :                 break;
     680             :             }
     681             :         }
     682           0 :         if (!found) {
     683           0 :             if (!scriptList.append(tree->script()))
     684           0 :                 return false;
     685             :         }
     686             : 
      687             :         // Process the rest of the tree.
     688             : 
     689             :         // If children exist, emit children.
     690           0 :         if (tree->hasChildren()) {
     691           0 :             tree = tree->firstChild();
     692           0 :             continue;
     693             :         }
     694             : 
     695             :         // Otherwise, find the first tree up the chain (including this one)
     696             :         // that contains a next sibling.
     697           0 :         while (!tree->hasNextCallee() && tree->hasCaller())
     698           0 :             tree = tree->caller();
     699             : 
     700             :         // If we found a sibling, use it.
     701           0 :         if (tree->hasNextCallee()) {
     702           0 :             tree = tree->nextCallee();
     703           0 :             continue;
     704             :         }
     705             : 
     706             :         // Otherwise, we must have reached the top without finding any siblings.
     707           0 :         MOZ_ASSERT(tree->isOutermostCaller());
     708           0 :         break;
     709           0 :     }
     710             : 
     711             :     // Allocate array for list.
     712           0 :     JSScript** data = cx->runtime()->pod_malloc<JSScript*>(scriptList.length());
     713           0 :     if (!data)
     714           0 :         return false;
     715             : 
     716           0 :     for (uint32_t i = 0; i < scriptList.length(); i++)
     717           0 :         data[i] = scriptList[i];
     718             : 
     719             :     // Success.
     720           0 :     nativeToBytecodeScriptListLength_ = scriptList.length();
     721           0 :     nativeToBytecodeScriptList_ = data;
     722           0 :     return true;
     723             : }
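
The loop above is a stackless pre-order traversal of the inline script tree:
visit a node, descend into its first child if there is one, and otherwise
climb toward the root until a next sibling is found. A minimal sketch of the
traversal shape, with a hypothetical Node type in place of InlineScriptTree:

    struct Node {
        Node* firstChild;    // nullptr for a leaf
        Node* nextSibling;   // nullptr for a last child
        Node* parent;        // nullptr at the root
    };

    template <typename Visitor>
    void
    PreOrderWalk(Node* node, Visitor visit)
    {
        for (;;) {
            visit(node);
            if (node->firstChild) {
                node = node->firstChild;
                continue;
            }
            while (!node->nextSibling && node->parent)
                node = node->parent;
            if (!node->nextSibling)
                return;  // back at the root with no siblings left: done.
            node = node->nextSibling;
        }
    }
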
     724             : 
     725             : bool
     726           0 : CodeGeneratorShared::generateCompactNativeToBytecodeMap(JSContext* cx, JitCode* code)
     727             : {
     728           0 :     MOZ_ASSERT(nativeToBytecodeScriptListLength_ == 0);
     729           0 :     MOZ_ASSERT(nativeToBytecodeScriptList_ == nullptr);
     730           0 :     MOZ_ASSERT(nativeToBytecodeMap_ == nullptr);
     731           0 :     MOZ_ASSERT(nativeToBytecodeMapSize_ == 0);
     732           0 :     MOZ_ASSERT(nativeToBytecodeTableOffset_ == 0);
     733           0 :     MOZ_ASSERT(nativeToBytecodeNumRegions_ == 0);
     734             : 
     735           0 :     if (!createNativeToBytecodeScriptList(cx))
     736           0 :         return false;
     737             : 
     738           0 :     MOZ_ASSERT(nativeToBytecodeScriptListLength_ > 0);
     739           0 :     MOZ_ASSERT(nativeToBytecodeScriptList_ != nullptr);
     740             : 
     741           0 :     CompactBufferWriter writer;
     742           0 :     uint32_t tableOffset = 0;
     743           0 :     uint32_t numRegions = 0;
     744             : 
     745           0 :     if (!JitcodeIonTable::WriteIonTable(
     746             :             writer, nativeToBytecodeScriptList_, nativeToBytecodeScriptListLength_,
     747           0 :             &nativeToBytecodeList_[0],
     748           0 :             &nativeToBytecodeList_[0] + nativeToBytecodeList_.length(),
     749             :             &tableOffset, &numRegions))
     750             :     {
     751           0 :         js_free(nativeToBytecodeScriptList_);
     752           0 :         return false;
     753             :     }
     754             : 
     755           0 :     MOZ_ASSERT(tableOffset > 0);
     756           0 :     MOZ_ASSERT(numRegions > 0);
     757             : 
     758             :     // Writer is done, copy it to sized buffer.
     759           0 :     uint8_t* data = cx->runtime()->pod_malloc<uint8_t>(writer.length());
     760           0 :     if (!data) {
     761           0 :         js_free(nativeToBytecodeScriptList_);
     762           0 :         return false;
     763             :     }
     764             : 
     765           0 :     memcpy(data, writer.buffer(), writer.length());
     766           0 :     nativeToBytecodeMap_ = data;
     767           0 :     nativeToBytecodeMapSize_ = writer.length();
     768           0 :     nativeToBytecodeTableOffset_ = tableOffset;
     769           0 :     nativeToBytecodeNumRegions_ = numRegions;
     770             : 
     771           0 :     verifyCompactNativeToBytecodeMap(code);
     772             : 
     773           0 :     JitSpew(JitSpew_Profiling, "Compact Native To Bytecode Map [%p-%p]",
     774           0 :             data, data + nativeToBytecodeMapSize_);
     775             : 
     776           0 :     return true;
     777             : }
     778             : 
     779             : void
     780           0 : CodeGeneratorShared::verifyCompactNativeToBytecodeMap(JitCode* code)
     781             : {
     782             : #ifdef DEBUG
     783           0 :     MOZ_ASSERT(nativeToBytecodeScriptListLength_ > 0);
     784           0 :     MOZ_ASSERT(nativeToBytecodeScriptList_ != nullptr);
     785           0 :     MOZ_ASSERT(nativeToBytecodeMap_ != nullptr);
     786           0 :     MOZ_ASSERT(nativeToBytecodeMapSize_ > 0);
     787           0 :     MOZ_ASSERT(nativeToBytecodeTableOffset_ > 0);
     788           0 :     MOZ_ASSERT(nativeToBytecodeNumRegions_ > 0);
     789             : 
     790             :     // The pointer to the table must be 4-byte aligned
     791           0 :     const uint8_t* tablePtr = nativeToBytecodeMap_ + nativeToBytecodeTableOffset_;
     792           0 :     MOZ_ASSERT(uintptr_t(tablePtr) % sizeof(uint32_t) == 0);
     793             : 
     794             :     // Verify that numRegions was encoded correctly.
     795           0 :     const JitcodeIonTable* ionTable = reinterpret_cast<const JitcodeIonTable*>(tablePtr);
     796           0 :     MOZ_ASSERT(ionTable->numRegions() == nativeToBytecodeNumRegions_);
     797             : 
      798             :     // The offset for the first region should be at the start of the payload
      799             :     // region. Since the offsets are backward from the start of the table, the
      800             :     // first entry's back-offset should equal the forward table offset from the
      801             :     // start of the allocated data.
     802           0 :     MOZ_ASSERT(ionTable->regionOffset(0) == nativeToBytecodeTableOffset_);
     803             : 
     804             :     // Verify each region.
     805           0 :     for (uint32_t i = 0; i < ionTable->numRegions(); i++) {
     806             :         // Back-offset must point into the payload region preceding the table, not before it.
     807           0 :         MOZ_ASSERT(ionTable->regionOffset(i) <= nativeToBytecodeTableOffset_);
     808             : 
      809             :         // Back-offset must point to a later area in the payload region than the
      810             :         // previous back-offset. This means that back-offsets decrease monotonically.
     811           0 :         MOZ_ASSERT_IF(i > 0, ionTable->regionOffset(i) < ionTable->regionOffset(i - 1));
     812             : 
     813           0 :         JitcodeRegionEntry entry = ionTable->regionEntry(i);
     814             : 
     815             :         // Ensure native code offset for region falls within jitcode.
     816           0 :         MOZ_ASSERT(entry.nativeOffset() <= code->instructionsSize());
     817             : 
     818             :         // Read out script/pc stack and verify.
     819           0 :         JitcodeRegionEntry::ScriptPcIterator scriptPcIter = entry.scriptPcIterator();
     820           0 :         while (scriptPcIter.hasMore()) {
     821           0 :             uint32_t scriptIdx = 0, pcOffset = 0;
     822           0 :             scriptPcIter.readNext(&scriptIdx, &pcOffset);
     823             : 
     824             :             // Ensure scriptIdx refers to a valid script in the list.
     825           0 :             MOZ_ASSERT(scriptIdx < nativeToBytecodeScriptListLength_);
     826           0 :             JSScript* script = nativeToBytecodeScriptList_[scriptIdx];
     827             : 
     828             :             // Ensure pcOffset falls within the script.
     829           0 :             MOZ_ASSERT(pcOffset < script->length());
     830             :         }
     831             : 
     832             :         // Obtain the original nativeOffset and pcOffset and script.
     833           0 :         uint32_t curNativeOffset = entry.nativeOffset();
     834           0 :         JSScript* script = nullptr;
     835           0 :         uint32_t curPcOffset = 0;
     836             :         {
     837           0 :             uint32_t scriptIdx = 0;
     838           0 :             scriptPcIter.reset();
     839           0 :             scriptPcIter.readNext(&scriptIdx, &curPcOffset);
     840           0 :             script = nativeToBytecodeScriptList_[scriptIdx];
     841             :         }
     842             : 
     843             :         // Read out nativeDeltas and pcDeltas and verify.
     844           0 :         JitcodeRegionEntry::DeltaIterator deltaIter = entry.deltaIterator();
     845           0 :         while (deltaIter.hasMore()) {
     846           0 :             uint32_t nativeDelta = 0;
     847           0 :             int32_t pcDelta = 0;
     848           0 :             deltaIter.readNext(&nativeDelta, &pcDelta);
     849             : 
     850           0 :             curNativeOffset += nativeDelta;
     851           0 :             curPcOffset = uint32_t(int32_t(curPcOffset) + pcDelta);
     852             : 
     853             :             // Ensure that nativeOffset still falls within jitcode after delta.
     854           0 :             MOZ_ASSERT(curNativeOffset <= code->instructionsSize());
     855             : 
     856             :             // Ensure that pcOffset still falls within bytecode after delta.
     857           0 :             MOZ_ASSERT(curPcOffset < script->length());
     858             :         }
     859             :     }
     860             : #endif // DEBUG
     861           0 : }
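
A worked example of the back-offset scheme being verified (sizes
illustrative): with a 64-byte payload followed by the table,
nativeToBytecodeTableOffset_ is 64 and regionOffset(0) == 64 points back to
payload byte 0; a later region with back-offset 40 starts at byte
64 - 40 = 24. Regions advance forward through the payload, so their
back-offsets strictly decrease, which is exactly what the monotonicity
assert checks.
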
     862             : 
     863             : bool
     864           0 : CodeGeneratorShared::generateCompactTrackedOptimizationsMap(JSContext* cx, JitCode* code,
     865             :                                                             IonTrackedTypeVector* allTypes)
     866             : {
     867           0 :     MOZ_ASSERT(trackedOptimizationsMap_ == nullptr);
     868           0 :     MOZ_ASSERT(trackedOptimizationsMapSize_ == 0);
     869           0 :     MOZ_ASSERT(trackedOptimizationsRegionTableOffset_ == 0);
     870           0 :     MOZ_ASSERT(trackedOptimizationsTypesTableOffset_ == 0);
     871           0 :     MOZ_ASSERT(trackedOptimizationsAttemptsTableOffset_ == 0);
     872             : 
     873           0 :     if (trackedOptimizations_.empty())
     874           0 :         return true;
     875             : 
     876           0 :     UniqueTrackedOptimizations unique(cx);
     877           0 :     if (!unique.init())
     878           0 :         return false;
     879             : 
     880             :     // Iterate through all entries to deduplicate their optimization attempts.
     881           0 :     for (size_t i = 0; i < trackedOptimizations_.length(); i++) {
     882           0 :         NativeToTrackedOptimizations& entry = trackedOptimizations_[i];
     883           0 :         if (!unique.add(entry.optimizations))
     884           0 :             return false;
     885             :     }
     886             : 
     887             :     // Sort the unique optimization attempts by frequency to stabilize the
     888             :     // attempts' indices in the compact table we will write later.
     889           0 :     if (!unique.sortByFrequency(cx))
     890           0 :         return false;
     891             : 
     892             :     // Write out the ranges and the table.
     893           0 :     CompactBufferWriter writer;
     894             :     uint32_t numRegions;
     895             :     uint32_t regionTableOffset;
     896             :     uint32_t typesTableOffset;
     897             :     uint32_t attemptsTableOffset;
     898           0 :     if (!WriteIonTrackedOptimizationsTable(cx, writer,
     899           0 :                                            trackedOptimizations_.begin(),
     900           0 :                                            trackedOptimizations_.end(),
     901             :                                            unique, &numRegions,
     902             :                                            &regionTableOffset, &typesTableOffset,
     903             :                                            &attemptsTableOffset, allTypes))
     904             :     {
     905           0 :         return false;
     906             :     }
     907             : 
     908           0 :     MOZ_ASSERT(regionTableOffset > 0);
     909           0 :     MOZ_ASSERT(typesTableOffset > 0);
     910           0 :     MOZ_ASSERT(attemptsTableOffset > 0);
     911           0 :     MOZ_ASSERT(typesTableOffset > regionTableOffset);
     912           0 :     MOZ_ASSERT(attemptsTableOffset > typesTableOffset);
     913             : 
     914             :     // Copy over the table out of the writer's buffer.
     915           0 :     uint8_t* data = cx->runtime()->pod_malloc<uint8_t>(writer.length());
     916           0 :     if (!data)
     917           0 :         return false;
     918             : 
     919           0 :     memcpy(data, writer.buffer(), writer.length());
     920           0 :     trackedOptimizationsMap_ = data;
     921           0 :     trackedOptimizationsMapSize_ = writer.length();
     922           0 :     trackedOptimizationsRegionTableOffset_ = regionTableOffset;
     923           0 :     trackedOptimizationsTypesTableOffset_ = typesTableOffset;
     924           0 :     trackedOptimizationsAttemptsTableOffset_ = attemptsTableOffset;
     925             : 
     926           0 :     verifyCompactTrackedOptimizationsMap(code, numRegions, unique, allTypes);
     927             : 
     928           0 :     JitSpew(JitSpew_OptimizationTrackingExtended,
     929             :             "== Compact Native To Optimizations Map [%p-%p] size %u",
     930           0 :             data, data + trackedOptimizationsMapSize_, trackedOptimizationsMapSize_);
     931           0 :     JitSpew(JitSpew_OptimizationTrackingExtended,
     932             :             "     with type list of length %" PRIuSIZE ", size %" PRIuSIZE,
     933           0 :             allTypes->length(), allTypes->length() * sizeof(IonTrackedTypeWithAddendum));
     934             : 
     935           0 :     return true;
     936             : }
     937             : 
     938             : #ifdef DEBUG
     939             : class ReadTempAttemptsVectorOp : public JS::ForEachTrackedOptimizationAttemptOp
     940             : {
     941             :     TempOptimizationAttemptsVector* attempts_;
     942             :     bool oom_;
     943             : 
     944             :   public:
     945           0 :     explicit ReadTempAttemptsVectorOp(TempOptimizationAttemptsVector* attempts)
     946           0 :       : attempts_(attempts), oom_(false)
     947           0 :     { }
     948             : 
     949           0 :     bool oom() {
     950           0 :         return oom_;
     951             :     }
     952             : 
     953           0 :     void operator()(JS::TrackedStrategy strategy, JS::TrackedOutcome outcome) override {
     954           0 :         if (!attempts_->append(OptimizationAttempt(strategy, outcome)))
     955           0 :             oom_ = true;
     956           0 :     }
     957             : };
     958             : 
     959           0 : struct ReadTempTypeInfoVectorOp : public IonTrackedOptimizationsTypeInfo::ForEachOp
     960             : {
     961             :     TempAllocator& alloc_;
     962             :     TempOptimizationTypeInfoVector* types_;
     963             :     TempTypeList accTypes_;
     964             :     bool oom_;
     965             : 
     966             :   public:
     967           0 :     ReadTempTypeInfoVectorOp(TempAllocator& alloc, TempOptimizationTypeInfoVector* types)
     968           0 :       : alloc_(alloc),
     969             :         types_(types),
     970             :         accTypes_(alloc),
     971           0 :         oom_(false)
     972           0 :     { }
     973             : 
     974           0 :     bool oom() {
     975           0 :         return oom_;
     976             :     }
     977             : 
     978           0 :     void readType(const IonTrackedTypeWithAddendum& tracked) override {
     979           0 :         if (!accTypes_.append(tracked.type))
     980           0 :             oom_ = true;
     981           0 :     }
     982             : 
     983           0 :     void operator()(JS::TrackedTypeSite site, MIRType mirType) override {
     984           0 :         OptimizationTypeInfo ty(alloc_, site, mirType);
     985           0 :         for (uint32_t i = 0; i < accTypes_.length(); i++) {
     986           0 :             if (!ty.trackType(accTypes_[i]))
     987           0 :                 oom_ = true;
     988             :         }
     989           0 :         if (!types_->append(mozilla::Move(ty)))
     990           0 :             oom_ = true;
     991           0 :         accTypes_.clear();
     992           0 :     }
     993             : };
     994             : #endif // DEBUG
     995             : 
     996             : void
     997           0 : CodeGeneratorShared::verifyCompactTrackedOptimizationsMap(JitCode* code, uint32_t numRegions,
     998             :                                                           const UniqueTrackedOptimizations& unique,
     999             :                                                           const IonTrackedTypeVector* allTypes)
    1000             : {
    1001             : #ifdef DEBUG
    1002           0 :     MOZ_ASSERT(trackedOptimizationsMap_ != nullptr);
    1003           0 :     MOZ_ASSERT(trackedOptimizationsMapSize_ > 0);
    1004           0 :     MOZ_ASSERT(trackedOptimizationsRegionTableOffset_ > 0);
    1005           0 :     MOZ_ASSERT(trackedOptimizationsTypesTableOffset_ > 0);
    1006           0 :     MOZ_ASSERT(trackedOptimizationsAttemptsTableOffset_ > 0);
    1007             : 
    1008             :     // Table pointers must all be 4-byte aligned.
    1009           0 :     const uint8_t* regionTableAddr = trackedOptimizationsMap_ +
    1010           0 :                                      trackedOptimizationsRegionTableOffset_;
    1011           0 :     const uint8_t* typesTableAddr = trackedOptimizationsMap_ +
    1012           0 :                                     trackedOptimizationsTypesTableOffset_;
    1013           0 :     const uint8_t* attemptsTableAddr = trackedOptimizationsMap_ +
    1014           0 :                                        trackedOptimizationsAttemptsTableOffset_;
    1015           0 :     MOZ_ASSERT(uintptr_t(regionTableAddr) % sizeof(uint32_t) == 0);
    1016           0 :     MOZ_ASSERT(uintptr_t(typesTableAddr) % sizeof(uint32_t) == 0);
    1017           0 :     MOZ_ASSERT(uintptr_t(attemptsTableAddr) % sizeof(uint32_t) == 0);
    1018             : 
    1019             :     // Assert that the number of entries matches up for the tables.
    1020             :     const IonTrackedOptimizationsRegionTable* regionTable =
    1021           0 :         (const IonTrackedOptimizationsRegionTable*) regionTableAddr;
    1022           0 :     MOZ_ASSERT(regionTable->numEntries() == numRegions);
    1023             :     const IonTrackedOptimizationsTypesTable* typesTable =
    1024           0 :         (const IonTrackedOptimizationsTypesTable*) typesTableAddr;
    1025           0 :     MOZ_ASSERT(typesTable->numEntries() == unique.count());
    1026             :     const IonTrackedOptimizationsAttemptsTable* attemptsTable =
    1027           0 :         (const IonTrackedOptimizationsAttemptsTable*) attemptsTableAddr;
    1028           0 :     MOZ_ASSERT(attemptsTable->numEntries() == unique.count());
    1029             : 
    1030             :     // Verify each region.
    1031           0 :     uint32_t trackedIdx = 0;
    1032           0 :     for (uint32_t regionIdx = 0; regionIdx < regionTable->numEntries(); regionIdx++) {
    1033             :         // Check reverse offsets are within bounds.
    1034           0 :         MOZ_ASSERT(regionTable->entryOffset(regionIdx) <= trackedOptimizationsRegionTableOffset_);
    1035           0 :         MOZ_ASSERT_IF(regionIdx > 0, regionTable->entryOffset(regionIdx) <
    1036             :                                      regionTable->entryOffset(regionIdx - 1));
    1037             : 
    1038           0 :         IonTrackedOptimizationsRegion region = regionTable->entry(regionIdx);
    1039             : 
    1040             :         // Check the region range is covered by jitcode.
    1041           0 :         MOZ_ASSERT(region.startOffset() <= code->instructionsSize());
    1042           0 :         MOZ_ASSERT(region.endOffset() <= code->instructionsSize());
    1043             : 
    1044           0 :         IonTrackedOptimizationsRegion::RangeIterator iter = region.ranges();
    1045           0 :         while (iter.more()) {
    1046             :             // Assert that the offsets are correctly decoded from the delta.
    1047             :             uint32_t startOffset, endOffset;
    1048             :             uint8_t index;
    1049           0 :             iter.readNext(&startOffset, &endOffset, &index);
    1050           0 :             NativeToTrackedOptimizations& entry = trackedOptimizations_[trackedIdx++];
    1051           0 :             MOZ_ASSERT(startOffset == entry.startOffset.offset());
    1052           0 :             MOZ_ASSERT(endOffset == entry.endOffset.offset());
    1053           0 :             MOZ_ASSERT(index == unique.indexOf(entry.optimizations));
    1054             : 
    1055             :             // Assert that the type info and attempts vectors are correctly
    1056             :             // decoded. This is disabled for now if the types table might
    1057             :             // contain nursery pointers, in which case the types might not
    1058             :             // match, see bug 1175761.
    1059           0 :             if (!code->zone()->group()->storeBuffer().cancelIonCompilations()) {
    1060           0 :                 IonTrackedOptimizationsTypeInfo typeInfo = typesTable->entry(index);
    1061           0 :                 TempOptimizationTypeInfoVector tvec(alloc());
    1062           0 :                 ReadTempTypeInfoVectorOp top(alloc(), &tvec);
    1063           0 :                 typeInfo.forEach(top, allTypes);
    1064           0 :                 MOZ_ASSERT_IF(!top.oom(), entry.optimizations->matchTypes(tvec));
    1065             :             }
    1066             : 
    1067           0 :             IonTrackedOptimizationsAttempts attempts = attemptsTable->entry(index);
    1068           0 :             TempOptimizationAttemptsVector avec(alloc());
    1069           0 :             ReadTempAttemptsVectorOp aop(&avec);
    1070           0 :             attempts.forEach(aop);
    1071           0 :             MOZ_ASSERT_IF(!aop.oom(), entry.optimizations->matchAttempts(avec));
    1072             :         }
    1073             :     }
    1074             : #endif
    1075           0 : }
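// [Illustrative sketch by the editor, not part of the source file.] The
// assertions above imply the following rough layout for the compact
// tracked-optimizations map (the exact encodings live in
// OptimizationTracking.cpp). Region entry payloads precede the region table
// and are referenced by reverse offsets, which is why entryOffset() must
// strictly decrease as regionIdx grows:
//
//   trackedOptimizationsMap_
//     [ region entry payloads ... ]
//     [ region table   ]  <- trackedOptimizationsRegionTableOffset_   (4-byte aligned)
//     [ types table    ]  <- trackedOptimizationsTypesTableOffset_    (4-byte aligned)
//     [ attempts table ]  <- trackedOptimizationsAttemptsTableOffset_ (4-byte aligned)
//   trackedOptimizationsMap_ + trackedOptimizationsMapSize_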
    1076             : 
    1077             : void
    1078           0 : CodeGeneratorShared::markSafepoint(LInstruction* ins)
    1079             : {
    1080           0 :     markSafepointAt(masm.currentOffset(), ins);
    1081           0 : }
    1082             : 
    1083             : void
    1084         147 : CodeGeneratorShared::markSafepointAt(uint32_t offset, LInstruction* ins)
    1085             : {
    1086         147 :     MOZ_ASSERT_IF(!safepointIndices_.empty() && !masm.oom(),
    1087             :                   offset - safepointIndices_.back().displacement() >= sizeof(uint32_t));
    1088         147 :     masm.propagateOOM(safepointIndices_.append(SafepointIndex(offset, ins->safepoint())));
    1089         147 : }
    1090             : 
    1091             : void
    1092         134 : CodeGeneratorShared::ensureOsiSpace()
    1093             : {
    1094             :     // For a refresher, an invalidation point is of the form:
    1095             :     // 1: call <target>
    1096             :     // 2: ...
    1097             :     // 3: <osipoint>
    1098             :     //
    1099             :     // The four bytes *before* instruction 2 are overwritten with an offset.
    1100             :     // Callers must ensure that the instruction itself has enough bytes to
    1101             :     // support this.
    1102             :     //
     1103             :     // The bytes *at* instruction 3 are overwritten with an invalidation
     1104             :     // jump. These bytes may be in a completely different IR sequence, but
    1105             :     // represent the join point of the call out of the function.
    1106             :     //
    1107             :     // At points where we want to ensure that invalidation won't corrupt an
    1108             :     // important instruction, we make sure to pad with nops.
    1109         134 :     if (masm.currentOffset() - lastOsiPointOffset_ < Assembler::PatchWrite_NearCallSize()) {
    1110           0 :         int32_t paddingSize = Assembler::PatchWrite_NearCallSize();
    1111           0 :         paddingSize -= masm.currentOffset() - lastOsiPointOffset_;
    1112           0 :         for (int32_t i = 0; i < paddingSize; ++i)
    1113           0 :             masm.nop();
    1114             :     }
    1115         134 :     MOZ_ASSERT_IF(!masm.oom(),
    1116             :                   masm.currentOffset() - lastOsiPointOffset_ >= Assembler::PatchWrite_NearCallSize());
    1117         134 :     lastOsiPointOffset_ = masm.currentOffset();
    1118         134 : }
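// [Illustrative sketch by the editor, not part of the source file.] The
// padding rule above reduces to the helper below; NopsNeeded is a hypothetical
// stand-in for the masm bookkeeping, and the near-call size would come from
// Assembler::PatchWrite_NearCallSize() (5 bytes on x86, for instance).

#include <cstdint>

static uint32_t
NopsNeeded(uint32_t currentOffset, uint32_t lastOsiPointOffset, uint32_t nearCallSize)
{
    // Pad until the invalidation call patched at the previous OSI point can
    // no longer overlap the code at the current offset.
    uint32_t distance = currentOffset - lastOsiPointOffset;
    return distance < nearCallSize ? nearCallSize - distance : 0;
}

// For example, with a 5-byte near call and the last OSI point 2 bytes back,
// NopsNeeded(10, 8, 5) == 3, so three nops are emitted before this OSI point.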
    1119             : 
    1120             : uint32_t
    1121         134 : CodeGeneratorShared::markOsiPoint(LOsiPoint* ins)
    1122             : {
    1123         134 :     encode(ins->snapshot());
    1124         134 :     ensureOsiSpace();
    1125             : 
    1126         134 :     uint32_t offset = masm.currentOffset();
    1127         134 :     SnapshotOffset so = ins->snapshot()->snapshotOffset();
    1128         134 :     masm.propagateOOM(osiIndices_.append(OsiIndex(offset, so)));
    1129             : 
    1130         134 :     return offset;
    1131             : }
    1132             : 
    1133             : #ifdef CHECK_OSIPOINT_REGISTERS
    1134             : template <class Op>
    1135             : static void
    1136           0 : HandleRegisterDump(Op op, MacroAssembler& masm, LiveRegisterSet liveRegs, Register activation,
    1137             :                    Register scratch)
    1138             : {
    1139           0 :     const size_t baseOffset = JitActivation::offsetOfRegs();
    1140             : 
    1141             :     // Handle live GPRs.
    1142           0 :     for (GeneralRegisterIterator iter(liveRegs.gprs()); iter.more(); ++iter) {
    1143           0 :         Register reg = *iter;
    1144           0 :         Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
    1145             : 
    1146           0 :         if (reg == activation) {
    1147             :             // To use the original value of the activation register (that's
    1148             :             // now on top of the stack), we need the scratch register.
    1149           0 :             masm.push(scratch);
    1150           0 :             masm.loadPtr(Address(masm.getStackPointer(), sizeof(uintptr_t)), scratch);
    1151           0 :             op(scratch, dump);
    1152           0 :             masm.pop(scratch);
    1153             :         } else {
    1154           0 :             op(reg, dump);
    1155             :         }
    1156             :     }
    1157             : 
    1158             :     // Handle live FPRs.
    1159           0 :     for (FloatRegisterIterator iter(liveRegs.fpus()); iter.more(); ++iter) {
    1160           0 :         FloatRegister reg = *iter;
    1161           0 :         Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
    1162           0 :         op(reg, dump);
    1163             :     }
    1164           0 : }
    1165             : 
    1166             : class StoreOp
    1167             : {
    1168             :     MacroAssembler& masm;
    1169             : 
    1170             :   public:
    1171           0 :     explicit StoreOp(MacroAssembler& masm)
    1172           0 :       : masm(masm)
    1173           0 :     {}
    1174             : 
    1175           0 :     void operator()(Register reg, Address dump) {
    1176           0 :         masm.storePtr(reg, dump);
    1177           0 :     }
    1178           0 :     void operator()(FloatRegister reg, Address dump) {
    1179           0 :         if (reg.isDouble())
    1180           0 :             masm.storeDouble(reg, dump);
    1181           0 :         else if (reg.isSingle())
    1182           0 :             masm.storeFloat32(reg, dump);
    1183             : #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
    1184           0 :         else if (reg.isSimd128())
    1185           0 :             masm.storeUnalignedSimd128Float(reg, dump);
    1186             : #endif
    1187             :         else
    1188           0 :             MOZ_CRASH("Unexpected register type.");
    1189           0 :     }
    1190             : };
    1191             : 
    1192             : static void
    1193           0 : StoreAllLiveRegs(MacroAssembler& masm, LiveRegisterSet liveRegs)
    1194             : {
    1195             :     // Store a copy of all live registers before performing the call.
    1196             :     // When we reach the OsiPoint, we can use this to check nothing
    1197             :     // modified them in the meantime.
    1198             : 
    1199             :     // Load pointer to the JitActivation in a scratch register.
    1200           0 :     AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
    1201           0 :     Register scratch = allRegs.takeAny();
    1202           0 :     masm.push(scratch);
    1203           0 :     masm.loadJitActivation(scratch);
    1204             : 
    1205           0 :     Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
    1206           0 :     masm.add32(Imm32(1), checkRegs);
    1207             : 
    1208           0 :     StoreOp op(masm);
    1209           0 :     HandleRegisterDump<StoreOp>(op, masm, liveRegs, scratch, allRegs.getAny());
    1210             : 
    1211           0 :     masm.pop(scratch);
    1212           0 : }
    1213             : 
    1214             : class VerifyOp
    1215             : {
    1216             :     MacroAssembler& masm;
    1217             :     Label* failure_;
    1218             : 
    1219             :   public:
    1220           0 :     VerifyOp(MacroAssembler& masm, Label* failure)
    1221           0 :       : masm(masm), failure_(failure)
    1222           0 :     {}
    1223             : 
    1224           0 :     void operator()(Register reg, Address dump) {
    1225           0 :         masm.branchPtr(Assembler::NotEqual, dump, reg, failure_);
    1226           0 :     }
    1227           0 :     void operator()(FloatRegister reg, Address dump) {
    1228           0 :         FloatRegister scratch;
    1229           0 :         if (reg.isDouble()) {
    1230           0 :             scratch = ScratchDoubleReg;
    1231           0 :             masm.loadDouble(dump, scratch);
    1232           0 :             masm.branchDouble(Assembler::DoubleNotEqual, scratch, reg, failure_);
    1233           0 :         } else if (reg.isSingle()) {
    1234           0 :             scratch = ScratchFloat32Reg;
    1235           0 :             masm.loadFloat32(dump, scratch);
    1236           0 :             masm.branchFloat(Assembler::DoubleNotEqual, scratch, reg, failure_);
    1237             :         }
    1238             : 
    1239             :         // :TODO: (Bug 1133745) Add support to verify SIMD registers.
    1240           0 :     }
    1241             : };
    1242             : 
    1243             : void
    1244           0 : CodeGeneratorShared::verifyOsiPointRegs(LSafepoint* safepoint)
    1245             : {
    1246             :     // Ensure the live registers stored by callVM did not change between
    1247             :     // the call and this OsiPoint. Try-catch relies on this invariant.
    1248             : 
    1249             :     // Load pointer to the JitActivation in a scratch register.
    1250           0 :     AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
    1251           0 :     Register scratch = allRegs.takeAny();
    1252           0 :     masm.push(scratch);
    1253           0 :     masm.loadJitActivation(scratch);
    1254             : 
    1255             :     // If we should not check registers (because the instruction did not call
    1256             :     // into the VM, or a GC happened), we're done.
    1257           0 :     Label failure, done;
    1258           0 :     Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
    1259           0 :     masm.branch32(Assembler::Equal, checkRegs, Imm32(0), &done);
    1260             : 
     1261             :     // Having more than one VM function call made in one visit function at
     1262             :     // runtime is a security-critical error, because if we conservatively assume
     1263             :     // that one of the function calls can re-enter Ion, then the invalidation
     1264             :     // process will potentially add a call at a random location, by patching
     1265             :     // the code before the return address.
    1266           0 :     masm.branch32(Assembler::NotEqual, checkRegs, Imm32(1), &failure);
    1267             : 
    1268             :     // Set checkRegs to 0, so that we don't try to verify registers after we
    1269             :     // return from this script to the caller.
    1270           0 :     masm.store32(Imm32(0), checkRegs);
    1271             : 
     1272             :     // Ignore clobbered registers. Some instructions (like LValueToInt32) modify
     1273             :     // temps after calling into the VM. This is fine because no other
     1274             :     // instructions (including this OsiPoint) will depend on them. The
     1275             :     // backtracking allocator can also use the same register for an input and
     1276             :     // an output. These are marked as clobbered and shouldn't get checked.
    1277           0 :     LiveRegisterSet liveRegs;
    1278           0 :     liveRegs.set() = RegisterSet::Intersect(safepoint->liveRegs().set(),
    1279           0 :                                             RegisterSet::Not(safepoint->clobberedRegs().set()));
    1280             : 
    1281           0 :     VerifyOp op(masm, &failure);
    1282           0 :     HandleRegisterDump<VerifyOp>(op, masm, liveRegs, scratch, allRegs.getAny());
    1283             : 
    1284           0 :     masm.jump(&done);
    1285             : 
    1286             :     // Do not profile the callWithABI that occurs below.  This is to avoid a
    1287             :     // rare corner case that occurs when profiling interacts with itself:
    1288             :     //
    1289             :     // When slow profiling assertions are turned on, FunctionBoundary ops
    1290             :     // (which update the profiler pseudo-stack) may emit a callVM, which
    1291             :     // forces them to have an osi point associated with them.  The
    1292             :     // FunctionBoundary for inline function entry is added to the caller's
    1293             :     // graph with a PC from the caller's code, but during codegen it modifies
    1294             :     // Gecko Profiler instrumentation to add the callee as the current top-most
    1295             :     // script. When codegen gets to the OSIPoint, and the callWithABI below is
    1296             :     // emitted, the codegen thinks that the current frame is the callee, but
    1297             :     // the PC it's using from the OSIPoint refers to the caller.  This causes
    1298             :     // the profiler instrumentation of the callWithABI below to ASSERT, since
    1299             :     // the script and pc are mismatched.  To avoid this, we simply omit
    1300             :     // instrumentation for these callWithABIs.
    1301             : 
    1302             :     // Any live register captured by a safepoint (other than temp registers)
    1303             :     // must remain unchanged between the call and the OsiPoint instruction.
    1304           0 :     masm.bind(&failure);
    1305           0 :     masm.assumeUnreachable("Modified registers between VM call and OsiPoint");
    1306             : 
    1307           0 :     masm.bind(&done);
    1308           0 :     masm.pop(scratch);
    1309           0 : }
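// [Illustrative note by the editor, not part of the source file.] Taken
// together, StoreAllLiveRegs and verifyOsiPointRegs implement a small protocol
// on the JitActivation's checkRegs counter:
//
//   checkRegs == 0 at the OsiPoint -> no VM call stored registers; skip checks.
//   checkRegs == 1 at the OsiPoint -> exactly one VM call stored registers;
//                                     verify each live register against the dump.
//   checkRegs  > 1 at the OsiPoint -> more than one VM call for a single LIR
//                                     instruction; take the failure path above.
//
// resetOsiPointRegs (below) clears the counter back to 0 so a stale dump is
// never rechecked after control returns to the caller.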
    1310             : 
    1311             : bool
    1312         388 : CodeGeneratorShared::shouldVerifyOsiPointRegs(LSafepoint* safepoint)
    1313             : {
    1314         388 :     if (!checkOsiPointRegisters)
    1315         388 :         return false;
    1316             : 
    1317           0 :     if (safepoint->liveRegs().emptyGeneral() && safepoint->liveRegs().emptyFloat())
    1318           0 :         return false; // No registers to check.
    1319             : 
    1320           0 :     return true;
    1321             : }
    1322             : 
    1323             : void
    1324         134 : CodeGeneratorShared::resetOsiPointRegs(LSafepoint* safepoint)
    1325             : {
    1326         134 :     if (!shouldVerifyOsiPointRegs(safepoint))
    1327         134 :         return;
    1328             : 
    1329             :     // Set checkRegs to 0. If we perform a VM call, the instruction
    1330             :     // will set it to 1.
    1331           0 :     AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
    1332           0 :     Register scratch = allRegs.takeAny();
    1333           0 :     masm.push(scratch);
    1334           0 :     masm.loadJitActivation(scratch);
    1335           0 :     Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
    1336           0 :     masm.store32(Imm32(0), checkRegs);
    1337           0 :     masm.pop(scratch);
    1338             : }
    1339             : #endif
    1340             : 
     1341             : // Before doing any call into C++, you should ensure that volatile
     1342             : // registers are evicted by the register allocator.
    1343             : void
    1344         120 : CodeGeneratorShared::callVM(const VMFunction& fun, LInstruction* ins, const Register* dynStack)
    1345             : {
    1346             :     // If we're calling a function with an out parameter type of double, make
    1347             :     // sure we have an FPU.
    1348         120 :     MOZ_ASSERT_IF(fun.outParam == Type_Double, GetJitContext()->runtime->jitSupportsFloatingPoint());
    1349             : 
    1350             : #ifdef DEBUG
    1351         120 :     if (ins->mirRaw()) {
    1352         120 :         MOZ_ASSERT(ins->mirRaw()->isInstruction());
    1353         120 :         MInstruction* mir = ins->mirRaw()->toInstruction();
    1354         120 :         MOZ_ASSERT_IF(mir->needsResumePoint(), mir->resumePoint());
    1355             :     }
    1356             : #endif
    1357             : 
    1358             :     // Stack is:
    1359             :     //    ... frame ...
    1360             :     //    [args]
    1361             : #ifdef DEBUG
    1362         120 :     MOZ_ASSERT(pushedArgs_ == fun.explicitArgs);
    1363         120 :     pushedArgs_ = 0;
    1364             : #endif
    1365             : 
    1366             :     // Get the wrapper of the VM function.
    1367         120 :     JitCode* wrapper = gen->jitRuntime()->getVMWrapper(fun);
    1368         120 :     if (!wrapper) {
    1369           0 :         masm.setOOM();
    1370           0 :         return;
    1371             :     }
    1372             : 
    1373             : #ifdef CHECK_OSIPOINT_REGISTERS
    1374         120 :     if (shouldVerifyOsiPointRegs(ins->safepoint()))
    1375           0 :         StoreAllLiveRegs(masm, ins->safepoint()->liveRegs());
    1376             : #endif
    1377             : 
     1378             :     // Push an exit frame descriptor. If |dynStack| is a valid pointer to a
     1379             :     // register, then its value is added to |framePushed()| to fill the frame
     1380             :     // descriptor.
    1381         120 :     if (dynStack) {
    1382           0 :         masm.addPtr(Imm32(masm.framePushed()), *dynStack);
    1383           0 :         masm.makeFrameDescriptor(*dynStack, JitFrame_IonJS, ExitFrameLayout::Size());
    1384           0 :         masm.Push(*dynStack); // descriptor
    1385             :     } else {
    1386         120 :         masm.pushStaticFrameDescriptor(JitFrame_IonJS, ExitFrameLayout::Size());
    1387             :     }
    1388             : 
     1389             :     // Call the wrapper function.  The wrapper is in charge of unwinding the
     1390             :     // stack when returning from the call.  Failures are handled with exceptions
     1391             :     // based on the return value of the C functions.  To guard the outcome of
     1392             :     // the returned value, use another LIR instruction.
    1393         120 :     uint32_t callOffset = masm.callJit(wrapper);
    1394         120 :     markSafepointAt(callOffset, ins);
    1395             : 
     1396             :     // Remove the rest of the frame left on the stack. We remove the return
     1397             :     // address, which is implicitly popped when returning.
    1398         120 :     int framePop = sizeof(ExitFrameLayout) - sizeof(void*);
    1399             : 
    1400             :     // Pop arguments from framePushed.
    1401         120 :     masm.implicitPop(fun.explicitStackSlots() * sizeof(void*) + framePop);
    1402             :     // Stack is:
    1403             :     //    ... frame ...
    1404             : }
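// [Illustrative note by the editor, not part of the source file.] A rough
// picture of the net stack effect of callVM, assuming the exit frame consists
// of the descriptor plus the return address:
//
//   before the call: ... frame ... [args]
//   during the call: ... frame ... [args] [descriptor] [return address]
//   after the call:  ... frame ...
//
// The wrapper pops the return address on return; implicitPop then accounts
// for the args and the descriptor without emitting any code.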
    1405             : 
    1406             : class OutOfLineTruncateSlow : public OutOfLineCodeBase<CodeGeneratorShared>
    1407             : {
    1408             :     FloatRegister src_;
    1409             :     Register dest_;
    1410             :     bool widenFloatToDouble_;
    1411             :     wasm::BytecodeOffset bytecodeOffset_;
    1412             : 
    1413             :   public:
    1414           1 :     OutOfLineTruncateSlow(FloatRegister src, Register dest, bool widenFloatToDouble = false,
    1415             :                           wasm::BytecodeOffset bytecodeOffset = wasm::BytecodeOffset())
    1416           1 :       : src_(src),
    1417             :         dest_(dest),
    1418             :         widenFloatToDouble_(widenFloatToDouble),
    1419           1 :         bytecodeOffset_(bytecodeOffset)
    1420           1 :     { }
    1421             : 
    1422           1 :     void accept(CodeGeneratorShared* codegen) {
    1423           1 :         codegen->visitOutOfLineTruncateSlow(this);
    1424           1 :     }
    1425           1 :     FloatRegister src() const {
    1426           1 :         return src_;
    1427             :     }
    1428           1 :     Register dest() const {
    1429           1 :         return dest_;
    1430             :     }
    1431           1 :     bool widenFloatToDouble() const {
    1432           1 :         return widenFloatToDouble_;
    1433             :     }
    1434           1 :     wasm::BytecodeOffset bytecodeOffset() const {
    1435           1 :         return bytecodeOffset_;
    1436             :     }
    1437             : };
    1438             : 
    1439             : OutOfLineCode*
    1440           1 : CodeGeneratorShared::oolTruncateDouble(FloatRegister src, Register dest, MInstruction* mir,
    1441             :                                        wasm::BytecodeOffset bytecodeOffset)
    1442             : {
    1443           1 :     MOZ_ASSERT_IF(IsCompilingWasm(), bytecodeOffset.isValid());
    1444             : 
    1445             :     OutOfLineTruncateSlow* ool = new(alloc()) OutOfLineTruncateSlow(src, dest, /* float32 */ false,
    1446           1 :                                                                     bytecodeOffset);
    1447           1 :     addOutOfLineCode(ool, mir);
    1448           1 :     return ool;
    1449             : }
    1450             : 
    1451             : void
    1452           0 : CodeGeneratorShared::emitTruncateDouble(FloatRegister src, Register dest, MTruncateToInt32* mir)
    1453             : {
    1454           0 :     OutOfLineCode* ool = oolTruncateDouble(src, dest, mir, mir->bytecodeOffset());
    1455             : 
    1456           0 :     masm.branchTruncateDoubleMaybeModUint32(src, dest, ool->entry());
    1457           0 :     masm.bind(ool->rejoin());
    1458           0 : }
    1459             : 
    1460             : void
    1461           0 : CodeGeneratorShared::emitTruncateFloat32(FloatRegister src, Register dest, MTruncateToInt32* mir)
    1462             : {
    1463             :     OutOfLineTruncateSlow* ool = new(alloc()) OutOfLineTruncateSlow(src, dest, /* float32 */ true,
    1464           0 :                                                                     mir->bytecodeOffset());
    1465           0 :     addOutOfLineCode(ool, mir);
    1466             : 
    1467           0 :     masm.branchTruncateFloat32MaybeModUint32(src, dest, ool->entry());
    1468           0 :     masm.bind(ool->rejoin());
    1469           0 : }
    1470             : 
    1471             : void
    1472           1 : CodeGeneratorShared::visitOutOfLineTruncateSlow(OutOfLineTruncateSlow* ool)
    1473             : {
    1474           1 :     FloatRegister src = ool->src();
    1475           1 :     Register dest = ool->dest();
    1476             : 
    1477           1 :     saveVolatile(dest);
    1478           1 :     masm.outOfLineTruncateSlow(src, dest, ool->widenFloatToDouble(), gen->compilingWasm(),
    1479           1 :                                ool->bytecodeOffset());
    1480           1 :     restoreVolatile(dest);
    1481             : 
    1482           1 :     masm.jump(ool->rejoin());
    1483           1 : }
    1484             : 
    1485             : bool
    1486           8 : CodeGeneratorShared::omitOverRecursedCheck() const
    1487             : {
    1488             :     // If the current function makes no calls (which means it isn't recursive)
    1489             :     // and it uses only a small amount of stack space, it doesn't need a
    1490             :     // stack overflow check. Note that the actual number here is somewhat
    1491             :     // arbitrary, and codegen actually uses small bounded amounts of
    1492             :     // additional stack space in some cases too.
    1493           8 :     return frameSize() < 64 && !gen->needsOverrecursedCheck();
    1494             : }
    1495             : 
    1496             : void
    1497           0 : CodeGeneratorShared::emitWasmCallBase(LWasmCallBase* ins)
    1498             : {
    1499           0 :     MWasmCall* mir = ins->mir();
    1500             : 
    1501           0 :     if (mir->spIncrement())
    1502           0 :         masm.freeStack(mir->spIncrement());
    1503             : 
    1504           0 :     MOZ_ASSERT((sizeof(wasm::Frame) + masm.framePushed()) % WasmStackAlignment == 0);
    1505             :     static_assert(WasmStackAlignment >= ABIStackAlignment &&
    1506             :                   WasmStackAlignment % ABIStackAlignment == 0,
    1507             :                   "The wasm stack alignment should subsume the ABI-required alignment");
    1508             : 
    1509             : #ifdef DEBUG
    1510           0 :     Label ok;
    1511           0 :     masm.branchTestStackPtr(Assembler::Zero, Imm32(WasmStackAlignment - 1), &ok);
    1512           0 :     masm.breakpoint();
    1513           0 :     masm.bind(&ok);
    1514             : #endif
    1515             : 
    1516             :     // LWasmCallBase::isCallPreserved() assumes that all MWasmCalls preserve the
     1517             :     // TLS and pinned regs. The only case where we don't have to reload
    1518             :     // the TLS and pinned regs is when the callee preserves them.
    1519           0 :     bool reloadRegs = true;
    1520             : 
    1521           0 :     const wasm::CallSiteDesc& desc = mir->desc();
    1522           0 :     const wasm::CalleeDesc& callee = mir->callee();
    1523           0 :     switch (callee.which()) {
    1524             :       case wasm::CalleeDesc::Func:
    1525           0 :         masm.call(desc, callee.funcIndex());
    1526           0 :         reloadRegs = false;
    1527           0 :         break;
    1528             :       case wasm::CalleeDesc::Import:
    1529           0 :         masm.wasmCallImport(desc, callee);
    1530           0 :         break;
    1531             :       case wasm::CalleeDesc::AsmJSTable:
    1532             :       case wasm::CalleeDesc::WasmTable:
    1533           0 :         masm.wasmCallIndirect(desc, callee, ins->needsBoundsCheck());
    1534           0 :         reloadRegs = callee.which() == wasm::CalleeDesc::WasmTable && callee.wasmTableIsExternal();
    1535           0 :         break;
    1536             :       case wasm::CalleeDesc::Builtin:
    1537           0 :         masm.call(desc, callee.builtin());
    1538           0 :         reloadRegs = false;
    1539           0 :         break;
    1540             :       case wasm::CalleeDesc::BuiltinInstanceMethod:
    1541           0 :         masm.wasmCallBuiltinInstanceMethod(desc, mir->instanceArg(), callee.builtin());
    1542           0 :         break;
    1543             :     }
    1544             : 
    1545           0 :     if (reloadRegs) {
    1546           0 :         masm.loadWasmTlsRegFromFrame();
    1547           0 :         masm.loadWasmPinnedRegsFromTls();
    1548             :     }
    1549             : 
    1550           0 :     if (mir->spIncrement())
    1551           0 :         masm.reserveStack(mir->spIncrement());
    1552           0 : }
    1553             : 
    1554             : void
    1555           0 : CodeGeneratorShared::visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins)
    1556             : {
    1557           0 :     MWasmLoadGlobalVar* mir = ins->mir();
    1558             : 
    1559           0 :     MIRType type = mir->type();
    1560           0 :     MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
    1561             : 
    1562           0 :     Register tls = ToRegister(ins->tlsPtr());
    1563           0 :     Address addr(tls, offsetof(wasm::TlsData, globalArea) + mir->globalDataOffset());
    1564           0 :     switch (type) {
    1565             :       case MIRType::Int32:
    1566           0 :         masm.load32(addr, ToRegister(ins->output()));
    1567           0 :         break;
    1568             :       case MIRType::Float32:
    1569           0 :         masm.loadFloat32(addr, ToFloatRegister(ins->output()));
    1570           0 :         break;
    1571             :       case MIRType::Double:
    1572           0 :         masm.loadDouble(addr, ToFloatRegister(ins->output()));
    1573           0 :         break;
     1574             :       // Aligned access: code is aligned on PageSize and there is padding
     1575             :       // before the global data section.
    1576             :       case MIRType::Int8x16:
    1577             :       case MIRType::Int16x8:
    1578             :       case MIRType::Int32x4:
    1579             :       case MIRType::Bool8x16:
    1580             :       case MIRType::Bool16x8:
    1581             :       case MIRType::Bool32x4:
    1582           0 :         masm.loadInt32x4(addr, ToFloatRegister(ins->output()));
    1583           0 :         break;
    1584             :       case MIRType::Float32x4:
    1585           0 :         masm.loadFloat32x4(addr, ToFloatRegister(ins->output()));
    1586           0 :         break;
    1587             :       default:
    1588           0 :         MOZ_CRASH("unexpected type in visitWasmLoadGlobalVar");
    1589             :     }
    1590           0 : }
    1591             : 
    1592             : void
    1593           0 : CodeGeneratorShared::visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins)
    1594             : {
    1595           0 :     MWasmStoreGlobalVar* mir = ins->mir();
    1596             : 
    1597           0 :     MIRType type = mir->value()->type();
    1598           0 :     MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
    1599             : 
    1600           0 :     Register tls = ToRegister(ins->tlsPtr());
    1601           0 :     Address addr(tls, offsetof(wasm::TlsData, globalArea) + mir->globalDataOffset());
    1602           0 :     switch (type) {
    1603             :       case MIRType::Int32:
    1604           0 :         masm.store32(ToRegister(ins->value()), addr);
    1605           0 :         break;
    1606             :       case MIRType::Float32:
    1607           0 :         masm.storeFloat32(ToFloatRegister(ins->value()), addr);
    1608           0 :         break;
    1609             :       case MIRType::Double:
    1610           0 :         masm.storeDouble(ToFloatRegister(ins->value()), addr);
    1611           0 :         break;
     1612             :       // Aligned access: code is aligned on PageSize and there is padding
     1613             :       // before the global data section.
    1614             :       case MIRType::Int8x16:
    1615             :       case MIRType::Int16x8:
    1616             :       case MIRType::Int32x4:
    1617             :       case MIRType::Bool8x16:
    1618             :       case MIRType::Bool16x8:
    1619             :       case MIRType::Bool32x4:
    1620           0 :         masm.storeInt32x4(ToFloatRegister(ins->value()), addr);
    1621           0 :         break;
    1622             :       case MIRType::Float32x4:
    1623           0 :         masm.storeFloat32x4(ToFloatRegister(ins->value()), addr);
    1624           0 :         break;
    1625             :       default:
    1626           0 :         MOZ_CRASH("unexpected type in visitWasmStoreGlobalVar");
    1627             :     }
    1628           0 : }
    1629             : 
    1630             : void
    1631           0 : CodeGeneratorShared::visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins)
    1632             : {
    1633           0 :     MWasmLoadGlobalVar* mir = ins->mir();
    1634           0 :     MOZ_ASSERT(mir->type() == MIRType::Int64);
    1635             : 
    1636           0 :     Register tls = ToRegister(ins->tlsPtr());
    1637           0 :     Address addr(tls, offsetof(wasm::TlsData, globalArea) + mir->globalDataOffset());
    1638             : 
    1639           0 :     Register64 output = ToOutRegister64(ins);
    1640           0 :     masm.load64(addr, output);
    1641           0 : }
    1642             : 
    1643             : void
    1644           0 : CodeGeneratorShared::visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins)
    1645             : {
    1646           0 :     MWasmStoreGlobalVar* mir = ins->mir();
    1647           0 :     MOZ_ASSERT(mir->value()->type() == MIRType::Int64);
    1648             : 
    1649           0 :     Register tls = ToRegister(ins->tlsPtr());
    1650           0 :     Address addr(tls, offsetof(wasm::TlsData, globalArea) + mir->globalDataOffset());
    1651             : 
    1652           0 :     Register64 value = ToRegister64(ins->value());
    1653           0 :     masm.store64(value, addr);
    1654           0 : }
    1655             : 
    1656             : void
    1657           1 : CodeGeneratorShared::emitPreBarrier(Register base, const LAllocation* index, int32_t offsetAdjustment)
    1658             : {
    1659           1 :     if (index->isConstant()) {
    1660           0 :         Address address(base, ToInt32(index) * sizeof(Value) + offsetAdjustment);
    1661           0 :         masm.guardedCallPreBarrier(address, MIRType::Value);
    1662             :     } else {
    1663           1 :         BaseIndex address(base, ToRegister(index), TimesEight, offsetAdjustment);
    1664           1 :         masm.guardedCallPreBarrier(address, MIRType::Value);
    1665             :     }
    1666           1 : }
    1667             : 
    1668             : void
    1669          23 : CodeGeneratorShared::emitPreBarrier(Address address)
    1670             : {
    1671          23 :     masm.guardedCallPreBarrier(address, MIRType::Value);
    1672          23 : }
    1673             : 
    1674             : Label*
    1675         204 : CodeGeneratorShared::labelForBackedgeWithImplicitCheck(MBasicBlock* mir)
    1676             : {
    1677             :     // If this is a loop backedge to a loop header with an implicit interrupt
    1678             :     // check, use a patchable jump. Skip this search if compiling without a
    1679             :     // script for wasm, as there will be no interrupt check instruction.
    1680             :     // Due to critical edge unsplitting there may no longer be unique loop
    1681             :     // backedges, so just look for any edge going to an earlier block in RPO.
    1682         204 :     if (!gen->compilingWasm() && mir->isLoopHeader() && mir->id() <= current->mir()->id()) {
    1683           6 :         for (LInstructionIterator iter = mir->lir()->begin(); iter != mir->lir()->end(); iter++) {
    1684           6 :             if (iter->isMoveGroup()) {
    1685             :                 // Continue searching for an interrupt check.
    1686             :             } else {
    1687             :                 // The interrupt check should be the first instruction in the
    1688             :                 // loop header other than move groups.
    1689           6 :                 MOZ_ASSERT(iter->isInterruptCheck());
    1690           6 :                 if (iter->toInterruptCheck()->implicit())
    1691           6 :                     return iter->toInterruptCheck()->oolEntry();
    1692           6 :                 return nullptr;
    1693             :             }
    1694             :         }
    1695             :     }
    1696             : 
    1697         198 :     return nullptr;
    1698             : }
    1699             : 
    1700             : void
    1701         196 : CodeGeneratorShared::jumpToBlock(MBasicBlock* mir)
    1702             : {
    1703             :     // Skip past trivial blocks.
    1704         196 :     mir = skipTrivialBlocks(mir);
    1705             : 
    1706             :     // No jump necessary if we can fall through to the next block.
    1707         196 :     if (isNextBlock(mir->lir()))
    1708         130 :         return;
    1709             : 
    1710          66 :     if (Label* oolEntry = labelForBackedgeWithImplicitCheck(mir)) {
    1711             :         // Note: the backedge is initially a jump to the next instruction.
    1712             :         // It will be patched to the target block's label during link().
    1713           0 :         RepatchLabel rejoin;
    1714           0 :         CodeOffsetJump backedge = masm.backedgeJump(&rejoin, mir->lir()->label());
    1715           0 :         masm.bind(&rejoin);
    1716             : 
    1717           0 :         masm.propagateOOM(patchableBackedges_.append(PatchableBackedgeInfo(backedge, mir->lir()->label(), oolEntry)));
    1718             :     } else {
    1719          66 :         masm.jump(mir->lir()->label());
    1720             :     }
    1721             : }
    1722             : 
    1723             : Label*
    1724          60 : CodeGeneratorShared::getJumpLabelForBranch(MBasicBlock* block)
    1725             : {
    1726             :     // Skip past trivial blocks.
    1727          60 :     block = skipTrivialBlocks(block);
    1728             : 
    1729          60 :     if (!labelForBackedgeWithImplicitCheck(block))
    1730          60 :         return block->lir()->label();
    1731             : 
    1732             :     // We need to use a patchable jump for this backedge, but want to treat
    1733             :     // this as a normal label target to simplify codegen. Efficiency isn't so
    1734             :     // important here as these tests are extremely unlikely to be used in loop
    1735             :     // backedges, so emit inline code for the patchable jump. Heap allocating
    1736             :     // the label allows it to be used by out of line blocks.
    1737           0 :     Label* res = alloc().lifoAlloc()->newInfallible<Label>();
    1738           0 :     Label after;
    1739           0 :     masm.jump(&after);
    1740           0 :     masm.bind(res);
    1741           0 :     jumpToBlock(block);
    1742           0 :     masm.bind(&after);
    1743           0 :     return res;
    1744             : }
    1745             : 
    1746             : // This function is not used for MIPS/MIPS64. MIPS has branchToBlock.
    1747             : #if !defined(JS_CODEGEN_MIPS32) && !defined(JS_CODEGEN_MIPS64)
    1748             : void
    1749          78 : CodeGeneratorShared::jumpToBlock(MBasicBlock* mir, Assembler::Condition cond)
    1750             : {
    1751             :     // Skip past trivial blocks.
    1752          78 :     mir = skipTrivialBlocks(mir);
    1753             : 
    1754          78 :     if (Label* oolEntry = labelForBackedgeWithImplicitCheck(mir)) {
    1755             :         // Note: the backedge is initially a jump to the next instruction.
    1756             :         // It will be patched to the target block's label during link().
    1757           0 :         RepatchLabel rejoin;
    1758           0 :         CodeOffsetJump backedge = masm.jumpWithPatch(&rejoin, cond, mir->lir()->label());
    1759           0 :         masm.bind(&rejoin);
    1760             : 
    1761           0 :         masm.propagateOOM(patchableBackedges_.append(PatchableBackedgeInfo(backedge, mir->lir()->label(), oolEntry)));
    1762             :     } else {
    1763          78 :         masm.j(cond, mir->lir()->label());
    1764             :     }
    1765          78 : }
    1766             : #endif
    1767             : 
    1768             : ReciprocalMulConstants
    1769           0 : CodeGeneratorShared::computeDivisionConstants(uint32_t d, int maxLog) {
    1770           0 :     MOZ_ASSERT(maxLog >= 2 && maxLog <= 32);
    1771             :     // In what follows, 0 < d < 2^maxLog and d is not a power of 2.
    1772           0 :     MOZ_ASSERT(d < (uint64_t(1) << maxLog) && (d & (d - 1)) != 0);
    1773             : 
    1774             :     // Speeding up division by non power-of-2 constants is possible by
    1775             :     // calculating, during compilation, a value M such that high-order
    1776             :     // bits of M*n correspond to the result of the division of n by d.
    1777             :     // No value of M can serve this purpose for arbitrarily big values
    1778             :     // of n but, for optimizing integer division, we're just concerned
    1779             :     // with values of n whose absolute value is bounded (by fitting in
    1780             :     // an integer type, say). With this in mind, we'll find a constant
    1781             :     // M as above that works for -2^maxLog <= n < 2^maxLog; maxLog can
    1782             :     // then be 31 for signed division or 32 for unsigned division.
    1783             :     //
    1784             :     // The original presentation of this technique appears in Hacker's
     1785             :     // Delight, a book by Henry S. Warren, Jr. A proof of correctness
    1786             :     // for our version follows; we'll denote maxLog by L in the proof,
    1787             :     // for conciseness.
    1788             :     //
    1789             :     // Formally, for |d| < 2^L, we'll compute two magic values M and s
    1790             :     // in the ranges 0 <= M < 2^(L+1) and 0 <= s <= L such that
    1791             :     //     (M * n) >> (32 + s) = floor(n/d)    if    0 <= n < 2^L
    1792             :     //     (M * n) >> (32 + s) = ceil(n/d) - 1 if -2^L <= n < 0.
    1793             :     //
    1794             :     // Define p = 32 + s, M = ceil(2^p/d), and assume that s satisfies
    1795             :     //                     M - 2^p/d <= 2^(p-L)/d.                 (1)
    1796             :     // (Observe that p = CeilLog32(d) + L satisfies this, as the right
    1797             :     // side of (1) is at least one in this case). Then,
    1798             :     //
    1799             :     // a) If p <= CeilLog32(d) + L, then M < 2^(L+1) - 1.
    1800             :     // Proof: Indeed, M is monotone in p and, for p equal to the above
    1801             :     // value, the bounds 2^L > d >= 2^(p-L-1) + 1 readily imply that
    1802             :     //    2^p / d <  2^p/(d - 1) * (d - 1)/d
    1803             :     //            <= 2^(L+1) * (1 - 1/d) < 2^(L+1) - 2.
    1804             :     // The claim follows by applying the ceiling function.
    1805             :     //
    1806             :     // b) For any 0 <= n < 2^L, floor(Mn/2^p) = floor(n/d).
    1807             :     // Proof: Put x = floor(Mn/2^p); it's the unique integer for which
    1808             :     //                    Mn/2^p - 1 < x <= Mn/2^p.                (2)
    1809             :     // Using M >= 2^p/d on the LHS and (1) on the RHS, we get
    1810             :     //           n/d - 1 < x <= n/d + n/(2^L d) < n/d + 1/d.
    1811             :     // Since x is an integer, it's not in the interval (n/d, (n+1)/d),
    1812             :     // and so n/d - 1 < x <= n/d, which implies x = floor(n/d).
    1813             :     //
    1814             :     // c) For any -2^L <= n < 0, floor(Mn/2^p) + 1 = ceil(n/d).
    1815             :     // Proof: The proof is similar. Equation (2) holds as above. Using
    1816             :     // M > 2^p/d (d isn't a power of 2) on the RHS and (1) on the LHS,
    1817             :     //                 n/d + n/(2^L d) - 1 < x < n/d.
    1818             :     // Using n >= -2^L and summing 1,
    1819             :     //                  n/d - 1/d < x + 1 < n/d + 1.
    1820             :     // Since x + 1 is an integer, this implies n/d <= x + 1 < n/d + 1.
    1821             :     // In other words, x + 1 = ceil(n/d).
    1822             :     //
    1823             :     // Condition (1) isn't necessary for the existence of M and s with
    1824             :     // the properties above. Hacker's Delight provides a slightly less
    1825             :     // restrictive condition when d >= 196611, at the cost of a 3-page
    1826             :     // proof of correctness, for the case L = 31.
    1827             :     //
    1828             :     // Note that, since d*M - 2^p = d - (2^p)%d, (1) can be written as
    1829             :     //                   2^(p-L) >= d - (2^p)%d.
    1830             :     // In order to avoid overflow in the (2^p) % d calculation, we can
    1831             :     // compute it as (2^p-1) % d + 1, where 2^p-1 can then be computed
    1832             :     // without overflow as UINT64_MAX >> (64-p).
    1833             : 
    1834             :     // We now compute the least p >= 32 with the property above...
    1835           0 :     int32_t p = 32;
    1836           0 :     while ((uint64_t(1) << (p-maxLog)) + (UINT64_MAX >> (64-p)) % d + 1 < d)
    1837           0 :         p++;
    1838             : 
    1839             :     // ...and the corresponding M. For either the signed (L=31) or the
    1840             :     // unsigned (L=32) case, this value can be too large (cf. item a).
    1841             :     // Codegen can still multiply by M by multiplying by (M - 2^L) and
    1842             :     // adjusting the value afterwards, if this is the case.
    1843             :     ReciprocalMulConstants rmc;
    1844           0 :     rmc.multiplier = (UINT64_MAX >> (64-p))/d + 1;
    1845           0 :     rmc.shiftAmount = p - 32;
    1846             : 
    1847           0 :     return rmc;
    1848             : }
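// [Illustrative sketch by the editor, not part of the source file.] A
// standalone check of the construction above for d = 7 with maxLog = 31
// (signed division): the loop settles on p = 34, giving the classic magic
// constant M = 0x92492493 and shift amount s = 2. Note that M >= 2^31 here,
// the overflow case item (a) anticipates, so real codegen would multiply by
// (M - 2^31) and compensate, per the comment above.

#include <cassert>
#include <cstdint>

static void
CheckDivisionConstantsForSeven()
{
    const int64_t d = 7;
    const uint64_t M = 0x92492493;  // ceil(2^34 / 7)
    const int32_t s = 2;            // p - 32
    for (int64_t n : { int64_t(0), int64_t(1), int64_t(6), int64_t(7),
                       int64_t(100), int64_t(INT32_MAX) }) {
        // The high-order bits of M*n recover the quotient:
        // (M * n) >> (32 + s) == floor(n / d) for all 0 <= n < 2^31.
        assert(int64_t((M * uint64_t(n)) >> (32 + s)) == n / d);
    }
}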
    1849             : 
    1850             : #ifdef JS_TRACE_LOGGING
    1851             : 
    1852             : void
    1853          24 : CodeGeneratorShared::emitTracelogScript(bool isStart)
    1854             : {
    1855          24 :     if (!TraceLogTextIdEnabled(TraceLogger_Scripts))
    1856          24 :         return;
    1857             : 
    1858           0 :     Label done;
    1859             : 
    1860           0 :     AllocatableRegisterSet regs(RegisterSet::Volatile());
    1861           0 :     Register logger = regs.takeAnyGeneral();
    1862           0 :     Register script = regs.takeAnyGeneral();
    1863             : 
    1864           0 :     masm.Push(logger);
    1865             : 
    1866           0 :     masm.loadTraceLogger(logger);
    1867           0 :     masm.branchTestPtr(Assembler::Zero, logger, logger, &done);
    1868             : 
    1869           0 :     Address enabledAddress(logger, TraceLoggerThread::offsetOfEnabled());
    1870           0 :     masm.branch32(Assembler::Equal, enabledAddress, Imm32(0), &done);
    1871             : 
    1872           0 :     masm.Push(script);
    1873             : 
    1874           0 :     CodeOffset patchScript = masm.movWithPatch(ImmWord(0), script);
    1875           0 :     masm.propagateOOM(patchableTLScripts_.append(patchScript));
    1876             : 
    1877           0 :     if (isStart)
    1878           0 :         masm.tracelogStartId(logger, script);
    1879             :     else
    1880           0 :         masm.tracelogStopId(logger, script);
    1881             : 
    1882           0 :     masm.Pop(script);
    1883             : 
    1884           0 :     masm.bind(&done);
    1885             : 
    1886           0 :     masm.Pop(logger);
    1887             : }
    1888             : 
    1889             : void
    1890          48 : CodeGeneratorShared::emitTracelogTree(bool isStart, uint32_t textId)
    1891             : {
    1892          48 :     if (!TraceLogTextIdEnabled(textId))
    1893          48 :         return;
    1894             : 
    1895           0 :     Label done;
    1896           0 :     AllocatableRegisterSet regs(RegisterSet::Volatile());
    1897           0 :     Register logger = regs.takeAnyGeneral();
    1898             : 
    1899           0 :     masm.Push(logger);
    1900             : 
    1901           0 :     masm.loadTraceLogger(logger);
    1902           0 :     masm.branchTestPtr(Assembler::Zero, logger, logger, &done);
    1903             : 
    1904           0 :     Address enabledAddress(logger, TraceLoggerThread::offsetOfEnabled());
    1905           0 :     masm.branch32(Assembler::Equal, enabledAddress, Imm32(0), &done);
    1906             : 
    1907           0 :     if (isStart)
    1908           0 :         masm.tracelogStartId(logger, textId);
    1909             :     else
    1910           0 :         masm.tracelogStopId(logger, textId);
    1911             : 
    1912           0 :     masm.bind(&done);
    1913             : 
    1914           0 :     masm.Pop(logger);
    1915             : }
    1916             : 
    1917             : void
    1918           0 : CodeGeneratorShared::emitTracelogTree(bool isStart, const char* text,
    1919             :                                       TraceLoggerTextId enabledTextId)
    1920             : {
    1921           0 :     if (!TraceLogTextIdEnabled(enabledTextId))
    1922           0 :         return;
    1923             : 
    1924           0 :     Label done;
    1925             : 
    1926           0 :     AllocatableRegisterSet regs(RegisterSet::Volatile());
    1927           0 :     Register loggerReg = regs.takeAnyGeneral();
    1928           0 :     Register eventReg = regs.takeAnyGeneral();
    1929             : 
    1930           0 :     masm.Push(loggerReg);
    1931             : 
    1932           0 :     masm.loadTraceLogger(loggerReg);
    1933           0 :     masm.branchTestPtr(Assembler::Zero, loggerReg, loggerReg, &done);
    1934             : 
    1935           0 :     Address enabledAddress(loggerReg, TraceLoggerThread::offsetOfEnabled());
    1936           0 :     masm.branch32(Assembler::Equal, enabledAddress, Imm32(0), &done);
    1937             : 
    1938           0 :     masm.Push(eventReg);
    1939             : 
    1940           0 :     PatchableTLEvent patchEvent(masm.movWithPatch(ImmWord(0), eventReg), text);
    1941           0 :     masm.propagateOOM(patchableTLEvents_.append(Move(patchEvent)));
    1942             : 
    1943           0 :     if (isStart)
    1944           0 :         masm.tracelogStartId(loggerReg, eventReg);
    1945             :     else
    1946           0 :         masm.tracelogStopId(loggerReg, eventReg);
    1947             : 
    1948           0 :     masm.Pop(eventReg);
    1949             : 
    1950           0 :     masm.bind(&done);
    1951             : 
    1952           0 :     masm.Pop(loggerReg);
    1953             : }
    1954             : #endif
    1955             : 
    1956             : } // namespace jit
    1957             : } // namespace js

Generated by: LCOV version 1.13