LCOV - code coverage report
Current view: top level - js/src/jit/x64 - Assembler-x64.cpp (source / functions) Hit Total Coverage
Test: output.info Lines: 94 113 83.2 %
Date: 2017-07-14 16:53:18 Functions: 13 14 92.9 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
       2             :  * vim: set ts=8 sts=4 et sw=4 tw=99:
       3             :  * This Source Code Form is subject to the terms of the Mozilla Public
       4             :  * License, v. 2.0. If a copy of the MPL was not distributed with this
       5             :  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
       6             : 
       7             : #include "jit/x64/Assembler-x64.h"
       8             : 
       9             : #include "gc/Marking.h"
      10             : 
      11             : using namespace js;
      12             : using namespace js::jit;
      13             : 
// Initialize per-call argument assignment state: all register cursors start
// at zero, and the stack offset starts at the ABI-required base.
ABIArgGenerator::ABIArgGenerator()
  :
#if defined(XP_WIN)
    // Win64: integer and float arguments share a single sequence of slots,
    // so one cursor suffices; the stack base skips the callee shadow space.
    regIndex_(0),
    stackOffset_(ShadowStackSpace),
#else
    // SysV AMD64: integer and float registers are allocated independently.
    intRegIndex_(0),
    floatRegIndex_(0),
    stackOffset_(0),
#endif
    current_()
{}
      26             : 
// Assign the ABI location (register or stack slot) for the next argument of
// the given MIR type, advancing the generator's register/stack cursors.
//
// Returns the ABIArg describing where the argument is passed; the result is
// also cached in current_.
ABIArg
ABIArgGenerator::next(MIRType type)
{
#if defined(XP_WIN)
    // Win64 shares one slot sequence between integer and float registers.
    JS_STATIC_ASSERT(NumIntArgRegs == NumFloatArgRegs);
    if (regIndex_ == NumIntArgRegs) {
        // All register slots are consumed; this argument goes on the stack.
        if (IsSimdType(type)) {
            // On Win64, >64 bit args need to be passed by reference, but wasm
            // doesn't allow passing SIMD values to FFIs. The only way to reach
            // here is asm to asm calls, so we can break the ABI here.
            stackOffset_ = AlignBytes(stackOffset_, SimdMemoryAlignment);
            current_ = ABIArg(stackOffset_);
            stackOffset_ += Simd128DataSize;
        } else {
            current_ = ABIArg(stackOffset_);
            stackOffset_ += sizeof(uint64_t);
        }
        return current_;
    }
    switch (type) {
      case MIRType::Int32:
      case MIRType::Int64:
      case MIRType::Pointer:
        current_ = ABIArg(IntArgRegs[regIndex_++]);
        break;
      case MIRType::Float32:
        current_ = ABIArg(FloatArgRegs[regIndex_++].asSingle());
        break;
      case MIRType::Double:
        current_ = ABIArg(FloatArgRegs[regIndex_++]);
        break;
      case MIRType::Int8x16:
      case MIRType::Int16x8:
      case MIRType::Int32x4:
      case MIRType::Float32x4:
      case MIRType::Bool8x16:
      case MIRType::Bool16x8:
      case MIRType::Bool32x4:
        // On Win64, >64 bit args need to be passed by reference, but wasm
        // doesn't allow passing SIMD values to FFIs. The only way to reach
        // here is asm to asm calls, so we can break the ABI here.
        current_ = ABIArg(FloatArgRegs[regIndex_++].asSimd128());
        break;
      default:
        MOZ_CRASH("Unexpected argument type");
    }
    return current_;
#else
    // SysV AMD64: integer and float arguments draw from independent register
    // pools; when a pool is exhausted, that argument spills to the stack.
    switch (type) {
      case MIRType::Int32:
      case MIRType::Int64:
      case MIRType::Pointer:
        if (intRegIndex_ == NumIntArgRegs) {
            // Out of integer registers: pass in an 8-byte stack slot.
            current_ = ABIArg(stackOffset_);
            stackOffset_ += sizeof(uint64_t);
            break;
        }
        current_ = ABIArg(IntArgRegs[intRegIndex_++]);
        break;
      case MIRType::Double:
      case MIRType::Float32:
        if (floatRegIndex_ == NumFloatArgRegs) {
            // Out of float registers: spill to an 8-byte stack slot.
            current_ = ABIArg(stackOffset_);
            stackOffset_ += sizeof(uint64_t);
            break;
        }
        if (type == MIRType::Float32)
            current_ = ABIArg(FloatArgRegs[floatRegIndex_++].asSingle());
        else
            current_ = ABIArg(FloatArgRegs[floatRegIndex_++]);
        break;
      case MIRType::Int8x16:
      case MIRType::Int16x8:
      case MIRType::Int32x4:
      case MIRType::Float32x4:
      case MIRType::Bool8x16:
      case MIRType::Bool16x8:
      case MIRType::Bool32x4:
        if (floatRegIndex_ == NumFloatArgRegs) {
            // SIMD stack arguments are aligned to SimdMemoryAlignment and
            // occupy a full 128-bit slot.
            stackOffset_ = AlignBytes(stackOffset_, SimdMemoryAlignment);
            current_ = ABIArg(stackOffset_);
            stackOffset_ += Simd128DataSize;
            break;
        }
        current_ = ABIArg(FloatArgRegs[floatRegIndex_++].asSimd128());
        break;
      default:
        MOZ_CRASH("Unexpected argument type");
    }
    return current_;
#endif
}
     119             : 
     120             : void
     121        6704 : Assembler::writeRelocation(JmpSrc src, Relocation::Kind reloc)
     122             : {
     123        6704 :     if (!jumpRelocations_.length()) {
     124             :         // The jump relocation table starts with a fixed-width integer pointing
     125             :         // to the start of the extended jump table. But, we don't know the
     126             :         // actual extended jump table offset yet, so write a 0 which we'll
     127             :         // patch later.
     128        2879 :         jumpRelocations_.writeFixedUint32_t(0);
     129             :     }
     130        6704 :     if (reloc == Relocation::JITCODE) {
     131        6683 :         jumpRelocations_.writeUnsigned(src.offset());
     132        6683 :         jumpRelocations_.writeUnsigned(jumps_.length());
     133             :     }
     134        6704 : }
     135             : 
     136             : void
     137       19629 : Assembler::addPendingJump(JmpSrc src, ImmPtr target, Relocation::Kind reloc)
     138             : {
     139       19629 :     MOZ_ASSERT(target.value != nullptr);
     140             : 
     141             :     // Emit reloc before modifying the jump table, since it computes a 0-based
     142             :     // index. This jump is not patchable at runtime.
     143       19629 :     if (reloc == Relocation::JITCODE)
     144        6683 :         writeRelocation(src, reloc);
     145       19629 :     enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), target.value, reloc));
     146       19629 : }
     147             : 
     148             : size_t
     149          21 : Assembler::addPatchableJump(JmpSrc src, Relocation::Kind reloc)
     150             : {
     151             :     // This jump is patchable at runtime so we always need to make sure the
     152             :     // jump table is emitted.
     153          21 :     writeRelocation(src, reloc);
     154             : 
     155          21 :     size_t index = jumps_.length();
     156          21 :     enoughMemory_ &= jumps_.append(RelativePatch(src.offset(), nullptr, reloc));
     157          21 :     return index;
     158             : }
     159             : 
     160             : /* static */
     161             : uint8_t*
     162          21 : Assembler::PatchableJumpAddress(JitCode* code, size_t index)
     163             : {
     164             :     // The assembler stashed the offset into the code of the fragments used
     165             :     // for far jumps at the start of the relocation table.
     166          21 :     uint32_t jumpOffset = * (uint32_t*) code->jumpRelocTable();
     167          21 :     jumpOffset += index * SizeOfJumpTableEntry;
     168             : 
     169          21 :     MOZ_ASSERT(jumpOffset + SizeOfExtendedJump <= code->instructionsSize());
     170          21 :     return code->raw() + jumpOffset;
     171             : }
     172             : 
     173             : /* static */
     174             : void
     175           0 : Assembler::PatchJumpEntry(uint8_t* entry, uint8_t* target, ReprotectCode reprotect)
     176             : {
     177           0 :     uint8_t** index = (uint8_t**) (entry + SizeOfExtendedJump - sizeof(void*));
     178           0 :     MaybeAutoWritableJitCode awjc(index, sizeof(void*), reprotect);
     179           0 :     *index = target;
     180           0 : }
     181             : 
// Emit the extended jump table at the end of the code. Each pending jump
// gets one fixed-size entry (an indirect jump through a 64-bit immediate)
// so that out-of-rel32-range targets can be reached; the immediates are
// zeroed here and patched in executableCopy().
void
Assembler::finish()
{
    if (!jumps_.length() || oom())
        return;

    // Emit the jump table.
    masm.haltingAlign(SizeOfJumpTableEntry);
    extendedJumpTable_ = masm.size();

    // Now that we know the offset to the jump table, squirrel it into the
    // jump relocation buffer if any JitCode references exist and must be
    // tracked for GC.
    MOZ_ASSERT_IF(jumpRelocations_.length(), jumpRelocations_.length() >= sizeof(uint32_t));
    if (jumpRelocations_.length())
        *(uint32_t*)jumpRelocations_.buffer() = extendedJumpTable_;

    // Zero the extended jumps table.
    for (size_t i = 0; i < jumps_.length(); i++) {
#ifdef DEBUG
        size_t oldSize = masm.size();
#endif
        // RIP-relative indirect jump; the displacement skips past the ud2
        // below to reach the 64-bit immediate holding the target.
        masm.jmp_rip(2);
        MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == 6);
        // Following an indirect branch with ud2 hints to the hardware that
        // there's no fall-through. This also aligns the 64-bit immediate.
        masm.ud2();
        MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == 8);
        // Placeholder target, patched in executableCopy()/PatchJumpEntry().
        masm.immediate64(0);
        MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == SizeOfExtendedJump);
        MOZ_ASSERT_IF(!masm.oom(), masm.size() - oldSize == SizeOfJumpTableEntry);
    }
}
     215             : 
// Copy the assembled code into |buffer| and resolve every pending jump,
// either directly (rel32) or by routing it through its extended jump table
// entry when the target is out of rel32 range.
// NOTE(review): |flushICache| is unused in this body — presumably x64 needs
// no explicit flush here; confirm against the shared implementation.
void
Assembler::executableCopy(uint8_t* buffer, bool flushICache)
{
    AssemblerX86Shared::executableCopy(buffer);

    for (size_t i = 0; i < jumps_.length(); i++) {
        RelativePatch& rp = jumps_[i];
        uint8_t* src = buffer + rp.offset;
        if (!rp.target) {
            // The patch target is nullptr for jumps that have been linked to
            // a label within the same code block, but may be repatched later
            // to jump to a different code block.
            continue;
        }
        if (X86Encoding::CanRelinkJump(src, rp.target)) {
            // Target reachable with a rel32: patch the jump directly.
            X86Encoding::SetRel32(src, rp.target);
        } else {
            // An extended jump table must exist, and its offset must be in
            // range.
            MOZ_ASSERT(extendedJumpTable_);
            MOZ_ASSERT((extendedJumpTable_ + i * SizeOfJumpTableEntry) <= size() - SizeOfJumpTableEntry);

            // Patch the jump to go to the extended jump entry.
            uint8_t* entry = buffer + extendedJumpTable_ + i * SizeOfJumpTableEntry;
            X86Encoding::SetRel32(src, entry);

            // Now patch the pointer, note that we need to align it to
            // *after* the extended jump, i.e. after the 64-bit immedate.
            X86Encoding::SetPointer(entry + SizeOfExtendedJump, rp.target);
        }
    }
}
     248             : 
     249             : class RelocationIterator
     250             : {
     251             :     CompactBufferReader reader_;
     252             :     uint32_t tableStart_;
     253             :     uint32_t offset_;
     254             :     uint32_t extOffset_;
     255             : 
     256             :   public:
     257          14 :     explicit RelocationIterator(CompactBufferReader& reader)
     258          14 :       : reader_(reader)
     259             :     {
     260          14 :         tableStart_ = reader_.readFixedUint32_t();
     261          14 :     }
     262             : 
     263          32 :     bool read() {
     264          32 :         if (!reader_.more())
     265          14 :             return false;
     266          18 :         offset_ = reader_.readUnsigned();
     267          18 :         extOffset_ = reader_.readUnsigned();
     268          18 :         return true;
     269             :     }
     270             : 
     271          36 :     uint32_t offset() const {
     272          36 :         return offset_;
     273             :     }
     274             :     uint32_t extendedOffset() const {
     275             :         return extOffset_;
     276             :     }
     277             : };
     278             : 
     279             : JitCode*
     280          36 : Assembler::CodeFromJump(JitCode* code, uint8_t* jump)
     281             : {
     282          36 :     uint8_t* target = (uint8_t*)X86Encoding::GetRel32Target(jump);
     283          36 :     if (target >= code->raw() && target < code->raw() + code->instructionsSize()) {
     284             :         // This jump is within the code buffer, so it has been redirected to
     285             :         // the extended jump table.
     286           0 :         MOZ_ASSERT(target + SizeOfJumpTableEntry <= code->raw() + code->instructionsSize());
     287             : 
     288           0 :         target = (uint8_t*)X86Encoding::GetPointer(target + SizeOfExtendedJump);
     289             :     }
     290             : 
     291          36 :     return JitCode::FromExecutable(target);
     292             : }
     293             : 
     294             : void
     295          14 : Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader)
     296             : {
     297          14 :     RelocationIterator iter(reader);
     298          50 :     while (iter.read()) {
     299          18 :         JitCode* child = CodeFromJump(code, code->raw() + iter.offset());
     300          18 :         TraceManuallyBarrieredEdge(trc, &child, "rel32");
     301          18 :         MOZ_ASSERT(child == CodeFromJump(code, code->raw() + iter.offset()));
     302             :     }
     303          14 : }

Generated by: LCOV version 1.13