Line data Source code
1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : * vim: set ts=8 sts=4 et sw=4 tw=99:
3 : * This Source Code Form is subject to the terms of the Mozilla Public
4 : * License, v. 2.0. If a copy of the MPL was not distributed with this
5 : * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 :
7 : #ifndef jit_shared_CodeGenerator_shared_h
8 : #define jit_shared_CodeGenerator_shared_h
9 :
10 : #include "mozilla/Alignment.h"
11 : #include "mozilla/Move.h"
12 : #include "mozilla/TypeTraits.h"
13 :
14 : #include "jit/JitFrames.h"
15 : #include "jit/LIR.h"
16 : #include "jit/MacroAssembler.h"
17 : #include "jit/MIRGenerator.h"
18 : #include "jit/MIRGraph.h"
19 : #include "jit/OptimizationTracking.h"
20 : #include "jit/Safepoints.h"
21 : #include "jit/Snapshots.h"
22 : #include "jit/VMFunctions.h"
23 :
24 : namespace js {
25 : namespace jit {
26 :
27 : class OutOfLineCode;
28 : class CodeGenerator;
29 : class MacroAssembler;
30 : class IonIC;
31 :
32 : template <class ArgSeq, class StoreOutputTo>
33 : class OutOfLineCallVM;
34 :
35 : class OutOfLineTruncateSlow;
36 : class OutOfLineWasmTruncateCheck;
37 :
// Records one patchable loop backedge jump: the jump's code offset together
// with the two labels associated with it — the loop header and the
// interrupt check. (Collected in CodeGeneratorShared::patchableBackedges_.)
struct PatchableBackedgeInfo
{
    CodeOffsetJump backedge;
    Label* loopHeader;
    Label* interruptCheck;

    PatchableBackedgeInfo(CodeOffsetJump backedge, Label* loopHeader, Label* interruptCheck)
      : backedge(backedge), loopHeader(loopHeader), interruptCheck(interruptCheck)
    {}
};
48 :
// Multiplier/shift pair produced by computeDivisionConstants() for
// strength-reducing an integer division into a multiply followed by a shift.
struct ReciprocalMulConstants {
    int64_t multiplier;
    int32_t shiftAmount;
};
53 :
// This should be nested in CodeGeneratorShared, but it is used in
// optimization tracking implementation and nested classes cannot be
// forward-declared.
//
// Associates an inclusive native-code offset range with the tracked
// optimizations that apply to it.
struct NativeToTrackedOptimizations
{
    // [startOffset, endOffset]
    CodeOffset startOffset;
    CodeOffset endOffset;
    const TrackedOptimizations* optimizations;
};
64 :
// Architecture-independent base of the Ion code generators. Owns (or borrows)
// the MacroAssembler and accumulates the side tables produced while emitting
// code: snapshots, recover info, safepoints, OSI points, bailouts, IC data,
// native<->bytecode maps, optimization-tracking data, and out-of-line paths.
class CodeGeneratorShared : public LElementVisitor
{
    // Out-of-line paths registered during codegen; emitted after the main
    // body by generateOutOfLineCode().
    js::Vector<OutOfLineCode*, 0, SystemAllocPolicy> outOfLineCode_;

    // Picks the MacroAssembler to use — presumably |masm| when non-null,
    // otherwise one constructed into maybeMasm_ (implementation in the .cpp;
    // TODO confirm).
    MacroAssembler& ensureMasm(MacroAssembler* masm);
    mozilla::Maybe<MacroAssembler> maybeMasm_;

  public:
    MacroAssembler& masm;

  protected:
    MIRGenerator* gen;
    LIRGraph& graph;
    // The LIR block currently being generated.
    LBlock* current;
    SnapshotWriter snapshots_;
    RecoverWriter recovers_;
    JitCode* deoptTable_;
#ifdef DEBUG
    // Counts pushArg()/pushArgWithPatch() calls (debug-only sanity state).
    uint32_t pushedArgs_;
#endif
    uint32_t lastOsiPointOffset_;
    SafepointWriter safepoints_;
    Label invalidate_;
    CodeOffset invalidateEpilogueData_;

    // Label for the common return path.
    NonAssertingLabel returnLabel_;

    FallbackICStubSpace stubSpace_;

    js::Vector<SafepointIndex, 0, SystemAllocPolicy> safepointIndices_;
    js::Vector<OsiIndex, 0, SystemAllocPolicy> osiIndices_;

    // Mapping from bailout table ID to an offset in the snapshot buffer.
    js::Vector<SnapshotOffset, 0, SystemAllocPolicy> bailouts_;

    // Allocated data space needed at runtime.
    js::Vector<uint8_t, 0, SystemAllocPolicy> runtimeData_;

    // Vector mapping each IC index to its offset in runtimeData_.
    js::Vector<uint32_t, 0, SystemAllocPolicy> icList_;

    // IC data we need at compile-time. Discarded after creating the IonScript.
    struct CompileTimeICInfo {
        CodeOffset icOffsetForJump;
        CodeOffset icOffsetForPush;
    };
    js::Vector<CompileTimeICInfo, 0, SystemAllocPolicy> icInfo_;

    // Patchable backedges generated for loops.
    Vector<PatchableBackedgeInfo, 0, SystemAllocPolicy> patchableBackedges_;

#ifdef JS_TRACE_LOGGING
    // A patchable TraceLogger event: a code offset plus the (unowned) event
    // text pointer.
    struct PatchableTLEvent {
        CodeOffset offset;
        const char* event;
        PatchableTLEvent(CodeOffset offset, const char* event)
            : offset(offset), event(event)
        {}
    };
    js::Vector<PatchableTLEvent, 0, SystemAllocPolicy> patchableTLEvents_;
    js::Vector<CodeOffset, 0, SystemAllocPolicy> patchableTLScripts_;
#endif

  public:
    // One entry of the native-offset -> bytecode-position mapping.
    struct NativeToBytecode {
        CodeOffset nativeOffset;
        InlineScriptTree* tree;
        jsbytecode* pc;
    };

  protected:
    js::Vector<NativeToBytecode, 0, SystemAllocPolicy> nativeToBytecodeList_;
    uint8_t* nativeToBytecodeMap_;
    uint32_t nativeToBytecodeMapSize_;
    uint32_t nativeToBytecodeTableOffset_;
    uint32_t nativeToBytecodeNumRegions_;

    JSScript** nativeToBytecodeScriptList_;
    uint32_t nativeToBytecodeScriptListLength_;

    bool isProfilerInstrumentationEnabled() {
        return gen->isProfilerInstrumentationEnabled();
    }

    js::Vector<NativeToTrackedOptimizations, 0, SystemAllocPolicy> trackedOptimizations_;
    uint8_t* trackedOptimizationsMap_;
    uint32_t trackedOptimizationsMapSize_;
    uint32_t trackedOptimizationsRegionTableOffset_;
    uint32_t trackedOptimizationsTypesTableOffset_;
    uint32_t trackedOptimizationsAttemptsTableOffset_;

    bool isOptimizationTrackingEnabled() {
        return gen->isOptimizationTrackingEnabled();
    }

  protected:
    // The offset of the first instruction of the OSR entry block from the
    // beginning of the code buffer.
    size_t osrEntryOffset_;

    TempAllocator& alloc() const {
        return graph.mir().alloc();
    }

    // May only be set once (asserted); 0 means "not yet set".
    inline void setOsrEntryOffset(size_t offset) {
        MOZ_ASSERT(osrEntryOffset_ == 0);
        osrEntryOffset_ = offset;
    }
    inline size_t getOsrEntryOffset() const {
        return osrEntryOffset_;
    }

    // The offset of the first instruction of the body.
    // This skips the arguments type checks.
    size_t skipArgCheckEntryOffset_;

    // May only be set once (asserted); 0 means "not yet set".
    inline void setSkipArgCheckEntryOffset(size_t offset) {
        MOZ_ASSERT(skipArgCheckEntryOffset_ == 0);
        skipArgCheckEntryOffset_ = offset;
    }
    inline size_t getSkipArgCheckEntryOffset() const {
        return skipArgCheckEntryOffset_;
    }

    typedef js::Vector<SafepointIndex, 8, SystemAllocPolicy> SafepointIndices;

  protected:
#ifdef CHECK_OSIPOINT_REGISTERS
    // See JitOptions.checkOsiPointRegisters. We set this here to avoid
    // races when enableOsiPointRegisterChecks is called while we're generating
    // code off-thread.
    bool checkOsiPointRegisters;
#endif

    // The initial size of the frame in bytes. These are bytes beyond the
    // constant header present for every Ion frame, used for pre-determined
    // spills.
    int32_t frameDepth_;

    // In some cases, we force stack alignment to platform boundaries, see
    // also CodeGeneratorShared constructor. This value records the adjustment
    // we've done.
    int32_t frameInitialAdjustment_;

    // Frame class this frame's size falls into (see IonFrame.h).
    FrameSizeClass frameClass_;

    // For arguments to the current function.
    inline int32_t ArgToStackOffset(int32_t slot) const;

    inline int32_t SlotToStackOffset(int32_t slot) const;
    inline int32_t StackOffsetToSlot(int32_t offset) const;

    // For argument construction for calls. Argslots are Value-sized.
    inline int32_t StackOffsetOfPassedArg(int32_t slot) const;

    inline int32_t ToStackOffset(LAllocation a) const;
    inline int32_t ToStackOffset(const LAllocation* a) const;

    inline Address ToAddress(const LAllocation& a);
    inline Address ToAddress(const LAllocation* a);

    // Frame size in bytes: the size-class-rounded size when this frame falls
    // into a class, otherwise the raw frame depth.
    uint32_t frameSize() const {
        return frameClass_ == FrameSizeClass::None() ? frameDepth_ : frameClass_.frameSize();
    }

  protected:
#ifdef CHECK_OSIPOINT_REGISTERS
    void resetOsiPointRegs(LSafepoint* safepoint);
    bool shouldVerifyOsiPointRegs(LSafepoint* safepoint);
    void verifyOsiPointRegs(LSafepoint* safepoint);
#endif

    bool addNativeToBytecodeEntry(const BytecodeSite* site);
    void dumpNativeToBytecodeEntries();
    void dumpNativeToBytecodeEntry(uint32_t idx);

    bool addTrackedOptimizationsEntry(const TrackedOptimizations* optimizations);
    void extendTrackedOptimizationsEntry(const TrackedOptimizations* optimizations);

  public:
    MIRGenerator& mirGen() const {
        return *gen;
    }

    // When appending to runtimeData_, the vector might realloc, leaving pointers
    // in the original vector stale and unusable. DataPtr acts like a pointer,
    // but allows safety in the face of potentially realloc'ing vector appends.
    friend class DataPtr;
    template <typename T>
    class DataPtr
    {
        CodeGeneratorShared* cg_;
        size_t index_;

        // Recompute the address on every access so a realloc of runtimeData_
        // between accesses cannot leave us with a stale pointer.
        T* lookup() {
            return reinterpret_cast<T*>(&cg_->runtimeData_[index_]);
        }
      public:
        DataPtr(CodeGeneratorShared* cg, size_t index)
          : cg_(cg), index_(index) { }

        T * operator ->() {
            return lookup();
        }
        // NOTE: unlike a real pointer, operator* yields T* (not T&).
        T * operator*() {
            return lookup();
        }
    };

  protected:
    // Reserves |size| zeroed bytes in runtimeData_, returning their start
    // position through |offset|. |size| must be pointer-size aligned. OOM is
    // propagated through the masm; returns false if the assembler is in an
    // OOM state afterwards.
    MOZ_MUST_USE
    bool allocateData(size_t size, size_t* offset) {
        MOZ_ASSERT(size % sizeof(void*) == 0);
        *offset = runtimeData_.length();
        masm.propagateOOM(runtimeData_.appendN(0, size));
        return !masm.oom();
    }

    // Copy-constructs |cache| into freshly reserved, suitably aligned space in
    // runtimeData_ and records its offset in icList_ and a blank entry in
    // icInfo_. Returns the offset (usable with DataPtr), or SIZE_MAX on OOM.
    template <typename T>
    inline size_t allocateIC(const T& cache) {
        static_assert(mozilla::IsBaseOf<IonIC, T>::value, "T must inherit from IonIC");
        size_t index;
        masm.propagateOOM(allocateData(sizeof(mozilla::AlignedStorage2<T>), &index));
        masm.propagateOOM(icList_.append(index));
        masm.propagateOOM(icInfo_.append(CompileTimeICInfo()));
        if (masm.oom())
            return SIZE_MAX;
        // Use the copy constructor on the allocated space.
        MOZ_ASSERT(index == icList_.back());
        new (&runtimeData_[index]) T(cache);
        return index;
    }

  protected:
    // Encodes an LSnapshot into the compressed snapshot buffer.
    void encode(LRecoverInfo* recover);
    void encode(LSnapshot* snapshot);
    void encodeAllocation(LSnapshot* snapshot, MDefinition* def, uint32_t* startIndex);

    // Attempts to assign a BailoutId to a snapshot, if one isn't already set.
    // If the bailout table is full, this returns false, which is not a fatal
    // error (the code generator may use a slower bailout mechanism).
    bool assignBailoutId(LSnapshot* snapshot);

    // Encode all encountered safepoints in CG-order, and resolve |indices| for
    // safepoint offsets.
    bool encodeSafepoints();

    // Fixup offsets of native-to-bytecode map.
    bool createNativeToBytecodeScriptList(JSContext* cx);
    bool generateCompactNativeToBytecodeMap(JSContext* cx, JitCode* code);
    void verifyCompactNativeToBytecodeMap(JitCode* code);

    bool generateCompactTrackedOptimizationsMap(JSContext* cx, JitCode* code,
                                                IonTrackedTypeVector* allTypes);
    void verifyCompactTrackedOptimizationsMap(JitCode* code, uint32_t numRegions,
                                              const UniqueTrackedOptimizations& unique,
                                              const IonTrackedTypeVector* allTypes);

    // Mark the safepoint on |ins| as corresponding to the current assembler location.
    // The location should be just after a call.
    void markSafepoint(LInstruction* ins);
    void markSafepointAt(uint32_t offset, LInstruction* ins);

    // Mark the OSI point |ins| as corresponding to the current
    // assembler location inside the |osiIndices_|. Return the assembler
    // location for the OSI point return location.
    uint32_t markOsiPoint(LOsiPoint* ins);

    // Ensure that there is enough room between the last OSI point and the
    // current instruction, such that:
    //  (1) Invalidation will not overwrite the current instruction, and
    //  (2) Overwriting the current instruction will not overwrite
    //      an invalidation marker.
    void ensureOsiSpace();

    OutOfLineCode* oolTruncateDouble(FloatRegister src, Register dest, MInstruction* mir,
                                     wasm::BytecodeOffset callOffset = wasm::BytecodeOffset());
    void emitTruncateDouble(FloatRegister src, Register dest, MTruncateToInt32* mir);
    void emitTruncateFloat32(FloatRegister src, Register dest, MTruncateToInt32* mir);

    void emitWasmCallBase(LWasmCallBase* ins);
    void visitWasmCall(LWasmCall* ins) { emitWasmCallBase(ins); }
    void visitWasmCallI64(LWasmCallI64* ins) { emitWasmCallBase(ins); }

    void visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins);
    void visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins);
    void visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins);
    void visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins);

    void emitPreBarrier(Register base, const LAllocation* index, int32_t offsetAdjustment);
    void emitPreBarrier(Address address);

    // We don't emit code for trivial blocks, so if we want to branch to the
    // given block, and it's trivial, return the ultimate block we should
    // actually branch directly to.
    MBasicBlock* skipTrivialBlocks(MBasicBlock* block) {
        while (block->lir()->isTrivial()) {
            // A trivial block must fall through to exactly one successor.
            MOZ_ASSERT(block->lir()->rbegin()->numSuccessors() == 1);
            block = block->lir()->rbegin()->getSuccessor(0);
        }
        return block;
    }

    // Test whether the given block can be reached via fallthrough from the
    // current block.
    inline bool isNextBlock(LBlock* block) {
        uint32_t target = skipTrivialBlocks(block->mir())->id();
        uint32_t i = current->mir()->id() + 1;
        if (target < i)
            return false;
        // Trivial blocks can be crossed via fallthrough.
        for (; i != target; ++i) {
            if (!graph.getBlock(i)->isTrivial())
                return false;
        }
        return true;
    }

  public:
    // Save and restore all volatile registers to/from the stack, excluding the
    // specified register(s), before a function call made using callWithABI and
    // after storing the function call's return value to an output register.
    // (The only registers that don't need to be saved/restored are 1) the
    // temporary register used to store the return value of the function call,
    // if there is one [otherwise that stored value would be overwritten]; and
    // 2) temporary registers whose values aren't needed in the rest of the LIR
    // instruction [this is purely an optimization]. All other volatiles must
    // be saved and restored in case future LIR instructions need those values.)
    void saveVolatile(Register output) {
        LiveRegisterSet regs(RegisterSet::Volatile());
        regs.takeUnchecked(output);
        masm.PushRegsInMask(regs);
    }
    void restoreVolatile(Register output) {
        LiveRegisterSet regs(RegisterSet::Volatile());
        regs.takeUnchecked(output);
        masm.PopRegsInMask(regs);
    }
    void saveVolatile(FloatRegister output) {
        LiveRegisterSet regs(RegisterSet::Volatile());
        regs.takeUnchecked(output);
        masm.PushRegsInMask(regs);
    }
    void restoreVolatile(FloatRegister output) {
        LiveRegisterSet regs(RegisterSet::Volatile());
        regs.takeUnchecked(output);
        masm.PopRegsInMask(regs);
    }
    void saveVolatile(LiveRegisterSet temps) {
        masm.PushRegsInMask(LiveRegisterSet(RegisterSet::VolatileNot(temps.set())));
    }
    void restoreVolatile(LiveRegisterSet temps) {
        masm.PopRegsInMask(LiveRegisterSet(RegisterSet::VolatileNot(temps.set())));
    }
    void saveVolatile() {
        masm.PushRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));
    }
    void restoreVolatile() {
        masm.PopRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));
    }

    // These functions have to be called before and after any callVM and before
    // any modifications of the stack. Modification of the stack made after
    // these calls should update the framePushed variable, needed by the exit
    // frame produced by callVM.
    inline void saveLive(LInstruction* ins);
    inline void restoreLive(LInstruction* ins);
    inline void restoreLiveIgnore(LInstruction* ins, LiveRegisterSet reg);

    // Save/restore all registers that are both live and volatile.
    inline void saveLiveVolatile(LInstruction* ins);
    inline void restoreLiveVolatile(LInstruction* ins);

    // Push one VM-call argument (counted in pushedArgs_ in DEBUG builds).
    template <typename T>
    void pushArg(const T& t) {
        masm.Push(t);
#ifdef DEBUG
        pushedArgs_++;
#endif
    }

    // As pushArg, but returns the offset of the push for later patching.
    template <typename T>
    CodeOffset pushArgWithPatch(const T& t) {
#ifdef DEBUG
        pushedArgs_++;
#endif
        return masm.PushWithPatch(t);
    }

    // Store the VM call's word-sized result into |reg|.
    void storeResultTo(Register reg) {
        masm.storeCallWordResult(reg);
    }

    void storeFloatResultTo(FloatRegister reg) {
        masm.storeCallFloatResult(reg);
    }

    template <typename T>
    void storeResultValueTo(const T& t) {
        masm.storeCallResultValue(t);
    }

    void callVM(const VMFunction& f, LInstruction* ins, const Register* dynStack = nullptr);

    template <class ArgSeq, class StoreOutputTo>
    inline OutOfLineCode* oolCallVM(const VMFunction& fun, LInstruction* ins, const ArgSeq& args,
                                    const StoreOutputTo& out);

    void addIC(LInstruction* lir, size_t cacheIndex);

    // Computes the ReciprocalMulConstants pair for division by |d|.
    ReciprocalMulConstants computeDivisionConstants(uint32_t d, int maxLog);

  protected:
    bool generatePrologue();
    bool generateEpilogue();

    void addOutOfLineCode(OutOfLineCode* code, const MInstruction* mir);
    void addOutOfLineCode(OutOfLineCode* code, const BytecodeSite* site);
    bool generateOutOfLineCode();

    Label* getJumpLabelForBranch(MBasicBlock* block);

    // Generate a jump to the start of the specified block, adding information
    // if this is a loop backedge. Use this in place of jumping directly to
    // mir->lir()->label(), or use getJumpLabelForBranch() if a label to use
    // directly is needed.
    void jumpToBlock(MBasicBlock* mir);

    // Get a label for the start of block which can be used for jumping, in
    // place of jumpToBlock.
    Label* labelForBackedgeWithImplicitCheck(MBasicBlock* mir);

    // This function is not used for MIPS. MIPS has branchToBlock.
#if !defined(JS_CODEGEN_MIPS32) && !defined(JS_CODEGEN_MIPS64)
    void jumpToBlock(MBasicBlock* mir, Assembler::Condition cond);
#endif

    // Builds a wasm trap descriptor for |mir| at the current frame depth.
    template <class T>
    wasm::TrapDesc trap(T* mir, wasm::Trap trap) {
        return wasm::TrapDesc(mir->bytecodeOffset(), trap, masm.framePushed());
    }

  private:
    void generateInvalidateEpilogue();

  public:
    CodeGeneratorShared(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);

  public:
    template <class ArgSeq, class StoreOutputTo>
    void visitOutOfLineCallVM(OutOfLineCallVM<ArgSeq, StoreOutputTo>* ool);

    void visitOutOfLineTruncateSlow(OutOfLineTruncateSlow* ool);

    // Crashes unless overridden; presumably specialized by per-architecture
    // code generators that support wasm truncation checks.
    virtual void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool) {
        MOZ_CRASH("NYI");
    }

    bool omitOverRecursedCheck() const;

#ifdef JS_TRACE_LOGGING
  protected:
    void emitTracelogScript(bool isStart);
    void emitTracelogTree(bool isStart, uint32_t textId);
    void emitTracelogTree(bool isStart, const char* text, TraceLoggerTextId enabledTextId);
#endif

  public:
#ifdef JS_TRACE_LOGGING
    void emitTracelogScriptStart() {
        emitTracelogScript(/* isStart =*/ true);
    }
    void emitTracelogScriptStop() {
        emitTracelogScript(/* isStart =*/ false);
    }
    void emitTracelogStartEvent(uint32_t textId) {
        emitTracelogTree(/* isStart =*/ true, textId);
    }
    void emitTracelogStopEvent(uint32_t textId) {
        emitTracelogTree(/* isStart =*/ false, textId);
    }
    // Log an arbitrary text. The TraceloggerTextId is used to toggle the
    // logging on and off.
    // Note: the text is not copied and need to be kept alive until linking.
    void emitTracelogStartEvent(const char* text, TraceLoggerTextId enabledTextId) {
        emitTracelogTree(/* isStart =*/ true, text, enabledTextId);
    }
    void emitTracelogStopEvent(const char* text, TraceLoggerTextId enabledTextId) {
        emitTracelogTree(/* isStart =*/ false, text, enabledTextId);
    }
    void emitTracelogIonStart() {
        emitTracelogScriptStart();
        emitTracelogStartEvent(TraceLogger_IonMonkey);
    }
    void emitTracelogIonStop() {
        emitTracelogStopEvent(TraceLogger_IonMonkey);
        emitTracelogScriptStop();
    }
#else
    // No-op stubs when trace logging is compiled out.
    void emitTracelogScriptStart() {}
    void emitTracelogScriptStop() {}
    void emitTracelogStartEvent(uint32_t textId) {}
    void emitTracelogStopEvent(uint32_t textId) {}
    void emitTracelogStartEvent(const char* text, TraceLoggerTextId enabledTextId) {}
    void emitTracelogStopEvent(const char* text, TraceLoggerTextId enabledTextId) {}
    void emitTracelogIonStart() {}
    void emitTracelogIonStop() {}
#endif

  protected:
    inline void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end, bool isLoad,
                                            Scalar::Type type, Operand mem, LAllocation alloc);

  public:
    inline void verifyLoadDisassembly(uint32_t begin, uint32_t end, Scalar::Type type,
                                      Operand mem, LAllocation alloc);
    inline void verifyStoreDisassembly(uint32_t begin, uint32_t end, Scalar::Type type,
                                       Operand mem, LAllocation alloc);

    bool isGlobalObject(JSObject* object);
};
589 :
590 : // An out-of-line path is generated at the end of the function.
591 : class OutOfLineCode : public TempObject
592 : {
593 : Label entry_;
594 : Label rejoin_;
595 : uint32_t framePushed_;
596 : const BytecodeSite* site_;
597 :
598 : public:
599 465 : OutOfLineCode()
600 465 : : framePushed_(0),
601 465 : site_()
602 465 : { }
603 :
604 : virtual void generate(CodeGeneratorShared* codegen) = 0;
605 :
606 857 : Label* entry() {
607 857 : return &entry_;
608 : }
609 430 : virtual void bind(MacroAssembler* masm) {
610 430 : masm->bind(entry());
611 430 : }
612 226 : Label* rejoin() {
613 226 : return &rejoin_;
614 : }
615 465 : void setFramePushed(uint32_t framePushed) {
616 465 : framePushed_ = framePushed;
617 465 : }
618 465 : uint32_t framePushed() const {
619 465 : return framePushed_;
620 : }
621 465 : void setBytecodeSite(const BytecodeSite* site) {
622 465 : site_ = site;
623 465 : }
624 465 : const BytecodeSite* bytecodeSite() const {
625 465 : return site_;
626 : }
627 930 : jsbytecode* pc() const {
628 930 : return site_->pc();
629 : }
630 465 : JSScript* script() const {
631 465 : return site_->script();
632 : }
633 : };
634 :
// For OOL paths that want a specific-typed code generator: generate()
// downcasts the shared codegen to T and dispatches to the typed accept().
template <typename T>
class OutOfLineCodeBase : public OutOfLineCode
{
  public:
    virtual void generate(CodeGeneratorShared* codegen) {
        accept(static_cast<T*>(codegen));
    }

  public:
    // Implemented by each concrete OOL path; called with the typed codegen.
    virtual void accept(T* codegen) = 0;
};
647 :
// ArgSeq store arguments for OutOfLineCallVM.
//
// OutOfLineCallVM are created with the "oolCallVM" function. The third
// argument of this function is an instance of a class which provides a
// "generate" method in charge of pushing the arguments, with "pushArg", for
// a VMFunction.
//
// Such a list of arguments can be created by using the "ArgList" function,
// which creates one instance of "ArgSeq", where the type of the arguments are
// inferred from the type of the arguments.
//
// The list of arguments must be written in the same order as if you were
// calling the function in C++.
//
// Example:
//   ArgList(ToRegister(lir->lhs()), ToRegister(lir->rhs()))

template <typename... ArgTypes>
class ArgSeq;

// Base case of the recursion: an empty argument list pushes nothing.
template <>
class ArgSeq<>
{
  public:
    ArgSeq() { }

    inline void generate(CodeGeneratorShared* codegen) const {
    }
};
676 :
// Recursive case: stores the head argument by value (reference stripped) and
// inherits the storage for the remaining (tail) arguments.
template <typename HeadType, typename... TailTypes>
class ArgSeq<HeadType, TailTypes...> : public ArgSeq<TailTypes...>
{
  private:
    using RawHeadType = typename mozilla::RemoveReference<HeadType>::Type;
    RawHeadType head_;

  public:
    template <typename ProvidedHead, typename... ProvidedTail>
    explicit ArgSeq(ProvidedHead&& head, ProvidedTail&&... tail)
      : ArgSeq<TailTypes...>(mozilla::Forward<ProvidedTail>(tail)...),
        head_(mozilla::Forward<ProvidedHead>(head))
    { }

    // Arguments are pushed in reverse order, from last argument to first
    // argument.
    inline void generate(CodeGeneratorShared* codegen) const {
        this->ArgSeq<TailTypes...>::generate(codegen);
        codegen->pushArg(head_);
    }
};
698 :
// Builds an ArgSeq from the given arguments, deducing their types.
template <typename... ArgTypes>
inline ArgSeq<ArgTypes...>
ArgList(ArgTypes&&... args)
{
    return ArgSeq<ArgTypes...>(mozilla::Forward<ArgTypes>(args)...);
}
705 :
// Store wrappers, to generate the right move of data after the VM call.

// No output to store: generate() is a no-op and no registers are clobbered.
struct StoreNothing
{
    inline void generate(CodeGeneratorShared* codegen) const {
    }
    inline LiveRegisterSet clobbered() const {
        return LiveRegisterSet(); // No register gets clobbered
    }
};
716 :
717 : class StoreRegisterTo
718 : {
719 : private:
720 : Register out_;
721 :
722 : public:
723 43 : explicit StoreRegisterTo(Register out)
724 43 : : out_(out)
725 43 : { }
726 :
727 41 : inline void generate(CodeGeneratorShared* codegen) const {
728 41 : codegen->storeResultTo(out_);
729 41 : }
730 41 : inline LiveRegisterSet clobbered() const {
731 41 : LiveRegisterSet set;
732 41 : set.add(out_);
733 41 : return set;
734 : }
735 : };
736 :
737 : class StoreFloatRegisterTo
738 : {
739 : private:
740 : FloatRegister out_;
741 :
742 : public:
743 0 : explicit StoreFloatRegisterTo(FloatRegister out)
744 0 : : out_(out)
745 0 : { }
746 :
747 0 : inline void generate(CodeGeneratorShared* codegen) const {
748 0 : codegen->storeFloatResultTo(out_);
749 0 : }
750 0 : inline LiveRegisterSet clobbered() const {
751 0 : LiveRegisterSet set;
752 0 : set.add(out_);
753 0 : return set;
754 : }
755 : };
756 :
757 : template <typename Output>
758 : class StoreValueTo_
759 : {
760 : private:
761 : Output out_;
762 :
763 : public:
764 54 : explicit StoreValueTo_(const Output& out)
765 54 : : out_(out)
766 54 : { }
767 :
768 27 : inline void generate(CodeGeneratorShared* codegen) const {
769 27 : codegen->storeResultValueTo(out_);
770 27 : }
771 27 : inline LiveRegisterSet clobbered() const {
772 27 : LiveRegisterSet set;
773 27 : set.add(out_);
774 27 : return set;
775 : }
776 : };
777 :
778 : template <typename Output>
779 54 : StoreValueTo_<Output> StoreValueTo(const Output& out)
780 : {
781 54 : return StoreValueTo_<Output>(out);
782 : }
783 :
784 : template <class ArgSeq, class StoreOutputTo>
785 : class OutOfLineCallVM : public OutOfLineCodeBase<CodeGeneratorShared>
786 : {
787 : private:
788 : LInstruction* lir_;
789 : const VMFunction& fun_;
790 : ArgSeq args_;
791 : StoreOutputTo out_;
792 :
793 : public:
794 44 : OutOfLineCallVM(LInstruction* lir, const VMFunction& fun, const ArgSeq& args,
795 : const StoreOutputTo& out)
796 : : lir_(lir),
797 : fun_(fun),
798 : args_(args),
799 44 : out_(out)
800 44 : { }
801 :
802 44 : void accept(CodeGeneratorShared* codegen) {
803 44 : codegen->visitOutOfLineCallVM(this);
804 44 : }
805 :
806 44 : LInstruction* lir() const { return lir_; }
807 44 : const VMFunction& function() const { return fun_; }
808 44 : const ArgSeq& args() const { return args_; }
809 88 : const StoreOutputTo& out() const { return out_; }
810 : };
811 :
// Creates and registers an out-of-line VM-call path for |lir|. Inline code
// can jump to the returned OutOfLineCode's entry(); the OOL path performs the
// call, stores its output via |out|, and jumps back to rejoin().
template <class ArgSeq, class StoreOutputTo>
inline OutOfLineCode*
CodeGeneratorShared::oolCallVM(const VMFunction& fun, LInstruction* lir, const ArgSeq& args,
                               const StoreOutputTo& out)
{
    MOZ_ASSERT(lir->mirRaw());
    MOZ_ASSERT(lir->mirRaw()->isInstruction());

    OutOfLineCode* ool = new(alloc()) OutOfLineCallVM<ArgSeq, StoreOutputTo>(lir, fun, args, out);
    addOutOfLineCode(ool, lir->mirRaw()->toInstruction());
    return ool;
}
824 :
// Emits the body of an out-of-line VM call: save live registers, push the
// arguments, perform the call, store the result, restore the live registers
// (except those clobbered by the store), then jump back to the rejoin point.
// The order of these steps is load-bearing.
template <class ArgSeq, class StoreOutputTo>
void
CodeGeneratorShared::visitOutOfLineCallVM(OutOfLineCallVM<ArgSeq, StoreOutputTo>* ool)
{
    LInstruction* lir = ool->lir();

    saveLive(lir);
    ool->args().generate(this);
    callVM(ool->function(), lir);
    ool->out().generate(this);
    restoreLiveIgnore(lir, ool->out().clobbered());
    masm.jump(ool->rejoin());
}
838 :
839 : class OutOfLineWasmTruncateCheck : public OutOfLineCodeBase<CodeGeneratorShared>
840 : {
841 : MIRType fromType_;
842 : MIRType toType_;
843 : FloatRegister input_;
844 : bool isUnsigned_;
845 : wasm::BytecodeOffset bytecodeOffset_;
846 :
847 : public:
848 0 : OutOfLineWasmTruncateCheck(MWasmTruncateToInt32* mir, FloatRegister input)
849 0 : : fromType_(mir->input()->type()), toType_(MIRType::Int32), input_(input),
850 0 : isUnsigned_(mir->isUnsigned()), bytecodeOffset_(mir->bytecodeOffset())
851 0 : { }
852 :
853 0 : OutOfLineWasmTruncateCheck(MWasmTruncateToInt64* mir, FloatRegister input)
854 0 : : fromType_(mir->input()->type()), toType_(MIRType::Int64), input_(input),
855 0 : isUnsigned_(mir->isUnsigned()), bytecodeOffset_(mir->bytecodeOffset())
856 0 : { }
857 :
858 0 : void accept(CodeGeneratorShared* codegen) {
859 0 : codegen->visitOutOfLineWasmTruncateCheck(this);
860 0 : }
861 :
862 0 : FloatRegister input() const { return input_; }
863 0 : MIRType toType() const { return toType_; }
864 0 : MIRType fromType() const { return fromType_; }
865 0 : bool isUnsigned() const { return isUnsigned_; }
866 0 : wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
867 : };
868 :
869 : } // namespace jit
870 : } // namespace js
871 :
872 : #endif /* jit_shared_CodeGenerator_shared_h */
|