Line data Source code
1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : * vim: set ts=8 sts=4 et sw=4 tw=99:
3 : * This Source Code Form is subject to the terms of the Mozilla Public
4 : * License, v. 2.0. If a copy of the MPL was not distributed with this
5 : * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 :
7 : #ifndef jit_MacroAssembler_h
8 : #define jit_MacroAssembler_h
9 :
10 : #include "mozilla/MacroForEach.h"
11 : #include "mozilla/MathAlgorithms.h"
12 :
13 : #include "jscompartment.h"
14 :
15 : #if defined(JS_CODEGEN_X86)
16 : # include "jit/x86/MacroAssembler-x86.h"
17 : #elif defined(JS_CODEGEN_X64)
18 : # include "jit/x64/MacroAssembler-x64.h"
19 : #elif defined(JS_CODEGEN_ARM)
20 : # include "jit/arm/MacroAssembler-arm.h"
21 : #elif defined(JS_CODEGEN_ARM64)
22 : # include "jit/arm64/MacroAssembler-arm64.h"
23 : #elif defined(JS_CODEGEN_MIPS32)
24 : # include "jit/mips32/MacroAssembler-mips32.h"
25 : #elif defined(JS_CODEGEN_MIPS64)
26 : # include "jit/mips64/MacroAssembler-mips64.h"
27 : #elif defined(JS_CODEGEN_NONE)
28 : # include "jit/none/MacroAssembler-none.h"
29 : #else
30 : # error "Unknown architecture!"
31 : #endif
32 : #include "jit/AtomicOp.h"
33 : #include "jit/IonInstrumentation.h"
34 : #include "jit/JitCompartment.h"
35 : #include "jit/VMFunctions.h"
36 : #include "vm/ProxyObject.h"
37 : #include "vm/Shape.h"
38 : #include "vm/TypedArrayObject.h"
39 : #include "vm/UnboxedObject.h"
40 :
41 : using mozilla::FloatingPoint;
42 :
43 : // * How to read/write MacroAssembler method declarations:
44 : //
45 : // The following macros are made to avoid #ifdef around each method declarations
      46 : // of the Macro Assembler, and they are also used as a hint on the location of
47 : // the implementations of each method. For example, the following declaration
48 : //
49 : // void Pop(FloatRegister t) DEFINED_ON(x86_shared, arm);
50 : //
51 : // suggests the MacroAssembler::Pop(FloatRegister) method is implemented in
52 : // x86-shared/MacroAssembler-x86-shared.h, and also in arm/MacroAssembler-arm.h.
53 : //
54 : // - If there is no annotation, then there is only one generic definition in
55 : // MacroAssembler.cpp.
56 : //
57 : // - If the declaration is "inline", then the method definition(s) would be in
58 : // the "-inl.h" variant of the same file(s).
59 : //
60 : // The script check_macroassembler_style.py (check-masm target of the Makefile)
61 : // is used to verify that method definitions are matching the annotation added
62 : // to the method declarations. If there is any difference, then you either
63 : // forgot to define the method in one of the macro assembler, or you forgot to
64 : // update the annotation of the macro assembler declaration.
65 : //
66 : // Some convenient short-cuts are used to avoid repeating the same list of
67 : // architectures on each method declaration, such as PER_ARCH and
68 : // PER_SHARED_ARCH.
69 :
70 : # define ALL_ARCH mips32, mips64, arm, arm64, x86, x64
71 : # define ALL_SHARED_ARCH arm, arm64, x86_shared, mips_shared
72 :
73 : // * How this macro works:
74 : //
75 : // DEFINED_ON is a macro which check if, for the current architecture, the
76 : // method is defined on the macro assembler or not.
77 : //
78 : // For each architecture, we have a macro named DEFINED_ON_arch. This macro is
79 : // empty if this is not the current architecture. Otherwise it must be either
      80 : // empty if this is not the current architecture. Otherwise it must be either
81 : //
82 : // The DEFINED_ON macro maps the list of architecture names given as argument to
83 : // a list of macro names. For example,
84 : //
85 : // DEFINED_ON(arm, x86_shared)
86 : //
87 : // is expanded to
88 : //
89 : // DEFINED_ON_none DEFINED_ON_arm DEFINED_ON_x86_shared
90 : //
91 : // which are later expanded on ARM, x86, x64 by DEFINED_ON_EXPAND_ARCH_RESULTS
92 : // to
93 : //
94 : // define
95 : //
96 : // or if the JIT is disabled or set to no architecture to
97 : //
98 : // crash
99 : //
100 : // or to nothing, if the current architecture is not listed in the list of
101 : // arguments of DEFINED_ON. Note, only one of the DEFINED_ON_arch macro
102 : // contributes to the non-empty result, which is the macro of the current
103 : // architecture if it is listed in the arguments of DEFINED_ON.
104 : //
105 : // This result is appended to DEFINED_ON_RESULT_ before expanding the macro,
106 : // which result is either no annotation, a MOZ_CRASH(), or a "= delete"
107 : // annotation on the method declaration.
108 :
109 : # define DEFINED_ON_x86
110 : # define DEFINED_ON_x64
111 : # define DEFINED_ON_x86_shared
112 : # define DEFINED_ON_arm
113 : # define DEFINED_ON_arm64
114 : # define DEFINED_ON_mips32
115 : # define DEFINED_ON_mips64
116 : # define DEFINED_ON_mips_shared
117 : # define DEFINED_ON_none
118 :
119 : // Specialize for each architecture.
120 : #if defined(JS_CODEGEN_X86)
121 : # undef DEFINED_ON_x86
122 : # define DEFINED_ON_x86 define
123 : # undef DEFINED_ON_x86_shared
124 : # define DEFINED_ON_x86_shared define
125 : #elif defined(JS_CODEGEN_X64)
126 : # undef DEFINED_ON_x64
127 : # define DEFINED_ON_x64 define
128 : # undef DEFINED_ON_x86_shared
129 : # define DEFINED_ON_x86_shared define
130 : #elif defined(JS_CODEGEN_ARM)
131 : # undef DEFINED_ON_arm
132 : # define DEFINED_ON_arm define
133 : #elif defined(JS_CODEGEN_ARM64)
134 : # undef DEFINED_ON_arm64
135 : # define DEFINED_ON_arm64 define
136 : #elif defined(JS_CODEGEN_MIPS32)
137 : # undef DEFINED_ON_mips32
138 : # define DEFINED_ON_mips32 define
139 : # undef DEFINED_ON_mips_shared
140 : # define DEFINED_ON_mips_shared define
141 : #elif defined(JS_CODEGEN_MIPS64)
142 : # undef DEFINED_ON_mips64
143 : # define DEFINED_ON_mips64 define
144 : # undef DEFINED_ON_mips_shared
145 : # define DEFINED_ON_mips_shared define
146 : #elif defined(JS_CODEGEN_NONE)
147 : # undef DEFINED_ON_none
148 : # define DEFINED_ON_none crash
149 : #else
150 : # error "Unknown architecture!"
151 : #endif
152 :
153 : # define DEFINED_ON_RESULT_crash { MOZ_CRASH(); }
154 : # define DEFINED_ON_RESULT_define
155 : # define DEFINED_ON_RESULT_ = delete
156 :
157 : # define DEFINED_ON_DISPATCH_RESULT_2(Macro, Result) \
158 : Macro ## Result
159 : # define DEFINED_ON_DISPATCH_RESULT(...) \
160 : DEFINED_ON_DISPATCH_RESULT_2(DEFINED_ON_RESULT_, __VA_ARGS__)
161 :
     162 : // We need to let the evaluation of MOZ_FOR_EACH terminate.
163 : # define DEFINED_ON_EXPAND_ARCH_RESULTS_3(ParenResult) \
164 : DEFINED_ON_DISPATCH_RESULT ParenResult
165 : # define DEFINED_ON_EXPAND_ARCH_RESULTS_2(ParenResult) \
166 : DEFINED_ON_EXPAND_ARCH_RESULTS_3 (ParenResult)
167 : # define DEFINED_ON_EXPAND_ARCH_RESULTS(ParenResult) \
168 : DEFINED_ON_EXPAND_ARCH_RESULTS_2 (ParenResult)
169 :
170 : # define DEFINED_ON_FWDARCH(Arch) DEFINED_ON_ ## Arch
171 : # define DEFINED_ON_MAP_ON_ARCHS(ArchList) \
172 : DEFINED_ON_EXPAND_ARCH_RESULTS( \
173 : (MOZ_FOR_EACH(DEFINED_ON_FWDARCH, (), ArchList)))
174 :
175 : # define DEFINED_ON(...) \
176 : DEFINED_ON_MAP_ON_ARCHS((none, __VA_ARGS__))
177 :
178 : # define PER_ARCH DEFINED_ON(ALL_ARCH)
179 : # define PER_SHARED_ARCH DEFINED_ON(ALL_SHARED_ARCH)
180 :
181 :
182 : #if MOZ_LITTLE_ENDIAN
183 : #define IMM32_16ADJ(X) X << 16
184 : #else
185 : #define IMM32_16ADJ(X) X
186 : #endif
187 :
188 : namespace js {
189 : namespace jit {
190 :
191 : // Defined in JitFrames.h
192 : enum ExitFrameTokenValues;
193 :
194 : class AutoSaveLiveRegisters;
195 :
196 : // The public entrypoint for emitting assembly. Note that a MacroAssembler can
197 : // use cx->lifoAlloc, so take care not to interleave masm use with other
198 : // lifoAlloc use if one will be destroyed before the other.
199 4503 : class MacroAssembler : public MacroAssemblerSpecific
200 : {
201 : MacroAssembler* thisFromCtor() {
202 : return this;
203 : }
204 :
205 : public:
206 4502 : class AutoRooter : public JS::AutoGCRooter
207 : {
208 : MacroAssembler* masm_;
209 :
210 : public:
211 4502 : AutoRooter(JSContext* cx, MacroAssembler* masm)
212 4502 : : JS::AutoGCRooter(cx, IONMASM),
213 4502 : masm_(masm)
214 4502 : { }
215 :
216 0 : MacroAssembler* masm() const {
217 0 : return masm_;
218 : }
219 : };
220 :
221 : /*
222 : * Base class for creating a branch.
223 : */
224 344 : class Branch
225 : {
226 : bool init_;
227 : Condition cond_;
228 : Label* jump_;
229 : Register reg_;
230 :
231 : public:
232 355 : Branch()
233 355 : : init_(false),
234 : cond_(Equal),
235 : jump_(nullptr),
236 355 : reg_(Register::FromCode(0)) // Quell compiler warnings.
237 355 : { }
238 :
239 258 : Branch(Condition cond, Register reg, Label* jump)
240 258 : : init_(true),
241 : cond_(cond),
242 : jump_(jump),
243 258 : reg_(reg)
244 258 : { }
245 :
246 962 : bool isInitialized() const {
247 962 : return init_;
248 : }
249 :
250 258 : Condition cond() const {
251 258 : return cond_;
252 : }
253 :
254 258 : Label* jump() const {
255 258 : return jump_;
256 : }
257 :
258 258 : Register reg() const {
259 258 : return reg_;
260 : }
261 :
262 229 : void invertCondition() {
263 229 : cond_ = InvertCondition(cond_);
264 229 : }
265 :
266 229 : void relink(Label* jump) {
267 229 : jump_ = jump;
268 229 : }
269 :
270 : virtual void emit(MacroAssembler& masm) = 0;
271 : };
272 :
273 : /*
274 : * Creates a branch based on a specific TypeSet::Type.
275 : * Note: emits number test (int/double) for TypeSet::DoubleType()
276 : */
277 163 : class BranchType : public Branch
278 : {
279 : TypeSet::Type type_;
280 :
281 : public:
282 178 : BranchType()
283 178 : : Branch(),
284 178 : type_(TypeSet::UnknownType())
285 178 : { }
286 :
287 163 : BranchType(Condition cond, Register reg, TypeSet::Type type, Label* jump)
288 163 : : Branch(cond, reg, jump),
289 163 : type_(type)
290 163 : { }
291 :
292 : void emit(MacroAssembler& masm);
293 : };
294 :
295 : /*
296 : * Creates a branch based on a GCPtr.
297 : */
298 181 : class BranchGCPtr : public Branch
299 : {
300 : ImmGCPtr ptr_;
301 :
302 : public:
303 177 : BranchGCPtr()
304 177 : : Branch(),
305 177 : ptr_(ImmGCPtr(nullptr))
306 177 : { }
307 :
308 95 : BranchGCPtr(Condition cond, Register reg, ImmGCPtr ptr, Label* jump)
309 95 : : Branch(cond, reg, jump),
310 95 : ptr_(ptr)
311 95 : { }
312 :
313 : void emit(MacroAssembler& masm);
314 : };
315 :
316 : mozilla::Maybe<AutoRooter> autoRooter_;
317 : mozilla::Maybe<JitContext> jitContext_;
318 : mozilla::Maybe<AutoJitContextAlloc> alloc_;
319 :
320 : private:
321 : // Labels for handling exceptions and failures.
322 : NonAssertingLabel failureLabel_;
323 :
324 : public:
    // Default constructor: requires an active JitContext (GetJitContext()).
    // If the JitContext carries a JSContext, the assembler roots itself with
    // the GC (see constructRoot).  If the JitContext has no TempAllocator
    // yet, one is installed via AutoJitContextAlloc so that jcx->temp is
    // valid for setAllocator below.
    MacroAssembler()
      : framePushed_(0),
#ifdef DEBUG
        inCall_(false),
#endif
        emitProfilingInstrumentation_(false)
    {
        JitContext* jcx = GetJitContext();
        JSContext* cx = jcx->cx;
        if (cx)
            constructRoot(cx);

        // No TempAllocator provided: a JSContext must be present so one can
        // be allocated here.
        if (!jcx->temp) {
            MOZ_ASSERT(cx);
            alloc_.emplace(cx);
        }

        moveResolver_.setAllocator(*jcx->temp);

        // ARM/ARM64 assembler buffers need explicit initialization and a
        // per-assembler id.
#if defined(JS_CODEGEN_ARM)
        initWithAllocator();
        m_buffer.id = jcx->getNextAssemblerId();
#elif defined(JS_CODEGEN_ARM64)
        initWithAllocator();
        armbuffer_.id = jcx->getNextAssemblerId();
#endif
    }
353 : // This constructor should only be used when there is no JitContext active
354 : // (for example, Trampoline-$(ARCH).cpp and IonCaches.cpp).
355 : explicit MacroAssembler(JSContext* cx, IonScript* ion = nullptr,
356 : JSScript* script = nullptr, jsbytecode* pc = nullptr);
357 :
358 : // wasm compilation handles its own JitContext-pushing
    // Tag type selecting the wasm constructor below.
    struct WasmToken {};

    // Constructor for wasm compilation: does not consult a JitContext (wasm
    // pushes its own) and takes the TempAllocator explicitly.  No GC rooting
    // is performed here.
    explicit MacroAssembler(WasmToken, TempAllocator& alloc)
      : framePushed_(0),
#ifdef DEBUG
        inCall_(false),
#endif
        emitProfilingInstrumentation_(false)
    {
        moveResolver_.setAllocator(alloc);

        // ARM/ARM64 assembler buffers need explicit initialization; wasm
        // assemblers use a fixed buffer id of 0.
#if defined(JS_CODEGEN_ARM)
        initWithAllocator();
        m_buffer.id = 0;
#elif defined(JS_CODEGEN_ARM64)
        initWithAllocator();
        armbuffer_.id = 0;
#endif

        // Disable page protection for WASM.
        disableProtection();
    }
381 4495 : void constructRoot(JSContext* cx) {
382 4495 : autoRooter_.emplace(cx, this);
383 4495 : }
384 :
385 429 : MoveResolver& moveResolver() {
386 429 : return moveResolver_;
387 : }
388 :
389 4499 : size_t instructionsSize() const {
390 4499 : return size();
391 : }
392 :
393 : //{{{ check_macroassembler_style
394 : public:
395 : // ===============================================================
396 : // MacroAssembler high-level usage.
397 :
398 : // Flushes the assembly buffer, on platforms that need it.
399 : void flush() PER_SHARED_ARCH;
400 :
401 : // Add a comment that is visible in the pretty printed assembly code.
402 : void comment(const char* msg) PER_SHARED_ARCH;
403 :
404 : // ===============================================================
405 : // Frame manipulation functions.
406 :
407 : inline uint32_t framePushed() const;
408 : inline void setFramePushed(uint32_t framePushed);
409 : inline void adjustFrame(int32_t value);
410 :
411 : // Adjust the frame, to account for implicit modification of the stack
412 : // pointer, such that callee can remove arguments on the behalf of the
413 : // caller.
414 : inline void implicitPop(uint32_t bytes);
415 :
416 : private:
417 : // This field is used to statically (at compilation time) emulate a frame
418 : // pointer by keeping track of stack manipulations.
419 : //
420 : // It is maintained by all stack manipulation functions below.
421 : uint32_t framePushed_;
422 :
423 : public:
424 : // ===============================================================
425 : // Stack manipulation functions.
426 :
427 : void PushRegsInMask(LiveRegisterSet set)
428 : DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
429 : void PushRegsInMask(LiveGeneralRegisterSet set);
430 :
431 : // Like PushRegsInMask, but instead of pushing the registers, store them to
432 : // |dest|. |dest| should point to the end of the reserved space, so the
433 : // first register will be stored at |dest.offset - sizeof(register)|.
434 : void storeRegsInMask(LiveRegisterSet set, Address dest, Register scratch)
435 : DEFINED_ON(arm, arm64, x86_shared);
436 :
437 : void PopRegsInMask(LiveRegisterSet set);
438 : void PopRegsInMask(LiveGeneralRegisterSet set);
439 : void PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
440 : DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
441 :
442 : void Push(const Operand op) DEFINED_ON(x86_shared);
443 : void Push(Register reg) PER_SHARED_ARCH;
444 : void Push(Register reg1, Register reg2, Register reg3, Register reg4) DEFINED_ON(arm64);
445 : void Push(const Imm32 imm) PER_SHARED_ARCH;
446 : void Push(const ImmWord imm) PER_SHARED_ARCH;
447 : void Push(const ImmPtr imm) PER_SHARED_ARCH;
448 : void Push(const ImmGCPtr ptr) PER_SHARED_ARCH;
449 : void Push(FloatRegister reg) PER_SHARED_ARCH;
450 : void PushFlags() DEFINED_ON(x86_shared);
451 : void Push(jsid id, Register scratchReg);
452 : void Push(TypedOrValueRegister v);
453 : void Push(const ConstantOrRegister& v);
454 : void Push(const ValueOperand& val);
455 : void Push(const Value& val);
456 : void Push(JSValueType type, Register reg);
457 : void PushValue(const Address& addr);
458 : void PushEmptyRooted(VMFunction::RootType rootType);
459 : inline CodeOffset PushWithPatch(ImmWord word);
460 : inline CodeOffset PushWithPatch(ImmPtr imm);
461 :
462 : void Pop(const Operand op) DEFINED_ON(x86_shared);
463 : void Pop(Register reg) PER_SHARED_ARCH;
464 : void Pop(FloatRegister t) PER_SHARED_ARCH;
465 : void Pop(const ValueOperand& val) PER_SHARED_ARCH;
466 : void PopFlags() DEFINED_ON(x86_shared);
467 : void PopStackPtr() PER_SHARED_ARCH;
468 : void popRooted(VMFunction::RootType rootType, Register cellReg, const ValueOperand& valueReg);
469 :
470 : // Move the stack pointer based on the requested amount.
471 : void adjustStack(int amount);
472 : void freeStack(uint32_t amount);
473 :
474 : // Warning: This method does not update the framePushed() counter.
475 : void freeStack(Register amount);
476 :
477 : private:
478 : // ===============================================================
479 : // Register allocation fields.
480 : #ifdef DEBUG
481 : friend AutoRegisterScope;
482 : friend AutoFloatRegisterScope;
483 : // Used to track register scopes for debug builds.
484 : // Manipulated by the AutoGenericRegisterScope class.
485 : AllocatableRegisterSet debugTrackedRegisters_;
486 : #endif // DEBUG
487 :
488 : public:
489 : // ===============================================================
490 : // Simple call functions.
491 :
492 : CodeOffset call(Register reg) PER_SHARED_ARCH;
493 : CodeOffset call(Label* label) PER_SHARED_ARCH;
494 : void call(const Address& addr) DEFINED_ON(x86_shared, arm, arm64);
495 : void call(ImmWord imm) PER_SHARED_ARCH;
496 : // Call a target native function, which is neither traceable nor movable.
497 : void call(ImmPtr imm) PER_SHARED_ARCH;
498 : void call(wasm::SymbolicAddress imm) PER_SHARED_ARCH;
499 : inline void call(const wasm::CallSiteDesc& desc, wasm::SymbolicAddress imm);
500 :
501 : // Call a target JitCode, which must be traceable, and may be movable.
502 : void call(JitCode* c) PER_SHARED_ARCH;
503 :
504 : inline void call(const wasm::CallSiteDesc& desc, const Register reg);
505 : inline void call(const wasm::CallSiteDesc& desc, uint32_t funcDefIndex);
506 : inline void call(const wasm::CallSiteDesc& desc, wasm::Trap trap);
507 :
508 : CodeOffset callWithPatch() PER_SHARED_ARCH;
509 : void patchCall(uint32_t callerOffset, uint32_t calleeOffset) PER_SHARED_ARCH;
510 :
511 : // Push the return address and make a call. On platforms where this function
512 : // is not defined, push the link register (pushReturnAddress) at the entry
513 : // point of the callee.
514 : void callAndPushReturnAddress(Register reg) DEFINED_ON(x86_shared);
515 : void callAndPushReturnAddress(Label* label) DEFINED_ON(x86_shared);
516 :
517 : void pushReturnAddress() DEFINED_ON(mips_shared, arm, arm64);
518 : void popReturnAddress() DEFINED_ON(mips_shared, arm, arm64);
519 :
520 : public:
521 : // ===============================================================
522 : // Patchable near/far jumps.
523 :
524 : // "Far jumps" provide the ability to jump to any uint32_t offset from any
525 : // other uint32_t offset without using a constant pool (thus returning a
526 : // simple CodeOffset instead of a CodeOffsetJump).
527 : CodeOffset farJumpWithPatch() PER_SHARED_ARCH;
528 : void patchFarJump(CodeOffset farJump, uint32_t targetOffset) PER_SHARED_ARCH;
529 : static void repatchFarJump(uint8_t* code, uint32_t farJumpOffset, uint32_t targetOffset) PER_SHARED_ARCH;
530 :
531 : // Emit a nop that can be patched to and from a nop and a jump with an int8
532 : // relative displacement.
533 : CodeOffset nopPatchableToNearJump() PER_SHARED_ARCH;
534 : static void patchNopToNearJump(uint8_t* jump, uint8_t* target) PER_SHARED_ARCH;
535 : static void patchNearJumpToNop(uint8_t* jump) PER_SHARED_ARCH;
536 :
537 : // Emit a nop that can be patched to and from a nop and a call with int32
538 : // relative displacement.
539 : CodeOffset nopPatchableToCall(const wasm::CallSiteDesc& desc) PER_SHARED_ARCH;
540 : static void patchNopToCall(uint8_t* callsite, uint8_t* target) PER_SHARED_ARCH;
541 : static void patchCallToNop(uint8_t* callsite) PER_SHARED_ARCH;
542 :
543 : public:
544 : // ===============================================================
545 : // ABI function calls.
546 :
547 : // Setup a call to C/C++ code, given the assumption that the framePushed
     548 : // accurately defines the state of the stack, and that the top of the stack
549 : // was properly aligned. Note that this only supports cdecl.
550 : void setupAlignedABICall(); // CRASH_ON(arm64)
551 :
552 : // As setupAlignedABICall, but for WebAssembly native ABI calls, which pass
553 : // through a builtin thunk that uses the wasm ABI. All the wasm ABI calls
554 : // can be native, since we always know the stack alignment a priori.
555 : void setupWasmABICall(); // CRASH_ON(arm64)
556 :
557 : // Setup an ABI call for when the alignment is not known. This may need a
558 : // scratch register.
559 : void setupUnalignedABICall(Register scratch) PER_ARCH;
560 :
561 : // Arguments must be assigned to a C/C++ call in order. They are moved
562 : // in parallel immediately before performing the call. This process may
563 : // temporarily use more stack, in which case esp-relative addresses will be
564 : // automatically adjusted. It is extremely important that esp-relative
565 : // addresses are computed *after* setupABICall(). Furthermore, no
566 : // operations should be emitted while setting arguments.
567 : void passABIArg(const MoveOperand& from, MoveOp::Type type);
568 : inline void passABIArg(Register reg);
569 : inline void passABIArg(FloatRegister reg, MoveOp::Type type);
570 :
571 : template <typename T>
572 : inline void callWithABI(const T& fun, MoveOp::Type result = MoveOp::GENERAL);
573 :
574 : void callWithABI(wasm::BytecodeOffset offset, wasm::SymbolicAddress fun,
575 : MoveOp::Type result = MoveOp::GENERAL);
576 :
577 : private:
578 : // Reinitialize the variables which have to be cleared before making a call
579 : // with callWithABI.
580 : void setupABICall();
581 :
582 : // Reserve the stack and resolve the arguments move.
583 : void callWithABIPre(uint32_t* stackAdjust, bool callFromWasm = false) PER_ARCH;
584 :
585 : // Emits a call to a C/C++ function, resolving all argument moves.
586 : void callWithABINoProfiler(void* fun, MoveOp::Type result);
587 : void callWithABINoProfiler(Register fun, MoveOp::Type result) PER_ARCH;
588 : void callWithABINoProfiler(const Address& fun, MoveOp::Type result) PER_ARCH;
589 :
590 : // Restore the stack to its state before the setup function call.
591 : void callWithABIPost(uint32_t stackAdjust, MoveOp::Type result, bool callFromWasm = false) PER_ARCH;
592 :
593 : // Create the signature to be able to decode the arguments of a native
594 : // function, when calling a function within the simulator.
595 : inline void appendSignatureType(MoveOp::Type type);
596 : inline ABIFunctionType signature() const;
597 :
598 : // Private variables used to handle moves between registers given as
599 : // arguments to passABIArg and the list of ABI registers expected for the
600 : // signature of the function.
601 : MoveResolver moveResolver_;
602 :
603 : // Architecture specific implementation which specify how registers & stack
604 : // offsets are used for calling a function.
605 : ABIArgGenerator abiArgs_;
606 :
607 : #ifdef DEBUG
608 : // Flag use to assert that we use ABI function in the right context.
609 : bool inCall_;
610 : #endif
611 :
612 : // If set by setupUnalignedABICall then callWithABI will pop the stack
613 : // register which is on the stack.
614 : bool dynamicAlignment_;
615 :
616 : #ifdef JS_SIMULATOR
617 : // The signature is used to accumulate all types of arguments which are used
618 : // by the caller. This is used by the simulators to decode the arguments
619 : // properly, and cast the function pointer to the right type.
620 : uint32_t signature_;
621 : #endif
622 :
623 : public:
624 : // ===============================================================
625 : // Jit Frames.
626 : //
627 : // These functions are used to build the content of the Jit frames. See
628 : // CommonFrameLayout class, and all its derivatives. The content should be
629 : // pushed in the opposite order as the fields of the structures, such that
630 : // the structures can be used to interpret the content of the stack.
631 :
632 : // Call the Jit function, and push the return address (or let the callee
633 : // push the return address).
634 : //
635 : // These functions return the offset of the return address, in order to use
636 : // the return address to index the safepoints, which are used to list all
637 : // live registers.
638 : inline uint32_t callJitNoProfiler(Register callee);
639 : inline uint32_t callJit(Register callee);
640 : inline uint32_t callJit(JitCode* code);
641 :
642 : // The frame descriptor is the second field of all Jit frames, pushed before
643 : // calling the Jit function. It is a composite value defined in JitFrames.h
644 : inline void makeFrameDescriptor(Register frameSizeReg, FrameType type, uint32_t headerSize);
645 :
646 : // Push the frame descriptor, based on the statically known framePushed.
647 : inline void pushStaticFrameDescriptor(FrameType type, uint32_t headerSize);
648 :
649 : // Push the callee token of a JSFunction which pointer is stored in the
650 : // |callee| register. The callee token is packed with a |constructing| flag
     651 : // which corresponds to the fact that the JS function is called with "new" or
652 : // not.
653 : inline void PushCalleeToken(Register callee, bool constructing);
654 :
655 : // Unpack a callee token located at the |token| address, and return the
656 : // JSFunction pointer in the |dest| register.
657 : inline void loadFunctionFromCalleeToken(Address token, Register dest);
658 :
659 : // This function emulates a call by pushing an exit frame on the stack,
660 : // except that the fake-function is inlined within the body of the caller.
661 : //
662 : // This function assumes that the current frame is an IonJS frame.
663 : //
664 : // This function returns the offset of the /fake/ return address, in order to use
665 : // the return address to index the safepoints, which are used to list all
666 : // live registers.
667 : //
668 : // This function should be balanced with a call to adjustStack, to pop the
669 : // exit frame and emulate the return statement of the inlined function.
670 : inline uint32_t buildFakeExitFrame(Register scratch);
671 :
672 : private:
673 : // This function is used by buildFakeExitFrame to push a fake return address
674 : // on the stack. This fake return address should never be used for resuming
675 : // any execution, and can even be an invalid pointer into the instruction
676 : // stream, as long as it does not alias any other.
677 : uint32_t pushFakeReturnAddress(Register scratch) PER_SHARED_ARCH;
678 :
679 : public:
680 : // ===============================================================
681 : // Exit frame footer.
682 : //
683 : // When calling outside the Jit we push an exit frame. To mark the stack
684 : // correctly, we have to push additional information, called the Exit frame
685 : // footer, which is used to identify how the stack is marked.
686 : //
687 : // See JitFrames.h, and MarkJitExitFrame in JitFrames.cpp.
688 :
689 : // If the current piece of code might be garbage collected, then the exit
690 : // frame footer must contain a pointer to the current JitCode, such that the
691 : // garbage collector can keep the code alive as long this code is on the
692 : // stack. This function pushes a placeholder which is replaced when the code
693 : // is linked.
694 : inline void PushStubCode();
695 :
696 : // Return true if the code contains a self-reference which needs to be
697 : // patched when the code is linked.
698 : inline bool hasSelfReference() const;
699 :
700 : // Push stub code and the VMFunction pointer.
701 : inline void enterExitFrame(Register cxreg, Register scratch, const VMFunction* f = nullptr);
702 :
703 : // Push an exit frame token to identify which fake exit frame this footer
704 : // corresponds to.
705 : inline void enterFakeExitFrame(Register cxreg, Register scratch, enum ExitFrameTokenValues token);
706 :
707 : // Push an exit frame token for a native call.
708 : inline void enterFakeExitFrameForNative(Register cxreg, Register scratch, bool isConstructing);
709 :
710 : // Pop ExitFrame footer in addition to the extra frame.
711 : inline void leaveExitFrame(size_t extraFrame = 0);
712 :
713 : private:
714 : // Save the top of the stack into JitActivation::exitFP of the current
715 : // thread, which should be the location of the latest exit frame.
716 : void linkExitFrame(Register cxreg, Register scratch);
717 :
718 : // Patch the value of PushStubCode with the pointer to the finalized code.
719 : void linkSelfReference(JitCode* code);
720 :
721 : // If the JitCode that created this assembler needs to transition into the VM,
722 : // we want to store the JitCode on the stack in order to mark it during a GC.
723 : // This is a reference to a patch location where the JitCode* will be written.
724 : CodeOffset selfReferencePatch_;
725 :
726 : public:
727 : // ===============================================================
728 : // Move instructions
729 :
730 : inline void move64(Imm64 imm, Register64 dest) PER_ARCH;
731 : inline void move64(Register64 src, Register64 dest) PER_ARCH;
732 :
733 : inline void moveFloat32ToGPR(FloatRegister src, Register dest) PER_SHARED_ARCH;
734 : inline void moveGPRToFloat32(Register src, FloatRegister dest) PER_SHARED_ARCH;
735 :
736 : inline void move8SignExtend(Register src, Register dest) PER_SHARED_ARCH;
737 : inline void move16SignExtend(Register src, Register dest) PER_SHARED_ARCH;
738 :
739 : // ===============================================================
740 : // Logical instructions
741 :
742 : inline void not32(Register reg) PER_SHARED_ARCH;
743 :
744 : inline void and32(Register src, Register dest) PER_SHARED_ARCH;
745 : inline void and32(Imm32 imm, Register dest) PER_SHARED_ARCH;
746 : inline void and32(Imm32 imm, Register src, Register dest) DEFINED_ON(arm64);
747 : inline void and32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
748 : inline void and32(const Address& src, Register dest) PER_SHARED_ARCH;
749 :
750 : inline void andPtr(Register src, Register dest) PER_ARCH;
751 : inline void andPtr(Imm32 imm, Register dest) PER_ARCH;
752 :
753 : inline void and64(Imm64 imm, Register64 dest) PER_ARCH;
754 : inline void or64(Imm64 imm, Register64 dest) PER_ARCH;
755 : inline void xor64(Imm64 imm, Register64 dest) PER_ARCH;
756 :
757 : inline void or32(Register src, Register dest) PER_SHARED_ARCH;
758 : inline void or32(Imm32 imm, Register dest) PER_SHARED_ARCH;
759 : inline void or32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
760 :
761 : inline void orPtr(Register src, Register dest) PER_ARCH;
762 : inline void orPtr(Imm32 imm, Register dest) PER_ARCH;
763 :
764 : inline void and64(Register64 src, Register64 dest) PER_ARCH;
765 : inline void or64(Register64 src, Register64 dest) PER_ARCH;
766 : inline void xor64(Register64 src, Register64 dest) PER_ARCH;
767 :
768 : inline void xor32(Register src, Register dest) PER_SHARED_ARCH;
769 : inline void xor32(Imm32 imm, Register dest) PER_SHARED_ARCH;
770 :
771 : inline void xorPtr(Register src, Register dest) PER_ARCH;
772 : inline void xorPtr(Imm32 imm, Register dest) PER_ARCH;
773 :
774 : inline void and64(const Operand& src, Register64 dest) DEFINED_ON(x64, mips64);
775 : inline void or64(const Operand& src, Register64 dest) DEFINED_ON(x64, mips64);
776 : inline void xor64(const Operand& src, Register64 dest) DEFINED_ON(x64, mips64);
777 :
// ===============================================================
// Arithmetic functions

inline void add32(Register src, Register dest) PER_SHARED_ARCH;
inline void add32(Imm32 imm, Register dest) PER_SHARED_ARCH;
inline void add32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
inline void add32(Imm32 imm, const AbsoluteAddress& dest) DEFINED_ON(x86_shared);

inline void addPtr(Register src, Register dest) PER_ARCH;
inline void addPtr(Register src1, Register src2, Register dest) DEFINED_ON(arm64);
inline void addPtr(Imm32 imm, Register dest) PER_ARCH;
inline void addPtr(Imm32 imm, Register src, Register dest) DEFINED_ON(arm64);
inline void addPtr(ImmWord imm, Register dest) PER_ARCH;
inline void addPtr(ImmPtr imm, Register dest);
inline void addPtr(Imm32 imm, const Address& dest) DEFINED_ON(mips_shared, arm, arm64, x86, x64);
inline void addPtr(Imm32 imm, const AbsoluteAddress& dest) DEFINED_ON(x86, x64);
inline void addPtr(const Address& src, Register dest) DEFINED_ON(mips_shared, arm, arm64, x86, x64);

inline void add64(Register64 src, Register64 dest) PER_ARCH;
inline void add64(Imm32 imm, Register64 dest) PER_ARCH;
inline void add64(Imm64 imm, Register64 dest) PER_ARCH;
inline void add64(const Operand& src, Register64 dest) DEFINED_ON(x64, mips64);

inline void addFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

// Compute dest=src+imm where `src` and `dest` are pointer registers; `src`
// may be SP, and `src` may equal `dest`. `dest` should not normally be SP,
// as stack probes are required for large negative immediates. The offset
// returned from add32ToPtrWithPatch() must be passed to patchAdd32ToPtr().
inline CodeOffset add32ToPtrWithPatch(Register src, Register dest) PER_ARCH;
inline void patchAdd32ToPtr(CodeOffset offset, Imm32 imm) PER_ARCH;

inline void addDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
inline void addConstantDouble(double d, FloatRegister dest) DEFINED_ON(x86);

inline void sub32(const Address& src, Register dest) PER_SHARED_ARCH;
inline void sub32(Register src, Register dest) PER_SHARED_ARCH;
inline void sub32(Imm32 imm, Register dest) PER_SHARED_ARCH;

inline void subPtr(Register src, Register dest) PER_ARCH;
inline void subPtr(Register src, const Address& dest) DEFINED_ON(mips_shared, arm, arm64, x86, x64);
inline void subPtr(Imm32 imm, Register dest) PER_ARCH;
inline void subPtr(ImmWord imm, Register dest) DEFINED_ON(x64);
inline void subPtr(const Address& addr, Register dest) DEFINED_ON(mips_shared, arm, arm64, x86, x64);

inline void sub64(Register64 src, Register64 dest) PER_ARCH;
inline void sub64(Imm64 imm, Register64 dest) PER_ARCH;
inline void sub64(const Operand& src, Register64 dest) DEFINED_ON(x64, mips64);

inline void subFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

inline void subDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

// On x86-shared, srcDest must be eax and edx will be clobbered.
inline void mul32(Register rhs, Register srcDest) PER_SHARED_ARCH;

inline void mul32(Register src1, Register src2, Register dest, Label* onOver, Label* onZero) DEFINED_ON(arm64);

inline void mul64(const Operand& src, const Register64& dest) DEFINED_ON(x64);
inline void mul64(const Operand& src, const Register64& dest, const Register temp)
    DEFINED_ON(x64, mips64);
inline void mul64(Imm64 imm, const Register64& dest) PER_ARCH;
inline void mul64(Imm64 imm, const Register64& dest, const Register temp)
    DEFINED_ON(x86, x64, arm, mips32, mips64);
inline void mul64(const Register64& src, const Register64& dest, const Register temp)
    PER_ARCH;

inline void mulBy3(Register src, Register dest) PER_ARCH;

inline void mulFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
inline void mulDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

inline void mulDoublePtr(ImmPtr imm, Register temp, FloatRegister dest) DEFINED_ON(mips_shared, arm, arm64, x86, x64);

// Perform an integer division, returning the integer part rounded toward zero.
// rhs must not be zero, and the division must not overflow.
//
// On x86_shared, srcDest must be eax and edx will be clobbered.
// On ARM, the chip must have hardware division instructions.
inline void quotient32(Register rhs, Register srcDest, bool isUnsigned) PER_SHARED_ARCH;

// Perform an integer division, returning the remainder part.
// rhs must not be zero, and the division must not overflow.
//
// On x86_shared, srcDest must be eax and edx will be clobbered.
// On ARM, the chip must have hardware division instructions.
inline void remainder32(Register rhs, Register srcDest, bool isUnsigned) PER_SHARED_ARCH;

inline void divFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
inline void divDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

// Increment/decrement a RegisterOrInt32Constant key in place.
inline void inc32(RegisterOrInt32Constant* key);
inline void inc64(AbsoluteAddress dest) PER_ARCH;

inline void dec32(RegisterOrInt32Constant* key);

inline void neg32(Register reg) PER_SHARED_ARCH;
inline void neg64(Register64 reg) DEFINED_ON(x86, x64, arm, mips32, mips64);

inline void negateFloat(FloatRegister reg) PER_SHARED_ARCH;

inline void negateDouble(FloatRegister reg) PER_SHARED_ARCH;

inline void absFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
inline void absDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

inline void sqrtFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
inline void sqrtDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

// srcDest = {min,max}{Float32,Double}(srcDest, other)
// For min and max, handle NaN specially if handleNaN is true.

inline void minFloat32(FloatRegister other, FloatRegister srcDest, bool handleNaN) PER_SHARED_ARCH;
inline void minDouble(FloatRegister other, FloatRegister srcDest, bool handleNaN) PER_SHARED_ARCH;

inline void maxFloat32(FloatRegister other, FloatRegister srcDest, bool handleNaN) PER_SHARED_ARCH;
inline void maxDouble(FloatRegister other, FloatRegister srcDest, bool handleNaN) PER_SHARED_ARCH;
895 :
// ===============================================================
// Shift functions

// For shift-by-register there may be platform-specific
// variations, for example, x86 will perform the shift mod 32 but
// ARM will perform the shift mod 256.
//
// For shift-by-immediate the platform assembler may restrict the
// immediate, for example, the ARM assembler requires the count
// for 32-bit shifts to be in the range [0,31].
//
// NOTE(review): per the naming convention, rshift* appear to be logical
// (zero-fill) and rshift*Arithmetic sign-propagating — confirm in the
// per-arch implementations.

inline void lshift32(Imm32 shift, Register srcDest) PER_SHARED_ARCH;
inline void rshift32(Imm32 shift, Register srcDest) PER_SHARED_ARCH;
inline void rshift32Arithmetic(Imm32 shift, Register srcDest) PER_SHARED_ARCH;

inline void lshiftPtr(Imm32 imm, Register dest) PER_ARCH;
inline void rshiftPtr(Imm32 imm, Register dest) PER_ARCH;
inline void rshiftPtr(Imm32 imm, Register src, Register dest) DEFINED_ON(arm64);
inline void rshiftPtrArithmetic(Imm32 imm, Register dest) PER_ARCH;

inline void lshift64(Imm32 imm, Register64 dest) PER_ARCH;
inline void rshift64(Imm32 imm, Register64 dest) PER_ARCH;
inline void rshift64Arithmetic(Imm32 imm, Register64 dest) PER_ARCH;

// On x86_shared these have the constraint that shift must be in CL.
inline void lshift32(Register shift, Register srcDest) PER_SHARED_ARCH;
inline void rshift32(Register shift, Register srcDest) PER_SHARED_ARCH;
inline void rshift32Arithmetic(Register shift, Register srcDest) PER_SHARED_ARCH;

inline void lshift64(Register shift, Register64 srcDest) PER_ARCH;
inline void rshift64(Register shift, Register64 srcDest) PER_ARCH;
inline void rshift64Arithmetic(Register shift, Register64 srcDest) PER_ARCH;
928 :
// ===============================================================
// Rotation functions
// Note: - on x86 and x64 the count register must be in CL.
//       - on x64 the temp register should be InvalidReg.

inline void rotateLeft(Imm32 count, Register input, Register dest) PER_SHARED_ARCH;
inline void rotateLeft(Register count, Register input, Register dest) PER_SHARED_ARCH;
// x64 has dedicated 64-bit rotate instructions, so no temp is needed there.
inline void rotateLeft64(Imm32 count, Register64 input, Register64 dest) DEFINED_ON(x64);
inline void rotateLeft64(Register count, Register64 input, Register64 dest) DEFINED_ON(x64);
inline void rotateLeft64(Imm32 count, Register64 input, Register64 dest, Register temp)
    PER_ARCH;
inline void rotateLeft64(Register count, Register64 input, Register64 dest, Register temp)
    PER_ARCH;

inline void rotateRight(Imm32 count, Register input, Register dest) PER_SHARED_ARCH;
inline void rotateRight(Register count, Register input, Register dest) PER_SHARED_ARCH;
inline void rotateRight64(Imm32 count, Register64 input, Register64 dest) DEFINED_ON(x64);
inline void rotateRight64(Register count, Register64 input, Register64 dest) DEFINED_ON(x64);
inline void rotateRight64(Imm32 count, Register64 input, Register64 dest, Register temp)
    PER_ARCH;
inline void rotateRight64(Register count, Register64 input, Register64 dest, Register temp)
    PER_ARCH;
951 :
// ===============================================================
// Bit counting functions

// knownNotZero may be true only if the src is known not to be zero.
inline void clz32(Register src, Register dest, bool knownNotZero) PER_SHARED_ARCH;
inline void ctz32(Register src, Register dest, bool knownNotZero) PER_SHARED_ARCH;

inline void clz64(Register64 src, Register dest) PER_ARCH;
inline void ctz64(Register64 src, Register dest) PER_ARCH;

// On x86_shared, temp may be Invalid only if the chip has the POPCNT instruction.
// On ARM, temp may never be Invalid.
inline void popcnt32(Register src, Register dest, Register temp) PER_SHARED_ARCH;

// temp may be invalid only if the chip has the POPCNT instruction.
inline void popcnt64(Register64 src, Register64 dest, Register temp) PER_ARCH;
968 :
// ===============================================================
// Condition functions
// These materialize the boolean result of a comparison into `dest`
// rather than branching on it.

template <typename T1, typename T2>
inline void cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest)
    DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);

template <typename T1, typename T2>
inline void cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest)
    PER_ARCH;
979 :
// ===============================================================
// Branch functions

template <class L>
inline void branch32(Condition cond, Register lhs, Register rhs, L label) PER_SHARED_ARCH;
template <class L>
inline void branch32(Condition cond, Register lhs, Imm32 rhs, L label) PER_SHARED_ARCH;
inline void branch32(Condition cond, Register length, const RegisterOrInt32Constant& key,
                     Label* label);

inline void branch32(Condition cond, const Address& lhs, Register rhs, Label* label) PER_SHARED_ARCH;
inline void branch32(Condition cond, const Address& lhs, Imm32 rhs, Label* label) PER_SHARED_ARCH;
inline void branch32(Condition cond, const Address& length, const RegisterOrInt32Constant& key,
                     Label* label);

inline void branch32(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
    DEFINED_ON(arm, arm64, mips_shared, x86, x64);
inline void branch32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
    DEFINED_ON(arm, arm64, mips_shared, x86, x64);

inline void branch32(Condition cond, const BaseIndex& lhs, Register rhs, Label* label)
    DEFINED_ON(x86_shared);
inline void branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs, Label* label) PER_SHARED_ARCH;

inline void branch32(Condition cond, const Operand& lhs, Register rhs, Label* label) DEFINED_ON(x86_shared);
inline void branch32(Condition cond, const Operand& lhs, Imm32 rhs, Label* label) DEFINED_ON(x86_shared);

inline void branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs, Label* label)
    DEFINED_ON(arm, arm64, mips_shared, x86, x64);

// The supported conditions are Equal, NotEqual, LessThan(orEqual),
// GreaterThan(orEqual), Below(orEqual) and Above(orEqual).
// When a fail label is not defined it will fall through to the next
// instruction, else jump to the fail label.
inline void branch64(Condition cond, Register64 lhs, Imm64 val, Label* success,
                     Label* fail = nullptr) PER_ARCH;
inline void branch64(Condition cond, Register64 lhs, Register64 rhs, Label* success,
                     Label* fail = nullptr) PER_ARCH;
// On x86 and x64 NotEqual and Equal conditions are allowed for the branch64 variants
// with Address as lhs. On others only the NotEqual condition.
inline void branch64(Condition cond, const Address& lhs, Imm64 val, Label* label) PER_ARCH;

// Compare the value at |lhs| with the value at |rhs|. The scratch
// register *must not* be the base of |lhs| or |rhs|.
inline void branch64(Condition cond, const Address& lhs, const Address& rhs, Register scratch,
                     Label* label) PER_ARCH;

template <class L>
inline void branchPtr(Condition cond, Register lhs, Register rhs, L label) PER_SHARED_ARCH;
inline void branchPtr(Condition cond, Register lhs, Imm32 rhs, Label* label) PER_SHARED_ARCH;
inline void branchPtr(Condition cond, Register lhs, ImmPtr rhs, Label* label) PER_SHARED_ARCH;
inline void branchPtr(Condition cond, Register lhs, ImmGCPtr rhs, Label* label) PER_SHARED_ARCH;
inline void branchPtr(Condition cond, Register lhs, ImmWord rhs, Label* label) PER_SHARED_ARCH;

template <class L>
inline void branchPtr(Condition cond, const Address& lhs, Register rhs, L label) PER_SHARED_ARCH;
inline void branchPtr(Condition cond, const Address& lhs, ImmPtr rhs, Label* label) PER_SHARED_ARCH;
inline void branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs, Label* label) PER_SHARED_ARCH;
inline void branchPtr(Condition cond, const Address& lhs, ImmWord rhs, Label* label) PER_SHARED_ARCH;

inline void branchPtr(Condition cond, const BaseIndex& lhs, ImmWord rhs, Label* label) PER_SHARED_ARCH;

inline void branchPtr(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
    DEFINED_ON(arm, arm64, mips_shared, x86, x64);
inline void branchPtr(Condition cond, const AbsoluteAddress& lhs, ImmWord rhs, Label* label)
    DEFINED_ON(arm, arm64, mips_shared, x86, x64);

inline void branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs, Label* label)
    DEFINED_ON(arm, arm64, mips_shared, x86, x64);

// Emit a branch whose target can be patched later; the returned
// CodeOffsetJump identifies the patchable site.
template <typename T>
inline CodeOffsetJump branchPtrWithPatch(Condition cond, Register lhs, T rhs, RepatchLabel* label) PER_SHARED_ARCH;
template <typename T>
inline CodeOffsetJump branchPtrWithPatch(Condition cond, Address lhs, T rhs, RepatchLabel* label) PER_SHARED_ARCH;

void branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp, Label* label)
    DEFINED_ON(arm, arm64, mips_shared, x86, x64);
void branchPtrInNurseryChunk(Condition cond, const Address& address, Register temp, Label* label)
    DEFINED_ON(x86);
void branchValueIsNurseryObject(Condition cond, const Address& address, Register temp, Label* label) PER_ARCH;
void branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp, Label* label) PER_ARCH;

// Compares a Value (lhs) that has a private pointer boxed inside a
// js::Value with a raw pointer (rhs).
inline void branchPrivatePtr(Condition cond, const Address& lhs, Register rhs, Label* label) PER_ARCH;

inline void branchFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
                        Label* label) PER_SHARED_ARCH;

// Truncate a double/float32 to int32 and when it doesn't fit an int32 it will jump to
// the failure label. This particular variant is allowed to return the value modulo 2**32,
// which isn't implemented on all architectures.
// E.g. the x64 variants will do this only in the int64_t range.
inline void branchTruncateFloat32MaybeModUint32(FloatRegister src, Register dest, Label* fail)
    DEFINED_ON(arm, arm64, mips_shared, x86, x64);
inline void branchTruncateDoubleMaybeModUint32(FloatRegister src, Register dest, Label* fail)
    DEFINED_ON(arm, arm64, mips_shared, x86, x64);

// Truncate a double/float32 to intptr and when it doesn't fit jump to the failure label.
inline void branchTruncateFloat32ToPtr(FloatRegister src, Register dest, Label* fail)
    DEFINED_ON(x86, x64);
inline void branchTruncateDoubleToPtr(FloatRegister src, Register dest, Label* fail)
    DEFINED_ON(x86, x64);

// Truncate a double/float32 to int32 and when it doesn't fit jump to the failure label.
inline void branchTruncateFloat32ToInt32(FloatRegister src, Register dest, Label* fail)
    DEFINED_ON(arm, arm64, mips_shared, x86, x64);
inline void branchTruncateDoubleToInt32(FloatRegister src, Register dest, Label* fail)
    DEFINED_ON(arm, arm64, mips_shared, x86, x64);

inline void branchDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
                         Label* label) PER_SHARED_ARCH;

inline void branchDoubleNotInInt64Range(Address src, Register temp, Label* fail);
inline void branchDoubleNotInUInt64Range(Address src, Register temp, Label* fail);
inline void branchFloat32NotInInt64Range(Address src, Register temp, Label* fail);
inline void branchFloat32NotInUInt64Range(Address src, Register temp, Label* fail);

template <typename T, typename L>
inline void branchAdd32(Condition cond, T src, Register dest, L label) PER_SHARED_ARCH;
template <typename T>
inline void branchSub32(Condition cond, T src, Register dest, Label* label) PER_SHARED_ARCH;

inline void decBranchPtr(Condition cond, Register lhs, Imm32 rhs, Label* label) PER_SHARED_ARCH;
1105 : template <class L>
1106 : inline void branchTest32(Condition cond, Register lhs, Register rhs, L label) PER_SHARED_ARCH;
1107 : template <class L>
1108 : inline void branchTest32(Condition cond, Register lhs, Imm32 rhs, L label) PER_SHARED_ARCH;
1109 : inline void branchTest32(Condition cond, const Address& lhs, Imm32 rhh, Label* label) PER_SHARED_ARCH;
1110 : inline void branchTest32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
1111 : DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1112 :
template <class L>
inline void branchTestPtr(Condition cond, Register lhs, Register rhs, L label) PER_SHARED_ARCH;
inline void branchTestPtr(Condition cond, Register lhs, Imm32 rhs, Label* label) PER_SHARED_ARCH;
inline void branchTestPtr(Condition cond, const Address& lhs, Imm32 rhs, Label* label) PER_SHARED_ARCH;

template <class L>
inline void branchTest64(Condition cond, Register64 lhs, Register64 rhs, Register temp,
                         L label) PER_ARCH;

// Branches to |label| if |reg| is false. |reg| should be a C++ bool.
template <class L>
inline void branchIfFalseBool(Register reg, L label);

// Branches to |label| if |reg| is true. |reg| should be a C++ bool.
inline void branchIfTrueBool(Register reg, Label* label);

// String-representation tests (rope/linear, external, character width).
inline void branchIfRope(Register str, Label* label);
inline void branchIfRopeOrExternal(Register str, Register temp, Label* label);

inline void branchIfNotRope(Register str, Label* label);

inline void branchLatin1String(Register string, Label* label);
inline void branchTwoByteString(Register string, Label* label);

// JSFunction property tests.
inline void branchIfFunctionHasNoScript(Register fun, Label* label);
inline void branchIfInterpreted(Register fun, Label* label);

inline void branchFunctionKind(Condition cond, JSFunction::FunctionKind kind, Register fun,
                               Register scratch, Label* label);

void branchIfNotInterpretedConstructor(Register fun, Register scratch, Label* label);

inline void branchIfObjectEmulatesUndefined(Register objReg, Register scratch, Label* slowCheck,
                                            Label* label);

// Object identity tests against class, shape and group.
inline void branchTestObjClass(Condition cond, Register obj, Register scratch, const js::Class* clasp,
                               Label* label);
inline void branchTestObjShape(Condition cond, Register obj, const Shape* shape, Label* label);
inline void branchTestObjShape(Condition cond, Register obj, Register shape, Label* label);
inline void branchTestObjGroup(Condition cond, Register obj, ObjectGroup* group, Label* label);
inline void branchTestObjGroup(Condition cond, Register obj, Register group, Label* label);

inline void branchTestClassIsProxy(bool proxy, Register clasp, Label* label);

inline void branchTestObjectIsProxy(bool proxy, Register object, Register scratch, Label* label);

inline void branchTestProxyHandlerFamily(Condition cond, Register proxy, Register scratch,
                                         const void* handlerp, Label* label);

template <typename Value>
inline void branchTestMIRType(Condition cond, const Value& val, MIRType type, Label* label);

// Emit type case branch on tag matching if the type tag in the definition
// might actually be that type.
void maybeBranchTestType(MIRType type, MDefinition* maybeDef, Register tag, Label* label);

inline void branchTestNeedsIncrementalBarrier(Condition cond, Label* label);

// Perform a type-test on a tag of a Value (32bits boxing), or the tagged
// value (64bits boxing).
inline void branchTestUndefined(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
inline void branchTestInt32(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
inline void branchTestDouble(Condition cond, Register tag, Label* label)
    DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
inline void branchTestNumber(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
inline void branchTestBoolean(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
inline void branchTestString(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
inline void branchTestSymbol(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
inline void branchTestNull(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
inline void branchTestObject(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
inline void branchTestPrimitive(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
inline void branchTestMagic(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;

// Perform a type-test on a Value, addressed by Address or BaseIndex, or
// loaded into ValueOperand.
// BaseIndex and ValueOperand variants clobber the ScratchReg on x64.
// All variants clobber the ScratchReg on arm64.
inline void branchTestUndefined(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
inline void branchTestUndefined(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
inline void branchTestUndefined(Condition cond, const ValueOperand& value, Label* label)
    DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);

inline void branchTestInt32(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
inline void branchTestInt32(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
inline void branchTestInt32(Condition cond, const ValueOperand& value, Label* label)
    DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);

inline void branchTestDouble(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
inline void branchTestDouble(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
inline void branchTestDouble(Condition cond, const ValueOperand& value, Label* label)
    DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);

inline void branchTestNumber(Condition cond, const ValueOperand& value, Label* label)
    DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);

inline void branchTestBoolean(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
inline void branchTestBoolean(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
inline void branchTestBoolean(Condition cond, const ValueOperand& value, Label* label)
    DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);

inline void branchTestString(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
inline void branchTestString(Condition cond, const ValueOperand& value, Label* label)
    DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);

inline void branchTestSymbol(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
inline void branchTestSymbol(Condition cond, const ValueOperand& value, Label* label)
    DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);

inline void branchTestNull(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
inline void branchTestNull(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
inline void branchTestNull(Condition cond, const ValueOperand& value, Label* label)
    DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);

// Clobbers the ScratchReg on x64.
inline void branchTestObject(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
inline void branchTestObject(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
inline void branchTestObject(Condition cond, const ValueOperand& value, Label* label)
    DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);

inline void branchTestGCThing(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
inline void branchTestGCThing(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;

inline void branchTestPrimitive(Condition cond, const ValueOperand& value, Label* label)
    DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);

inline void branchTestMagic(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
inline void branchTestMagic(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
template <class L>
inline void branchTestMagic(Condition cond, const ValueOperand& value, L label)
    DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);

inline void branchTestMagic(Condition cond, const Address& valaddr, JSWhyMagic why, Label* label) PER_ARCH;

inline void branchTestMagicValue(Condition cond, const ValueOperand& val, JSWhyMagic why,
                                 Label* label);

void branchTestValue(Condition cond, const ValueOperand& lhs,
                     const Value& rhs, Label* label) PER_ARCH;

// Checks if given Value is evaluated to true or false in a condition.
// The type of the value should match the type of the method.
inline void branchTestInt32Truthy(bool truthy, const ValueOperand& value, Label* label)
    DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
inline void branchTestDoubleTruthy(bool truthy, FloatRegister reg, Label* label) PER_SHARED_ARCH;
inline void branchTestBooleanTruthy(bool truthy, const ValueOperand& value, Label* label) PER_ARCH;
inline void branchTestStringTruthy(bool truthy, const ValueOperand& value, Label* label)
    DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1260 :
private:

// Implementation helpers for the branch* methods above; these are the
// shared bodies that the public overloads dispatch to.
template <typename T>
inline void branch32Impl(Condition cond, const T& length, const RegisterOrInt32Constant& key,
                         Label* label);

template <typename T, typename S, typename L>
inline void branchPtrImpl(Condition cond, const T& lhs, const S& rhs, L label)
    DEFINED_ON(x86_shared);

void branchPtrInNurseryChunkImpl(Condition cond, Register ptr, Label* label)
    DEFINED_ON(x86);
template <typename T>
void branchValueIsNurseryObjectImpl(Condition cond, const T& value, Register temp, Label* label)
    DEFINED_ON(arm64, mips64, x64);

template <typename T>
inline void branchTestUndefinedImpl(Condition cond, const T& t, Label* label)
    DEFINED_ON(arm, arm64, x86_shared);
template <typename T>
inline void branchTestInt32Impl(Condition cond, const T& t, Label* label)
    DEFINED_ON(arm, arm64, x86_shared);
template <typename T>
inline void branchTestDoubleImpl(Condition cond, const T& t, Label* label)
    DEFINED_ON(arm, arm64, x86_shared);
template <typename T>
inline void branchTestNumberImpl(Condition cond, const T& t, Label* label)
    DEFINED_ON(arm, arm64, x86_shared);
template <typename T>
inline void branchTestBooleanImpl(Condition cond, const T& t, Label* label)
    DEFINED_ON(arm, arm64, x86_shared);
template <typename T>
inline void branchTestStringImpl(Condition cond, const T& t, Label* label)
    DEFINED_ON(arm, arm64, x86_shared);
template <typename T>
inline void branchTestSymbolImpl(Condition cond, const T& t, Label* label)
    DEFINED_ON(arm, arm64, x86_shared);
template <typename T>
inline void branchTestNullImpl(Condition cond, const T& t, Label* label)
    DEFINED_ON(arm, arm64, x86_shared);
template <typename T>
inline void branchTestObjectImpl(Condition cond, const T& t, Label* label)
    DEFINED_ON(arm, arm64, x86_shared);
template <typename T>
inline void branchTestGCThingImpl(Condition cond, const T& t, Label* label)
    DEFINED_ON(arm, arm64, x86_shared);
template <typename T>
inline void branchTestPrimitiveImpl(Condition cond, const T& t, Label* label)
    DEFINED_ON(arm, arm64, x86_shared);
template <typename T, class L>
inline void branchTestMagicImpl(Condition cond, const T& t, L label)
    DEFINED_ON(arm, arm64, x86_shared);
1314 :
public:
// ========================================================================
// Canonicalization primitives.
inline void canonicalizeDouble(FloatRegister reg);
// The *IfDeterministic variants presumably canonicalize only in
// deterministic builds — confirm in the .inl implementation.
inline void canonicalizeDoubleIfDeterministic(FloatRegister reg);

inline void canonicalizeFloat(FloatRegister reg);
inline void canonicalizeFloatIfDeterministic(FloatRegister reg);

inline void canonicalizeFloat32x4(FloatRegister reg, FloatRegister scratch)
    DEFINED_ON(x86_shared);
1326 :
public:
// ========================================================================
// Memory access primitives.
inline void storeUncanonicalizedDouble(FloatRegister src, const Address& dest)
    DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
inline void storeUncanonicalizedDouble(FloatRegister src, const BaseIndex& dest)
    DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
inline void storeUncanonicalizedDouble(FloatRegister src, const Operand& dest)
    DEFINED_ON(x86_shared);

template<class T>
inline void storeDouble(FloatRegister src, const T& dest);

inline void storeUncanonicalizedFloat32(FloatRegister src, const Address& dest)
    DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
inline void storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& dest)
    DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
inline void storeUncanonicalizedFloat32(FloatRegister src, const Operand& dest)
    DEFINED_ON(x86_shared);

template<class T>
inline void storeFloat32(FloatRegister src, const T& dest);

inline void storeFloat32x3(FloatRegister src, const Address& dest) PER_SHARED_ARCH;
inline void storeFloat32x3(FloatRegister src, const BaseIndex& dest) PER_SHARED_ARCH;

template <typename T>
void storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType, const T& dest,
                       MIRType slotType) PER_ARCH;

inline void memoryBarrier(MemoryBarrierBits barrier) PER_SHARED_ARCH;
1358 :
1359 : public:
1360 : // ========================================================================
1361 : // Truncate floating point.
1362 :
1363 : // Undefined behaviour when truncation is outside Int64 range.
1364 : // Needs a temp register if SSE3 is not present.
1365 : inline void truncateFloat32ToInt64(Address src, Address dest, Register temp)
1366 : DEFINED_ON(x86_shared);
1367 : inline void truncateFloat32ToUInt64(Address src, Address dest, Register temp,
1368 : FloatRegister floatTemp)
1369 : DEFINED_ON(x86, x64);
1370 : inline void truncateDoubleToInt64(Address src, Address dest, Register temp)
1371 : DEFINED_ON(x86_shared);
1372 : inline void truncateDoubleToUInt64(Address src, Address dest, Register temp,
1373 : FloatRegister floatTemp)
1374 : DEFINED_ON(x86, x64);
1375 :
1376 : public:
1377 : // ========================================================================
1378 : // wasm support
1379 :
1380 : // Emit a bounds check against the wasm heap limit, jumping to 'label' if 'cond' holds.
1381 : // Required when WASM_HUGE_MEMORY is not defined.
1382 : template <class L>
1383 : inline void wasmBoundsCheck(Condition cond, Register index, Register boundsCheckLimit, L label)
1384 : DEFINED_ON(arm, arm64, mips32, mips64, x86);
1385 :
1386 : template <class L>
1387 : inline void wasmBoundsCheck(Condition cond, Register index, Address boundsCheckLimit, L label)
1388 : DEFINED_ON(arm, arm64, mips32, mips64, x86);
1389 :
1390 : // On x86, each instruction adds its own wasm::MemoryAccess's to the
1391 : // wasm::MemoryAccessVector (there can be multiple when i64 is involved).
1392 : // On x64, only some asm.js accesses need a wasm::MemoryAccess so the caller
1393 : // is responsible for doing this instead.
1394 : void wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr, AnyRegister out) DEFINED_ON(x86, x64);
1395 : void wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr, Register64 out) DEFINED_ON(x86, x64);
1396 : void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Operand dstAddr) DEFINED_ON(x86, x64);
1397 : void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Operand dstAddr) DEFINED_ON(x86);
1398 :
1399 : // For all the ARM wasmLoad and wasmStore functions, `ptr` MUST equal
1400 : // `ptrScratch`, and that register will be updated based on conditions
1401 : // listed below (where it is only mentioned as `ptr`).
1402 :
1403 : // `ptr` will be updated if access.offset() != 0 or access.type() == Scalar::Int64.
1404 : void wasmLoad(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
1405 : Register ptrScratch, AnyRegister output)
1406 : DEFINED_ON(arm);
1407 : void wasmLoadI64(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
1408 : Register ptrScratch, Register64 output)
1409 : DEFINED_ON(arm);
1410 : void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Register memoryBase,
1411 : Register ptr, Register ptrScratch)
1412 : DEFINED_ON(arm);
1413 : void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
1414 : Register ptr, Register ptrScratch)
1415 : DEFINED_ON(arm);
1416 :
1417 : // `ptr` will always be updated.
1418 : void wasmUnalignedLoad(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
1419 : Register ptrScratch, Register output, Register tmp)
1420 : DEFINED_ON(arm);
1421 :
1422 : // `ptr` will always be updated and `tmp1` is always needed. `tmp2` is
1423 : // needed for Float32; `tmp2` and `tmp3` are needed for Float64. Temps must
1424 : // be Invalid when they are not needed.
1425 : void wasmUnalignedLoadFP(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
1426 : Register ptrScratch, FloatRegister output, Register tmp1, Register tmp2,
1427 : Register tmp3)
1428 : DEFINED_ON(arm);
1429 :
1430 : // `ptr` will always be updated.
1431 : void wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
1432 : Register ptrScratch, Register64 output, Register tmp)
1433 : DEFINED_ON(arm);
1434 :
1435 : // `ptr` and `value` will always be updated.
1436 : void wasmUnalignedStore(const wasm::MemoryAccessDesc& access, Register value, Register memoryBase,
1437 : Register ptr, Register ptrScratch)
1438 : DEFINED_ON(arm);
1439 :
1440 : // `ptr` will always be updated.
1441 : void wasmUnalignedStoreFP(const wasm::MemoryAccessDesc& access, FloatRegister floatValue,
1442 : Register memoryBase, Register ptr, Register ptrScratch, Register tmp)
1443 : DEFINED_ON(arm);
1444 :
1445 : // `ptr` will always be updated.
1446 : void wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access, Register64 value,
1447 : Register memoryBase, Register ptr, Register ptrScratch,
1448 : Register tmp)
1449 : DEFINED_ON(arm);
1450 :
1451 : // wasm specific methods, used in both the wasm baseline compiler and ion.
1452 : void wasmTruncateDoubleToUInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86, x64, arm);
1453 : void wasmTruncateDoubleToInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86_shared, arm);
1454 : void outOfLineWasmTruncateDoubleToInt32(FloatRegister input, bool isUnsigned, wasm::BytecodeOffset off, Label* rejoin) DEFINED_ON(x86_shared);
1455 :
1456 : void wasmTruncateFloat32ToUInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86, x64, arm);
1457 : void wasmTruncateFloat32ToInt32(FloatRegister input, Register output, Label* oolEntry) DEFINED_ON(x86_shared, arm);
1458 : void outOfLineWasmTruncateFloat32ToInt32(FloatRegister input, bool isUnsigned, wasm::BytecodeOffset off, Label* rejoin) DEFINED_ON(x86_shared);
1459 :
1460 : void outOfLineWasmTruncateDoubleToInt64(FloatRegister input, bool isUnsigned, wasm::BytecodeOffset off, Label* rejoin) DEFINED_ON(x86_shared);
1461 : void outOfLineWasmTruncateFloat32ToInt64(FloatRegister input, bool isUnsigned, wasm::BytecodeOffset off, Label* rejoin) DEFINED_ON(x86_shared);
1462 :
1463 : // This function takes care of loading the callee's TLS and pinned regs but
1464 : // it is the caller's responsibility to save/restore TLS or pinned regs.
1465 : void wasmCallImport(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee);
1466 :
1467 : // WasmTableCallIndexReg must contain the index of the indirect call.
1468 : void wasmCallIndirect(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee, bool needsBoundsCheck);
1469 :
1470 : // This function takes care of loading the pointer to the current instance
1471 : // as the implicit first argument. It preserves TLS and pinned registers.
1472 : // (TLS & pinned regs are non-volatile registers in the system ABI).
1473 : void wasmCallBuiltinInstanceMethod(const wasm::CallSiteDesc& desc, const ABIArg& instanceArg,
1474 : wasm::SymbolicAddress builtin);
1475 :
1476 : // Emit the out-of-line trap code to which trapping jumps/branches are
1477 : // bound. This should be called once per function after all other codegen,
1478 : // including "normal" OutOfLineCode.
1479 : void wasmEmitTrapOutOfLineCode();
1480 :
1481 : // Assert invariants that should be true within any non-exit-stub wasm code.
1482 : void wasmAssertNonExitInvariants(Register activation);
1483 :
1484 : // Perform a stack-overflow test, branching to the given Label on overflow.
1485 : void wasmEmitStackCheck(Register sp, Register scratch, Label* onOverflow);
1486 :
1487 : public:
1488 : // ========================================================================
1489 : // Clamping functions.
1490 :
1491 : inline void clampIntToUint8(Register reg) PER_SHARED_ARCH;
1492 :
1493 : //}}} check_macroassembler_style
1494 : public:
1495 :
1496 : // Emits a test of a value against all types in a TypeSet. A scratch
1497 : // register is required.
1498 : template <typename Source>
1499 : void guardTypeSet(const Source& address, const TypeSet* types, BarrierKind kind, Register scratch, Label* miss);
1500 :
1501 : void guardObjectType(Register obj, const TypeSet* types, Register scratch, Label* miss);
1502 :
1503 : template <typename TypeSet>
1504 : void guardTypeSetMightBeIncomplete(TypeSet* types, Register obj, Register scratch, Label* label);
1505 :
    // Load the Shape of |objReg| into |dest|.
    void loadObjShape(Register objReg, Register dest) {
        loadPtr(Address(objReg, ShapedObject::offsetOfShape()), dest);
    }
    // Load the ObjectGroup of |objReg| into |dest|.
    void loadObjGroup(Register objReg, Register dest) {
        loadPtr(Address(objReg, JSObject::offsetOfGroup()), dest);
    }
    // Load the BaseShape of |objReg| by chasing shape->base.
    void loadBaseShape(Register objReg, Register dest) {
        loadObjShape(objReg, dest);
        loadPtr(Address(dest, Shape::offsetOfBase()), dest);
    }
    // Load the Class of |objReg| by chasing group->clasp.
    void loadObjClass(Register objReg, Register dest) {
        loadObjGroup(objReg, dest);
        loadPtr(Address(dest, ObjectGroup::offsetOfClasp()), dest);
    }

    // Load the private data pointer of a native object with |nfixed| fixed slots.
    void loadObjPrivate(Register obj, uint32_t nfixed, Register dest) {
        loadPtr(Address(obj, NativeObject::getPrivateDataOffset(nfixed)), dest);
    }

    // Load the prototype of |obj| by chasing group->proto. Clobbers |dest| as
    // a scratch for the intermediate group pointer.
    void loadObjProto(Register obj, Register dest) {
        loadPtr(Address(obj, JSObject::offsetOfGroup()), dest);
        loadPtr(Address(dest, ObjectGroup::offsetOfProto()), dest);
    }

    // Load the length field of |str| into |dest|.
    void loadStringLength(Register str, Register dest) {
        load32(Address(str, JSString::offsetOfLength()), dest);
    }
1533 :
1534 : void loadStringChars(Register str, Register dest);
1535 : void loadStringChar(Register str, Register index, Register output, Label* fail);
1536 :
1537 : void loadStringIndexValue(Register str, Register dest, Label* fail);
1538 :
1539 : void loadJSContext(Register dest);
    // Load cx->activation_ into |dest|. Clobbers |dest| as a scratch for the
    // JSContext pointer along the way.
    void loadJitActivation(Register dest) {
        loadJSContext(dest);
        loadPtr(Address(dest, offsetof(JSContext, activation_)), dest);
    }
1544 :
1545 : void guardGroupHasUnanalyzedNewScript(Register group, Register scratch, Label* fail);
1546 :
1547 : void loadWasmTlsRegFromFrame(Register dest = WasmTlsReg);
1548 :
1549 : template<typename T>
1550 21 : void loadTypedOrValue(const T& src, TypedOrValueRegister dest) {
1551 21 : if (dest.hasValue())
1552 21 : loadValue(src, dest.valueReg());
1553 : else
1554 0 : loadUnboxedValue(src, dest.type(), dest.typedReg());
1555 21 : }
1556 :
1557 : template<typename T>
1558 0 : void loadElementTypedOrValue(const T& src, TypedOrValueRegister dest, bool holeCheck,
1559 : Label* hole) {
1560 0 : if (dest.hasValue()) {
1561 0 : loadValue(src, dest.valueReg());
1562 0 : if (holeCheck)
1563 0 : branchTestMagic(Assembler::Equal, dest.valueReg(), hole);
1564 : } else {
1565 0 : if (holeCheck)
1566 0 : branchTestMagic(Assembler::Equal, src, hole);
1567 0 : loadUnboxedValue(src, dest.type(), dest.typedReg());
1568 : }
1569 0 : }
1570 :
1571 : template <typename T>
1572 12 : void storeTypedOrValue(TypedOrValueRegister src, const T& dest) {
1573 12 : if (src.hasValue()) {
1574 0 : storeValue(src.valueReg(), dest);
1575 12 : } else if (IsFloatingPointType(src.type())) {
1576 0 : FloatRegister reg = src.typedReg().fpu();
1577 0 : if (src.type() == MIRType::Float32) {
1578 0 : convertFloat32ToDouble(reg, ScratchDoubleReg);
1579 0 : reg = ScratchDoubleReg;
1580 : }
1581 0 : storeDouble(reg, dest);
1582 : } else {
1583 12 : storeValue(ValueTypeFromMIRType(src.type()), src.typedReg().gpr(), dest);
1584 : }
1585 12 : }
1586 :
1587 : template <typename T>
1588 : inline void storeObjectOrNull(Register src, const T& dest);
1589 :
1590 : template <typename T>
1591 32 : void storeConstantOrRegister(const ConstantOrRegister& src, const T& dest) {
1592 32 : if (src.constant())
1593 20 : storeValue(src.value(), dest);
1594 : else
1595 12 : storeTypedOrValue(src.reg(), dest);
1596 32 : }
1597 :
    // Move a word-sized call result from ReturnReg into |reg|, eliding the
    // move when |reg| already is ReturnReg.
    void storeCallWordResult(Register reg) {
        if (reg != ReturnReg)
            mov(ReturnReg, reg);
    }

    inline void storeCallBoolResult(Register reg);

    // Move a double call result from ReturnDoubleReg into |reg|, eliding the
    // move when |reg| already is ReturnDoubleReg.
    void storeCallFloatResult(FloatRegister reg) {
        if (reg != ReturnDoubleReg)
            moveDouble(ReturnDoubleReg, reg);
    }
1609 :
1610 : inline void storeCallResultValue(AnyRegister dest);
1611 :
    // Move a boxed Value call result into |dest|: the JSReturnReg_Type /
    // JSReturnReg_Data pair on nunbox32 platforms, the single JSReturnReg on
    // punbox64 platforms.
    void storeCallResultValue(ValueOperand dest) {
#if defined(JS_NUNBOX32)
        // reshuffle the return registers used for a call result to store into
        // dest, using ReturnReg as a scratch register if necessary. This must
        // only be called after returning from a call, at a point when the
        // return register is not live. XXX would be better to allow wrappers
        // to store the return value to different places.
        if (dest.typeReg() == JSReturnReg_Data) {
            if (dest.payloadReg() == JSReturnReg_Type) {
                // swap the two registers.
                mov(JSReturnReg_Type, ReturnReg);
                mov(JSReturnReg_Data, JSReturnReg_Type);
                mov(ReturnReg, JSReturnReg_Data);
            } else {
                // Move data first so the type register is not clobbered.
                mov(JSReturnReg_Data, dest.payloadReg());
                mov(JSReturnReg_Type, dest.typeReg());
            }
        } else {
            mov(JSReturnReg_Type, dest.typeReg());
            mov(JSReturnReg_Data, dest.payloadReg());
        }
#elif defined(JS_PUNBOX64)
        if (dest.valueReg() != JSReturnReg)
            mov(JSReturnReg, dest.valueReg());
#else
#error "Bad architecture"
#endif
    }
1640 :
1641 : inline void storeCallResultValue(TypedOrValueRegister dest);
1642 :
1643 : using MacroAssemblerSpecific::store32;
1644 6 : void store32(const RegisterOrInt32Constant& key, const Address& dest) {
1645 6 : if (key.isRegister())
1646 2 : store32(key.reg(), dest);
1647 : else
1648 4 : store32(Imm32(key.constant()), dest);
1649 6 : }
1650 :
    // Emit a guarded call to the pre-write barrier stub for |address|: the
    // call is skipped entirely when incremental barriers are off, and also
    // when the current contents cannot need marking (a non-GC-thing Value, or
    // a null object/string pointer). PreBarrierReg is preserved around the
    // call.
    template <typename T>
    void guardedCallPreBarrier(const T& address, MIRType type) {
        Label done;

        branchTestNeedsIncrementalBarrier(Assembler::Zero, &done);

        if (type == MIRType::Value)
            branchTestGCThing(Assembler::NotEqual, address, &done);
        else if (type == MIRType::Object || type == MIRType::String)
            branchPtr(Assembler::Equal, address, ImmWord(0), &done);

        // The stub takes the address of the cell in PreBarrierReg.
        Push(PreBarrierReg);
        computeEffectiveAddress(address, PreBarrierReg);

        const JitRuntime* rt = GetJitContext()->runtime->jitRuntime();
        JitCode* preBarrier = rt->preBarrier(type);

        call(preBarrier);
        Pop(PreBarrierReg);

        bind(&done);
    }
1673 :
1674 : template<typename T>
1675 : void loadFromTypedArray(Scalar::Type arrayType, const T& src, AnyRegister dest, Register temp, Label* fail,
1676 : bool canonicalizeDoubles = true, unsigned numElems = 0);
1677 :
1678 : template<typename T>
1679 : void loadFromTypedArray(Scalar::Type arrayType, const T& src, const ValueOperand& dest, bool allowDouble,
1680 : Register temp, Label* fail);
1681 :
    // Store |value| into an element of an integer typed array, using a store
    // of the width implied by |arrayType|. Float/other types are invalid here.
    template<typename S, typename T>
    void storeToTypedIntArray(Scalar::Type arrayType, const S& value, const T& dest) {
        switch (arrayType) {
          case Scalar::Int8:
          case Scalar::Uint8:
          case Scalar::Uint8Clamped:
            store8(value, dest);
            break;
          case Scalar::Int16:
          case Scalar::Uint16:
            store16(value, dest);
            break;
          case Scalar::Int32:
          case Scalar::Uint32:
            store32(value, dest);
            break;
          default:
            MOZ_CRASH("Invalid typed array type");
        }
    }
1702 :
1703 : void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const BaseIndex& dest,
1704 : unsigned numElems = 0);
1705 : void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const Address& dest,
1706 : unsigned numElems = 0);
1707 :
1708 : // Load a property from an UnboxedPlainObject or UnboxedArrayObject.
1709 : template <typename T>
1710 : void loadUnboxedProperty(T address, JSValueType type, TypedOrValueRegister output);
1711 :
1712 : // Store a property to an UnboxedPlainObject, without triggering barriers.
1713 : // If failure is null, the value definitely has a type suitable for storing
1714 : // in the property.
1715 : template <typename T>
1716 : void storeUnboxedProperty(T address, JSValueType type,
1717 : const ConstantOrRegister& value, Label* failure);
1718 :
1719 : void checkUnboxedArrayCapacity(Register obj, const RegisterOrInt32Constant& index,
1720 : Register temp, Label* failure);
1721 :
    // Strings are unboxed through the same path as objects.
    template <typename T>
    Register extractString(const T& source, Register scratch) {
        return extractObject(source, scratch);
    }

    // Symbols are likewise unboxed through the object path.
    template <typename T>
    Register extractSymbol(const T& source, Register scratch) {
        return extractObject(source, scratch);
    }
1731 :
1732 : void debugAssertIsObject(const ValueOperand& val);
1733 :
1734 : using MacroAssemblerSpecific::extractTag;
1735 0 : Register extractTag(const TypedOrValueRegister& reg, Register scratch) {
1736 0 : if (reg.hasValue())
1737 0 : return extractTag(reg.valueReg(), scratch);
1738 0 : mov(ImmWord(MIRTypeToTag(reg.type())), scratch);
1739 0 : return scratch;
1740 : }
1741 :
1742 : using MacroAssemblerSpecific::extractObject;
1743 0 : Register extractObject(const TypedOrValueRegister& reg, Register scratch) {
1744 0 : if (reg.hasValue())
1745 0 : return extractObject(reg.valueReg(), scratch);
1746 0 : MOZ_ASSERT(reg.type() == MIRType::Object);
1747 0 : return reg.typedReg().gpr();
1748 : }
1749 :
1750 : // Inline version of js_TypedArray_uint8_clamp_double.
1751 : // This function clobbers the input register.
1752 : void clampDoubleToUint8(FloatRegister input, Register output) PER_ARCH;
1753 :
1754 : using MacroAssemblerSpecific::ensureDouble;
1755 :
    // Ensure the Value in |source| ends up as a double in |dest|: a boxed
    // double is unboxed, a boxed int32 is converted, and anything else
    // branches to |failure|.
    template <typename S>
    void ensureDouble(const S& source, FloatRegister dest, Label* failure) {
        Label isDouble, done;
        branchTestDouble(Assembler::Equal, source, &isDouble);
        branchTestInt32(Assembler::NotEqual, source, failure);

        // Int32 path.
        convertInt32ToDouble(source, dest);
        jump(&done);

        bind(&isDouble);
        unboxDouble(source, dest);

        bind(&done);
    }
1770 :
1771 : // Inline allocation.
1772 : private:
1773 : void checkAllocatorState(Label* fail);
1774 : bool shouldNurseryAllocate(gc::AllocKind allocKind, gc::InitialHeap initialHeap);
1775 : void nurseryAllocate(Register result, Register temp, gc::AllocKind allocKind,
1776 : size_t nDynamicSlots, gc::InitialHeap initialHeap, Label* fail);
1777 : void freeListAllocate(Register result, Register temp, gc::AllocKind allocKind, Label* fail);
1778 : void allocateObject(Register result, Register temp, gc::AllocKind allocKind,
1779 : uint32_t nDynamicSlots, gc::InitialHeap initialHeap, Label* fail);
1780 : void allocateNonObject(Register result, Register temp, gc::AllocKind allocKind, Label* fail);
1781 : void copySlotsFromTemplate(Register obj, const NativeObject* templateObj,
1782 : uint32_t start, uint32_t end);
1783 : void fillSlotsWithConstantValue(Address addr, Register temp, uint32_t start, uint32_t end,
1784 : const Value& v);
1785 : void fillSlotsWithUndefined(Address addr, Register temp, uint32_t start, uint32_t end);
1786 : void fillSlotsWithUninitialized(Address addr, Register temp, uint32_t start, uint32_t end);
1787 :
1788 : void initGCSlots(Register obj, Register temp, NativeObject* templateObj, bool initContents);
1789 :
1790 : public:
1791 : void callMallocStub(size_t nbytes, Register result, Label* fail);
1792 : void callFreeStub(Register slots);
1793 : void createGCObject(Register result, Register temp, JSObject* templateObj,
1794 : gc::InitialHeap initialHeap, Label* fail, bool initContents = true,
1795 : bool convertDoubleElements = false);
1796 :
1797 : void initGCThing(Register obj, Register temp, JSObject* templateObj,
1798 : bool initContents = true, bool convertDoubleElements = false);
1799 : void initTypedArraySlots(Register obj, Register temp, Register lengthReg,
1800 : LiveRegisterSet liveRegs, Label* fail,
1801 : TypedArrayObject* templateObj, TypedArrayLength lengthKind);
1802 :
1803 : void initUnboxedObjectContents(Register object, UnboxedPlainObject* templateObject);
1804 :
1805 : void newGCString(Register result, Register temp, Label* fail);
1806 : void newGCFatInlineString(Register result, Register temp, Label* fail);
1807 :
1808 : // Compares two strings for equality based on the JSOP.
1809 : // This checks for identical pointers, atoms and length and fails for everything else.
1810 : void compareStrings(JSOp op, Register left, Register right, Register result,
1811 : Label* fail);
1812 :
1813 : // Result of the typeof operation. Falls back to slow-path for proxies.
1814 : void typeOfObject(Register objReg, Register scratch, Label* slow,
1815 : Label* isObject, Label* isCallable, Label* isUndefined);
1816 :
1817 : public:
1818 : // Generates code used to complete a bailout.
1819 : void generateBailoutTail(Register scratch, Register bailoutInfo);
1820 :
1821 : public:
1822 : #ifndef JS_CODEGEN_ARM64
1823 : // StackPointer manipulation functions.
1824 : // On ARM64, the StackPointer is implemented as two synchronized registers.
1825 : // Code shared across platforms must use these functions to be valid.
1826 : template <typename T> inline void addToStackPtr(T t);
1827 : template <typename T> inline void addStackPtrTo(T t);
1828 :
1829 : void subFromStackPtr(Imm32 imm32) DEFINED_ON(mips32, mips64, arm, x86, x64);
1830 : void subFromStackPtr(Register reg);
1831 :
    // t -= StackPointer.
    template <typename T>
    void subStackPtrFrom(T t) { subPtr(getStackPointer(), t); }

    // StackPointer &= t.
    template <typename T>
    void andToStackPtr(T t) { andPtr(t, getStackPointer()); }
    // t &= StackPointer.
    template <typename T>
    void andStackPtrTo(T t) { andPtr(getStackPointer(), t); }

    // StackPointer = t.
    template <typename T>
    void moveToStackPtr(T t) { movePtr(t, getStackPointer()); }
    // t = StackPointer.
    template <typename T>
    void moveStackPtrTo(T t) { movePtr(getStackPointer(), t); }

    // StackPointer = *t.
    template <typename T>
    void loadStackPtr(T t) { loadPtr(t, getStackPointer()); }
    // *t = StackPointer.
    template <typename T>
    void storeStackPtr(T t) { storePtr(getStackPointer(), t); }
1849 :
1850 : // StackPointer testing functions.
1851 : // On ARM64, sp can function as the zero register depending on context.
1852 : // Code shared across platforms must use these functions to be valid.
1853 : template <typename T>
1854 : inline void branchTestStackPtr(Condition cond, T t, Label* label);
1855 : template <typename T>
1856 : inline void branchStackPtr(Condition cond, T rhs, Label* label);
1857 : template <typename T>
1858 : inline void branchStackPtrRhs(Condition cond, T lhs, Label* label);
1859 :
1860 : // Move the stack pointer based on the requested amount.
1861 : inline void reserveStack(uint32_t amount);
1862 : #else // !JS_CODEGEN_ARM64
1863 : void reserveStack(uint32_t amount);
1864 : #endif
1865 :
1866 : public:
    // Request that profiling instrumentation be emitted around subsequent
    // call sites.
    void enableProfilingInstrumentation() {
        emitProfilingInstrumentation_ = true;
    }
1870 :
1871 : private:
1872 : // This class is used to surround call sites throughout the assembler. This
1873 : // is used by callWithABI, and callJit functions, except if suffixed by
1874 : // NoProfiler.
    class AutoProfilerCallInstrumentation {
        MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER;

      public:
        // Defined out of line; presumably emits the profiler instrumentation
        // around the surrounded call site -- see the .cpp definition.
        explicit AutoProfilerCallInstrumentation(MacroAssembler& masm
                                                 MOZ_GUARD_OBJECT_NOTIFIER_PARAM);
        ~AutoProfilerCallInstrumentation() {}
    };
    friend class AutoProfilerCallInstrumentation;

    // Append |label| to the recorded profiler call sites, propagating any
    // append (OOM) failure to the assembler.
    void appendProfilerCallSite(CodeOffset label) {
        propagateOOM(profilerCallSites_.append(label));
    }
1888 :
1889 : // Fix up the code pointers to be written for locations where profilerCallSite
1890 : // emitted moves of RIP to a register.
1891 : void linkProfilerCallSites(JitCode* code);
1892 :
1893 : // This field is used to manage profiling instrumentation output. If
1894 : // provided and enabled, then instrumentation will be emitted around call
1895 : // sites.
1896 : bool emitProfilingInstrumentation_;
1897 :
1898 : // Record locations of the call sites.
1899 : Vector<CodeOffset, 0, SystemAllocPolicy> profilerCallSites_;
1900 :
1901 : public:
1902 : void loadBaselineOrIonRaw(Register script, Register dest, Label* failure);
1903 : void loadBaselineOrIonNoArgCheck(Register callee, Register dest, Label* failure);
1904 :
1905 : void loadBaselineFramePtr(Register framePtr, Register dest);
1906 :
    // Compute the baseline frame pointer from |framePtr| into |scratch| and
    // push it with raw push().
    void pushBaselineFramePtr(Register framePtr, Register scratch) {
        loadBaselineFramePtr(framePtr, scratch);
        push(scratch);
    }

    // As pushBaselineFramePtr, but using Push() instead of push().
    void PushBaselineFramePtr(Register framePtr, Register scratch) {
        loadBaselineFramePtr(framePtr, scratch);
        Push(scratch);
    }
1916 :
1917 : private:
1918 : void handleFailure();
1919 :
1920 : public:
    // Label jumped to when an exception is pending.
    Label* exceptionLabel() {
        // Exceptions are currently handled the same way as sequential failures.
        return &failureLabel_;
    }

    // Label bound to the common failure path (see handleFailure()).
    Label* failureLabel() {
        return &failureLabel_;
    }
1929 :
1930 : void finish();
1931 : void link(JitCode* code);
1932 :
1933 : void assumeUnreachable(const char* output);
1934 :
1935 : template<typename T>
1936 : void assertTestInt32(Condition cond, const T& value, const char* output);
1937 :
1938 : void printf(const char* output);
1939 : void printf(const char* output, Register value);
1940 :
1941 : #ifdef JS_TRACE_LOGGING
    // Load cx->traceLogger into |logger|, clobbering |logger| as a scratch
    // for the JSContext pointer.
    void loadTraceLogger(Register logger) {
        loadJSContext(logger);
        loadPtr(Address(logger, offsetof(JSContext, traceLogger)), logger);
    }
1946 : void tracelogStartId(Register logger, uint32_t textId, bool force = false);
1947 : void tracelogStartId(Register logger, Register textId);
1948 : void tracelogStartEvent(Register logger, Register event);
1949 : void tracelogStopId(Register logger, uint32_t textId, bool force = false);
1950 : void tracelogStopId(Register logger, Register textId);
1951 : #endif
1952 :
1953 : #define DISPATCH_FLOATING_POINT_OP(method, type, arg1d, arg1f, arg2) \
1954 : MOZ_ASSERT(IsFloatingPointType(type)); \
1955 : if (type == MIRType::Double) \
1956 : method##Double(arg1d, arg2); \
1957 : else \
1958 : method##Float32(arg1f, arg2); \
1959 :
    // The following four helpers dispatch to the Double or Float32 variant of
    // an operation, selected by |destType| (which must be a floating point
    // MIRType; see DISPATCH_FLOATING_POINT_OP above).
    void loadConstantFloatingPoint(double d, float f, FloatRegister dest, MIRType destType) {
        DISPATCH_FLOATING_POINT_OP(loadConstant, destType, d, f, dest);
    }
    void boolValueToFloatingPoint(ValueOperand value, FloatRegister dest, MIRType destType) {
        DISPATCH_FLOATING_POINT_OP(boolValueTo, destType, value, value, dest);
    }
    void int32ValueToFloatingPoint(ValueOperand value, FloatRegister dest, MIRType destType) {
        DISPATCH_FLOATING_POINT_OP(int32ValueTo, destType, value, value, dest);
    }
    void convertInt32ToFloatingPoint(Register src, FloatRegister dest, MIRType destType) {
        DISPATCH_FLOATING_POINT_OP(convertInt32To, destType, src, src, dest);
    }
1972 :
1973 : #undef DISPATCH_FLOATING_POINT_OP
1974 :
1975 : void convertValueToFloatingPoint(ValueOperand value, FloatRegister output, Label* fail,
1976 : MIRType outputType);
1977 : MOZ_MUST_USE bool convertValueToFloatingPoint(JSContext* cx, const Value& v,
1978 : FloatRegister output, Label* fail,
1979 : MIRType outputType);
1980 : MOZ_MUST_USE bool convertConstantOrRegisterToFloatingPoint(JSContext* cx,
1981 : const ConstantOrRegister& src,
1982 : FloatRegister output, Label* fail,
1983 : MIRType outputType);
1984 : void convertTypedOrValueToFloatingPoint(TypedOrValueRegister src, FloatRegister output,
1985 : Label* fail, MIRType outputType);
1986 :
1987 : void outOfLineTruncateSlow(FloatRegister src, Register dest, bool widenFloatToDouble,
1988 : bool compilingWasm, wasm::BytecodeOffset callOffset);
1989 :
1990 : void convertInt32ValueToDouble(const Address& address, Register scratch, Label* done);
1991 : void convertInt32ValueToDouble(ValueOperand val);
1992 :
1993 0 : void convertValueToDouble(ValueOperand value, FloatRegister output, Label* fail) {
1994 0 : convertValueToFloatingPoint(value, output, fail, MIRType::Double);
1995 0 : }
1996 : MOZ_MUST_USE bool convertValueToDouble(JSContext* cx, const Value& v, FloatRegister output,
1997 : Label* fail) {
1998 : return convertValueToFloatingPoint(cx, v, output, fail, MIRType::Double);
1999 : }
2000 0 : MOZ_MUST_USE bool convertConstantOrRegisterToDouble(JSContext* cx,
2001 : const ConstantOrRegister& src,
2002 : FloatRegister output, Label* fail)
2003 : {
2004 0 : return convertConstantOrRegisterToFloatingPoint(cx, src, output, fail, MIRType::Double);
2005 : }
2006 : void convertTypedOrValueToDouble(TypedOrValueRegister src, FloatRegister output, Label* fail) {
2007 : convertTypedOrValueToFloatingPoint(src, output, fail, MIRType::Double);
2008 : }
2009 :
2010 0 : void convertValueToFloat(ValueOperand value, FloatRegister output, Label* fail) {
2011 0 : convertValueToFloatingPoint(value, output, fail, MIRType::Float32);
2012 0 : }
2013 : MOZ_MUST_USE bool convertValueToFloat(JSContext* cx, const Value& v, FloatRegister output,
2014 : Label* fail) {
2015 : return convertValueToFloatingPoint(cx, v, output, fail, MIRType::Float32);
2016 : }
2017 0 : MOZ_MUST_USE bool convertConstantOrRegisterToFloat(JSContext* cx,
2018 : const ConstantOrRegister& src,
2019 : FloatRegister output, Label* fail)
2020 : {
2021 0 : return convertConstantOrRegisterToFloatingPoint(cx, src, output, fail, MIRType::Float32);
2022 : }
2023 : void convertTypedOrValueToFloat(TypedOrValueRegister src, FloatRegister output, Label* fail) {
2024 : convertTypedOrValueToFloatingPoint(src, output, fail, MIRType::Float32);
2025 : }
2026 :
2027 : enum IntConversionBehavior {
2028 : IntConversion_Normal,
2029 : IntConversion_NegativeZeroCheck,
2030 : IntConversion_Truncate,
2031 : IntConversion_ClampToUint8,
2032 : };
2033 :
2034 : enum IntConversionInputKind {
2035 : IntConversion_NumbersOnly,
2036 : IntConversion_NumbersOrBoolsOnly,
2037 : IntConversion_Any
2038 : };
2039 :
2040 : //
2041 : // Functions for converting values to int.
2042 : //
2043 : void convertDoubleToInt(FloatRegister src, Register output, FloatRegister temp,
2044 : Label* truncateFail, Label* fail, IntConversionBehavior behavior);
2045 :
2046 : // Strings may be handled by providing labels to jump to when the behavior
2047 : // is truncation or clamping. The subroutine, usually an OOL call, is
2048 : // passed the unboxed string in |stringReg| and should convert it to a
2049 : // double store into |temp|.
2050 : void convertValueToInt(ValueOperand value, MDefinition* input,
2051 : Label* handleStringEntry, Label* handleStringRejoin,
2052 : Label* truncateDoubleSlow,
2053 : Register stringReg, FloatRegister temp, Register output,
2054 : Label* fail, IntConversionBehavior behavior,
2055 : IntConversionInputKind conversion = IntConversion_Any);
    // Conversion without string handling: all string-handling labels and the
    // string register are passed as null/invalid.
    void convertValueToInt(ValueOperand value, FloatRegister temp, Register output, Label* fail,
                           IntConversionBehavior behavior)
    {
        convertValueToInt(value, nullptr, nullptr, nullptr, nullptr, InvalidReg, temp, output,
                          fail, behavior);
    }
    // JSContext-taking variants; the MOZ_MUST_USE boolean presumably reports
    // emission failure (e.g. OOM) — callers must check it.
    MOZ_MUST_USE bool convertValueToInt(JSContext* cx, const Value& v, Register output, Label* fail,
                                        IntConversionBehavior behavior);
    MOZ_MUST_USE bool convertConstantOrRegisterToInt(JSContext* cx,
                                                     const ConstantOrRegister& src,
                                                     FloatRegister temp, Register output,
                                                     Label* fail, IntConversionBehavior behavior);
    void convertTypedOrValueToInt(TypedOrValueRegister src, FloatRegister temp, Register output,
                                  Label* fail, IntConversionBehavior behavior);
2070 :
2071 : //
2072 : // Convenience functions for converting values to int32.
2073 : //
2074 0 : void convertValueToInt32(ValueOperand value, FloatRegister temp, Register output, Label* fail,
2075 : bool negativeZeroCheck)
2076 : {
2077 0 : convertValueToInt(value, temp, output, fail, negativeZeroCheck
2078 : ? IntConversion_NegativeZeroCheck
2079 0 : : IntConversion_Normal);
2080 0 : }
2081 1 : void convertValueToInt32(ValueOperand value, MDefinition* input,
2082 : FloatRegister temp, Register output, Label* fail,
2083 : bool negativeZeroCheck, IntConversionInputKind conversion = IntConversion_Any)
2084 : {
2085 1 : convertValueToInt(value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
2086 : negativeZeroCheck
2087 : ? IntConversion_NegativeZeroCheck
2088 : : IntConversion_Normal,
2089 1 : conversion);
2090 1 : }
2091 : MOZ_MUST_USE bool convertValueToInt32(JSContext* cx, const Value& v, Register output,
2092 : Label* fail, bool negativeZeroCheck)
2093 : {
2094 : return convertValueToInt(cx, v, output, fail, negativeZeroCheck
2095 : ? IntConversion_NegativeZeroCheck
2096 : : IntConversion_Normal);
2097 : }
2098 : MOZ_MUST_USE bool convertConstantOrRegisterToInt32(JSContext* cx,
2099 : const ConstantOrRegister& src,
2100 : FloatRegister temp, Register output,
2101 : Label* fail, bool negativeZeroCheck)
2102 : {
2103 : return convertConstantOrRegisterToInt(cx, src, temp, output, fail, negativeZeroCheck
2104 : ? IntConversion_NegativeZeroCheck
2105 : : IntConversion_Normal);
2106 : }
2107 : void convertTypedOrValueToInt32(TypedOrValueRegister src, FloatRegister temp, Register output,
2108 : Label* fail, bool negativeZeroCheck)
2109 : {
2110 : convertTypedOrValueToInt(src, temp, output, fail, negativeZeroCheck
2111 : ? IntConversion_NegativeZeroCheck
2112 : : IntConversion_Normal);
2113 : }
2114 :
2115 : //
2116 : // Convenience functions for truncating values to int32.
2117 : //
2118 : void truncateValueToInt32(ValueOperand value, FloatRegister temp, Register output, Label* fail) {
2119 : convertValueToInt(value, temp, output, fail, IntConversion_Truncate);
2120 : }
2121 1 : void truncateValueToInt32(ValueOperand value, MDefinition* input,
2122 : Label* handleStringEntry, Label* handleStringRejoin,
2123 : Label* truncateDoubleSlow,
2124 : Register stringReg, FloatRegister temp, Register output, Label* fail)
2125 : {
2126 : convertValueToInt(value, input, handleStringEntry, handleStringRejoin, truncateDoubleSlow,
2127 1 : stringReg, temp, output, fail, IntConversion_Truncate);
2128 1 : }
2129 : void truncateValueToInt32(ValueOperand value, MDefinition* input,
2130 : FloatRegister temp, Register output, Label* fail)
2131 : {
2132 : convertValueToInt(value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
2133 : IntConversion_Truncate);
2134 : }
2135 : MOZ_MUST_USE bool truncateValueToInt32(JSContext* cx, const Value& v, Register output,
2136 : Label* fail) {
2137 : return convertValueToInt(cx, v, output, fail, IntConversion_Truncate);
2138 : }
2139 0 : MOZ_MUST_USE bool truncateConstantOrRegisterToInt32(JSContext* cx,
2140 : const ConstantOrRegister& src,
2141 : FloatRegister temp, Register output,
2142 : Label* fail)
2143 : {
2144 0 : return convertConstantOrRegisterToInt(cx, src, temp, output, fail, IntConversion_Truncate);
2145 : }
2146 : void truncateTypedOrValueToInt32(TypedOrValueRegister src, FloatRegister temp, Register output,
2147 : Label* fail)
2148 : {
2149 : convertTypedOrValueToInt(src, temp, output, fail, IntConversion_Truncate);
2150 : }
2151 :
2152 : // Convenience functions for clamping values to uint8.
2153 : void clampValueToUint8(ValueOperand value, FloatRegister temp, Register output, Label* fail) {
2154 : convertValueToInt(value, temp, output, fail, IntConversion_ClampToUint8);
2155 : }
2156 0 : void clampValueToUint8(ValueOperand value, MDefinition* input,
2157 : Label* handleStringEntry, Label* handleStringRejoin,
2158 : Register stringReg, FloatRegister temp, Register output, Label* fail)
2159 : {
2160 : convertValueToInt(value, input, handleStringEntry, handleStringRejoin, nullptr,
2161 0 : stringReg, temp, output, fail, IntConversion_ClampToUint8);
2162 0 : }
2163 : void clampValueToUint8(ValueOperand value, MDefinition* input,
2164 : FloatRegister temp, Register output, Label* fail)
2165 : {
2166 : convertValueToInt(value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
2167 : IntConversion_ClampToUint8);
2168 : }
2169 : MOZ_MUST_USE bool clampValueToUint8(JSContext* cx, const Value& v, Register output,
2170 : Label* fail) {
2171 : return convertValueToInt(cx, v, output, fail, IntConversion_ClampToUint8);
2172 : }
2173 0 : MOZ_MUST_USE bool clampConstantOrRegisterToUint8(JSContext* cx,
2174 : const ConstantOrRegister& src,
2175 : FloatRegister temp, Register output,
2176 : Label* fail)
2177 : {
2178 : return convertConstantOrRegisterToInt(cx, src, temp, output, fail,
2179 0 : IntConversion_ClampToUint8);
2180 : }
2181 : void clampTypedOrValueToUint8(TypedOrValueRegister src, FloatRegister temp, Register output,
2182 : Label* fail)
2183 : {
2184 : convertTypedOrValueToInt(src, temp, output, fail, IntConversion_ClampToUint8);
2185 : }
2186 :
2187 : public:
    // Bookkeeping token for the stack realignment done around an IC's call
    // into C++: produced by icSaveLive() and consumed by icRestoreLive()
    // (see declarations below).
    class AfterICSaveLive {
        friend class MacroAssembler;
        explicit AfterICSaveLive(uint32_t initialStack)
#ifdef JS_DEBUG
          : initialStack(initialStack)
#endif
        {}

      public:
#ifdef JS_DEBUG
        // Debug-only record of the stack depth when live registers were saved.
        uint32_t initialStack;
#endif
        // Padding bytes involved in frame alignment. NOTE(review): left
        // uninitialized by the constructor — presumably always written by
        // alignFrameForICArguments() before being read; confirm in the
        // per-architecture implementations.
        uint32_t alignmentPadding;
    };
2202 :
    // Architecture-specific frame (re)alignment around IC argument pushing;
    // the two calls bracket the pushes and share the |aic| token.
    void alignFrameForICArguments(AfterICSaveLive& aic) PER_ARCH;
    void restoreFrameAlignmentForICArguments(AfterICSaveLive& aic) PER_ARCH;

    // icSaveLive() returns the token that must later be handed back to
    // icRestoreLive() for the same |liveRegs|.
    AfterICSaveLive icSaveLive(LiveRegisterSet& liveRegs);
    MOZ_MUST_USE bool icBuildOOLFakeExitFrame(void* fakeReturnAddr, AfterICSaveLive& aic);
    void icRestoreLive(LiveRegisterSet& liveRegs, AfterICSaveLive& aic);

    MOZ_MUST_USE bool icBuildOOLFakeExitFrame(void* fakeReturnAddr, AutoSaveLiveRegisters& save);

    // Align the stack pointer based on the number of arguments which are pushed
    // on the stack, such that the JitFrameLayout would be correctly aligned on
    // the JitStackAlignment.
    void alignJitStackBasedOnNArgs(Register nargs);
    void alignJitStackBasedOnNArgs(uint32_t nargs);

    inline void assertStackAlignment(uint32_t alignment, int32_t offset = 0);
2219 : };
2220 :
2221 : static inline Assembler::DoubleCondition
2222 11 : JSOpToDoubleCondition(JSOp op)
2223 : {
2224 11 : switch (op) {
2225 : case JSOP_EQ:
2226 : case JSOP_STRICTEQ:
2227 3 : return Assembler::DoubleEqual;
2228 : case JSOP_NE:
2229 : case JSOP_STRICTNE:
2230 3 : return Assembler::DoubleNotEqualOrUnordered;
2231 : case JSOP_LT:
2232 0 : return Assembler::DoubleLessThan;
2233 : case JSOP_LE:
2234 0 : return Assembler::DoubleLessThanOrEqual;
2235 : case JSOP_GT:
2236 5 : return Assembler::DoubleGreaterThan;
2237 : case JSOP_GE:
2238 0 : return Assembler::DoubleGreaterThanOrEqual;
2239 : default:
2240 0 : MOZ_CRASH("Unexpected comparison operation");
2241 : }
2242 : }
2243 :
2244 : // Note: the op may have been inverted during lowering (to put constants in a
2245 : // position where they can be immediates), so it is important to use the
2246 : // lir->jsop() instead of the mir->jsop() when it is present.
2247 : static inline Assembler::Condition
2248 202 : JSOpToCondition(JSOp op, bool isSigned)
2249 : {
2250 202 : if (isSigned) {
2251 202 : switch (op) {
2252 : case JSOP_EQ:
2253 : case JSOP_STRICTEQ:
2254 89 : return Assembler::Equal;
2255 : case JSOP_NE:
2256 : case JSOP_STRICTNE:
2257 16 : return Assembler::NotEqual;
2258 : case JSOP_LT:
2259 38 : return Assembler::LessThan;
2260 : case JSOP_LE:
2261 5 : return Assembler::LessThanOrEqual;
2262 : case JSOP_GT:
2263 17 : return Assembler::GreaterThan;
2264 : case JSOP_GE:
2265 37 : return Assembler::GreaterThanOrEqual;
2266 : default:
2267 0 : MOZ_CRASH("Unrecognized comparison operation");
2268 : }
2269 : } else {
2270 0 : switch (op) {
2271 : case JSOP_EQ:
2272 : case JSOP_STRICTEQ:
2273 0 : return Assembler::Equal;
2274 : case JSOP_NE:
2275 : case JSOP_STRICTNE:
2276 0 : return Assembler::NotEqual;
2277 : case JSOP_LT:
2278 0 : return Assembler::Below;
2279 : case JSOP_LE:
2280 0 : return Assembler::BelowOrEqual;
2281 : case JSOP_GT:
2282 0 : return Assembler::Above;
2283 : case JSOP_GE:
2284 0 : return Assembler::AboveOrEqual;
2285 : default:
2286 0 : MOZ_CRASH("Unrecognized comparison operation");
2287 : }
2288 : }
2289 : }
2290 :
2291 : static inline size_t
2292 0 : StackDecrementForCall(uint32_t alignment, size_t bytesAlreadyPushed, size_t bytesToPush)
2293 : {
2294 : return bytesToPush +
2295 0 : ComputeByteAlignment(bytesAlreadyPushed + bytesToPush, alignment);
2296 : }
2297 :
// Identity overload so ABIArgIter below can call ToMIRType() uniformly on
// element types that are already MIRType.
static inline MIRType
ToMIRType(MIRType t)
{
    return t;
}
2303 :
2304 : static inline MIRType
2305 0 : ToMIRType(ABIArgType argType)
2306 : {
2307 0 : switch (argType & ArgType_Mask) {
2308 0 : case ArgType_General: return MIRType::Int32;
2309 0 : case ArgType_Double: return MIRType::Double;
2310 0 : case ArgType_Float32: return MIRType::Float32;
2311 0 : case ArgType_Int64: return MIRType::Int64;
2312 0 : default: break;
2313 : }
2314 0 : MOZ_CRASH("unexpected argType");
2315 : }
2316 :
2317 : template <class VecT>
2318 : class ABIArgIter
2319 : {
2320 : ABIArgGenerator gen_;
2321 : const VecT& types_;
2322 : unsigned i_;
2323 :
2324 0 : void settle() { if (!done()) gen_.next(ToMIRType(types_[i_])); }
2325 :
2326 : public:
2327 0 : explicit ABIArgIter(const VecT& types) : types_(types), i_(0) { settle(); }
2328 0 : void operator++(int) { MOZ_ASSERT(!done()); i_++; settle(); }
2329 0 : bool done() const { return i_ == types_.length(); }
2330 :
2331 0 : ABIArg* operator->() { MOZ_ASSERT(!done()); return &gen_.current(); }
2332 0 : ABIArg& operator*() { MOZ_ASSERT(!done()); return gen_.current(); }
2333 :
2334 0 : unsigned index() const { MOZ_ASSERT(!done()); return i_; }
2335 0 : MIRType mirType() const { MOZ_ASSERT(!done()); return ToMIRType(types_[i_]); }
2336 0 : uint32_t stackBytesConsumedSoFar() const { return gen_.stackBytesConsumedSoFar(); }
2337 : };
2338 :
2339 : } // namespace jit
2340 : } // namespace js
2341 :
2342 : #endif /* jit_MacroAssembler_h */
|