Line data Source code
1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : * vim: set ts=8 sts=4 et sw=4 tw=99:
3 : * This Source Code Form is subject to the terms of the Mozilla Public
4 : * License, v. 2.0. If a copy of the MPL was not distributed with this
5 : * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 :
7 : #ifndef jit_shared_Assembler_shared_h
8 : #define jit_shared_Assembler_shared_h
9 :
10 : #include "mozilla/PodOperations.h"
11 :
12 : #include <limits.h>
13 :
14 : #include "jit/AtomicOp.h"
15 : #include "jit/JitAllocPolicy.h"
16 : #include "jit/Label.h"
17 : #include "jit/Registers.h"
18 : #include "jit/RegisterSets.h"
19 : #include "vm/HelperThreads.h"
20 : #include "wasm/WasmTypes.h"
21 :
22 : #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
23 : defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
24 : // Push return addresses callee-side.
25 : # define JS_USE_LINK_REGISTER
26 : #endif
27 :
28 : #if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
29 : // JS_SMALL_BRANCH means the range on a branch instruction
30 : // is smaller than the whole address space
31 : # define JS_SMALL_BRANCH
32 : #endif
33 :
34 : namespace js {
35 : namespace jit {
36 :
37 : namespace Disassembler {
38 : class HeapAccess;
39 : } // namespace Disassembler
40 :
// Size in bytes of a 128-bit SIMD vector.
static const uint32_t Simd128DataSize = 4 * sizeof(int32_t);
// The first assertion is tautological given the definition above; all three
// are kept so each supported lane layout is spelled out explicitly.
static_assert(Simd128DataSize == 4 * sizeof(int32_t), "SIMD data should be able to contain int32x4");
static_assert(Simd128DataSize == 4 * sizeof(float), "SIMD data should be able to contain float32x4");
static_assert(Simd128DataSize == 2 * sizeof(double), "SIMD data should be able to contain float64x2");
45 :
// Scale factor applied to the index register of a scaled address: the
// enumerator value is the shift amount, i.e. each index unit covers
// (1 << Scale) bytes.
enum Scale {
    TimesOne = 0,
    TimesTwo = 1,
    TimesFour = 2,
    TimesEight = 3
};

static_assert(sizeof(JS::Value) == 8,
              "required for TimesEight and 3 below to be correct");
// Scale and shift amount used when indexing an array of JS::Value.
static const Scale ValueScale = TimesEight;
static const size_t ValueShift = 3;
57 :
58 : static inline unsigned
59 0 : ScaleToShift(Scale scale)
60 : {
61 0 : return unsigned(scale);
62 : }
63 :
64 : static inline bool
65 0 : IsShiftInScaleRange(int i)
66 : {
67 0 : return i >= TimesOne && i <= TimesEight;
68 : }
69 :
70 : static inline Scale
71 0 : ShiftToScale(int i)
72 : {
73 0 : MOZ_ASSERT(IsShiftInScaleRange(i));
74 0 : return Scale(i);
75 : }
76 :
77 : static inline Scale
78 4 : ScaleFromElemWidth(int shift)
79 : {
80 4 : switch (shift) {
81 : case 1:
82 0 : return TimesOne;
83 : case 2:
84 0 : return TimesTwo;
85 : case 4:
86 0 : return TimesFour;
87 : case 8:
88 4 : return TimesEight;
89 : }
90 :
91 0 : MOZ_CRASH("Invalid scale");
92 : }
93 :
94 : // Used for 32-bit immediates which do not require relocation.
95 : struct Imm32
96 : {
97 : int32_t value;
98 :
99 176152 : explicit Imm32(int32_t value) : value(value)
100 176152 : { }
101 :
102 : static inline Imm32 ShiftOf(enum Scale s) {
103 : switch (s) {
104 : case TimesOne:
105 : return Imm32(0);
106 : case TimesTwo:
107 : return Imm32(1);
108 : case TimesFour:
109 : return Imm32(2);
110 : case TimesEight:
111 : return Imm32(3);
112 : };
113 : MOZ_CRASH("Invalid scale");
114 : }
115 :
116 : static inline Imm32 FactorOf(enum Scale s) {
117 : return Imm32(1 << ShiftOf(s).value);
118 : }
119 : };
120 :
// Pointer-sized integer to be embedded as an immediate in an instruction.
struct ImmWord
{
    uintptr_t value;

    explicit ImmWord(uintptr_t value)
      : value(value)
    { }
};
129 :
130 : // Used for 64-bit immediates which do not require relocation.
131 : struct Imm64
132 : {
133 : uint64_t value;
134 :
135 0 : explicit Imm64(int64_t value) : value(value)
136 0 : { }
137 :
138 : Imm32 low() const {
139 : return Imm32(int32_t(value));
140 : }
141 :
142 : Imm32 hi() const {
143 : return Imm32(int32_t(value >> 32));
144 : }
145 :
146 : inline Imm32 firstHalf() const;
147 : inline Imm32 secondHalf() const;
148 : };
149 :
#ifdef DEBUG
// Debug-only: report whether the current JitContext belongs to a wasm
// compilation. Used by assertions below to keep wasm code serialization-safe.
static inline bool
IsCompilingWasm()
{
    // wasm compilation pushes a JitContext with a null JSCompartment.
    return GetJitContext()->compartment == nullptr;
}
#endif
158 :
// Pointer to be embedded as an immediate in an instruction.
struct ImmPtr
{
    void* value;

    // Tag type selecting the unchecked constructor below.
    struct NoCheckToken {};

    explicit ImmPtr(void* value, NoCheckToken) : value(value)
    {
        // A special unchecked variant for contexts where we know it is safe to
        // use an immptr. This is assuming the caller knows what they're doing.
    }

    explicit ImmPtr(const void* value) : value(const_cast<void*>(value))
    {
        // To make code serialization-safe, wasm compilation should only
        // compile pointer immediates using a SymbolicAddress.
        MOZ_ASSERT(!IsCompilingWasm());
    }

    // The overloads below accept function pointers of arity 0..4 so a call
    // target can be passed directly without casting to void* at the call site.

    template <class R>
    explicit ImmPtr(R (*pf)())
      : value(JS_FUNC_TO_DATA_PTR(void*, pf))
    {
        MOZ_ASSERT(!IsCompilingWasm());
    }

    template <class R, class A1>
    explicit ImmPtr(R (*pf)(A1))
      : value(JS_FUNC_TO_DATA_PTR(void*, pf))
    {
        MOZ_ASSERT(!IsCompilingWasm());
    }

    template <class R, class A1, class A2>
    explicit ImmPtr(R (*pf)(A1, A2))
      : value(JS_FUNC_TO_DATA_PTR(void*, pf))
    {
        MOZ_ASSERT(!IsCompilingWasm());
    }

    template <class R, class A1, class A2, class A3>
    explicit ImmPtr(R (*pf)(A1, A2, A3))
      : value(JS_FUNC_TO_DATA_PTR(void*, pf))
    {
        MOZ_ASSERT(!IsCompilingWasm());
    }

    template <class R, class A1, class A2, class A3, class A4>
    explicit ImmPtr(R (*pf)(A1, A2, A3, A4))
      : value(JS_FUNC_TO_DATA_PTR(void*, pf))
    {
        MOZ_ASSERT(!IsCompilingWasm());
    }
};
214 :
// The same as ImmPtr except that the intention is to patch this
// instruction. The initial value of the immediate is 'addr' and this value is
// either clobbered or used in the patching process.
struct PatchedImmPtr {
    void* value;

    // Default: null placeholder to be filled in by patching.
    explicit PatchedImmPtr()
      : value(nullptr)
    { }

    explicit PatchedImmPtr(const void* value)
      : value(const_cast<void*>(value))
    { }
};
228 :
class AssemblerShared;
class ImmGCPtr;

// Used for immediates which require relocation.
class ImmGCPtr
{
  public:
    const gc::Cell* value;

    explicit ImmGCPtr(const gc::Cell* ptr) : value(ptr)
    {
        // Nursery pointers can't be used if the main thread might be currently
        // performing a minor GC.
        MOZ_ASSERT_IF(ptr && !ptr->isTenured(),
                      !CurrentThreadIsIonCompilingSafeForMinorGC());

        // wasm shouldn't be creating GC things
        MOZ_ASSERT(!IsCompilingWasm());
    }

  private:
    // Default construction is private: an ImmGCPtr must always be created
    // from an explicit Cell pointer so the assertions above run.
    ImmGCPtr() : value(0) {}
};
252 :
253 : // Pointer to be embedded as an immediate that is loaded/stored from by an
254 : // instruction.
255 : struct AbsoluteAddress
256 : {
257 : void* addr;
258 :
259 13805 : explicit AbsoluteAddress(const void* addr)
260 13805 : : addr(const_cast<void*>(addr))
261 : {
262 13805 : MOZ_ASSERT(!IsCompilingWasm());
263 13805 : }
264 :
265 : AbsoluteAddress offset(ptrdiff_t delta) {
266 : return AbsoluteAddress(((uint8_t*) addr) + delta);
267 : }
268 : };
269 :
// The same as AbsoluteAddress except that the intention is to patch this
// instruction. The initial value of the immediate is 'addr' and this value is
// either clobbered or used in the patching process.
struct PatchedAbsoluteAddress
{
    void* addr;

    // Default: null placeholder to be filled in by patching.
    explicit PatchedAbsoluteAddress()
      : addr(nullptr)
    { }

    explicit PatchedAbsoluteAddress(const void* addr)
      : addr(const_cast<void*>(addr))
    { }

    explicit PatchedAbsoluteAddress(uintptr_t addr)
      : addr(reinterpret_cast<void*>(addr))
    { }
};
287 :
// Specifies an address computed in the form of a register base and a constant,
// 32-bit offset.
struct Address
{
    Register base;
    int32_t offset;

    Address(Register base, int32_t offset) : base(base), offset(offset)
    { }

    // Default-constructed Address is zero-filled (register code 0, offset 0).
    Address() { mozilla::PodZero(this); }
};
300 :
// Specifies an address computed in the form of a register base, a register
// index with a scale, and a constant, 32-bit offset:
// base + (index << scale) + offset.
struct BaseIndex
{
    Register base;
    Register index;
    Scale scale;
    int32_t offset;

    BaseIndex(Register base, Register index, Scale scale, int32_t offset = 0)
      : base(base), index(index), scale(scale), offset(offset)
    { }

    // Default-constructed BaseIndex is zero-filled.
    BaseIndex() { mozilla::PodZero(this); }
};
316 :
// A BaseIndex used to access Values. Note that |offset| is *not* scaled by
// sizeof(Value). Use this *only* if you're indexing into a series of Values
// that aren't object elements or object slots (for example, values on the
// stack, values in an arguments object, &c.). If you're indexing into an
// object's elements or slots, don't use this directly! Use
// BaseObject{Element,Slot}Index instead.
struct BaseValueIndex : BaseIndex
{
    // The index register is implicitly scaled by sizeof(Value) (ValueScale).
    BaseValueIndex(Register base, Register index, int32_t offset = 0)
      : BaseIndex(base, index, ValueScale, offset)
    { }
};
329 :
// Specifies the address of an indexed Value within object elements from a
// base. The index must not already be scaled by sizeof(Value)!
struct BaseObjectElementIndex : BaseValueIndex
{
    BaseObjectElementIndex(Register base, Register index, int32_t offset = 0)
      : BaseValueIndex(base, index, offset)
    {
        // Guards that the scaled element index cannot overflow; the actual
        // check lives in NativeObject.
        NativeObject::elementsSizeMustNotOverflow();
    }
};
340 :
// Like BaseObjectElementIndex, except for object slots.
struct BaseObjectSlotIndex : BaseValueIndex
{
    BaseObjectSlotIndex(Register base, Register index)
      : BaseValueIndex(base, index)
    {
        // Guards that the scaled slot index cannot overflow; the actual
        // check lives in NativeObject.
        NativeObject::slotsSizeMustNotOverflow();
    }
};
350 :
// Kinds of relocation recorded for patchable references in generated code.
class Relocation {
  public:
    enum Kind {
        // The target is immovable, so patching is only needed if the source
        // buffer is relocated and the reference is relative.
        HARDCODED,

        // The target is the start of a JitCode buffer, which must be traced
        // during garbage collection. Relocations and patching may be needed.
        JITCODE
    };
};
363 :
// A label that can be repatched after code generation. It packs a 31-bit
// offset and a "bound" bit into 32 bits. While unbound, offset_ records the
// single use site (or the INVALID_OFFSET sentinel when unused); once bound,
// offset_ is the target offset.
class RepatchLabel
{
    // Sentinel meaning "no use recorded yet". Note offset_ is a signed
    // 31-bit field, so this constant is stored in truncated form.
    static const int32_t INVALID_OFFSET = 0xC0000000;
    int32_t offset_ : 31;
    uint32_t bound_ : 1;
  public:

    RepatchLabel() : offset_(INVALID_OFFSET), bound_(0) {}

    // Record the (single) use site of this label; only valid while unused.
    void use(uint32_t newOffset) {
        MOZ_ASSERT(offset_ == INVALID_OFFSET);
        MOZ_ASSERT(newOffset != (uint32_t)INVALID_OFFSET);
        offset_ = newOffset;
    }
    bool bound() const {
        return bound_;
    }
    // Bind the label to its final target offset; may only happen once.
    void bind(int32_t dest) {
        MOZ_ASSERT(!bound_);
        MOZ_ASSERT(dest != INVALID_OFFSET);
        offset_ = dest;
        bound_ = true;
    }
    // Return the bound target, resetting offset_ to the sentinel. Note this
    // consumes the offset: a second call would not return the target again.
    int32_t target() {
        MOZ_ASSERT(bound());
        int32_t ret = offset_;
        offset_ = INVALID_OFFSET;
        return ret;
    }
    // The recorded use site; only valid while unbound.
    int32_t offset() {
        MOZ_ASSERT(!bound());
        return offset_;
    }
    // True when the label has a recorded use but is not yet bound.
    bool used() const {
        return !bound() && offset_ != (INVALID_OFFSET);
    }

};
// An absolute label is like a Label, except it represents an absolute
// reference rather than a relative one. Thus, it cannot be patched until after
// linking.
struct AbsoluteLabel : public LabelBase
{
  public:
    AbsoluteLabel()
    { }
    AbsoluteLabel(const AbsoluteLabel& label) : LabelBase(label)
    { }
    // Previous use site in the use chain, or INVALID_OFFSET when unused.
    int32_t prev() const {
        MOZ_ASSERT(!bound());
        if (!used())
            return INVALID_OFFSET;
        return offset();
    }
    // Link a new use site onto the chain.
    void setPrev(int32_t offset) {
        use(offset);
    }
    void bind() {
        bound_ = true;

        // These labels cannot be used after being bound.
        offset_ = -1;
    }
};
428 :
429 : class CodeOffset
430 : {
431 : size_t offset_;
432 :
433 : static const size_t NOT_BOUND = size_t(-1);
434 :
435 : public:
436 95910 : explicit CodeOffset(size_t offset) : offset_(offset) {}
437 56180 : CodeOffset() : offset_(NOT_BOUND) {}
438 :
439 140449 : size_t offset() const {
440 140449 : MOZ_ASSERT(bound());
441 140449 : return offset_;
442 : }
443 :
444 261 : void bind(size_t offset) {
445 261 : MOZ_ASSERT(!bound());
446 261 : offset_ = offset;
447 261 : MOZ_ASSERT(bound());
448 261 : }
449 147783 : bool bound() const {
450 147783 : return offset_ != NOT_BOUND;
451 : }
452 :
453 0 : void offsetBy(size_t delta) {
454 0 : MOZ_ASSERT(bound());
455 0 : MOZ_ASSERT(offset_ + delta >= offset_, "no overflow");
456 0 : offset_ += delta;
457 0 : }
458 : };
459 :
460 : // A code label contains an absolute reference to a point in the code. Thus, it
461 : // cannot be patched until after linking.
462 : // When the source label is resolved into a memory address, this address is
463 : // patched into the destination address.
464 : class CodeLabel
465 : {
466 : // The destination position, where the absolute reference should get
467 : // patched into.
468 : CodeOffset patchAt_;
469 :
470 : // The source label (relative) in the code to where the destination should
471 : // get patched to.
472 : CodeOffset target_;
473 :
474 : public:
475 25 : CodeLabel()
476 25 : { }
477 : explicit CodeLabel(const CodeOffset& patchAt)
478 : : patchAt_(patchAt)
479 : { }
480 : CodeLabel(const CodeOffset& patchAt, const CodeOffset& target)
481 : : patchAt_(patchAt),
482 : target_(target)
483 : { }
484 31 : CodeOffset* patchAt() {
485 31 : return &patchAt_;
486 : }
487 31 : CodeOffset* target() {
488 31 : return &target_;
489 : }
490 0 : void offsetBy(size_t delta) {
491 0 : patchAt_.offsetBy(delta);
492 0 : target_.offsetBy(delta);
493 0 : }
494 : };
495 :
// Location of a jump or label in a generated JitCode block, relative to the
// start of the block.

class CodeOffsetJump
{
    size_t offset_;

#ifdef JS_SMALL_BRANCH
    // On targets whose branch range does not span the whole address space,
    // far jumps go through a jump table; this is the entry's index.
    size_t jumpTableIndex_;
#endif

  public:

#ifdef JS_SMALL_BRANCH
    CodeOffsetJump(size_t offset, size_t jumpTableIndex)
      : offset_(offset), jumpTableIndex_(jumpTableIndex)
    {}
    size_t jumpTableIndex() const {
        return jumpTableIndex_;
    }
#else
    explicit CodeOffsetJump(size_t offset) : offset_(offset) {}
#endif

    // Default-constructed CodeOffsetJump is zero-filled.
    CodeOffsetJump() {
        mozilla::PodZero(this);
    }

    size_t offset() const {
        return offset_;
    }
    // Defined out of line (per-architecture).
    void fixup(MacroAssembler* masm);
};
529 :
// Absolute location of a jump or a label in some generated JitCode block.
// Can also encode a CodeOffset{Jump,Label}, such that the offset is initially
// set and the absolute location later filled in after the final JitCode is
// allocated.

class CodeLocationJump
{
    uint8_t* raw_;
#ifdef DEBUG
    // Debug-only tracking of whether raw_ currently holds a relative offset
    // or an absolute address; accessors assert the expected state.
    enum State { Uninitialized, Absolute, Relative };
    State state_;
    void setUninitialized() {
        state_ = Uninitialized;
    }
    void setAbsolute() {
        state_ = Absolute;
    }
    void setRelative() {
        state_ = Relative;
    }
#else
    void setUninitialized() const {
    }
    void setAbsolute() const {
    }
    void setRelative() const {
    }
#endif

#ifdef JS_SMALL_BRANCH
    // Jump-table slot for far jumps; see CodeOffsetJump.
    uint8_t* jumpTableEntry_;
#endif

  public:
    CodeLocationJump() {
        raw_ = nullptr;
        setUninitialized();
#ifdef JS_SMALL_BRANCH
        // Poison value to catch use of an unset entry.
        jumpTableEntry_ = (uint8_t*) uintptr_t(0xdeadab1e);
#endif
    }
    CodeLocationJump(JitCode* code, CodeOffsetJump base) {
        *this = base;
        repoint(code);
    }

    // Store the relative offset; repoint() later converts it to an absolute
    // address within the allocated code.
    void operator = (CodeOffsetJump base) {
        raw_ = (uint8_t*) base.offset();
        setRelative();
#ifdef JS_SMALL_BRANCH
        jumpTableEntry_ = (uint8_t*) base.jumpTableIndex();
#endif
    }

    // Defined out of line; converts the stored relative offset into an
    // absolute pointer into |code|.
    void repoint(JitCode* code, MacroAssembler* masm = nullptr);

    uint8_t* raw() const {
        MOZ_ASSERT(state_ == Absolute);
        return raw_;
    }
    uint8_t* offset() const {
        MOZ_ASSERT(state_ == Relative);
        return raw_;
    }

#ifdef JS_SMALL_BRANCH
    uint8_t* jumpTableEntry() const {
        MOZ_ASSERT(state_ == Absolute);
        return jumpTableEntry_;
    }
#endif
};
602 :
// Absolute location of a label in generated JitCode; like CodeLocationJump
// but without the jump-table bookkeeping.
class CodeLocationLabel
{
    uint8_t* raw_;
#ifdef DEBUG
    // Debug-only tracking of whether raw_ currently holds a relative offset
    // or an absolute address; accessors assert the expected state.
    enum State { Uninitialized, Absolute, Relative };
    State state_;
    void setUninitialized() {
        state_ = Uninitialized;
    }
    void setAbsolute() {
        state_ = Absolute;
    }
    void setRelative() {
        state_ = Relative;
    }
#else
    void setUninitialized() const {
    }
    void setAbsolute() const {
    }
    void setRelative() const {
    }
#endif

  public:
    CodeLocationLabel() {
        raw_ = nullptr;
        setUninitialized();
    }
    CodeLocationLabel(JitCode* code, CodeOffset base) {
        *this = base;
        repoint(code);
    }
    explicit CodeLocationLabel(JitCode* code) {
        raw_ = code->raw();
        setAbsolute();
    }
    explicit CodeLocationLabel(uint8_t* raw) {
        raw_ = raw;
        setAbsolute();
    }

    // Store the relative offset; repoint() later converts it to an absolute
    // address within the allocated code.
    void operator = (CodeOffset base) {
        raw_ = (uint8_t*)base.offset();
        setRelative();
    }
    ptrdiff_t operator - (const CodeLocationLabel& other) {
        return raw_ - other.raw_;
    }

    // Defined out of line; converts the stored relative offset into an
    // absolute pointer into |code|.
    void repoint(JitCode* code, MacroAssembler* masm = nullptr);

#ifdef DEBUG
    bool isSet() const {
        return state_ != Uninitialized;
    }
#endif

    uint8_t* raw() const {
        MOZ_ASSERT(state_ == Absolute);
        return raw_;
    }
    uint8_t* offset() const {
        MOZ_ASSERT(state_ == Relative);
        return raw_;
    }
};
670 :
671 : } // namespace jit
672 :
673 : namespace wasm {
674 :
// Represents an instruction to be patched and the intended pointee. These
// links are accumulated in the MacroAssembler, but patching is done outside
// the MacroAssembler (in Module::staticallyLink).

struct SymbolicAccess
{
    SymbolicAccess(jit::CodeOffset patchAt, SymbolicAddress target)
      : patchAt(patchAt), target(target) {}

    // Code offset of the instruction to patch.
    jit::CodeOffset patchAt;
    // The symbolic address the patched instruction should reference.
    SymbolicAddress target;
};

typedef Vector<SymbolicAccess, 0, SystemAllocPolicy> SymbolicAccessVector;
689 :
// Describes a single wasm or asm.js memory access for the purpose of generating
// code and metadata.

class MemoryAccessDesc
{
    uint32_t offset_;
    uint32_t align_;
    Scalar::Type type_;
    // Lane count; nonzero exactly when type_ is a SIMD type (asserted below).
    unsigned numSimdElems_;
    jit::MemoryBarrierBits barrierBefore_;
    jit::MemoryBarrierBits barrierAfter_;
    // Present iff this access needs trap handling (see hasTrap()); absent for
    // plain asm.js accesses.
    mozilla::Maybe<wasm::BytecodeOffset> trapOffset_;

  public:
    explicit MemoryAccessDesc(Scalar::Type type, uint32_t align, uint32_t offset,
                              const mozilla::Maybe<BytecodeOffset>& trapOffset,
                              unsigned numSimdElems = 0,
                              jit::MemoryBarrierBits barrierBefore = jit::MembarNobits,
                              jit::MemoryBarrierBits barrierAfter = jit::MembarNobits)
      : offset_(offset),
        align_(align),
        type_(type),
        numSimdElems_(numSimdElems),
        barrierBefore_(barrierBefore),
        barrierAfter_(barrierAfter),
        trapOffset_(trapOffset)
    {
        MOZ_ASSERT(Scalar::isSimdType(type) == (numSimdElems > 0));
        MOZ_ASSERT(numSimdElems <= jit::ScalarTypeToLength(type));
        MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
        // SIMD and atomic accesses always carry a trap offset.
        MOZ_ASSERT_IF(isSimd(), hasTrap());
        MOZ_ASSERT_IF(isAtomic(), hasTrap());
    }

    uint32_t offset() const { return offset_; }
    uint32_t align() const { return align_; }
    Scalar::Type type() const { return type_; }
    // Total bytes accessed: scalar size times lane count for SIMD types.
    unsigned byteSize() const {
        return Scalar::isSimdType(type())
               ? Scalar::scalarByteSize(type()) * numSimdElems()
               : Scalar::byteSize(type());
    }
    unsigned numSimdElems() const { MOZ_ASSERT(isSimd()); return numSimdElems_; }
    jit::MemoryBarrierBits barrierBefore() const { return barrierBefore_; }
    jit::MemoryBarrierBits barrierAfter() const { return barrierAfter_; }
    bool hasTrap() const { return !!trapOffset_; }
    BytecodeOffset trapOffset() const { return *trapOffset_; }
    bool isAtomic() const { return (barrierBefore_ | barrierAfter_) != jit::MembarNobits; }
    bool isSimd() const { return Scalar::isSimdType(type_); }
    // Plain asm.js accesses are exactly those without trap handling.
    bool isPlainAsmJS() const { return !hasTrap(); }

    // Reset the constant offset to zero.
    void clearOffset() { offset_ = 0; }
};
// Summarizes a global access for a mutable (in asm.js) or immutable value (in
// asm.js or the wasm MVP) that needs to get patched later.

struct GlobalAccess
{
    GlobalAccess(jit::CodeOffset patchAt, unsigned globalDataOffset)
      : patchAt(patchAt), globalDataOffset(globalDataOffset)
    {}

    // Code offset of the instruction to patch.
    jit::CodeOffset patchAt;
    // Offset of the global's slot within the module's global data area.
    unsigned globalDataOffset;
};

typedef Vector<GlobalAccess, 0, SystemAllocPolicy> GlobalAccessVector;
758 :
// A CallFarJump records the offset of a jump that needs to be patched to a
// call at the end of the module when all calls have been emitted.

struct CallFarJump
{
    // Index of the function the jump must ultimately call.
    uint32_t funcIndex;
    // Code offset of the jump to patch.
    jit::CodeOffset jump;

    CallFarJump(uint32_t funcIndex, jit::CodeOffset jump)
      : funcIndex(funcIndex), jump(jump)
    {}

    // Shift the recorded offset, e.g. when merging assemblers.
    void offsetBy(size_t delta) {
        jump.offsetBy(delta);
    }
};

typedef Vector<CallFarJump, 0, SystemAllocPolicy> CallFarJumpVector;
777 :
// The TrapDesc struct describes a wasm trap that is about to be emitted. This
// includes the logical wasm bytecode offset to report, the kind of instruction
// causing the trap, and the stack depth right before control is transferred to
// the trap out-of-line path.

struct TrapDesc : BytecodeOffset
{
    enum Kind { Jump, MemoryAccess };
    Kind kind;
    Trap trap;
    // Stack depth at the trapping instruction, needed to unwind correctly.
    uint32_t framePushed;

    TrapDesc(BytecodeOffset offset, Trap trap, uint32_t framePushed, Kind kind = Jump)
      : BytecodeOffset(offset), kind(kind), trap(trap), framePushed(framePushed)
    {}
};
794 :
// A TrapSite captures all relevant information at the point of emitting the
// in-line trapping instruction for the purpose of generating the out-of-line
// trap code (at the end of the function).

struct TrapSite : TrapDesc
{
    // Offset of the in-line trapping instruction within the function's code.
    uint32_t codeOffset;

    TrapSite(TrapDesc trap, uint32_t codeOffset)
      : TrapDesc(trap), codeOffset(codeOffset)
    {}
};

typedef Vector<TrapSite, 0, SystemAllocPolicy> TrapSiteVector;
809 :
// A TrapFarJump records the offset of a jump that needs to be patched to a trap
// exit at the end of the module when trap exits are emitted.

struct TrapFarJump
{
    // Which trap exit the jump must target.
    Trap trap;
    // Code offset of the jump to patch.
    jit::CodeOffset jump;

    TrapFarJump(Trap trap, jit::CodeOffset jump)
      : trap(trap), jump(jump)
    {}

    // Shift the recorded offset, e.g. when merging assemblers.
    void offsetBy(size_t delta) {
        jump.offsetBy(delta);
    }
};

typedef Vector<TrapFarJump, 0, SystemAllocPolicy> TrapFarJumpVector;
828 :
829 : } // namespace wasm
830 :
831 : namespace jit {
832 :
// The base class of all Assemblers for all archs.
class AssemblerShared
{
    // Metadata accumulated during code emission; consumed when the wasm
    // module is linked/patched.
    wasm::CallSiteAndTargetVector callSites_;
    wasm::CallFarJumpVector callFarJumps_;
    wasm::TrapSiteVector trapSites_;
    wasm::TrapFarJumpVector trapFarJumps_;
    wasm::MemoryAccessVector memoryAccesses_;
    wasm::SymbolicAccessVector symbolicAccesses_;

  protected:
    Vector<CodeLabel, 0, SystemAllocPolicy> codeLabels_;

    // Sticky OOM flag: once any internal append fails, oom() reports true.
    bool enoughMemory_;
    bool embedsNurseryPointers_;

  public:
    AssemblerShared()
     : enoughMemory_(true),
       embedsNurseryPointers_(false)
    {}

    // Record an allocation result; a single failure makes oom() permanent.
    void propagateOOM(bool success) {
        enoughMemory_ &= success;
    }

    void setOOM() {
        enoughMemory_ = false;
    }

    bool oom() const {
        return !enoughMemory_;
    }

    // Whether emitted code contains nursery pointers (and so must be traced
    // / invalidated appropriately by the GC).
    bool embedsNurseryPointers() const {
        return embedsNurseryPointers_;
    }

    // Record a wasm call site at |retAddr|; extra args construct the target.
    template <typename... Args>
    void append(const wasm::CallSiteDesc& desc, CodeOffset retAddr, Args&&... args)
    {
        wasm::CallSite cs(desc, retAddr.offset());
        enoughMemory_ &= callSites_.emplaceBack(cs, mozilla::Forward<Args>(args)...);
    }
    wasm::CallSiteAndTargetVector& callSites() { return callSites_; }

    void append(wasm::CallFarJump jmp) {
        enoughMemory_ &= callFarJumps_.append(jmp);
    }
    const wasm::CallFarJumpVector& callFarJumps() const { return callFarJumps_; }

    void append(wasm::TrapSite trapSite) {
        enoughMemory_ &= trapSites_.append(trapSite);
    }
    const wasm::TrapSiteVector& trapSites() const { return trapSites_; }
    // Called after the out-of-line trap paths have been emitted.
    void clearTrapSites() { trapSites_.clear(); }

    void append(wasm::TrapFarJump jmp) {
        enoughMemory_ &= trapFarJumps_.append(jmp);
    }
    const wasm::TrapFarJumpVector& trapFarJumps() const { return trapFarJumps_; }

    void append(wasm::MemoryAccess access) { enoughMemory_ &= memoryAccesses_.append(access); }
    wasm::MemoryAccessVector&& extractMemoryAccesses() { return Move(memoryAccesses_); }

    void append(const wasm::MemoryAccessDesc& access, size_t codeOffset, size_t framePushed) {
        if (access.hasTrap()) {
            // If a memory access is trapping (wasm, SIMD.js, Atomics), create a
            // TrapSite now which will generate a trap out-of-line path at the end
            // of the function which will *then* append a MemoryAccess.
            wasm::TrapDesc trap(access.trapOffset(), wasm::Trap::OutOfBounds, framePushed,
                                wasm::TrapSite::MemoryAccess);
            append(wasm::TrapSite(trap, codeOffset));
        } else {
            // Otherwise, this is a plain asm.js access. On WASM_HUGE_MEMORY
            // platforms, asm.js uses signal handlers to remove bounds checks
            // and thus requires a MemoryAccess.
            MOZ_ASSERT(access.isPlainAsmJS());
#ifdef WASM_HUGE_MEMORY
            append(wasm::MemoryAccess(codeOffset));
#endif
        }
    }

    void append(wasm::SymbolicAccess access) { enoughMemory_ &= symbolicAccesses_.append(access); }
    size_t numSymbolicAccesses() const { return symbolicAccesses_.length(); }
    wasm::SymbolicAccess symbolicAccess(size_t i) const { return symbolicAccesses_[i]; }

    static bool canUseInSingleByteInstruction(Register reg) { return true; }

    void addCodeLabel(CodeLabel label) {
        propagateOOM(codeLabels_.append(label));
    }
    size_t numCodeLabels() const {
        return codeLabels_.length();
    }
    CodeLabel codeLabel(size_t i) {
        return codeLabels_[i];
    }

    // Merge this assembler with the other one, invalidating it, by shifting all
    // offsets by a delta. Each vector is appended and then only the newly
    // appended entries are offset, so pre-existing entries are untouched.
    bool asmMergeWith(size_t delta, const AssemblerShared& other) {
        size_t i = callSites_.length();
        enoughMemory_ &= callSites_.appendAll(other.callSites_);
        for (; i < callSites_.length(); i++)
            callSites_[i].offsetReturnAddressBy(delta);

        MOZ_ASSERT(other.trapSites_.empty(), "should have been cleared by wasmEmitTrapOutOfLineCode");

        i = callFarJumps_.length();
        enoughMemory_ &= callFarJumps_.appendAll(other.callFarJumps_);
        for (; i < callFarJumps_.length(); i++)
            callFarJumps_[i].offsetBy(delta);

        i = trapFarJumps_.length();
        enoughMemory_ &= trapFarJumps_.appendAll(other.trapFarJumps_);
        for (; i < trapFarJumps_.length(); i++)
            trapFarJumps_[i].offsetBy(delta);

        i = memoryAccesses_.length();
        enoughMemory_ &= memoryAccesses_.appendAll(other.memoryAccesses_);
        for (; i < memoryAccesses_.length(); i++)
            memoryAccesses_[i].offsetBy(delta);

        i = symbolicAccesses_.length();
        enoughMemory_ &= symbolicAccesses_.appendAll(other.symbolicAccesses_);
        for (; i < symbolicAccesses_.length(); i++)
            symbolicAccesses_[i].patchAt.offsetBy(delta);

        i = codeLabels_.length();
        enoughMemory_ &= codeLabels_.appendAll(other.codeLabels_);
        for (; i < codeLabels_.length(); i++)
            codeLabels_[i].offsetBy(delta);

        return !oom();
    }
};
971 :
972 : } // namespace jit
973 : } // namespace js
974 :
975 : #endif /* jit_shared_Assembler_shared_h */
|