Line data Source code
1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : * vim: set ts=8 sts=4 et sw=4 tw=99:
3 : *
4 : * Copyright 2015 Mozilla Foundation
5 : *
6 : * Licensed under the Apache License, Version 2.0 (the "License");
7 : * you may not use this file except in compliance with the License.
8 : * You may obtain a copy of the License at
9 : *
10 : * http://www.apache.org/licenses/LICENSE-2.0
11 : *
12 : * Unless required by applicable law or agreed to in writing, software
13 : * distributed under the License is distributed on an "AS IS" BASIS,
14 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 : * See the License for the specific language governing permissions and
16 : * limitations under the License.
17 : */
18 :
19 : #ifndef wasm_types_h
20 : #define wasm_types_h
21 :
22 : #include "mozilla/Alignment.h"
23 : #include "mozilla/EnumeratedArray.h"
24 : #include "mozilla/HashFunctions.h"
25 : #include "mozilla/Maybe.h"
26 : #include "mozilla/Move.h"
27 : #include "mozilla/RefPtr.h"
28 : #include "mozilla/Unused.h"
29 :
30 : #include "NamespaceImports.h"
31 :
32 : #include "ds/LifoAlloc.h"
33 : #include "jit/IonTypes.h"
34 : #include "js/RefCounted.h"
35 : #include "js/UniquePtr.h"
36 : #include "js/Utility.h"
37 : #include "js/Vector.h"
38 : #include "vm/MallocProvider.h"
39 : #include "wasm/WasmBinaryConstants.h"
40 :
41 : namespace js {
42 :
43 : class PropertyName;
44 : class WasmActivation;
45 : class WasmFunctionCallObject;
46 : namespace jit {
47 : struct BaselineScript;
48 : enum class RoundingMode;
49 : }
50 :
// This is a widespread header, so let's keep the core wasm impl types out of it.
52 :
53 : class WasmMemoryObject;
54 : typedef GCPtr<WasmMemoryObject*> GCPtrWasmMemoryObject;
55 : typedef Rooted<WasmMemoryObject*> RootedWasmMemoryObject;
56 : typedef Handle<WasmMemoryObject*> HandleWasmMemoryObject;
57 : typedef MutableHandle<WasmMemoryObject*> MutableHandleWasmMemoryObject;
58 :
59 : class WasmModuleObject;
60 : typedef Rooted<WasmModuleObject*> RootedWasmModuleObject;
61 : typedef Handle<WasmModuleObject*> HandleWasmModuleObject;
62 : typedef MutableHandle<WasmModuleObject*> MutableHandleWasmModuleObject;
63 :
64 : class WasmInstanceObject;
65 : typedef GCVector<WasmInstanceObject*> WasmInstanceObjectVector;
66 : typedef Rooted<WasmInstanceObject*> RootedWasmInstanceObject;
67 : typedef Handle<WasmInstanceObject*> HandleWasmInstanceObject;
68 : typedef MutableHandle<WasmInstanceObject*> MutableHandleWasmInstanceObject;
69 :
70 : class WasmTableObject;
71 : typedef Rooted<WasmTableObject*> RootedWasmTableObject;
72 : typedef Handle<WasmTableObject*> HandleWasmTableObject;
73 : typedef MutableHandle<WasmTableObject*> MutableHandleWasmTableObject;
74 :
75 : namespace wasm {
76 :
77 : using mozilla::DebugOnly;
78 : using mozilla::EnumeratedArray;
79 : using mozilla::Maybe;
80 : using mozilla::Move;
81 : using mozilla::MallocSizeOf;
82 : using mozilla::Nothing;
83 : using mozilla::PodZero;
84 : using mozilla::PodCopy;
85 : using mozilla::PodEqual;
86 : using mozilla::Some;
87 : using mozilla::Unused;
88 :
89 : typedef Vector<uint32_t, 0, SystemAllocPolicy> Uint32Vector;
90 : typedef Vector<uint8_t, 0, SystemAllocPolicy> Bytes;
91 : typedef UniquePtr<Bytes> UniqueBytes;
92 : typedef UniquePtr<const Bytes> UniqueConstBytes;
93 : typedef Vector<char, 0, SystemAllocPolicy> UTF8Bytes;
94 :
95 : typedef int8_t I8x16[16];
96 : typedef int16_t I16x8[8];
97 : typedef int32_t I32x4[4];
98 : typedef float F32x4[4];
99 :
100 : class Code;
101 : class DebugState;
102 : class GeneratedSourceMap;
103 : class GlobalSegment;
104 : class Memory;
105 : class Module;
106 : class Instance;
107 : class Table;
108 :
109 : // To call Vector::podResizeToFit, a type must specialize mozilla::IsPod
110 : // which is pretty verbose to do within js::wasm, so factor that process out
111 : // into a macro.
112 :
113 : #define WASM_DECLARE_POD_VECTOR(Type, VectorName) \
114 : } } namespace mozilla { \
115 : template <> struct IsPod<js::wasm::Type> : TrueType {}; \
116 : } namespace js { namespace wasm { \
117 : typedef Vector<Type, 0, SystemAllocPolicy> VectorName;
118 :
119 : // A wasm Module and everything it contains must support serialization and
120 : // deserialization. Some data can be simply copied as raw bytes and,
121 : // as a convention, is stored in an inline CacheablePod struct. Everything else
// should implement the below methods which are called recursively by the
123 : // containing Module.
124 :
125 : #define WASM_DECLARE_SERIALIZABLE(Type) \
126 : size_t serializedSize() const; \
127 : uint8_t* serialize(uint8_t* cursor) const; \
128 : const uint8_t* deserialize(const uint8_t* cursor); \
129 : size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
130 :
131 : #define WASM_DECLARE_SERIALIZABLE_VIRTUAL(Type) \
132 : virtual size_t serializedSize() const; \
133 : virtual uint8_t* serialize(uint8_t* cursor) const; \
134 : virtual const uint8_t* deserialize(const uint8_t* cursor); \
135 : virtual size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
136 :
137 : #define WASM_DECLARE_SERIALIZABLE_OVERRIDE(Type) \
138 : size_t serializedSize() const override; \
139 : uint8_t* serialize(uint8_t* cursor) const override; \
140 : const uint8_t* deserialize(const uint8_t* cursor) override; \
141 : size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const override;
142 :
143 : // This reusable base class factors out the logic for a resource that is shared
144 : // by multiple instances/modules but should only be counted once when computing
145 : // about:memory stats.
146 :
147 : template <class T>
148 0 : struct ShareableBase : RefCounted<T>
149 : {
150 : using SeenSet = HashSet<const T*, DefaultHasher<const T*>, SystemAllocPolicy>;
151 :
152 0 : size_t sizeOfIncludingThisIfNotSeen(MallocSizeOf mallocSizeOf, SeenSet* seen) const {
153 0 : const T* self = static_cast<const T*>(this);
154 0 : typename SeenSet::AddPtr p = seen->lookupForAdd(self);
155 0 : if (p)
156 0 : return 0;
157 0 : bool ok = seen->add(p, self);
158 : (void)ok; // oh well
159 0 : return mallocSizeOf(self) + self->sizeOfExcludingThis(mallocSizeOf);
160 : }
161 : };
162 :
163 : // ValType utilities
164 :
165 : static inline bool
166 0 : IsSimdType(ValType vt)
167 : {
168 0 : switch (vt) {
169 : case ValType::I8x16:
170 : case ValType::I16x8:
171 : case ValType::I32x4:
172 : case ValType::F32x4:
173 : case ValType::B8x16:
174 : case ValType::B16x8:
175 : case ValType::B32x4:
176 0 : return true;
177 : default:
178 0 : return false;
179 : }
180 : }
181 :
182 : static inline uint32_t
183 0 : NumSimdElements(ValType vt)
184 : {
185 0 : MOZ_ASSERT(IsSimdType(vt));
186 0 : switch (vt) {
187 : case ValType::I8x16:
188 : case ValType::B8x16:
189 0 : return 16;
190 : case ValType::I16x8:
191 : case ValType::B16x8:
192 0 : return 8;
193 : case ValType::I32x4:
194 : case ValType::F32x4:
195 : case ValType::B32x4:
196 0 : return 4;
197 : default:
198 0 : MOZ_CRASH("Unhandled SIMD type");
199 : }
200 : }
201 :
202 : static inline ValType
203 0 : SimdElementType(ValType vt)
204 : {
205 0 : MOZ_ASSERT(IsSimdType(vt));
206 0 : switch (vt) {
207 : case ValType::I8x16:
208 : case ValType::I16x8:
209 : case ValType::I32x4:
210 0 : return ValType::I32;
211 : case ValType::F32x4:
212 0 : return ValType::F32;
213 : case ValType::B8x16:
214 : case ValType::B16x8:
215 : case ValType::B32x4:
216 0 : return ValType::I32;
217 : default:
218 0 : MOZ_CRASH("Unhandled SIMD type");
219 : }
220 : }
221 :
222 : static inline ValType
223 0 : SimdBoolType(ValType vt)
224 : {
225 0 : MOZ_ASSERT(IsSimdType(vt));
226 0 : switch (vt) {
227 : case ValType::I8x16:
228 : case ValType::B8x16:
229 0 : return ValType::B8x16;
230 : case ValType::I16x8:
231 : case ValType::B16x8:
232 0 : return ValType::B16x8;
233 : case ValType::I32x4:
234 : case ValType::F32x4:
235 : case ValType::B32x4:
236 0 : return ValType::B32x4;
237 : default:
238 0 : MOZ_CRASH("Unhandled SIMD type");
239 : }
240 : }
241 :
242 : static inline bool
243 0 : IsSimdBoolType(ValType vt)
244 : {
245 0 : return vt == ValType::B8x16 || vt == ValType::B16x8 || vt == ValType::B32x4;
246 : }
247 :
// Map a wasm value type to the corresponding JIT MIR type. Every ValType has
// a MIR equivalent; falling out of the switch indicates a corrupt ValType.
static inline jit::MIRType
ToMIRType(ValType vt)
{
    switch (vt) {
      case ValType::I32: return jit::MIRType::Int32;
      case ValType::I64: return jit::MIRType::Int64;
      case ValType::F32: return jit::MIRType::Float32;
      case ValType::F64: return jit::MIRType::Double;
      case ValType::I8x16: return jit::MIRType::Int8x16;
      case ValType::I16x8: return jit::MIRType::Int16x8;
      case ValType::I32x4: return jit::MIRType::Int32x4;
      case ValType::F32x4: return jit::MIRType::Float32x4;
      case ValType::B8x16: return jit::MIRType::Bool8x16;
      case ValType::B16x8: return jit::MIRType::Bool16x8;
      case ValType::B32x4: return jit::MIRType::Bool32x4;
    }
    MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("bad type");
}
266 :
267 : // The ExprType enum represents the type of a WebAssembly expression or return
268 : // value and may either be a value type or void. Soon, expression types will be
269 : // generalized to a list of ValType and this enum will go away, replaced,
270 : // wherever it is used, by a varU32 + list of ValType.
271 :
enum class ExprType
{
    // The absence of a value (e.g. a block or function with no result).
    Void = uint8_t(TypeCode::BlockVoid),

    // Scalar value types; encodings match the binary format's TypeCode.
    I32 = uint8_t(TypeCode::I32),
    I64 = uint8_t(TypeCode::I64),
    F32 = uint8_t(TypeCode::F32),
    F64 = uint8_t(TypeCode::F64),

    // SIMD vector types.
    I8x16 = uint8_t(TypeCode::I8x16),
    I16x8 = uint8_t(TypeCode::I16x8),
    I32x4 = uint8_t(TypeCode::I32x4),
    F32x4 = uint8_t(TypeCode::F32x4),
    B8x16 = uint8_t(TypeCode::B8x16),
    B16x8 = uint8_t(TypeCode::B16x8),
    B32x4 = uint8_t(TypeCode::B32x4),

    // Sentinel: one past the last valid encoding; not a real type.
    Limit = uint8_t(TypeCode::Limit)
};
291 :
292 : static inline bool
293 0 : IsVoid(ExprType et)
294 : {
295 0 : return et == ExprType::Void;
296 : }
297 :
298 : static inline ValType
299 0 : NonVoidToValType(ExprType et)
300 : {
301 0 : MOZ_ASSERT(!IsVoid(et));
302 0 : return ValType(et);
303 : }
304 :
305 : static inline ExprType
306 0 : ToExprType(ValType vt)
307 : {
308 0 : return ExprType(vt);
309 : }
310 :
311 : static inline bool
312 : IsSimdType(ExprType et)
313 : {
314 : return IsVoid(et) ? false : IsSimdType(ValType(et));
315 : }
316 :
317 : static inline jit::MIRType
318 0 : ToMIRType(ExprType et)
319 : {
320 0 : return IsVoid(et) ? jit::MIRType::None : ToMIRType(ValType(et));
321 : }
322 :
// Return a static, human-readable name for 'type', for error messages and
// debug output.
static inline const char*
ToCString(ExprType type)
{
    switch (type) {
      case ExprType::Void: return "void";
      case ExprType::I32: return "i32";
      case ExprType::I64: return "i64";
      case ExprType::F32: return "f32";
      case ExprType::F64: return "f64";
      case ExprType::I8x16: return "i8x16";
      case ExprType::I16x8: return "i16x8";
      case ExprType::I32x4: return "i32x4";
      case ExprType::F32x4: return "f32x4";
      case ExprType::B8x16: return "b8x16";
      case ExprType::B16x8: return "b16x8";
      case ExprType::B32x4: return "b32x4";
      // Limit is a sentinel, not a real type: fall through to the crash.
      case ExprType::Limit:;
    }
    MOZ_CRASH("bad expression type");
}
343 :
// Return a static, human-readable name for 'type'. ValType is a subset of
// ExprType, so reuse the ExprType spellings.
static inline const char*
ToCString(ValType type)
{
    return ToCString(ToExprType(type));
}
349 :
350 : // The Val class represents a single WebAssembly value of a given value type,
351 : // mostly for the purpose of numeric literals and initializers. A Val does not
352 : // directly map to a JS value since there is not (currently) a precise
353 : // representation of i64 values. A Val may contain non-canonical NaNs since,
354 : // within WebAssembly, floats are not canonicalized. Canonicalization must
355 : // happen at the JS boundary.
356 :
class Val
{
    ValType type_;
    // Untagged storage; the active member is determined by type_. Boolean
    // vector types (B*) share storage with the same-shaped integer vectors.
    union U {
        uint32_t i32_;
        uint64_t i64_;
        float f32_;
        double f64_;
        I8x16 i8x16_;
        I16x8 i16x8_;
        I32x4 i32x4_;
        F32x4 f32x4_;
        U() {}
    } u;

  public:
    Val() = default;

    explicit Val(uint32_t i32) : type_(ValType::I32) { u.i32_ = i32; }
    explicit Val(uint64_t i64) : type_(ValType::I64) { u.i64_ = i64; }

    explicit Val(float f32) : type_(ValType::F32) { u.f32_ = f32; }
    explicit Val(double f64) : type_(ValType::F64) { u.f64_ = f64; }

    // SIMD constructors: the caller may pass the matching boolean vector type
    // since boolean vectors use the same lane representation.
    explicit Val(const I8x16& i8x16, ValType type = ValType::I8x16) : type_(type) {
        MOZ_ASSERT(type_ == ValType::I8x16 || type_ == ValType::B8x16);
        memcpy(u.i8x16_, i8x16, sizeof(u.i8x16_));
    }
    explicit Val(const I16x8& i16x8, ValType type = ValType::I16x8) : type_(type) {
        MOZ_ASSERT(type_ == ValType::I16x8 || type_ == ValType::B16x8);
        memcpy(u.i16x8_, i16x8, sizeof(u.i16x8_));
    }
    explicit Val(const I32x4& i32x4, ValType type = ValType::I32x4) : type_(type) {
        MOZ_ASSERT(type_ == ValType::I32x4 || type_ == ValType::B32x4);
        memcpy(u.i32x4_, i32x4, sizeof(u.i32x4_));
    }
    explicit Val(const F32x4& f32x4) : type_(ValType::F32x4) {
        memcpy(u.f32x4_, f32x4, sizeof(u.f32x4_));
    }

    ValType type() const { return type_; }
    bool isSimd() const { return IsSimdType(type()); }

    // Typed accessors; each asserts that the matching union member is active.
    uint32_t i32() const { MOZ_ASSERT(type_ == ValType::I32); return u.i32_; }
    uint64_t i64() const { MOZ_ASSERT(type_ == ValType::I64); return u.i64_; }
    const float& f32() const { MOZ_ASSERT(type_ == ValType::F32); return u.f32_; }
    const double& f64() const { MOZ_ASSERT(type_ == ValType::F64); return u.f64_; }

    const I8x16& i8x16() const {
        MOZ_ASSERT(type_ == ValType::I8x16 || type_ == ValType::B8x16);
        return u.i8x16_;
    }
    const I16x8& i16x8() const {
        MOZ_ASSERT(type_ == ValType::I16x8 || type_ == ValType::B16x8);
        return u.i16x8_;
    }
    const I32x4& i32x4() const {
        MOZ_ASSERT(type_ == ValType::I32x4 || type_ == ValType::B32x4);
        return u.i32x4_;
    }
    const F32x4& f32x4() const {
        MOZ_ASSERT(type_ == ValType::F32x4);
        return u.f32x4_;
    }

    // Copy the raw value bytes to 'dst' (defined out of line).
    void writePayload(uint8_t* dst) const;
};
424 :
425 : typedef Vector<Val, 0, SystemAllocPolicy> ValVector;
426 :
427 : // The Sig class represents a WebAssembly function signature which takes a list
428 : // of value types and returns an expression type. The engine uses two in-memory
429 : // representations of the argument Vector's memory (when elements do not fit
430 : // inline): normal malloc allocation (via SystemAllocPolicy) and allocation in
431 : // a LifoAlloc (via LifoAllocPolicy). The former Sig objects can have any
432 : // lifetime since they own the memory. The latter Sig objects must not outlive
433 : // the associated LifoAlloc mark/release interval (which is currently the
434 : // duration of module validation+compilation). Thus, long-lived objects like
435 : // WasmModule must use malloced allocation.
436 :
class Sig
{
    ValTypeVector args_;   // argument value types, in order
    ExprType ret_;         // return type; ExprType::Void if no return value

  public:
    Sig() : args_(), ret_(ExprType::Void) {}
    Sig(ValTypeVector&& args, ExprType ret) : args_(Move(args)), ret_(ret) {}

    // Deep-copy 'rhs' into this (empty) Sig; returns false on OOM.
    MOZ_MUST_USE bool clone(const Sig& rhs) {
        ret_ = rhs.ret_;
        MOZ_ASSERT(args_.empty());
        return args_.appendAll(rhs.args_);
    }

    ValType arg(unsigned i) const { return args_[i]; }
    const ValTypeVector& args() const { return args_; }
    const ExprType& ret() const { return ret_; }

    // Structural hash and equality, used to intern signatures (see
    // SigHashPolicy below).
    HashNumber hash() const {
        return AddContainerToHash(args_, HashNumber(ret_));
    }
    bool operator==(const Sig& rhs) const {
        return ret() == rhs.ret() && EqualContainers(args(), rhs.args());
    }
    bool operator!=(const Sig& rhs) const {
        return !(*this == rhs);
    }

    WASM_DECLARE_SERIALIZABLE(Sig)
};
468 :
// Hash policy letting a set/map keyed on Sig* be probed with a Sig reference,
// comparing signatures structurally rather than by pointer.
struct SigHashPolicy
{
    typedef const Sig& Lookup;
    static HashNumber hash(Lookup sig) { return sig.hash(); }
    static bool match(const Sig* lhs, Lookup rhs) { return *lhs == rhs; }
};
475 :
476 : // An InitExpr describes a deferred initializer expression, used to initialize
477 : // a global or a table element offset. Such expressions are created during
478 : // decoding and actually executed on module instantiation.
479 :
class InitExpr
{
  public:
    enum class Kind {
        Constant,   // a literal value known at decode time
        GetGlobal   // the value of a global, read at instantiation time
    };

  private:
    Kind kind_;
    // Payload discriminated by kind_.
    union U {
        Val val_;
        struct {
            uint32_t index_;
            ValType type_;
        } global;
        U() {}
    } u;

  public:
    InitExpr() = default;

    explicit InitExpr(Val val) : kind_(Kind::Constant) {
        u.val_ = val;
    }

    explicit InitExpr(uint32_t globalIndex, ValType type) : kind_(Kind::GetGlobal) {
        u.global.index_ = globalIndex;
        u.global.type_ = type;
    }

    Kind kind() const { return kind_; }

    bool isVal() const { return kind() == Kind::Constant; }
    Val val() const { MOZ_ASSERT(isVal()); return u.val_; }

    uint32_t globalIndex() const { MOZ_ASSERT(kind() == Kind::GetGlobal); return u.global.index_; }

    // The value type this initializer evaluates to, whatever its kind.
    ValType type() const {
        switch (kind()) {
          case Kind::Constant: return u.val_.type();
          case Kind::GetGlobal: return u.global.type_;
        }
        MOZ_CRASH("unexpected initExpr type");
    }
};
526 :
527 : // CacheableChars is used to cacheably store UniqueChars.
528 :
struct CacheableChars : UniqueChars
{
    CacheableChars() = default;
    // Takes ownership of 'ptr'.
    explicit CacheableChars(char* ptr) : UniqueChars(ptr) {}
    // Implicit so UniqueChars results can be stored without an explicit cast.
    MOZ_IMPLICIT CacheableChars(UniqueChars&& rhs) : UniqueChars(Move(rhs)) {}
    WASM_DECLARE_SERIALIZABLE(CacheableChars)
};
536 :
537 : typedef Vector<CacheableChars, 0, SystemAllocPolicy> CacheableCharsVector;
538 :
539 : // Import describes a single wasm import. An ImportVector describes all
540 : // of a single module's imports.
541 : //
542 : // ImportVector is built incrementally by ModuleGenerator and then stored
543 : // immutably by Module.
544 :
struct Import
{
    CacheableChars module;   // name of the module being imported from
    CacheableChars field;    // name of the field within that module
    DefinitionKind kind;     // what kind of definition is being imported

    Import() = default;
    Import(UniqueChars&& module, UniqueChars&& field, DefinitionKind kind)
      : module(Move(module)), field(Move(field)), kind(kind)
    {}

    WASM_DECLARE_SERIALIZABLE(Import)
};
558 :
559 : typedef Vector<Import, 0, SystemAllocPolicy> ImportVector;
560 :
561 : // Export describes the export of a definition in a Module to a field in the
562 : // export object. For functions, Export stores an index into the
563 : // FuncExportVector in Metadata. For memory and table exports, there is
564 : // at most one (default) memory/table so no index is needed. Note: a single
565 : // definition can be exported by multiple Exports in the ExportVector.
566 : //
567 : // ExportVector is built incrementally by ModuleGenerator and then stored
568 : // immutably by Module.
569 :
class Export
{
    CacheableChars fieldName_;
    // Raw-copyable portion, per the CacheablePod serialization convention.
    struct CacheablePod {
        DefinitionKind kind_;
        uint32_t index_;
    } pod;

  public:
    Export() = default;
    explicit Export(UniqueChars fieldName, uint32_t index, DefinitionKind kind);
    explicit Export(UniqueChars fieldName, DefinitionKind kind);

    const char* fieldName() const { return fieldName_.get(); }

    DefinitionKind kind() const { return pod.kind_; }
    // Index accessors, defined out of line; per the class comment above, the
    // index is only meaningful for the matching definition kind.
    uint32_t funcIndex() const;
    uint32_t globalIndex() const;

    WASM_DECLARE_SERIALIZABLE(Export)
};
591 :
592 : typedef Vector<Export, 0, SystemAllocPolicy> ExportVector;
593 :
594 : // A GlobalDesc describes a single global variable. Currently, asm.js and wasm
595 : // exposes mutable and immutable private globals, but can't import nor export
596 : // mutable globals.
597 :
enum class GlobalKind
{
    Import,     // value supplied by an imported global
    Constant,   // immutable with a literal initializer; needs no storage
    Variable    // mutable, or initialized from a non-literal expression
};
604 :
class GlobalDesc
{
    // Payload discriminated by kind_ (see accessors below). Import and
    // Variable share the 'var' arm; Constant uses 'cst_'.
    union V {
        struct {
            union U {
                InitExpr initial_;   // Variable: the initializer expression
                struct {
                    ValType type_;
                    uint32_t index_;
                } import;            // Import: type and import index
                U() {}
            } val;
            unsigned offset_;        // offset assigned later via setOffset()
            bool isMutable_;
        } var;
        Val cst_;                    // Constant: the literal value
        V() {}
    } u;
    GlobalKind kind_;

  public:
    GlobalDesc() = default;

    // A global with an initializer is a Constant only when it is immutable
    // and the initializer is a literal; otherwise it needs storage (Variable).
    explicit GlobalDesc(InitExpr initial, bool isMutable)
      : kind_((isMutable || !initial.isVal()) ? GlobalKind::Variable : GlobalKind::Constant)
    {
        if (isVariable()) {
            u.var.val.initial_ = initial;
            u.var.isMutable_ = isMutable;
            u.var.offset_ = UINT32_MAX;   // sentinel: offset not yet assigned
        } else {
            u.cst_ = initial.val();
        }
    }

    explicit GlobalDesc(ValType type, bool isMutable, uint32_t importIndex)
      : kind_(GlobalKind::Import)
    {
        u.var.val.import.type_ = type;
        u.var.val.import.index_ = importIndex;
        u.var.isMutable_ = isMutable;
        u.var.offset_ = UINT32_MAX;   // sentinel: offset not yet assigned
    }

    // Set the global's offset; may be called exactly once, and only for
    // non-constant globals (constants have no storage).
    void setOffset(unsigned offset) {
        MOZ_ASSERT(!isConstant());
        MOZ_ASSERT(u.var.offset_ == UINT32_MAX);
        u.var.offset_ = offset;
    }
    unsigned offset() const {
        MOZ_ASSERT(!isConstant());
        MOZ_ASSERT(u.var.offset_ != UINT32_MAX);
        return u.var.offset_;
    }

    GlobalKind kind() const { return kind_; }
    bool isVariable() const { return kind_ == GlobalKind::Variable; }
    bool isConstant() const { return kind_ == GlobalKind::Constant; }
    bool isImport() const { return kind_ == GlobalKind::Import; }

    bool isMutable() const { return !isConstant() && u.var.isMutable_; }
    Val constantValue() const { MOZ_ASSERT(isConstant()); return u.cst_; }
    const InitExpr& initExpr() const { MOZ_ASSERT(isVariable()); return u.var.val.initial_; }
    uint32_t importIndex() const { MOZ_ASSERT(isImport()); return u.var.val.import.index_; }

    // The value type of the global, whichever kind it is.
    ValType type() const {
        switch (kind_) {
          case GlobalKind::Import: return u.var.val.import.type_;
          case GlobalKind::Variable: return u.var.val.initial_.type();
          case GlobalKind::Constant: return u.cst_.type();
        }
        MOZ_CRASH("unexpected global kind");
    }
};
679 :
680 : typedef Vector<GlobalDesc, 0, SystemAllocPolicy> GlobalDescVector;
681 :
682 : // ElemSegment represents an element segment in the module where each element
683 : // describes both its function index and its code range.
684 :
struct ElemSegment
{
    uint32_t tableIndex;                // which table this segment initializes
    InitExpr offset;                    // starting element offset in the table
    Uint32Vector elemFuncIndices;       // function index of each element
    Uint32Vector elemCodeRangeIndices;  // code range index of each element

    ElemSegment() = default;
    ElemSegment(uint32_t tableIndex, InitExpr offset, Uint32Vector&& elemFuncIndices)
      : tableIndex(tableIndex), offset(offset), elemFuncIndices(Move(elemFuncIndices))
    {}

    WASM_DECLARE_SERIALIZABLE(ElemSegment)
};
699 :
700 : typedef Vector<ElemSegment, 0, SystemAllocPolicy> ElemSegmentVector;
701 :
702 : // DataSegment describes the offset of a data segment in the bytecode that is
703 : // to be copied at a given offset into linear memory upon instantiation.
704 :
struct DataSegment
{
    InitExpr offset;           // destination offset in linear memory
    uint32_t bytecodeOffset;   // start of the payload within the bytecode
    uint32_t length;           // number of bytes to copy
};
711 :
712 : typedef Vector<DataSegment, 0, SystemAllocPolicy> DataSegmentVector;
713 :
714 : // SigIdDesc describes a signature id that can be used by call_indirect and
715 : // table-entry prologues to structurally compare whether the caller and callee's
716 : // signatures *structurally* match. To handle the general case, a Sig is
717 : // allocated and stored in a process-wide hash table, so that pointer equality
718 : // implies structural equality. As an optimization for the 99% case where the
719 : // Sig has a small number of parameters, the Sig is bit-packed into a uint32
720 : // immediate value so that integer equality implies structural equality. Both
721 : // cases can be handled with a single comparison by always setting the LSB for
722 : // the immediates (the LSB is necessarily 0 for allocated Sig pointers due to
723 : // alignment).
724 :
class SigIdDesc
{
  public:
    enum class Kind { None, Immediate, Global };
    // Set on immediate ids to distinguish them from (aligned) Sig pointers;
    // see the class comment above.
    static const uintptr_t ImmediateBit = 0x1;

  private:
    Kind kind_;
    size_t bits_;   // immediate value or global data offset, per kind_

    SigIdDesc(Kind kind, size_t bits) : kind_(kind), bits_(bits) {}

  public:
    Kind kind() const { return kind_; }
    // Whether 'sig' requires a global (allocated) id rather than a bit-packed
    // immediate; defined out of line.
    static bool isGlobal(const Sig& sig);

    SigIdDesc() : kind_(Kind::None), bits_(0) {}
    static SigIdDesc global(const Sig& sig, uint32_t globalDataOffset);
    static SigIdDesc immediate(const Sig& sig);

    bool isGlobal() const { return kind_ == Kind::Global; }

    size_t immediate() const { MOZ_ASSERT(kind_ == Kind::Immediate); return bits_; }
    uint32_t globalDataOffset() const { MOZ_ASSERT(kind_ == Kind::Global); return bits_; }
};
750 :
751 : // SigWithId pairs a Sig with SigIdDesc, describing either how to compile code
752 : // that compares this signature's id or, at instantiation what signature ids to
753 : // allocate in the global hash and where to put them.
754 :
struct SigWithId : Sig
{
    // How this signature's runtime id is computed/located (see SigIdDesc).
    SigIdDesc id;

    SigWithId() = default;
    explicit SigWithId(Sig&& sig, SigIdDesc id) : Sig(Move(sig)), id(id) {}
    // Replace only the Sig part, leaving 'id' untouched.
    void operator=(Sig&& rhs) { Sig::operator=(Move(rhs)); }

    WASM_DECLARE_SERIALIZABLE(SigWithId)
};
765 :
766 : typedef Vector<SigWithId, 0, SystemAllocPolicy> SigWithIdVector;
767 : typedef Vector<const SigWithId*, 0, SystemAllocPolicy> SigWithIdPtrVector;
768 :
769 : // The (,Callable,Func)Offsets classes are used to record the offsets of
770 : // different key points in a CodeRange during compilation.
771 :
struct Offsets
{
    // These define a [begin, end) contiguous range of instructions compiled
    // into a CodeRange.
    uint32_t begin;
    uint32_t end;

    explicit Offsets(uint32_t begin = 0, uint32_t end = 0)
      : begin(begin), end(end)
    {}

    // Translate both bounds by a fixed displacement.
    void offsetBy(uint32_t offset) {
        begin += offset;
        end += offset;
    }
};
788 :
789 : struct CallableOffsets : Offsets
790 : {
791 0 : MOZ_IMPLICIT CallableOffsets(uint32_t ret = 0)
792 0 : : Offsets(), ret(ret)
793 0 : {}
794 :
795 : // The offset of the return instruction precedes 'end' by a variable number
796 : // of instructions due to out-of-line codegen.
797 : uint32_t ret;
798 :
799 0 : void offsetBy(uint32_t offset) {
800 0 : Offsets::offsetBy(offset);
801 0 : ret += offset;
802 0 : }
803 : };
804 :
805 : struct FuncOffsets : CallableOffsets
806 : {
807 0 : MOZ_IMPLICIT FuncOffsets()
808 0 : : CallableOffsets(),
809 0 : normalEntry(0)
810 0 : {}
811 :
812 : // Function CodeRanges have a table entry which takes an extra signature
813 : // argument which is checked against the callee's signature before falling
814 : // through to the normal prologue. The table entry is thus at the beginning
815 : // of the CodeRange and the normal entry is at some offset after the table
816 : // entry.
817 : uint32_t normalEntry;
818 :
819 0 : void offsetBy(uint32_t offset) {
820 0 : CallableOffsets::offsetBy(offset);
821 0 : normalEntry += offset;
822 0 : }
823 : };
824 :
825 : // A CodeRange describes a single contiguous range of code within a wasm
826 : // module's code segment. A CodeRange describes what the code does and, for
827 : // function bodies, the name and source coordinates of the function.
828 :
class CodeRange
{
  public:
    enum Kind {
        Function,          // function definition
        Entry,             // calls into wasm from C++
        ImportJitExit,     // fast-path calling from wasm into JIT code
        ImportInterpExit,  // slow-path calling from wasm into C++ interp
        BuiltinThunk,      // fast-path calling from wasm into a C++ native
        TrapExit,          // calls C++ to report and jumps to throw stub
        DebugTrap,         // calls C++ to handle debug event
        FarJumpIsland,     // inserted to connect otherwise out-of-range insns
        Inline,            // stub that is jumped-to within prologue/epilogue
        Throw,             // special stack-unwinding stub
        Interrupt          // stub executes asynchronously to interrupt wasm
    };

  private:
    // All fields are treated as cacheable POD:
    uint32_t begin_;                  // start of the range (code offset)
    uint32_t ret_;                    // return instruction offset (callable kinds)
    uint32_t end_;                    // one past the last instruction
    uint32_t funcIndex_;              // Function kind only
    uint32_t funcLineOrBytecode_;     // Function kind only
    uint8_t funcBeginToNormalEntry_;  // Function kind: normal-entry delta
    Kind kind_ : 8;

  public:
    CodeRange() = default;
    CodeRange(Kind kind, Offsets offsets);
    CodeRange(Kind kind, CallableOffsets offsets);
    CodeRange(uint32_t funcIndex, uint32_t lineOrBytecode, FuncOffsets offsets);

    // All CodeRanges have a begin and end.

    uint32_t begin() const {
        return begin_;
    }
    uint32_t end() const {
        return end_;
    }

    // Other fields are only available for certain CodeRange::Kinds.

    Kind kind() const {
        return kind_;
    }

    bool isFunction() const {
        return kind() == Function;
    }
    bool isImportExit() const {
        return kind() == ImportJitExit || kind() == ImportInterpExit || kind() == BuiltinThunk;
    }
    bool isTrapExit() const {
        return kind() == TrapExit;
    }
    bool isInline() const {
        return kind() == Inline;
    }
    bool isThunk() const {
        return kind() == FarJumpIsland;
    }

    // Every CodeRange except entry and inline stubs are callable and have a
    // return statement. Asynchronous frame iteration needs to know the offset
    // of the return instruction to calculate the frame pointer.

    uint32_t ret() const {
        MOZ_ASSERT(isFunction() || isImportExit() || isTrapExit());
        return ret_;
    }

    // Function CodeRanges have two entry points: one for normal calls (with a
    // known signature) and one for table calls (which involves dynamic
    // signature checking).

    uint32_t funcTableEntry() const {
        MOZ_ASSERT(isFunction());
        return begin_;
    }
    uint32_t funcNormalEntry() const {
        MOZ_ASSERT(isFunction());
        return begin_ + funcBeginToNormalEntry_;
    }
    uint32_t funcIndex() const {
        MOZ_ASSERT(isFunction());
        return funcIndex_;
    }
    uint32_t funcLineOrBytecode() const {
        MOZ_ASSERT(isFunction());
        return funcLineOrBytecode_;
    }

    // A sorted array of CodeRanges can be looked up via BinarySearch and
    // OffsetInCode.

    struct OffsetInCode {
        size_t offset;
        explicit OffsetInCode(size_t offset) : offset(offset) {}
        // "Equal" means the offset falls within the range's [begin, end).
        bool operator==(const CodeRange& rhs) const {
            return offset >= rhs.begin() && offset < rhs.end();
        }
        bool operator<(const CodeRange& rhs) const {
            return offset < rhs.begin();
        }
    };
};
937 :
938 : WASM_DECLARE_POD_VECTOR(CodeRange, CodeRangeVector)
939 :
940 : extern const CodeRange*
941 : LookupInSorted(const CodeRangeVector& codeRanges, CodeRange::OffsetInCode target);
942 :
943 : // A wasm::Trap represents a wasm-defined trap that can occur during execution
944 : // which triggers a WebAssembly.RuntimeError. Generated code may jump to a Trap
945 : // symbolically, passing the bytecode offset to report as the trap offset. The
946 : // generated jump will be bound to a tiny stub which fills the offset and
947 : // then jumps to a per-Trap shared stub at the end of the module.
948 :
enum class Trap
{
    // The Unreachable opcode has been executed.
    Unreachable,
    // An integer arithmetic operation led to an overflow.
    IntegerOverflow,
    // Trying to coerce NaN to an integer.
    InvalidConversionToInteger,
    // Integer division by zero.
    IntegerDivideByZero,
    // Out of bounds on wasm memory accesses and asm.js SIMD/atomic accesses.
    OutOfBounds,
    // call_indirect to null.
    IndirectCallToNull,
    // call_indirect signature mismatch.
    IndirectCallBadSig,

    // (asm.js only) SIMD float to int conversion failed because the input
    // wasn't in bounds.
    ImpreciseSimdConversion,

    // The internal stack space was exhausted. For compatibility, this throws
    // the same over-recursed error as JS.
    StackOverflow,

    // Sentinel: the number of Trap kinds; not itself a real trap. Trap
    // values round-trip through uint32_t (see CallSiteAndTarget).
    Limit
};
976 :
977 : // A wrapper around the bytecode offset of a wasm instruction within a whole
978 : // module, used for trap offsets or call offsets. These offsets should refer to
979 : // the first byte of the instruction that triggered the trap / did the call and
980 : // should ultimately derive from OpIter::bytecodeOffset.
981 :
struct BytecodeOffset
{
    // Sentinel meaning "no offset recorded". Spelled UINT32_MAX rather than
    // -1 to avoid the implicit signed-to-unsigned conversion (same value).
    static const uint32_t INVALID = UINT32_MAX;
    uint32_t bytecodeOffset;

    // Default-constructed offsets are invalid until explicitly assigned.
    BytecodeOffset() : bytecodeOffset(INVALID) {}
    explicit BytecodeOffset(uint32_t bytecodeOffset) : bytecodeOffset(bytecodeOffset) {}

    bool isValid() const { return bytecodeOffset != INVALID; }
};
992 :
993 : // While the frame-pointer chain allows the stack to be unwound without
994 : // metadata, Error.stack still needs to know the line/column of every call in
995 : // the chain. A CallSiteDesc describes a single callsite to which CallSite adds
996 : // the metadata necessary to walk up to the next frame. Lastly CallSiteAndTarget
997 : // adds the function index of the callee.
998 :
class CallSiteDesc
{
    // Packed into a single 32-bit word to keep CallSite small; kind_ has
    // 3 bits, so Kind may have at most 8 enumerators (the constructors
    // assert that the values survive truncation into the bitfields).
    uint32_t lineOrBytecode_ : 29;
    uint32_t kind_ : 3;
  public:
    enum Kind {
        Func,       // pc-relative call to a specific function
        Dynamic,    // dynamic callee called via register
        Symbolic,   // call to a single symbolic callee
        TrapExit,   // call to a trap exit
        EnterFrame, // call to an enter-frame handler
        LeaveFrame, // call to a leave-frame handler
        Breakpoint  // call to instruction breakpoint
    };
    CallSiteDesc() {}
    explicit CallSiteDesc(Kind kind)
      : lineOrBytecode_(0), kind_(kind)
    {
        // Check that 'kind' fits in the 3-bit field.
        MOZ_ASSERT(kind == Kind(kind_));
    }
    CallSiteDesc(uint32_t lineOrBytecode, Kind kind)
      : lineOrBytecode_(lineOrBytecode), kind_(kind)
    {
        MOZ_ASSERT(kind == Kind(kind_));
        // Check that the line/bytecode value fits in the 29-bit field.
        MOZ_ASSERT(lineOrBytecode == lineOrBytecode_);
    }
    uint32_t lineOrBytecode() const { return lineOrBytecode_; }
    Kind kind() const { return Kind(kind_); }
};
1028 :
class CallSite : public CallSiteDesc
{
    // Code offset of the call's return address (the instruction following
    // the call).
    uint32_t returnAddressOffset_;

  public:
    CallSite() {}

    CallSite(CallSiteDesc desc, uint32_t returnAddressOffset)
      : CallSiteDesc(desc),
        returnAddressOffset_(returnAddressOffset)
    { }

    void setReturnAddressOffset(uint32_t r) { returnAddressOffset_ = r; }
    // Shift the recorded offset by a signed delta, e.g. when the containing
    // code is relocated within a larger buffer.
    void offsetReturnAddressBy(int32_t o) { returnAddressOffset_ += o; }
    uint32_t returnAddressOffset() const { return returnAddressOffset_; }
};
1045 :
1046 : WASM_DECLARE_POD_VECTOR(CallSite, CallSiteVector)
1047 :
1048 : class CallSiteAndTarget : public CallSite
1049 : {
1050 : uint32_t index_;
1051 :
1052 : public:
1053 0 : explicit CallSiteAndTarget(CallSite cs)
1054 0 : : CallSite(cs)
1055 : {
1056 0 : MOZ_ASSERT(cs.kind() != Func);
1057 0 : }
1058 0 : CallSiteAndTarget(CallSite cs, uint32_t funcIndex)
1059 0 : : CallSite(cs), index_(funcIndex)
1060 : {
1061 0 : MOZ_ASSERT(cs.kind() == Func);
1062 0 : }
1063 0 : CallSiteAndTarget(CallSite cs, Trap trap)
1064 0 : : CallSite(cs),
1065 0 : index_(uint32_t(trap))
1066 : {
1067 0 : MOZ_ASSERT(cs.kind() == TrapExit);
1068 0 : }
1069 :
1070 0 : uint32_t funcIndex() const { MOZ_ASSERT(kind() == Func); return index_; }
1071 0 : Trap trap() const { MOZ_ASSERT(kind() == TrapExit); return Trap(index_); }
1072 : };
1073 :
1074 : typedef Vector<CallSiteAndTarget, 0, SystemAllocPolicy> CallSiteAndTargetVector;
1075 :
1076 : // A wasm::SymbolicAddress represents a pointer to a well-known function that is
1077 : // embedded in wasm code. Since wasm code is serialized and later deserialized
1078 : // into a different address space, symbolic addresses must be used for *all*
1079 : // pointers into the address space. The MacroAssembler records a list of all
1080 : // SymbolicAddresses and the offsets of their use in the code for later patching
1081 : // during static linking.
1082 :
enum class SymbolicAddress
{
    // Double -> int32 conversion helper.
    ToInt32,
#if defined(JS_CODEGEN_ARM)
    // ARM-only EABI integer division helpers and atomic-operation fallbacks.
    aeabi_idivmod,
    aeabi_uidivmod,
    AtomicCmpXchg,
    AtomicXchg,
    AtomicFetchAdd,
    AtomicFetchSub,
    AtomicFetchAnd,
    AtomicFetchOr,
    AtomicFetchXor,
#endif
    // Floating-point math runtime functions (the rounding ones are
    // recognized by IsRoundingFunction below).
    ModD,
    SinD,
    CosD,
    TanD,
    ASinD,
    ACosD,
    ATanD,
    CeilD,
    CeilF,
    FloorD,
    FloorF,
    TruncD,
    TruncF,
    NearbyIntD,
    NearbyIntF,
    ExpD,
    LogD,
    PowD,
    ATan2D,
    // Runtime hooks for interrupts, debugging and trap/error reporting.
    HandleExecutionInterrupt,
    HandleDebugTrap,
    HandleThrow,
    ReportTrap,
    ReportOutOfBounds,
    ReportUnalignedAccess,
    // Import-call helpers, one per return type.
    CallImport_Void,
    CallImport_I32,
    CallImport_I64,
    CallImport_F64,
    CoerceInPlace_ToInt32,
    CoerceInPlace_ToNumber,
    // 64-bit integer arithmetic and int<->float conversion helpers.
    DivI64,
    UDivI64,
    ModI64,
    UModI64,
    TruncateDoubleToInt64,
    TruncateDoubleToUint64,
    Uint64ToFloat32,
    Uint64ToDouble,
    Int64ToFloat32,
    Int64ToDouble,
    // Memory instructions implemented as runtime calls.
    GrowMemory,
    CurrentMemory,
    // Sentinel: the number of SymbolicAddress values.
    Limit
};
1142 :
1143 : bool
1144 : IsRoundingFunction(SymbolicAddress callee, jit::RoundingMode* mode);
1145 :
1146 : // Assumptions captures ambient state that must be the same when compiling and
1147 : // deserializing a module for the compiled code to be valid. If it's not, then
1148 : // the module must be recompiled from scratch.
1149 :
1150 0 : struct Assumptions
1151 : {
1152 : uint32_t cpuId;
1153 : JS::BuildIdCharVector buildId;
1154 :
1155 : explicit Assumptions(JS::BuildIdCharVector&& buildId);
1156 :
1157 : // If Assumptions is constructed without arguments, initBuildIdFromContext()
1158 : // must be called to complete initialization.
1159 : Assumptions();
1160 : bool initBuildIdFromContext(JSContext* cx);
1161 :
1162 : bool clone(const Assumptions& other);
1163 :
1164 : bool operator==(const Assumptions& rhs) const;
1165 : bool operator!=(const Assumptions& rhs) const { return !(*this == rhs); }
1166 :
1167 : size_t serializedSize() const;
1168 : uint8_t* serialize(uint8_t* cursor) const;
1169 : const uint8_t* deserialize(const uint8_t* cursor, size_t remain);
1170 : size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
1171 : };
1172 :
1173 : // A Module can either be asm.js or wasm.
1174 :
enum ModuleKind
{
    Wasm,  // a WebAssembly module
    AsmJS  // an asm.js module
};
1180 :
1181 : // Code can be compiled either with the Baseline compiler or the Ion compiler,
1182 : // and tier-variant data are tagged with the Tier value.
1183 : //
1184 : // A tier value is used to request tier-variant aspects of code, metadata, or
1185 : // linkdata. The tiers are normally explicit (Baseline and Ion); implicit tiers
1186 : // can be obtained through accessors on Code objects (eg, anyTier).
1187 :
enum class Tier
{
    // The two concrete compilation tiers:
    Baseline,
    Ion,

    Debug, // An alias for Baseline in calls to tier-variant accessors

    TBD, // A placeholder while tiering is being implemented
};
1197 :
1198 : // Iterator over tiers present in a tiered data structure.
1199 :
1200 : class Tiers
1201 : {
1202 : Tier t_[2];
1203 : uint32_t n_;
1204 :
1205 : public:
1206 : explicit Tiers() {
1207 : n_ = 0;
1208 : }
1209 0 : explicit Tiers(Tier t) {
1210 0 : MOZ_ASSERT(t == Tier::Baseline || t == Tier::Ion);
1211 0 : t_[0] = t;
1212 0 : n_ = 1;
1213 0 : }
1214 : explicit Tiers(Tier t, Tier u) {
1215 : MOZ_ASSERT(t == Tier::Baseline || t == Tier::Ion);
1216 : MOZ_ASSERT(u == Tier::Baseline || u == Tier::Ion);
1217 : MOZ_ASSERT(t != u);
1218 : t_[0] = t;
1219 : t_[1] = u;
1220 : n_ = 2;
1221 : }
1222 :
1223 0 : Tier* begin() {
1224 0 : return t_;
1225 : }
1226 0 : Tier* end() {
1227 0 : return t_ + n_;
1228 : }
1229 : };
1230 :
1231 : // Represents the resizable limits of memories and tables.
1232 :
1233 0 : struct Limits
1234 : {
1235 : uint32_t initial;
1236 : Maybe<uint32_t> maximum;
1237 : };
1238 :
1239 : // TableDesc describes a table as well as the offset of the table's base pointer
1240 : // in global memory. Currently, wasm only has "any function" and asm.js only
1241 : // "typed function".
1242 :
enum class TableKind
{
    AnyFunction,   // wasm: "any function" table
    TypedFunction  // asm.js: "typed function" table
};
1248 :
struct TableDesc
{
    TableKind kind;
    // Whether elements are ExternalTableElem (code + tls pairs) rather than
    // bare code pointers; see ExternalTableElem below.
    bool external;
    // Offset of this table's TableTls within the instance's global data;
    // UINT32_MAX until assigned.
    uint32_t globalDataOffset;
    Limits limits;

    TableDesc() = default;
    TableDesc(TableKind kind, const Limits& limits)
      : kind(kind),
        external(false),
        globalDataOffset(UINT32_MAX),
        limits(limits)
    {}
};
1264 :
1265 : typedef Vector<TableDesc, 0, SystemAllocPolicy> TableDescVector;
1266 :
1267 : // ExportArg holds the unboxed operands to the wasm entry trampoline which can
1268 : // be called through an ExportFuncPtr.
1269 :
struct ExportArg
{
    // 128 bits of untyped storage for one unboxed operand; the entry
    // trampoline interprets lo/hi according to the argument's actual type.
    uint64_t lo;
    uint64_t hi;
};
1275 :
1276 : // TLS data for a single module instance.
1277 : //
1278 : // Every WebAssembly function expects to be passed a hidden TLS pointer argument
1279 : // in WasmTlsReg. The TLS pointer argument points to a TlsData struct.
1280 : // Compiled functions expect that the TLS pointer does not change for the
1281 : // lifetime of the thread.
1282 : //
1283 : // There is a TlsData per module instance per thread, so inter-module calls need
1284 : // to pass the TLS pointer appropriate for the callee module.
1285 : //
1286 : // After the TlsData struct follows the module's declared TLS variables.
1287 :
struct TlsData
{
    // NOTE(review): generated code reaches these fields by fixed offsets
    // from WasmTlsReg (see comment above), so field order is ABI -- do not
    // reorder without auditing the JIT.

    // Pointer to the base of the default memory (or null if there is none).
    uint8_t* memoryBase;

#ifndef WASM_HUGE_MEMORY
    // Bounds check limit of memory, in bytes (or zero if there is no memory).
    uint32_t boundsCheckLimit;
#endif

    // Pointer to the Instance that contains this TLS data.
    Instance* instance;

    // Shortcut to instance->zone->group->addressOfOwnerContext
    JSContext** addressOfContext;

    // Pointer that should be freed (due to padding before the TlsData).
    void* allocatedBase;

    // The globalArea must be the last field. Globals for the module start here
    // and are inline in this structure. 16-byte alignment is required for SIMD
    // data (checked by the static_assert just below this struct).
    MOZ_ALIGNED_DECL(char globalArea, 16);

};
1313 :
1314 : static_assert(offsetof(TlsData, globalArea) % 16 == 0, "aligned");
1315 :
1316 : typedef int32_t (*ExportFuncPtr)(ExportArg* args, TlsData* tls);
1317 :
1318 : // FuncImportTls describes the region of wasm global memory allocated in the
1319 : // instance's thread-local storage for a function import. This is accessed
1320 : // directly from JIT code and mutated by Instance as exits become optimized and
1321 : // deoptimized.
1322 :
struct FuncImportTls
{
    // The code to call at an import site: a wasm callee, a thunk into C++, or a
    // thunk into JIT code.
    void* code;

    // The callee's TlsData pointer, which must be loaded to WasmTlsReg (along
    // with any pinned registers) before calling 'code'.
    TlsData* tls;

    // If 'code' points into a JIT code thunk, the BaselineScript of the callee,
    // for bidirectional registration purposes.
    jit::BaselineScript* baselineScript;

    // A GC pointer which keeps the callee alive. For imported wasm functions,
    // this points to the wasm function's WasmInstanceObject. For all other
    // imported functions, 'obj' points to the JSFunction.
    GCPtrObject obj;
    // JIT code loads/stores 'obj' as a raw pointer, hence this size check.
    static_assert(sizeof(GCPtrObject) == sizeof(void*), "for JIT access");
};
1343 :
1344 : // TableTls describes the region of wasm global memory allocated in the
1345 : // instance's thread-local storage which is accessed directly from JIT code
1346 : // to bounds-check and index the table.
1347 :
struct TableTls
{
    // Length of the table in number of elements (not bytes). Loaded by JIT
    // code for bounds checks (see CalleeDesc::tableLengthGlobalDataOffset).
    uint32_t length;

    // Pointer to the array of elements (of type either ExternalTableElem or
    // void*, depending on whether the table is external).
    void* base;
};
1357 :
1358 : // When a table can contain functions from other instances (it is "external"),
1359 : // the internal representation is an array of ExternalTableElem instead of just
1360 : // an array of code pointers.
1361 :
struct ExternalTableElem
{
    // The code to call when calling this element. The table ABI is the system
    // ABI with the additional ABI requirements that:
    //  - WasmTlsReg and any pinned registers have been loaded appropriately
    //  - if this is a heterogeneous table that requires a signature check,
    //    WasmTableCallSigReg holds the signature id.
    void* code;

    // The pointer to the callee's instance's TlsData. This must be loaded into
    // WasmTlsReg before calling 'code'. (JIT code indexes into the element
    // array by offset, so the field order of this struct matters.)
    TlsData* tls;
};
1375 :
1376 : // CalleeDesc describes how to compile one of the variety of asm.js/wasm calls.
1377 : // This is hoisted into WasmTypes.h for sharing between Ion and Baseline.
1378 :
class CalleeDesc
{
  public:
    enum Which {
        // Calls a function defined in the same module by its index.
        Func,

        // Calls the import identified by the offset of its FuncImportTls in
        // thread-local data.
        Import,

        // Calls a WebAssembly table (heterogeneous, index must be bounds
        // checked, callee instance depends on TableDesc).
        WasmTable,

        // Calls an asm.js table (homogeneous, masked index, same-instance).
        AsmJSTable,

        // Call a C++ function identified by SymbolicAddress.
        Builtin,

        // Like Builtin, but automatically passes Instance* as first argument.
        BuiltinInstanceMethod
    };

  private:
    // Discriminant for the union below. Only the arm selected by which_ is
    // initialized (by the static factory methods); the accessors assert the
    // matching kind before reading.
    Which which_;
    union U {
        U() {}
        uint32_t funcIndex_;        // valid when which_ == Func
        struct {
            uint32_t globalDataOffset_;
        } import;                   // valid when which_ == Import
        struct {
            uint32_t globalDataOffset_;
            uint32_t minLength_;    // set by wasmTable() only
            bool external_;         // set by wasmTable() only
            SigIdDesc sigId_;       // set by wasmTable() only
        } table;                    // valid when isTable()
        SymbolicAddress builtin_;   // valid for Builtin/BuiltinInstanceMethod
    } u;

  public:
    CalleeDesc() {}
    static CalleeDesc function(uint32_t funcIndex) {
        CalleeDesc c;
        c.which_ = Func;
        c.u.funcIndex_ = funcIndex;
        return c;
    }
    static CalleeDesc import(uint32_t globalDataOffset) {
        CalleeDesc c;
        c.which_ = Import;
        c.u.import.globalDataOffset_ = globalDataOffset;
        return c;
    }
    static CalleeDesc wasmTable(const TableDesc& desc, SigIdDesc sigId) {
        CalleeDesc c;
        c.which_ = WasmTable;
        c.u.table.globalDataOffset_ = desc.globalDataOffset;
        c.u.table.minLength_ = desc.limits.initial;
        c.u.table.external_ = desc.external;
        c.u.table.sigId_ = sigId;
        return c;
    }
    // Note: only globalDataOffset_ is filled in; asm.js table calls never
    // consult the other table fields (the accessors below assert WasmTable).
    static CalleeDesc asmJSTable(const TableDesc& desc) {
        CalleeDesc c;
        c.which_ = AsmJSTable;
        c.u.table.globalDataOffset_ = desc.globalDataOffset;
        return c;
    }
    static CalleeDesc builtin(SymbolicAddress callee) {
        CalleeDesc c;
        c.which_ = Builtin;
        c.u.builtin_ = callee;
        return c;
    }
    static CalleeDesc builtinInstanceMethod(SymbolicAddress callee) {
        CalleeDesc c;
        c.which_ = BuiltinInstanceMethod;
        c.u.builtin_ = callee;
        return c;
    }
    Which which() const {
        return which_;
    }
    uint32_t funcIndex() const {
        MOZ_ASSERT(which_ == Func);
        return u.funcIndex_;
    }
    uint32_t importGlobalDataOffset() const {
        MOZ_ASSERT(which_ == Import);
        return u.import.globalDataOffset_;
    }
    bool isTable() const {
        return which_ == WasmTable || which_ == AsmJSTable;
    }
    // Offsets (within global data) of the table's length and base fields,
    // computed from the TableTls layout for direct JIT access.
    uint32_t tableLengthGlobalDataOffset() const {
        MOZ_ASSERT(isTable());
        return u.table.globalDataOffset_ + offsetof(TableTls, length);
    }
    uint32_t tableBaseGlobalDataOffset() const {
        MOZ_ASSERT(isTable());
        return u.table.globalDataOffset_ + offsetof(TableTls, base);
    }
    bool wasmTableIsExternal() const {
        MOZ_ASSERT(which_ == WasmTable);
        return u.table.external_;
    }
    SigIdDesc wasmTableSigId() const {
        MOZ_ASSERT(which_ == WasmTable);
        return u.table.sigId_;
    }
    uint32_t wasmTableMinLength() const {
        MOZ_ASSERT(which_ == WasmTable);
        return u.table.minLength_;
    }
    SymbolicAddress builtin() const {
        MOZ_ASSERT(which_ == Builtin || which_ == BuiltinInstanceMethod);
        return u.builtin_;
    }
};
1501 :
1502 : // Because ARM has a fixed-width instruction encoding, ARM can only express a
1503 : // limited subset of immediates (in a single instruction).
1504 :
1505 : extern bool
1506 : IsValidARMImmediate(uint32_t i);
1507 :
1508 : extern uint32_t
1509 : RoundUpToNextValidARMImmediate(uint32_t i);
1510 :
1511 : // The WebAssembly spec hard-codes the virtual page size to be 64KiB and
1512 : // requires the size of linear memory to always be a multiple of 64KiB.
1513 :
1514 : static const unsigned PageSize = 64 * 1024;
1515 :
1516 : // Bounds checks always compare the base of the memory access with the bounds
1517 : // check limit. If the memory access is unaligned, this means that, even if the
1518 : // bounds check succeeds, a few bytes of the access can extend past the end of
1519 : // memory. To guard against this, extra space is included in the guard region to
1520 : // catch the overflow. MaxMemoryAccessSize is a conservative approximation of
1521 : // the maximum guard space needed to catch all unaligned overflows.
1522 :
1523 : static const unsigned MaxMemoryAccessSize = sizeof(Val);
1524 :
1525 : #ifdef JS_CODEGEN_X64
1526 :
1527 : // All other code should use WASM_HUGE_MEMORY instead of JS_CODEGEN_X64 so that
1528 : // it is easy to use the huge-mapping optimization for other 64-bit platforms in
1529 : // the future.
1530 : # define WASM_HUGE_MEMORY
1531 :
1532 : // On WASM_HUGE_MEMORY platforms, every asm.js or WebAssembly memory
1533 : // unconditionally allocates a huge region of virtual memory of size
1534 : // wasm::HugeMappedSize. This allows all memory resizing to work without
1535 : // reallocation and provides enough guard space for all offsets to be folded
1536 : // into memory accesses.
1537 :
1538 : static const uint64_t IndexRange = uint64_t(UINT32_MAX) + 1;
1539 : static const uint64_t OffsetGuardLimit = uint64_t(INT32_MAX) + 1;
1540 : static const uint64_t UnalignedGuardPage = PageSize;
1541 : static const uint64_t HugeMappedSize = IndexRange + OffsetGuardLimit + UnalignedGuardPage;
1542 :
1543 : static_assert(MaxMemoryAccessSize <= UnalignedGuardPage, "rounded up to static page size");
1544 :
1545 : #else // !WASM_HUGE_MEMORY
1546 :
1547 : // On !WASM_HUGE_MEMORY platforms:
1548 : // - To avoid OOM in ArrayBuffer::prepareForAsmJS, asm.js continues to use the
1549 : // original ArrayBuffer allocation which has no guard region at all.
1550 : // - For WebAssembly memories, an additional GuardSize is mapped after the
1551 : // accessible region of the memory to catch folded (base+offset) accesses
1552 : // where `offset < OffsetGuardLimit` as well as the overflow from unaligned
1553 : // accesses, as described above for MaxMemoryAccessSize.
1554 :
1555 : static const size_t OffsetGuardLimit = PageSize - MaxMemoryAccessSize;
1556 : static const size_t GuardSize = PageSize;
1557 :
1558 : // Return whether the given immediate satisfies the constraints of the platform
1559 : // (viz. that, on ARM, IsValidARMImmediate).
1560 :
1561 : extern bool
1562 : IsValidBoundsCheckImmediate(uint32_t i);
1563 :
1564 : // For a given WebAssembly/asm.js max size, return the number of bytes to
1565 : // map which will necessarily be a multiple of the system page size and greater
1566 : // than maxSize. For a returned mappedSize:
1567 : // boundsCheckLimit = mappedSize - GuardSize
1568 : // IsValidBoundsCheckImmediate(boundsCheckLimit)
1569 :
1570 : extern size_t
1571 : ComputeMappedSize(uint32_t maxSize);
1572 :
1573 : #endif // WASM_HUGE_MEMORY
1574 :
1575 : // Metadata for memory accesses. On WASM_HUGE_MEMORY platforms, only
1576 : // (non-SIMD/Atomic) asm.js loads and stores create a MemoryAccess so that the
1577 : // signal handler can implement the semantically-correct wraparound logic; the
1578 : // rest simply redirect to the out-of-bounds stub in the signal handler. On x86,
1579 : // the base address of memory is baked into each memory access instruction so
1580 : // the MemoryAccess records the location of each for patching. On all other
1581 : // platforms, no MemoryAccess is created.
1582 :
1583 : class MemoryAccess
1584 : {
1585 : uint32_t insnOffset_;
1586 : uint32_t trapOutOfLineOffset_;
1587 :
1588 : public:
1589 : MemoryAccess() = default;
1590 0 : explicit MemoryAccess(uint32_t insnOffset, uint32_t trapOutOfLineOffset = UINT32_MAX)
1591 0 : : insnOffset_(insnOffset),
1592 0 : trapOutOfLineOffset_(trapOutOfLineOffset)
1593 0 : {}
1594 :
1595 0 : uint32_t insnOffset() const {
1596 0 : return insnOffset_;
1597 : }
1598 0 : bool hasTrapOutOfLineCode() const {
1599 0 : return trapOutOfLineOffset_ != UINT32_MAX;
1600 : }
1601 0 : uint8_t* trapOutOfLineCode(uint8_t* code) const {
1602 0 : MOZ_ASSERT(hasTrapOutOfLineCode());
1603 0 : return code + trapOutOfLineOffset_;
1604 : }
1605 :
1606 0 : void offsetBy(uint32_t delta) {
1607 0 : insnOffset_ += delta;
1608 0 : if (hasTrapOutOfLineCode())
1609 0 : trapOutOfLineOffset_ += delta;
1610 0 : }
1611 : };
1612 :
1613 : WASM_DECLARE_POD_VECTOR(MemoryAccess, MemoryAccessVector)
1614 :
1615 : // wasm::Frame represents the bytes pushed by the call instruction and the fixed
1616 : // prologue generated by wasm::GenerateCallablePrologue.
1617 : //
1618 : // Across all architectures it is assumed that, before the call instruction, the
1619 : // stack pointer is WasmStackAlignment-aligned. Thus after the prologue, and
1620 : // before the function has made its stack reservation, the stack alignment is
1621 : // sizeof(Frame) % WasmStackAlignment.
1622 : //
1623 : // During MacroAssembler code generation, the bytes pushed after the wasm::Frame
1624 : // are counted by masm.framePushed. Thus, the stack alignment at any point in
1625 : // time is (sizeof(wasm::Frame) + masm.framePushed) % WasmStackAlignment.
1626 :
struct Frame
{
    // The caller's Frame*. See GenerateCallableEpilogue for why this must be
    // the first field of wasm::Frame (in a downward-growing stack).
    Frame* callerFP;

    // The raw payload of an ExitReason describing why we've left wasm. It is
    // non-zero if and only if a call exited wasm code.
    uint32_t encodedExitReason;

    // The saved value of WasmTlsReg on entry to the function. This is
    // effectively the callee's instance.
    TlsData* tls;

    // The return address pushed by the call (in the case of ARM/MIPS the return
    // address is pushed by the first instruction of the prologue).
    void* returnAddress;

    // Helper functions:

    // The instance of the callee, reached through the saved TLS pointer.
    Instance* instance() const { return tls->instance; }
};
1649 :
1650 : // A DebugFrame is a Frame with additional fields that are added after the
1651 : // normal function prologue by the baseline compiler. If a Module is compiled
1652 : // with debugging enabled, then all its code creates DebugFrames on the stack
1653 : // instead of just Frames. These extra fields are used by the Debugger API.
1654 :
class DebugFrame
{
    // The results field is left uninitialized and only used during the
    // baseline compiler's return sequence to allow the debugger to inspect
    // and modify the return value of a frame being debugged.
    union
    {
        int32_t resultI32_;
        int64_t resultI64_;
        float resultF32_;
        double resultF64_;
    };

    // The returnValue() method returns a HandleValue pointing to this field.
    js::Value cachedReturnJSValue_;

    // The function index of this frame. Technically, this could be derived
    // given a PC into this frame (which could lookup the CodeRange which has
    // the function index), but this isn't always readily available.
    uint32_t funcIndex_;

    // Flags whose meanings are described below. They are overlaid with a
    // pointer-sized word so JIT code can address all flags via a single
    // offset (see offsetOfFlagsWord below).
    union
    {
        struct
        {
            bool observing_ : 1;
            bool isDebuggee_ : 1;
            bool prevUpToDate_ : 1;
            bool hasCachedSavedFrame_ : 1;
            bool hasCachedReturnJSValue_ : 1;
        };
        void* flagsWord_;
    };

    // The Frame goes at the end since the stack grows down.
    Frame frame_;

  public:
    Frame& frame() { return frame_; }
    uint32_t funcIndex() const { return funcIndex_; }
    Instance* instance() const { return frame_.instance(); }
    GlobalObject* global() const;
    JSObject* environmentChain() const;
    bool getLocal(uint32_t localIndex, MutableHandleValue vp);

    // The return value must be written from the unboxed representation in the
    // results union into cachedReturnJSValue_ by updateReturnJSValue() before
    // returnValue() can return a Handle to it.

    void updateReturnJSValue();
    HandleValue returnValue() const;
    void clearReturnJSValue();

    // Once the debugger observes a frame, it must be notified via
    // onLeaveFrame() before the frame is popped. Calling observe() ensures the
    // leave frame traps are enabled. Both methods are idempotent so the caller
    // doesn't have to worry about calling them more than once.

    void observe(JSContext* cx);
    void leave(JSContext* cx);

    // The 'isDebuggee' bit is initialized to false and set by the WebAssembly
    // runtime right before a frame is exposed to the debugger, as required by
    // the Debugger API. The bit is then used for Debugger-internal purposes
    // afterwards.

    bool isDebuggee() const { return isDebuggee_; }
    void setIsDebuggee() { isDebuggee_ = true; }
    void unsetIsDebuggee() { isDebuggee_ = false; }

    // These are opaque boolean flags used by the debugger to implement
    // AbstractFramePtr. They are initialized to false and not otherwise read or
    // written by wasm code or runtime.

    bool prevUpToDate() const { return prevUpToDate_; }
    void setPrevUpToDate() { prevUpToDate_ = true; }
    void unsetPrevUpToDate() { prevUpToDate_ = false; }

    bool hasCachedSavedFrame() const { return hasCachedSavedFrame_; }
    void setHasCachedSavedFrame() { hasCachedSavedFrame_ = true; }

    // DebugFrame is accessed directly by JIT code.

    static constexpr size_t offsetOfResults() { return offsetof(DebugFrame, resultI32_); }
    static constexpr size_t offsetOfFlagsWord() { return offsetof(DebugFrame, flagsWord_); }
    static constexpr size_t offsetOfFuncIndex() { return offsetof(DebugFrame, funcIndex_); }
    static constexpr size_t offsetOfFrame() { return offsetof(DebugFrame, frame_); }

    // DebugFrames are 8-byte aligned, allowing them to be placed in an
    // AbstractFramePtr.

    static const unsigned Alignment = 8;
    static void alignmentStaticAsserts();
};
1750 :
1751 : } // namespace wasm
1752 : } // namespace js
1753 :
1754 : #endif // wasm_types_h
|