Line data Source code
1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : * vim: set ts=8 sts=4 et sw=4 tw=99:
3 : *
4 : * Copyright 2015 Mozilla Foundation
5 : *
6 : * Licensed under the Apache License, Version 2.0 (the "License");
7 : * you may not use this file except in compliance with the License.
8 : * You may obtain a copy of the License at
9 : *
10 : * http://www.apache.org/licenses/LICENSE-2.0
11 : *
12 : * Unless required by applicable law or agreed to in writing, software
13 : * distributed under the License is distributed on an "AS IS" BASIS,
14 : * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 : * See the License for the specific language governing permissions and
16 : * limitations under the License.
17 : */
18 :
19 : #include "wasm/WasmStubs.h"
20 :
21 : #include "mozilla/ArrayUtils.h"
22 :
23 : #include "wasm/WasmCode.h"
24 : #include "wasm/WasmGenerator.h"
25 :
26 : #include "jit/MacroAssembler-inl.h"
27 :
28 : using namespace js;
29 : using namespace js::jit;
30 : using namespace js::wasm;
31 :
32 : using mozilla::ArrayLength;
33 :
34 : static void
35 0 : FinishOffsets(MacroAssembler& masm, Offsets* offsets)
36 : {
37 : // On old ARM hardware, constant pools could be inserted and they need to
38 : // be flushed before considering the size of the masm.
39 0 : masm.flushBuffer();
40 0 : offsets->end = masm.size();
41 0 : }
42 :
43 : static void
44 0 : AssertStackAlignment(MacroAssembler& masm, uint32_t alignment, uint32_t addBeforeAssert = 0)
45 : {
46 0 : MOZ_ASSERT((sizeof(Frame) + masm.framePushed() + addBeforeAssert) % alignment == 0);
47 0 : masm.assertStackAlignment(alignment, addBeforeAssert);
48 0 : }
49 :
50 : static unsigned
51 0 : StackDecrementForCall(MacroAssembler& masm, uint32_t alignment, unsigned bytesToPush)
52 : {
53 0 : return StackDecrementForCall(alignment, sizeof(Frame) + masm.framePushed(), bytesToPush);
54 : }
55 :
56 : template <class VectorT>
57 : static unsigned
58 0 : StackArgBytes(const VectorT& args)
59 : {
60 0 : ABIArgIter<VectorT> iter(args);
61 0 : while (!iter.done())
62 0 : iter++;
63 0 : return iter.stackBytesConsumedSoFar();
64 : }
65 :
66 : template <class VectorT>
67 : static unsigned
68 0 : StackDecrementForCall(MacroAssembler& masm, uint32_t alignment, const VectorT& args,
69 : unsigned extraBytes = 0)
70 : {
71 0 : return StackDecrementForCall(masm, alignment, StackArgBytes(args) + extraBytes);
72 : }
73 :
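// The two helpers above size a callee's outgoing-argument area: StackArgBytes
// walks the ABI iterator to count the argument bytes that spill to the stack,
// and StackDecrementForCall pads them so the stack pointer stays aligned. A
// minimal sketch of the underlying computation (illustrative only; the real
// three-argument StackDecrementForCall is defined in shared wasm/jit code):
static unsigned
SketchStackDecrementForCall(uint32_t alignment, uint32_t bytesAlreadyPushed,
                            uint32_t bytesToPush)
{
    // Round up so that (bytesAlreadyPushed + decrement) is a multiple of
    // 'alignment' once the outgoing arguments have been reserved.
    return AlignBytes(bytesAlreadyPushed + bytesToPush, alignment) - bytesAlreadyPushed;
}
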
74 : static void
75 0 : SetupABIArguments(MacroAssembler& masm, const FuncExport& fe, Register argv, Register scratch)
76 : {
77 : // Copy parameters out of argv and into the registers/stack-slots specified by
78 : // the system ABI.
79 0 : for (ABIArgValTypeIter iter(fe.sig().args()); !iter.done(); iter++) {
80 0 : unsigned argOffset = iter.index() * sizeof(ExportArg);
81 0 : Address src(argv, argOffset);
82 0 : MIRType type = iter.mirType();
83 0 : switch (iter->kind()) {
84 : case ABIArg::GPR:
85 0 : if (type == MIRType::Int32)
86 0 : masm.load32(src, iter->gpr());
87 0 : else if (type == MIRType::Int64)
88 0 : masm.load64(src, iter->gpr64());
89 0 : break;
90 : #ifdef JS_CODEGEN_REGISTER_PAIR
91 : case ABIArg::GPR_PAIR:
92 : if (type == MIRType::Int64)
93 : masm.load64(src, iter->gpr64());
94 : else
95 : MOZ_CRASH("wasm uses hardfp for function calls.");
96 : break;
97 : #endif
98 : case ABIArg::FPU: {
99 : static_assert(sizeof(ExportArg) >= jit::Simd128DataSize,
100 : "ExportArg must be big enough to store SIMD values");
101 0 : switch (type) {
102 : case MIRType::Int8x16:
103 : case MIRType::Int16x8:
104 : case MIRType::Int32x4:
105 : case MIRType::Bool8x16:
106 : case MIRType::Bool16x8:
107 : case MIRType::Bool32x4:
108 0 : masm.loadUnalignedSimd128Int(src, iter->fpu());
109 0 : break;
110 : case MIRType::Float32x4:
111 0 : masm.loadUnalignedSimd128Float(src, iter->fpu());
112 0 : break;
113 : case MIRType::Double:
114 0 : masm.loadDouble(src, iter->fpu());
115 0 : break;
116 : case MIRType::Float32:
117 0 : masm.loadFloat32(src, iter->fpu());
118 0 : break;
119 : default:
120 0 : MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected FPU type");
121 : break;
122 : }
123 0 : break;
124 : }
125 : case ABIArg::Stack:
126 0 : switch (type) {
127 : case MIRType::Int32:
128 0 : masm.load32(src, scratch);
129 0 : masm.storePtr(scratch, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
130 0 : break;
131 : case MIRType::Int64: {
132 0 : Register sp = masm.getStackPointer();
133 : #if JS_BITS_PER_WORD == 32
134 : masm.load32(Address(src.base, src.offset + INT64LOW_OFFSET), scratch);
135 : masm.store32(scratch, Address(sp, iter->offsetFromArgBase() + INT64LOW_OFFSET));
136 : masm.load32(Address(src.base, src.offset + INT64HIGH_OFFSET), scratch);
137 : masm.store32(scratch, Address(sp, iter->offsetFromArgBase() + INT64HIGH_OFFSET));
138 : #else
139 0 : Register64 scratch64(scratch);
140 0 : masm.load64(src, scratch64);
141 0 : masm.store64(scratch64, Address(sp, iter->offsetFromArgBase()));
142 : #endif
143 0 : break;
144 : }
145 : case MIRType::Double:
146 0 : masm.loadDouble(src, ScratchDoubleReg);
147 : masm.storeDouble(ScratchDoubleReg,
148 0 : Address(masm.getStackPointer(), iter->offsetFromArgBase()));
149 0 : break;
150 : case MIRType::Float32:
151 0 : masm.loadFloat32(src, ScratchFloat32Reg);
152 : masm.storeFloat32(ScratchFloat32Reg,
153 0 : Address(masm.getStackPointer(), iter->offsetFromArgBase()));
154 0 : break;
155 : case MIRType::Int8x16:
156 : case MIRType::Int16x8:
157 : case MIRType::Int32x4:
158 : case MIRType::Bool8x16:
159 : case MIRType::Bool16x8:
160 : case MIRType::Bool32x4:
161 0 : masm.loadUnalignedSimd128Int(src, ScratchSimd128Reg);
162 0 : masm.storeAlignedSimd128Int(
163 0 : ScratchSimd128Reg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
164 0 : break;
165 : case MIRType::Float32x4:
166 0 : masm.loadUnalignedSimd128Float(src, ScratchSimd128Reg);
167 0 : masm.storeAlignedSimd128Float(
168 0 : ScratchSimd128Reg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
169 0 : break;
170 : default:
171 0 : MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected stack arg type");
172 : }
173 0 : break;
174 : }
175 : }
176 0 : }
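
// A sketch of how a C++ caller might populate the argv that SetupABIArguments
// consumes: one fixed-size ExportArg slot per argument, read back by the stub
// at argv + index * sizeof(ExportArg) with the type taken from the signature.
// The helper and the literal values are hypothetical; only ExportArg is real.
static void
SketchFillExportArgs(ExportArg* argv)
{
    *reinterpret_cast<int32_t*>(&argv[0]) = 42;   // arg 0: an i32
    *reinterpret_cast<double*>(&argv[1]) = 3.14;  // arg 1: an f64
}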
177 :
178 : static void
179 0 : StoreABIReturn(MacroAssembler& masm, const FuncExport& fe, Register argv)
180 : {
181 : // Store the return value in argv[0].
182 0 : switch (fe.sig().ret()) {
183 : case ExprType::Void:
184 0 : break;
185 : case ExprType::I32:
186 0 : masm.store32(ReturnReg, Address(argv, 0));
187 0 : break;
188 : case ExprType::I64:
189 0 : masm.store64(ReturnReg64, Address(argv, 0));
190 0 : break;
191 : case ExprType::F32:
192 0 : if (!JitOptions.wasmTestMode)
193 0 : masm.canonicalizeFloat(ReturnFloat32Reg);
194 0 : masm.storeFloat32(ReturnFloat32Reg, Address(argv, 0));
195 0 : break;
196 : case ExprType::F64:
197 0 : if (!JitOptions.wasmTestMode)
198 0 : masm.canonicalizeDouble(ReturnDoubleReg);
199 0 : masm.storeDouble(ReturnDoubleReg, Address(argv, 0));
200 0 : break;
201 : case ExprType::I8x16:
202 : case ExprType::I16x8:
203 : case ExprType::I32x4:
204 : case ExprType::B8x16:
205 : case ExprType::B16x8:
206 : case ExprType::B32x4:
207 : // We don't have control over argv's alignment, so do an unaligned access.
208 0 : masm.storeUnalignedSimd128Int(ReturnSimd128Reg, Address(argv, 0));
209 0 : break;
210 : case ExprType::F32x4:
211 : // We don't have control over argv's alignment, so do an unaligned access.
212 0 : masm.storeUnalignedSimd128Float(ReturnSimd128Reg, Address(argv, 0));
213 0 : break;
214 : case ExprType::Limit:
215 0 : MOZ_CRASH("Limit");
216 : }
217 0 : }
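
// Unless wasmTestMode is set (tests want to observe exact NaN bit patterns),
// float results are canonicalized before being handed back so that every
// JS-visible NaN has a single bit pattern. A sketch of the effect, assuming
// the usual quiet-NaN pattern and mozilla/FloatingPoint.h:
static double
SketchCanonicalizeDouble(double d)
{
    // Any NaN collapses to the canonical quiet NaN (0x7FF8000000000000);
    // all other values pass through unchanged.
    return mozilla::IsNaN(d) ? mozilla::SpecificNaN<double>(0, 0x8000000000000ULL) : d;
}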
218 :
219 : #if defined(JS_CODEGEN_ARM)
220 : // The ARM system ABI also includes d15 & s31 in the non-volatile float registers.
221 : // Also exclude lr (a.k.a. r14) as we preserve it manually.
222 : static const LiveRegisterSet NonVolatileRegs =
223 : LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask&
224 : ~(uint32_t(1) << Registers::lr)),
225 : FloatRegisterSet(FloatRegisters::NonVolatileMask
226 : | (1ULL << FloatRegisters::d15)
227 : | (1ULL << FloatRegisters::s31)));
228 : #else
229 : static const LiveRegisterSet NonVolatileRegs =
230 : LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask),
231 : FloatRegisterSet(FloatRegisters::NonVolatileMask));
232 : #endif
233 :
234 : #if defined(JS_CODEGEN_MIPS32)
235 : static const unsigned NonVolatileRegsPushSize = NonVolatileRegs.gprs().size() * sizeof(intptr_t) +
236 : NonVolatileRegs.fpus().getPushSizeInBytes() +
237 : sizeof(double);
238 : #elif defined(JS_CODEGEN_NONE)
239 : static const unsigned NonVolatileRegsPushSize = 0;
240 : #else
241 9 : static const unsigned NonVolatileRegsPushSize = NonVolatileRegs.gprs().size() * sizeof(intptr_t) +
242 6 : NonVolatileRegs.fpus().getPushSizeInBytes();
243 : #endif
244 3 : static const unsigned FramePushedBeforeAlign = NonVolatileRegsPushSize + sizeof(void*);
245 : static const unsigned FailFP = 0xbad;
246 :
247 : // Generate a stub that enters wasm from a C++ caller via the native ABI. The
248 : // signature of the entry point is Module::ExportFuncPtr. The exported wasm
249 : // function has an ABI derived from its specific signature, so this function
250 : // must map from the ABI of ExportFuncPtr to the export's signature's ABI.
251 : Offsets
252 0 : wasm::GenerateEntry(MacroAssembler& masm, const FuncExport& fe)
253 : {
254 0 : masm.haltingAlign(CodeAlignment);
255 :
256 0 : Offsets offsets;
257 0 : offsets.begin = masm.currentOffset();
258 :
259 : // Save the return address if it wasn't already saved by the call insn.
260 : #if defined(JS_CODEGEN_ARM)
261 : masm.push(lr);
262 : #elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
263 : masm.push(ra);
264 : #endif
265 :
266 : // Save all caller non-volatile registers before we clobber them here and in
267 : // the asm.js callee (which does not preserve non-volatile registers).
268 0 : masm.setFramePushed(0);
269 0 : masm.PushRegsInMask(NonVolatileRegs);
270 0 : MOZ_ASSERT(masm.framePushed() == NonVolatileRegsPushSize);
271 :
272 : // Put the 'argv' argument into a non-argument/return/TLS register so that
273 : // we can use 'argv' while we fill in the arguments for the asm.js callee.
274 : // Use a second non-argument/return register as temporary scratch.
275 0 : Register argv = ABINonArgReturnReg0;
276 0 : Register scratch = ABINonArgReturnReg1;
277 :
278 : // Read the arguments of wasm::ExportFuncPtr according to the native ABI.
279 : // The entry stub's frame is 1 word.
280 0 : const unsigned argBase = sizeof(void*) + masm.framePushed();
281 0 : ABIArgGenerator abi;
282 0 : ABIArg arg;
283 :
284 : // arg 1: ExportArg*
285 0 : arg = abi.next(MIRType::Pointer);
286 0 : if (arg.kind() == ABIArg::GPR)
287 0 : masm.movePtr(arg.gpr(), argv);
288 : else
289 0 : masm.loadPtr(Address(masm.getStackPointer(), argBase + arg.offsetFromArgBase()), argv);
290 :
291 : // Arg 2: TlsData*
292 0 : arg = abi.next(MIRType::Pointer);
293 0 : if (arg.kind() == ABIArg::GPR)
294 0 : masm.movePtr(arg.gpr(), WasmTlsReg);
295 : else
296 0 : masm.loadPtr(Address(masm.getStackPointer(), argBase + arg.offsetFromArgBase()), WasmTlsReg);
297 :
298 : // Save 'argv' on the stack so that we can recover it after the call.
299 0 : masm.Push(argv);
300 :
301 : // Since we're about to dynamically align the stack, reset the frame depth
302 : // so we can still assert static stack depth balancing.
303 0 : MOZ_ASSERT(masm.framePushed() == FramePushedBeforeAlign);
304 0 : masm.setFramePushed(0);
305 :
306 : // Dynamically align the stack since ABIStackAlignment is not necessarily
307 : // WasmStackAlignment. Preserve SP so it can be restored after the call.
308 0 : masm.moveStackPtrTo(scratch);
309 0 : masm.andToStackPtr(Imm32(~(WasmStackAlignment - 1)));
310 0 : masm.Push(scratch);
311 :
312 : // Reserve stack space for the call.
313 0 : unsigned argDecrement = StackDecrementForCall(WasmStackAlignment,
314 0 : masm.framePushed(),
315 0 : StackArgBytes(fe.sig().args()));
316 0 : masm.reserveStack(argDecrement);
317 :
318 : // Copy parameters out of argv and into the wasm ABI registers/stack-slots.
319 0 : SetupABIArguments(masm, fe, argv, scratch);
320 :
321 : // Setup wasm register state. The nullness of the frame pointer is used to
322 : // determine whether the call ended in success or failure.
323 0 : masm.movePtr(ImmWord(0), FramePointer);
324 0 : masm.loadWasmPinnedRegsFromTls();
325 :
326 : // Call into the real function. Note that, due to the throw stub, fp, tls
327 : // and pinned registers may be clobbered.
328 0 : masm.assertStackAlignment(WasmStackAlignment);
329 0 : masm.call(CallSiteDesc(CallSiteDesc::Func), fe.funcIndex());
330 0 : masm.assertStackAlignment(WasmStackAlignment);
331 :
332 : // Pop the arguments pushed after the dynamic alignment.
333 0 : masm.freeStack(argDecrement);
334 :
335 : // Pop the stack pointer to its value right before dynamic alignment.
336 0 : masm.PopStackPtr();
337 0 : MOZ_ASSERT(masm.framePushed() == 0);
338 0 : masm.setFramePushed(FramePushedBeforeAlign);
339 :
340 : // Recover the 'argv' pointer which was saved before aligning the stack.
341 0 : masm.Pop(argv);
342 :
343 : // Store the return value in argv[0].
344 0 : StoreABIReturn(masm, fe, argv);
345 :
346 : // After the ReturnReg is stored into argv[0] but before fp is clobbered by
347 : // the PopRegsInMask(NonVolatileRegs) below, set the return value based on
348 : // whether fp is null (which is the case for successful returns) or the
349 : // FailFP magic value (set by the throw stub).
350 0 : Label success, join;
351 0 : masm.branchTestPtr(Assembler::Zero, FramePointer, FramePointer, &success);
352 : #ifdef DEBUG
353 0 : Label ok;
354 0 : masm.branchPtr(Assembler::Equal, FramePointer, Imm32(FailFP), &ok);
355 0 : masm.breakpoint();
356 0 : masm.bind(&ok);
357 : #endif
358 0 : masm.move32(Imm32(false), ReturnReg);
359 0 : masm.jump(&join);
360 0 : masm.bind(&success);
361 0 : masm.move32(Imm32(true), ReturnReg);
362 0 : masm.bind(&join);
363 :
364 : // Restore clobbered non-volatile registers of the caller.
365 0 : masm.PopRegsInMask(NonVolatileRegs);
366 0 : MOZ_ASSERT(masm.framePushed() == 0);
367 :
368 0 : masm.ret();
369 :
370 0 : FinishOffsets(masm, &offsets);
371 0 : return offsets;
372 : }
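
// The stub generated above is what a Module::ExportFuncPtr points at. A
// sketch of the C++ side of that contract (the typedef and helper here are
// illustrative; the authoritative signature lives with Module):
typedef int32_t (*SketchExportFuncPtr)(ExportArg* args, TlsData* tls);

static bool
SketchCallExport(SketchExportFuncPtr f, ExportArg* argv, TlsData* tls)
{
    // The stub returns true when fp was left null (normal return) and false
    // when the throw stub set fp to FailFP; on success any return value has
    // already been stored into argv[0] by StoreABIReturn.
    return f(argv, tls) != 0;
}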
373 :
374 : static void
375 0 : StackCopy(MacroAssembler& masm, MIRType type, Register scratch, Address src, Address dst)
376 : {
377 0 : if (type == MIRType::Int32) {
378 0 : masm.load32(src, scratch);
379 0 : masm.store32(scratch, dst);
380 0 : } else if (type == MIRType::Int64) {
381 : #if JS_BITS_PER_WORD == 32
382 : masm.load32(Address(src.base, src.offset + INT64LOW_OFFSET), scratch);
383 : masm.store32(scratch, Address(dst.base, dst.offset + INT64LOW_OFFSET));
384 : masm.load32(Address(src.base, src.offset + INT64HIGH_OFFSET), scratch);
385 : masm.store32(scratch, Address(dst.base, dst.offset + INT64HIGH_OFFSET));
386 : #else
387 0 : Register64 scratch64(scratch);
388 0 : masm.load64(src, scratch64);
389 0 : masm.store64(scratch64, dst);
390 : #endif
391 0 : } else if (type == MIRType::Float32) {
392 0 : masm.loadFloat32(src, ScratchFloat32Reg);
393 0 : masm.storeFloat32(ScratchFloat32Reg, dst);
394 : } else {
395 0 : MOZ_ASSERT(type == MIRType::Double);
396 0 : masm.loadDouble(src, ScratchDoubleReg);
397 0 : masm.storeDouble(ScratchDoubleReg, dst);
398 : }
399 0 : }
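
// On 32-bit targets StackCopy must move an Int64 as two 32-bit halves through
// the single scratch GPR; 64-bit targets use one load64/store64 pair. The C
// equivalent of the 32-bit path, as a sketch (little-endian layout assumed):
static void
SketchCopyInt64Via32BitScratch(const uint32_t* src, uint32_t* dst)
{
    uint32_t scratch = src[0];  // INT64LOW_OFFSET half
    dst[0] = scratch;
    scratch = src[1];           // INT64HIGH_OFFSET half
    dst[1] = scratch;
}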
400 :
401 : typedef bool ToValue;
402 :
403 : static void
404 0 : FillArgumentArray(MacroAssembler& masm, const ValTypeVector& args, unsigned argOffset,
405 : unsigned offsetToCallerStackArgs, Register scratch, ToValue toValue)
406 : {
407 0 : for (ABIArgValTypeIter i(args); !i.done(); i++) {
408 0 : Address dst(masm.getStackPointer(), argOffset + i.index() * sizeof(Value));
409 :
410 0 : MIRType type = i.mirType();
411 0 : switch (i->kind()) {
412 : case ABIArg::GPR:
413 0 : if (type == MIRType::Int32) {
414 0 : if (toValue)
415 0 : masm.storeValue(JSVAL_TYPE_INT32, i->gpr(), dst);
416 : else
417 0 : masm.store32(i->gpr(), dst);
418 0 : } else if (type == MIRType::Int64) {
419 : // We can't box int64 into Values (yet).
420 0 : if (toValue)
421 0 : masm.breakpoint();
422 : else
423 0 : masm.store64(i->gpr64(), dst);
424 : } else {
425 0 : MOZ_CRASH("unexpected input type?");
426 : }
427 0 : break;
428 : #ifdef JS_CODEGEN_REGISTER_PAIR
429 : case ABIArg::GPR_PAIR:
430 : if (type == MIRType::Int64)
431 : masm.store64(i->gpr64(), dst);
432 : else
433 : MOZ_CRASH("wasm uses hardfp for function calls.");
434 : break;
435 : #endif
436 : case ABIArg::FPU: {
437 0 : MOZ_ASSERT(IsFloatingPointType(type));
438 0 : FloatRegister srcReg = i->fpu();
439 0 : if (type == MIRType::Double) {
440 0 : if (toValue) {
441 : // Preserve the NaN pattern in the input.
442 0 : masm.moveDouble(srcReg, ScratchDoubleReg);
443 0 : srcReg = ScratchDoubleReg;
444 0 : masm.canonicalizeDouble(srcReg);
445 : }
446 0 : masm.storeDouble(srcReg, dst);
447 : } else {
448 0 : MOZ_ASSERT(type == MIRType::Float32);
449 0 : if (toValue) {
450 : // JS::Values can't store Float32, so convert to a Double.
451 0 : masm.convertFloat32ToDouble(srcReg, ScratchDoubleReg);
452 0 : masm.canonicalizeDouble(ScratchDoubleReg);
453 0 : masm.storeDouble(ScratchDoubleReg, dst);
454 : } else {
455 : // Preserve the NaN pattern in the input.
456 0 : masm.moveFloat32(srcReg, ScratchFloat32Reg);
457 0 : masm.canonicalizeFloat(ScratchFloat32Reg);
458 0 : masm.storeFloat32(ScratchFloat32Reg, dst);
459 : }
460 : }
461 0 : break;
462 : }
463 : case ABIArg::Stack: {
464 0 : Address src(masm.getStackPointer(), offsetToCallerStackArgs + i->offsetFromArgBase());
465 0 : if (toValue) {
466 0 : if (type == MIRType::Int32) {
467 0 : masm.load32(src, scratch);
468 0 : masm.storeValue(JSVAL_TYPE_INT32, scratch, dst);
469 0 : } else if (type == MIRType::Int64) {
470 : // We can't box int64 into Values (yet).
471 0 : masm.breakpoint();
472 : } else {
473 0 : MOZ_ASSERT(IsFloatingPointType(type));
474 0 : if (type == MIRType::Float32) {
475 0 : masm.loadFloat32(src, ScratchFloat32Reg);
476 0 : masm.convertFloat32ToDouble(ScratchFloat32Reg, ScratchDoubleReg);
477 : } else {
478 0 : masm.loadDouble(src, ScratchDoubleReg);
479 : }
480 0 : masm.canonicalizeDouble(ScratchDoubleReg);
481 0 : masm.storeDouble(ScratchDoubleReg, dst);
482 : }
483 : } else {
484 0 : StackCopy(masm, type, scratch, src, dst);
485 : }
486 0 : break;
487 : }
488 : }
489 : }
490 0 : }
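
// When toValue is true, FillArgumentArray boxes each wasm argument into a JS
// Value: i32 becomes an Int32Value, f32 is widened to double, and doubles are
// NaN-canonicalized; i64 cannot be boxed yet and traps. A sketch of those
// rules (the helper is hypothetical; Int32Value/DoubleValue/CanonicalizeNaN
// are the real APIs):
static Value
SketchBoxWasmArg(MIRType type, int32_t i32, double d)
{
    switch (type) {
      case MIRType::Int32:
        return Int32Value(i32);
      case MIRType::Float32:  // already widened to double by the caller
      case MIRType::Double:
        return DoubleValue(JS::CanonicalizeNaN(d));
      default:
        MOZ_CRASH("int64 and SIMD values cannot be boxed into a Value yet");
    }
}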
491 :
492 : // Generate a wrapper function with the standard intra-wasm call ABI which simply
493 : // calls an import. This wrapper function allows any import to be treated like a
494 : // normal wasm function for the purposes of exports and table calls. In
495 : // particular, the wrapper function provides:
496 : // - a table entry, so JS imports can be put into tables
497 : // - normal entries, so that, if the import is re-exported, an entry stub can
498 : // be generated and called without any special cases
499 : FuncOffsets
500 0 : wasm::GenerateImportFunction(jit::MacroAssembler& masm, const FuncImport& fi, SigIdDesc sigId)
501 : {
502 0 : masm.setFramePushed(0);
503 :
504 0 : unsigned framePushed = StackDecrementForCall(masm, WasmStackAlignment, fi.sig().args());
505 :
506 0 : FuncOffsets offsets;
507 0 : GenerateFunctionPrologue(masm, framePushed, sigId, &offsets);
508 :
509 : // The argument register state is already setup by our caller. We just need
510 : // to be sure not to clobber it before the call.
511 0 : Register scratch = ABINonArgReg0;
512 :
513 : // Copy our frame's stack arguments to the callee frame's stack arguments.
514 0 : unsigned offsetToCallerStackArgs = sizeof(Frame) + masm.framePushed();
515 0 : ABIArgValTypeIter i(fi.sig().args());
516 0 : for (; !i.done(); i++) {
517 0 : if (i->kind() != ABIArg::Stack)
518 0 : continue;
519 :
520 0 : Address src(masm.getStackPointer(), offsetToCallerStackArgs + i->offsetFromArgBase());
521 0 : Address dst(masm.getStackPointer(), i->offsetFromArgBase());
522 0 : StackCopy(masm, i.mirType(), scratch, src, dst);
523 : }
524 :
525 : // Call the import exit stub.
526 0 : CallSiteDesc desc(CallSiteDesc::Dynamic);
527 0 : masm.wasmCallImport(desc, CalleeDesc::import(fi.tlsDataOffset()));
528 :
529 : // Restore the TLS register and pinned regs, per wasm function ABI.
530 0 : masm.loadWasmTlsRegFromFrame();
531 0 : masm.loadWasmPinnedRegsFromTls();
532 :
533 0 : GenerateFunctionEpilogue(masm, framePushed, &offsets);
534 :
535 0 : masm.wasmEmitTrapOutOfLineCode();
536 :
537 0 : FinishOffsets(masm, &offsets);
538 0 : return offsets;
539 : }
540 :
541 : // Generate a stub that is called via the internal ABI derived from the
542 : // signature of the import and calls into an appropriate callImport C++
543 : // function, having boxed all the ABI arguments into a homogeneous Value array.
544 : CallableOffsets
545 0 : wasm::GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi, uint32_t funcImportIndex,
546 : Label* throwLabel)
547 : {
548 0 : masm.setFramePushed(0);
549 :
550 : // Argument types for Instance::callImport_*:
551 : static const MIRType typeArray[] = { MIRType::Pointer, // Instance*
552 : MIRType::Pointer, // funcImportIndex
553 : MIRType::Int32, // argc
554 : MIRType::Pointer }; // argv
555 0 : MIRTypeVector invokeArgTypes;
556 0 : MOZ_ALWAYS_TRUE(invokeArgTypes.append(typeArray, ArrayLength(typeArray)));
557 :
558 : // At the point of the call, the stack layout shall be (sp grows to the left):
559 : // | stack args | padding | Value argv[] | padding | retaddr | caller stack args |
560 : // The padding between stack args and argv ensures that argv is aligned. The
561 : // padding between argv and retaddr ensures that sp is aligned.
562 0 : unsigned argOffset = AlignBytes(StackArgBytes(invokeArgTypes), sizeof(double));
563 0 : unsigned argBytes = Max<size_t>(1, fi.sig().args().length()) * sizeof(Value);
564 0 : unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, argOffset + argBytes);
565 :
566 0 : CallableOffsets offsets;
567 0 : GenerateExitPrologue(masm, framePushed, ExitReason::Fixed::ImportInterp, &offsets);
568 :
569 : // Fill the argument array.
570 0 : unsigned offsetToCallerStackArgs = sizeof(Frame) + masm.framePushed();
571 0 : Register scratch = ABINonArgReturnReg0;
572 0 : FillArgumentArray(masm, fi.sig().args(), argOffset, offsetToCallerStackArgs, scratch, ToValue(false));
573 :
574 : // Prepare the arguments for the call to Instance::callImport_*.
575 0 : ABIArgMIRTypeIter i(invokeArgTypes);
576 :
577 : // argument 0: Instance*
578 0 : Address instancePtr(WasmTlsReg, offsetof(TlsData, instance));
579 0 : if (i->kind() == ABIArg::GPR) {
580 0 : masm.loadPtr(instancePtr, i->gpr());
581 : } else {
582 0 : masm.loadPtr(instancePtr, scratch);
583 0 : masm.storePtr(scratch, Address(masm.getStackPointer(), i->offsetFromArgBase()));
584 : }
585 0 : i++;
586 :
587 : // argument 1: funcImportIndex
588 0 : if (i->kind() == ABIArg::GPR)
589 0 : masm.mov(ImmWord(funcImportIndex), i->gpr());
590 : else
591 0 : masm.store32(Imm32(funcImportIndex), Address(masm.getStackPointer(), i->offsetFromArgBase()));
592 0 : i++;
593 :
594 : // argument 2: argc
595 0 : unsigned argc = fi.sig().args().length();
596 0 : if (i->kind() == ABIArg::GPR)
597 0 : masm.mov(ImmWord(argc), i->gpr());
598 : else
599 0 : masm.store32(Imm32(argc), Address(masm.getStackPointer(), i->offsetFromArgBase()));
600 0 : i++;
601 :
602 : // argument 3: argv
603 0 : Address argv(masm.getStackPointer(), argOffset);
604 0 : if (i->kind() == ABIArg::GPR) {
605 0 : masm.computeEffectiveAddress(argv, i->gpr());
606 : } else {
607 0 : masm.computeEffectiveAddress(argv, scratch);
608 0 : masm.storePtr(scratch, Address(masm.getStackPointer(), i->offsetFromArgBase()));
609 : }
610 0 : i++;
611 0 : MOZ_ASSERT(i.done());
612 :
613 : // Make the call, test whether it succeeded, and extract the return value.
614 0 : AssertStackAlignment(masm, ABIStackAlignment);
615 0 : switch (fi.sig().ret()) {
616 : case ExprType::Void:
617 0 : masm.call(SymbolicAddress::CallImport_Void);
618 0 : masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
619 0 : break;
620 : case ExprType::I32:
621 0 : masm.call(SymbolicAddress::CallImport_I32);
622 0 : masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
623 0 : masm.load32(argv, ReturnReg);
624 0 : break;
625 : case ExprType::I64:
626 0 : masm.call(SymbolicAddress::CallImport_I64);
627 0 : masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
628 0 : masm.load64(argv, ReturnReg64);
629 0 : break;
630 : case ExprType::F32:
631 0 : masm.call(SymbolicAddress::CallImport_F64);
632 0 : masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
633 0 : masm.loadDouble(argv, ReturnDoubleReg);
634 0 : masm.convertDoubleToFloat32(ReturnDoubleReg, ReturnFloat32Reg);
635 0 : break;
636 : case ExprType::F64:
637 0 : masm.call(SymbolicAddress::CallImport_F64);
638 0 : masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
639 0 : masm.loadDouble(argv, ReturnDoubleReg);
640 0 : break;
641 : case ExprType::I8x16:
642 : case ExprType::I16x8:
643 : case ExprType::I32x4:
644 : case ExprType::F32x4:
645 : case ExprType::B8x16:
646 : case ExprType::B16x8:
647 : case ExprType::B32x4:
648 0 : MOZ_CRASH("SIMD types shouldn't be returned from an FFI");
649 : case ExprType::Limit:
650 0 : MOZ_CRASH("Limit");
651 : }
652 :
653 : // The native ABI preserves the TLS, heap and global registers since they
654 : // are non-volatile.
655 0 : MOZ_ASSERT(NonVolatileRegs.has(WasmTlsReg));
656 : #if defined(JS_CODEGEN_X64) || \
657 : defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
658 : defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
659 0 : MOZ_ASSERT(NonVolatileRegs.has(HeapReg));
660 : #endif
661 :
662 0 : GenerateExitEpilogue(masm, framePushed, ExitReason::Fixed::ImportInterp, &offsets);
663 :
664 0 : FinishOffsets(masm, &offsets);
665 0 : return offsets;
666 : }
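
// The four arguments marshalled above must line up with the C++ builtin being
// called. A sketch of the shape shared by the Instance::callImport_* entry
// points (the exact argv element type is an assumption): the return value is
// tested against zero by the stub, and for value-returning imports the result
// is written back into argv, which is why the stub reloads argv after the call.
typedef int32_t (*SketchCallImportFn)(Instance* instance, int32_t funcImportIndex,
                                      int32_t argc, uint64_t* argv);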
667 :
668 : // Generate a stub that is called via the internal ABI derived from the
669 : // signature of the import and calls into a compatible JIT function,
670 : // having boxed all the ABI arguments into the JIT stack frame layout.
671 : CallableOffsets
672 0 : wasm::GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* throwLabel)
673 : {
674 0 : masm.setFramePushed(0);
675 :
676 : // JIT calls use the following stack layout (sp grows to the left):
677 : // | retaddr | descriptor | callee | argc | this | arg1..N |
678 : // After the JIT frame, the global register (if present) is saved since the
679 : // JIT's ABI does not preserve non-volatile regs. Also, unlike most ABIs,
680 : // the JIT ABI requires that sp be JitStackAlignment-aligned *after* pushing
681 : // the return address.
682 : static_assert(WasmStackAlignment >= JitStackAlignment, "subsumes");
683 0 : unsigned sizeOfRetAddr = sizeof(void*);
684 0 : unsigned jitFrameBytes = 3 * sizeof(void*) + (1 + fi.sig().args().length()) * sizeof(Value);
685 0 : unsigned totalJitFrameBytes = sizeOfRetAddr + jitFrameBytes;
686 0 : unsigned jitFramePushed = StackDecrementForCall(masm, JitStackAlignment, totalJitFrameBytes) -
687 0 : sizeOfRetAddr;
688 :
689 0 : CallableOffsets offsets;
690 0 : GenerateExitPrologue(masm, jitFramePushed, ExitReason::Fixed::ImportJit, &offsets);
691 :
692 : // 1. Descriptor
693 0 : size_t argOffset = 0;
694 0 : uint32_t descriptor = MakeFrameDescriptor(jitFramePushed, JitFrame_Entry,
695 0 : JitFrameLayout::Size());
696 0 : masm.storePtr(ImmWord(uintptr_t(descriptor)), Address(masm.getStackPointer(), argOffset));
697 0 : argOffset += sizeof(size_t);
698 :
699 : // 2. Callee
700 0 : Register callee = ABINonArgReturnReg0; // live until call
701 0 : Register scratch = ABINonArgReturnReg1; // repeatedly clobbered
702 :
703 : // 2.1. Get callee
704 0 : masm.loadWasmGlobalPtr(fi.tlsDataOffset() + offsetof(FuncImportTls, obj), callee);
705 :
706 : // 2.2. Save callee
707 0 : masm.storePtr(callee, Address(masm.getStackPointer(), argOffset));
708 0 : argOffset += sizeof(size_t);
709 :
710 : // 2.3. Load callee executable entry point
711 0 : masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), callee);
712 0 : masm.loadBaselineOrIonNoArgCheck(callee, callee, nullptr);
713 :
714 : // 3. Argc
715 0 : unsigned argc = fi.sig().args().length();
716 0 : masm.storePtr(ImmWord(uintptr_t(argc)), Address(masm.getStackPointer(), argOffset));
717 0 : argOffset += sizeof(size_t);
718 :
719 : // 4. |this| value
720 0 : masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), argOffset));
721 0 : argOffset += sizeof(Value);
722 :
723 : // 5. Fill the arguments
724 0 : unsigned offsetToCallerStackArgs = jitFramePushed + sizeof(Frame);
725 0 : FillArgumentArray(masm, fi.sig().args(), argOffset, offsetToCallerStackArgs, scratch, ToValue(true));
726 0 : argOffset += fi.sig().args().length() * sizeof(Value);
727 0 : MOZ_ASSERT(argOffset == jitFrameBytes);
728 :
729 : {
730 : // Enable Activation.
731 : //
732 : // This sequence requires two registers, and needs to preserve the
733 : // 'callee' register, so there are three live registers.
734 0 : MOZ_ASSERT(callee == WasmIonExitRegCallee);
735 0 : Register cx = WasmIonExitRegE0;
736 0 : Register act = WasmIonExitRegE1;
737 :
738 : // JitActivation* act = cx->activation();
739 0 : masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, addressOfContext)), cx);
740 0 : masm.loadPtr(Address(cx, 0), cx);
741 0 : masm.loadPtr(Address(cx, JSContext::offsetOfActivation()), act);
742 :
743 : // act.active_ = true;
744 0 : masm.store8(Imm32(1), Address(act, JitActivation::offsetOfActiveUint8()));
745 :
746 : // cx->jitActivation = act;
747 0 : masm.storePtr(act, Address(cx, offsetof(JSContext, jitActivation)));
748 :
749 : // cx->profilingActivation_ = act;
750 0 : masm.storePtr(act, Address(cx, JSContext::offsetOfProfilingActivation()));
751 : }
752 :
753 0 : AssertStackAlignment(masm, JitStackAlignment, sizeOfRetAddr);
754 0 : masm.callJitNoProfiler(callee);
755 0 : AssertStackAlignment(masm, JitStackAlignment, sizeOfRetAddr);
756 :
757 : // The JIT callee clobbers all registers, including WasmTlsReg and
758 : // FramePointer, so restore those here.
759 0 : masm.loadWasmTlsRegFromFrame();
760 0 : masm.moveStackPtrTo(FramePointer);
761 0 : masm.addPtr(Imm32(masm.framePushed()), FramePointer);
762 :
763 : {
764 : // Disable Activation.
765 : //
766 : // This sequence needs three registers and must preserve WasmTlsReg,
767 : // JSReturnReg_Data and JSReturnReg_Type.
768 0 : MOZ_ASSERT(JSReturnReg_Data == WasmIonExitRegReturnData);
769 0 : MOZ_ASSERT(JSReturnReg_Type == WasmIonExitRegReturnType);
770 0 : MOZ_ASSERT(WasmTlsReg == WasmIonExitTlsReg);
771 0 : Register cx = WasmIonExitRegD0;
772 0 : Register act = WasmIonExitRegD1;
773 0 : Register tmp = WasmIonExitRegD2;
774 :
775 : // JitActivation* act = cx->activation();
776 0 : masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, addressOfContext)), cx);
777 0 : masm.loadPtr(Address(cx, 0), cx);
778 0 : masm.loadPtr(Address(cx, JSContext::offsetOfActivation()), act);
779 :
780 : // cx->jitActivation = act->prevJitActivation_;
781 0 : masm.loadPtr(Address(act, JitActivation::offsetOfPrevJitActivation()), tmp);
782 0 : masm.storePtr(tmp, Address(cx, offsetof(JSContext, jitActivation)));
783 :
784 : // cx->profilingActivation = act->prevProfilingActivation_;
785 0 : masm.loadPtr(Address(act, Activation::offsetOfPrevProfiling()), tmp);
786 0 : masm.storePtr(tmp, Address(cx, JSContext::offsetOfProfilingActivation()));
787 :
788 : // act->active_ = false;
789 0 : masm.store8(Imm32(0), Address(act, JitActivation::offsetOfActiveUint8()));
790 : }
791 :
792 : // As explained above, the frame was aligned for the JIT ABI such that
793 : // (sp + sizeof(void*)) % JitStackAlignment == 0
794 : // But now we possibly want to call one of several different C++ functions,
795 : // so subtract the sizeof(void*) so that sp is aligned for an ABI call.
796 : static_assert(ABIStackAlignment <= JitStackAlignment, "subsumes");
797 0 : masm.reserveStack(sizeOfRetAddr);
798 0 : unsigned nativeFramePushed = masm.framePushed();
799 0 : AssertStackAlignment(masm, ABIStackAlignment);
800 :
801 0 : masm.branchTestMagic(Assembler::Equal, JSReturnOperand, throwLabel);
802 :
803 0 : Label oolConvert;
804 0 : switch (fi.sig().ret()) {
805 : case ExprType::Void:
806 0 : break;
807 : case ExprType::I32:
808 : masm.convertValueToInt32(JSReturnOperand, ReturnDoubleReg, ReturnReg, &oolConvert,
809 0 : /* -0 check */ false);
810 0 : break;
811 : case ExprType::I64:
812 : // We don't expect int64 to be returned from Ion yet, because of a
813 : // guard in callImport.
814 0 : masm.breakpoint();
815 0 : break;
816 : case ExprType::F32:
817 0 : masm.convertValueToFloat(JSReturnOperand, ReturnFloat32Reg, &oolConvert);
818 0 : break;
819 : case ExprType::F64:
820 0 : masm.convertValueToDouble(JSReturnOperand, ReturnDoubleReg, &oolConvert);
821 0 : break;
822 : case ExprType::I8x16:
823 : case ExprType::I16x8:
824 : case ExprType::I32x4:
825 : case ExprType::F32x4:
826 : case ExprType::B8x16:
827 : case ExprType::B16x8:
828 : case ExprType::B32x4:
829 0 : MOZ_CRASH("SIMD types shouldn't be returned from an import");
830 : case ExprType::Limit:
831 0 : MOZ_CRASH("Limit");
832 : }
833 :
834 0 : Label done;
835 0 : masm.bind(&done);
836 :
837 0 : GenerateExitEpilogue(masm, masm.framePushed(), ExitReason::Fixed::ImportJit, &offsets);
838 :
839 0 : if (oolConvert.used()) {
840 0 : masm.bind(&oolConvert);
841 0 : masm.setFramePushed(nativeFramePushed);
842 :
843 : // Coercion calls use the following stack layout (sp grows to the left):
844 : // | args | padding | Value argv[1] | padding | exit Frame |
845 0 : MIRTypeVector coerceArgTypes;
846 0 : JS_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
847 0 : unsigned offsetToCoerceArgv = AlignBytes(StackArgBytes(coerceArgTypes), sizeof(Value));
848 0 : MOZ_ASSERT(nativeFramePushed >= offsetToCoerceArgv + sizeof(Value));
849 0 : AssertStackAlignment(masm, ABIStackAlignment);
850 :
851 : // Store return value into argv[0]
852 0 : masm.storeValue(JSReturnOperand, Address(masm.getStackPointer(), offsetToCoerceArgv));
853 :
854 : // argument 0: argv
855 0 : ABIArgMIRTypeIter i(coerceArgTypes);
856 0 : Address argv(masm.getStackPointer(), offsetToCoerceArgv);
857 0 : if (i->kind() == ABIArg::GPR) {
858 0 : masm.computeEffectiveAddress(argv, i->gpr());
859 : } else {
860 0 : masm.computeEffectiveAddress(argv, scratch);
861 0 : masm.storePtr(scratch, Address(masm.getStackPointer(), i->offsetFromArgBase()));
862 : }
863 0 : i++;
864 0 : MOZ_ASSERT(i.done());
865 :
866 : // Call coercion function
867 0 : AssertStackAlignment(masm, ABIStackAlignment);
868 0 : switch (fi.sig().ret()) {
869 : case ExprType::I32:
870 0 : masm.call(SymbolicAddress::CoerceInPlace_ToInt32);
871 0 : masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
872 0 : masm.unboxInt32(Address(masm.getStackPointer(), offsetToCoerceArgv), ReturnReg);
873 0 : break;
874 : case ExprType::F64:
875 0 : masm.call(SymbolicAddress::CoerceInPlace_ToNumber);
876 0 : masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
877 0 : masm.loadDouble(Address(masm.getStackPointer(), offsetToCoerceArgv), ReturnDoubleReg);
878 0 : break;
879 : case ExprType::F32:
880 0 : masm.call(SymbolicAddress::CoerceInPlace_ToNumber);
881 0 : masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
882 0 : masm.loadDouble(Address(masm.getStackPointer(), offsetToCoerceArgv), ReturnDoubleReg);
883 0 : masm.convertDoubleToFloat32(ReturnDoubleReg, ReturnFloat32Reg);
884 0 : break;
885 : default:
886 0 : MOZ_CRASH("Unsupported convert type");
887 : }
888 :
889 0 : masm.jump(&done);
890 0 : masm.setFramePushed(0);
891 : }
892 :
893 0 : MOZ_ASSERT(masm.framePushed() == 0);
894 :
895 0 : FinishOffsets(masm, &offsets);
896 0 : return offsets;
897 : }
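
// The words stored above hand-build the frame the JIT expects on entry. As a
// sketch, in store order from the stack pointer upward (the authoritative
// layout is jit::JitFrameLayout; this struct is illustrative):
struct SketchJitEntryFrame
{
    uintptr_t descriptor;  // MakeFrameDescriptor(jitFramePushed, JitFrame_Entry, ...)
    uintptr_t callee;      // the import's JSFunction*
    uintptr_t argc;        // number of signature arguments
    Value thisv;           // always UndefinedValue() for an import call
    // Value args[argc];   // boxed by FillArgumentArray(..., ToValue(true))
};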
898 :
899 : struct ABIFunctionArgs
900 : {
901 : ABIFunctionType abiType;
902 : size_t len;
903 :
904 0 : explicit ABIFunctionArgs(ABIFunctionType sig)
905 0 : : abiType(ABIFunctionType(sig >> ArgType_Shift))
906 : {
907 0 : len = 0;
908 0 : uint32_t i = uint32_t(abiType);
909 0 : while (i) {
910 0 : i = i >> ArgType_Shift;
911 0 : len++;
912 : }
913 0 : }
914 :
915 0 : size_t length() const { return len; }
916 :
917 0 : MIRType operator[](size_t i) const {
918 0 : MOZ_ASSERT(i < len);
919 0 : uint32_t abi = uint32_t(abiType);
920 0 : while (i--)
921 0 : abi = abi >> ArgType_Shift;
922 0 : return ToMIRType(ABIArgType(abi));
923 : }
924 : };
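
// An ABIFunctionType packs one type code per ArgType_Shift-wide bit field,
// with the return type in the lowest field and successive arguments above it,
// which is why the constructor's single shift discards the return type and
// operator[] shifts i more fields to reach argument i. A sketch restating the
// length computation (illustrative; it mirrors the constructor's loop):
static size_t
SketchAbiArgCount(uint32_t abiTypeWithoutRet)
{
    // Count the non-empty fields remaining once the return type is gone;
    // e.g. a double(double, double) signature leaves two fields, so len == 2.
    size_t len = 0;
    for (uint32_t i = abiTypeWithoutRet; i; i >>= ArgType_Shift)
        len++;
    return len;
}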
925 :
926 : CallableOffsets
927 0 : wasm::GenerateBuiltinThunk(MacroAssembler& masm, ABIFunctionType abiType, ExitReason exitReason,
928 : void* funcPtr)
929 : {
930 0 : masm.setFramePushed(0);
931 :
932 0 : ABIFunctionArgs args(abiType);
933 0 : uint32_t framePushed = StackDecrementForCall(masm, ABIStackAlignment, args);
934 :
935 0 : CallableOffsets offsets;
936 0 : GenerateExitPrologue(masm, framePushed, exitReason, &offsets);
937 :
938 : // Copy out and convert caller arguments, if needed.
939 0 : unsigned offsetToCallerStackArgs = sizeof(Frame) + masm.framePushed();
940 0 : Register scratch = ABINonArgReturnReg0;
941 0 : for (ABIArgIter<ABIFunctionArgs> i(args); !i.done(); i++) {
942 0 : if (i->argInRegister()) {
943 : #ifdef JS_CODEGEN_ARM
944 : // Non hard-fp passes the args values in GPRs.
945 : if (!UseHardFpABI() && IsFloatingPointType(i.mirType())) {
946 : FloatRegister input = i->fpu();
947 : if (i.mirType() == MIRType::Float32) {
948 : masm.ma_vxfer(input, Register::FromCode(input.id()));
949 : } else if (i.mirType() == MIRType::Double) {
950 : uint32_t regId = input.singleOverlay().id();
951 : masm.ma_vxfer(input, Register::FromCode(regId), Register::FromCode(regId + 1));
952 : }
953 : }
954 : #endif
955 0 : continue;
956 : }
957 :
958 0 : Address src(masm.getStackPointer(), offsetToCallerStackArgs + i->offsetFromArgBase());
959 0 : Address dst(masm.getStackPointer(), i->offsetFromArgBase());
960 0 : StackCopy(masm, i.mirType(), scratch, src, dst);
961 : }
962 :
963 0 : AssertStackAlignment(masm, ABIStackAlignment);
964 0 : masm.call(ImmPtr(funcPtr, ImmPtr::NoCheckToken()));
965 :
966 : #if defined(JS_CODEGEN_X86)
967 : // x86 passes the return value on the x87 FP stack.
968 : Operand op(esp, 0);
969 : MIRType retType = ToMIRType(ABIArgType(abiType & ArgType_Mask));
970 : if (retType == MIRType::Float32) {
971 : masm.fstp32(op);
972 : masm.loadFloat32(op, ReturnFloat32Reg);
973 : } else if (retType == MIRType::Double) {
974 : masm.fstp(op);
975 : masm.loadDouble(op, ReturnDoubleReg);
976 : }
977 : #elif defined(JS_CODEGEN_ARM)
978 : // Non hard-fp passes the return values in GPRs.
979 : MIRType retType = ToMIRType(ABIArgType(abiType & ArgType_Mask));
980 : if (!UseHardFpABI() && IsFloatingPointType(retType))
981 : masm.ma_vxfer(r0, r1, d0);
982 : #endif
983 :
984 0 : GenerateExitEpilogue(masm, framePushed, exitReason, &offsets);
985 0 : offsets.end = masm.currentOffset();
986 0 : return offsets;
987 : }
988 :
989 : // Generate a stub that calls into ReportTrap with the right trap reason.
990 : // This stub is called with ABIStackAlignment by a trap out-of-line path. An
991 : // exit prologue/epilogue is used so that stack unwinding picks up the
992 : // current WasmActivation. Unwinding will begin at the caller of this trap exit.
993 : CallableOffsets
994 0 : wasm::GenerateTrapExit(MacroAssembler& masm, Trap trap, Label* throwLabel)
995 : {
996 0 : masm.haltingAlign(CodeAlignment);
997 :
998 0 : masm.setFramePushed(0);
999 :
1000 0 : MIRTypeVector args;
1001 0 : MOZ_ALWAYS_TRUE(args.append(MIRType::Int32));
1002 :
1003 0 : uint32_t framePushed = StackDecrementForCall(masm, ABIStackAlignment, args);
1004 :
1005 0 : CallableOffsets offsets;
1006 0 : GenerateExitPrologue(masm, framePushed, ExitReason::Fixed::Trap, &offsets);
1007 :
1008 0 : ABIArgMIRTypeIter i(args);
1009 0 : if (i->kind() == ABIArg::GPR)
1010 0 : masm.move32(Imm32(int32_t(trap)), i->gpr());
1011 : else
1012 0 : masm.store32(Imm32(int32_t(trap)), Address(masm.getStackPointer(), i->offsetFromArgBase()));
1013 0 : i++;
1014 0 : MOZ_ASSERT(i.done());
1015 :
1016 0 : masm.assertStackAlignment(ABIStackAlignment);
1017 0 : masm.call(SymbolicAddress::ReportTrap);
1018 :
1019 0 : masm.jump(throwLabel);
1020 :
1021 0 : GenerateExitEpilogue(masm, framePushed, ExitReason::Fixed::Trap, &offsets);
1022 :
1023 0 : FinishOffsets(masm, &offsets);
1024 0 : return offsets;
1025 : }
1026 :
1027 : // Generate a stub which is only used by the signal handlers to handle
1028 : // out-of-bounds accesses by experimental SIMD.js and Atomics, and unaligned accesses on
1029 : // ARM. This stub is executed by direct PC transfer from the faulting memory
1030 : // access and thus the stack depth is unknown. Since WasmActivation::exitFP is
1031 : // not set before calling the error reporter, the current wasm activation will
1032 : // be lost. This stub should be removed when SIMD.js and Atomics are moved to
1033 : // wasm and given proper traps and when we use a non-faulting strategy for
1034 : // unaligned ARM access.
1035 : static Offsets
1036 0 : GenerateGenericMemoryAccessTrap(MacroAssembler& masm, SymbolicAddress reporter, Label* throwLabel)
1037 : {
1038 0 : masm.haltingAlign(CodeAlignment);
1039 :
1040 0 : Offsets offsets;
1041 0 : offsets.begin = masm.currentOffset();
1042 :
1043 : // sp can be anything at this point, so ensure it is aligned when calling
1044 : // into C++. We unconditionally jump to throw so don't worry about
1045 : // restoring sp.
1046 0 : masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
1047 : if (ShadowStackSpace)
1048 : masm.subFromStackPtr(Imm32(ShadowStackSpace));
1049 :
1050 0 : masm.call(reporter);
1051 0 : masm.jump(throwLabel);
1052 :
1053 0 : FinishOffsets(masm, &offsets);
1054 0 : return offsets;
1055 : }
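
// Several stubs align an arbitrary sp the same way: masking off the low bits
// rounds the pointer down, which is safe because the stack grows downward. A
// worked sketch of the arithmetic behind andToStackPtr(Imm32(~(align - 1))):
static uintptr_t
SketchAlignStackDown(uintptr_t sp, uintptr_t alignment)
{
    // E.g. sp = 0x7fffffffe23a with alignment 16 yields 0x7fffffffe230.
    return sp & ~(alignment - 1);
}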
1056 :
1057 : Offsets
1058 0 : wasm::GenerateOutOfBoundsExit(MacroAssembler& masm, Label* throwLabel)
1059 : {
1060 0 : return GenerateGenericMemoryAccessTrap(masm, SymbolicAddress::ReportOutOfBounds, throwLabel);
1061 : }
1062 :
1063 : Offsets
1064 0 : wasm::GenerateUnalignedExit(MacroAssembler& masm, Label* throwLabel)
1065 : {
1066 0 : return GenerateGenericMemoryAccessTrap(masm, SymbolicAddress::ReportUnalignedAccess, throwLabel);
1067 : }
1068 :
1069 : #if defined(JS_CODEGEN_ARM)
1070 : static const LiveRegisterSet AllRegsExceptPCSP(
1071 : GeneralRegisterSet(Registers::AllMask & ~((uint32_t(1) << Registers::sp) |
1072 : (uint32_t(1) << Registers::pc))),
1073 : FloatRegisterSet(FloatRegisters::AllDoubleMask));
1074 : static_assert(!SupportsSimd, "high lanes of SIMD registers need to be saved too.");
1075 : #else
1076 : static const LiveRegisterSet AllRegsExceptSP(
1077 : GeneralRegisterSet(Registers::AllMask & ~(uint32_t(1) << Registers::StackPointer)),
1078 : FloatRegisterSet(FloatRegisters::AllMask));
1079 : #endif
1080 :
1081 : // The async interrupt-callback exit is called from arbitrarily-interrupted wasm
1082 : // code. It calls into WasmHandleExecutionInterrupt to determine whether we must
1083 : // really halt execution, which can reenter the VM (e.g., to display the slow
1084 : // script dialog). If execution is not interrupted, this stub must carefully
1085 : // preserve *all* register state. If execution is interrupted, the entire
1086 : // activation will be popped by the throw stub, so register state does not need
1087 : // to be restored.
1088 : Offsets
1089 0 : wasm::GenerateInterruptExit(MacroAssembler& masm, Label* throwLabel)
1090 : {
1091 0 : masm.haltingAlign(CodeAlignment);
1092 :
1093 0 : Offsets offsets;
1094 0 : offsets.begin = masm.currentOffset();
1095 :
1096 : #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
1097 : // Be very careful here not to perturb the machine state before saving it
1098 : // to the stack. In particular, add/sub instructions may set conditions in
1099 : // the flags register.
1100 0 : masm.push(Imm32(0)); // space used as return address, updated below
1101 0 : masm.setFramePushed(0); // set to 0 now so that framePushed is offset of return address
1102 0 : masm.PushFlags(); // after this we are safe to use sub
1103 0 : masm.PushRegsInMask(AllRegsExceptSP); // save all GP/FP registers (except SP)
1104 :
1105 : // We know that StackPointer is word-aligned, but not necessarily
1106 : // stack-aligned, so we need to align it dynamically.
1107 0 : masm.moveStackPtrTo(ABINonVolatileReg);
1108 0 : masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
1109 : if (ShadowStackSpace)
1110 : masm.subFromStackPtr(Imm32(ShadowStackSpace));
1111 :
1112 : // Make the call to C++, which preserves ABINonVolatileReg.
1113 0 : masm.assertStackAlignment(ABIStackAlignment);
1114 0 : masm.call(SymbolicAddress::HandleExecutionInterrupt);
1115 :
1116 : // HandleExecutionInterrupt returns null if execution is interrupted and
1117 : // the resumption pc otherwise.
1118 0 : masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
1119 :
1120 : // Restore the stack pointer then store resumePC into the stack slot that
1121 : // will be popped by the 'ret' below.
1122 0 : masm.moveToStackPtr(ABINonVolatileReg);
1123 0 : masm.storePtr(ReturnReg, Address(StackPointer, masm.framePushed()));
1124 :
1125 : // Restore the machine state to before the interrupt. After popping flags,
1126 : // no instructions can be executed which set flags.
1127 0 : masm.PopRegsInMask(AllRegsExceptSP);
1128 0 : masm.PopFlags();
1129 :
1130 : // Return to the resumePC stored into this stack slot above.
1131 0 : MOZ_ASSERT(masm.framePushed() == 0);
1132 0 : masm.ret();
1133 : #elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
1134 : // Reserve space to store resumePC and HeapReg.
1135 : masm.subFromStackPtr(Imm32(2 * sizeof(intptr_t)));
1136 : // set to zero so we can use masm.framePushed() below.
1137 : masm.setFramePushed(0);
1138 : static_assert(!SupportsSimd, "high lanes of SIMD registers need to be saved too.");
1139 : // Save all registers except sp. After this the stack is aligned.
1140 : masm.PushRegsInMask(AllRegsExceptSP);
1141 :
1142 : // Save the stack pointer in a non-volatile register.
1143 : masm.moveStackPtrTo(s0);
1144 : // Align the stack.
1145 : masm.ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
1146 :
1147 : // Store resumePC into the reserved space.
1148 : masm.loadWasmActivationFromSymbolicAddress(IntArgReg0);
1149 : masm.loadPtr(Address(IntArgReg0, WasmActivation::offsetOfResumePC()), IntArgReg1);
1150 : masm.storePtr(IntArgReg1, Address(s0, masm.framePushed()));
1151 : // Store HeapReg into the reserved space.
1152 : masm.storePtr(HeapReg, Address(s0, masm.framePushed() + sizeof(intptr_t)));
1153 :
1154 : # ifdef USES_O32_ABI
1155 : // The MIPS ABI requires reserving stack for registers $a0 to $a3.
1156 : masm.subFromStackPtr(Imm32(4 * sizeof(intptr_t)));
1157 : # endif
1158 :
1159 : masm.assertStackAlignment(ABIStackAlignment);
1160 : masm.call(SymbolicAddress::HandleExecutionInterrupt);
1161 :
1162 : # ifdef USES_O32_ABI
1163 : masm.addToStackPtr(Imm32(4 * sizeof(intptr_t)));
1164 : # endif
1165 :
1166 : masm.branchIfFalseBool(ReturnReg, throwLabel);
1167 :
1168 : // This will restore stack to the address before the call.
1169 : masm.moveToStackPtr(s0);
1170 : masm.PopRegsInMask(AllRegsExceptSP);
1171 :
1172 : // Pop resumePC into PC. Clobber HeapReg to make the jump and restore it
1173 : // in the jump delay slot.
1174 : masm.loadPtr(Address(StackPointer, 0), HeapReg);
1175 : // Reclaim the reserve space.
1176 : masm.addToStackPtr(Imm32(2 * sizeof(intptr_t)));
1177 : masm.as_jr(HeapReg);
1178 : masm.loadPtr(Address(StackPointer, -sizeof(intptr_t)), HeapReg);
1179 : #elif defined(JS_CODEGEN_ARM)
1180 : {
1181 : // Be careful not to clobber scratch registers before they are saved.
1182 : ScratchRegisterScope scratch(masm);
1183 : SecondScratchRegisterScope secondScratch(masm);
1184 :
1185 : // Reserve a word to receive the return address.
1186 : masm.as_alu(StackPointer, StackPointer, Imm8(4), OpSub);
1187 :
1188 : // Set framePushed to 0 now so that framePushed can be used later as the
1189 : // stack offset to the return-address space reserved above.
1190 : masm.setFramePushed(0);
1191 :
1192 : // Save all GP/FP registers (except PC and SP).
1193 : masm.PushRegsInMask(AllRegsExceptPCSP);
1194 : }
1195 :
1196 : // Save SP, APSR and FPSCR in non-volatile registers.
1197 : masm.as_mrs(r4);
1198 : masm.as_vmrs(r5);
1199 : masm.mov(sp, r6);
1200 :
1201 : // We know that StackPointer is word-aligned, but not necessarily
1202 : // stack-aligned, so we need to align it dynamically.
1203 : masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
1204 :
1205 : // Make the call to C++, which preserves the non-volatile registers.
1206 : masm.assertStackAlignment(ABIStackAlignment);
1207 : masm.call(SymbolicAddress::HandleExecutionInterrupt);
1208 :
1209 : // HandleExecutionInterrupt returns null if execution is interrupted and
1210 : // the resumption pc otherwise.
1211 : masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
1212 :
1213 : // Restore the stack pointer then store resumePC into the stack slot that
1214 : // will be popped by the 'ret' below.
1215 : masm.mov(r6, sp);
1216 : masm.storePtr(ReturnReg, Address(sp, masm.framePushed()));
1217 :
1218 : // Restore the machine state to before the interrupt. After popping flags,
1219 : // no instructions can be executed which set flags.
1220 : masm.as_vmsr(r5);
1221 : masm.as_msr(r4);
1222 : masm.PopRegsInMask(AllRegsExceptPCSP);
1223 :
1224 : // Return to the resumePC stored into this stack slot above.
1225 : MOZ_ASSERT(masm.framePushed() == 0);
1226 : masm.ret();
1227 : #elif defined(JS_CODEGEN_ARM64)
1228 : MOZ_CRASH();
1229 : #elif defined (JS_CODEGEN_NONE)
1230 : MOZ_CRASH();
1231 : #else
1232 : # error "Unknown architecture!"
1233 : #endif
1234 :
1235 0 : FinishOffsets(masm, &offsets);
1236 0 : return offsets;
1237 : }
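
// The contract with the C++ interrupt handler called above, as a sketch (the
// real entry point is SymbolicAddress::HandleExecutionInterrupt; the exact
// signature is an assumption based on how the stub consumes ReturnReg):
//
//   null return => execution was interrupted; fall through to the throw stub
//   non-null    => the pc at which to resume the interrupted wasm code
typedef void* (*SketchHandleExecutionInterrupt)();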
1238 :
1239 : // Generate a stub that restores the stack pointer to what it was on entry to
1240 : // the wasm activation, sets the return register to 'false' and then executes a
1241 : // return which will return from this wasm activation to the caller. This stub
1242 : // should only be called after the caller has reported an error (or, in the case
1243 : // of the interrupt stub, intends to interrupt execution).
1244 : Offsets
1245 0 : wasm::GenerateThrowStub(MacroAssembler& masm, Label* throwLabel)
1246 : {
1247 0 : masm.haltingAlign(CodeAlignment);
1248 :
1249 0 : masm.bind(throwLabel);
1250 :
1251 0 : Offsets offsets;
1252 0 : offsets.begin = masm.currentOffset();
1253 :
1254 : // The throw stub can be jumped to from an async interrupt that is halting
1255 : // execution. Thus the stack pointer can be unaligned and we must align it
1256 : // dynamically.
1257 0 : masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
1258 : if (ShadowStackSpace)
1259 : masm.subFromStackPtr(Imm32(ShadowStackSpace));
1260 :
1261 : // WasmHandleThrow unwinds WasmActivation::exitFP and returns the stack address
1262 : // of the return address this stub should return to. Set the
1263 : // FramePointer to a magic value to indicate a return by throw.
1264 0 : masm.call(SymbolicAddress::HandleThrow);
1265 0 : masm.moveToStackPtr(ReturnReg);
1266 0 : masm.move32(Imm32(FailFP), FramePointer);
1267 0 : masm.ret();
1268 :
1269 0 : FinishOffsets(masm, &offsets);
1270 0 : return offsets;
1271 : }
1272 :
1273 : static const LiveRegisterSet AllAllocatableRegs = LiveRegisterSet(
1274 : GeneralRegisterSet(Registers::AllocatableMask),
1275 : FloatRegisterSet(FloatRegisters::AllMask));
1276 :
1277 : // Generate a stub that handles togglable enter/leave frame traps or breakpoints.
1278 : // The stub records the frame pointer (via GenerateExitPrologue) and saves most
1279 : // registers so as not to affect the code generated by WasmBaselineCompile.
1280 : Offsets
1281 0 : wasm::GenerateDebugTrapStub(MacroAssembler& masm, Label* throwLabel)
1282 : {
1283 0 : masm.haltingAlign(CodeAlignment);
1284 :
1285 0 : masm.setFramePushed(0);
1286 :
1287 0 : CallableOffsets offsets;
1288 0 : GenerateExitPrologue(masm, 0, ExitReason::Fixed::DebugTrap, &offsets);
1289 :
1290 : // Save all registers used between baseline compiler operations.
1291 0 : masm.PushRegsInMask(AllAllocatableRegs);
1292 :
1293 0 : uint32_t framePushed = masm.framePushed();
1294 :
1295 : // This stub might be called with an unaligned stack; align it and save
1296 : // the old stack pointer at the top.
1297 0 : Register scratch = ABINonArgReturnReg0;
1298 0 : masm.moveStackPtrTo(scratch);
1299 0 : masm.subFromStackPtr(Imm32(sizeof(intptr_t)));
1300 0 : masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
1301 0 : masm.storePtr(scratch, Address(masm.getStackPointer(), 0));
1302 :
1303 : if (ShadowStackSpace)
1304 : masm.subFromStackPtr(Imm32(ShadowStackSpace));
1305 0 : masm.assertStackAlignment(ABIStackAlignment);
1306 0 : masm.call(SymbolicAddress::HandleDebugTrap);
1307 :
1308 0 : masm.branchIfFalseBool(ReturnReg, throwLabel);
1309 :
1310 : if (ShadowStackSpace)
1311 : masm.addToStackPtr(Imm32(ShadowStackSpace));
1312 0 : masm.Pop(scratch);
1313 0 : masm.moveToStackPtr(scratch);
1314 :
1315 0 : masm.setFramePushed(framePushed);
1316 0 : masm.PopRegsInMask(AllAllocatableRegs);
1317 :
1318 0 : GenerateExitEpilogue(masm, 0, ExitReason::Fixed::DebugTrap, &offsets);
1319 :
1320 0 : FinishOffsets(masm, &offsets);
1321 0 : return offsets;
1322 : }
|