Line data Source code
1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : * vim: set ts=8 sts=4 et sw=4 tw=99:
3 : * This Source Code Form is subject to the terms of the Mozilla Public
4 : * License, v. 2.0. If a copy of the MPL was not distributed with this
5 : * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 :
7 : #include "jit/MacroAssembler-inl.h"
8 :
9 : #include "mozilla/CheckedInt.h"
10 :
11 : #include "jsfriendapi.h"
12 : #include "jsprf.h"
13 :
14 : #include "builtin/TypedObject.h"
15 : #include "gc/GCTrace.h"
16 : #include "jit/AtomicOp.h"
17 : #include "jit/Bailouts.h"
18 : #include "jit/BaselineFrame.h"
19 : #include "jit/BaselineIC.h"
20 : #include "jit/BaselineJIT.h"
21 : #include "jit/Lowering.h"
22 : #include "jit/MIR.h"
23 : #include "js/Conversions.h"
24 : #include "js/GCAPI.h"
25 : #include "vm/TraceLogging.h"
26 :
27 : #include "jsobjinlines.h"
28 :
29 : #include "gc/Nursery-inl.h"
30 : #include "jit/shared/Lowering-shared-inl.h"
31 : #include "vm/Interpreter-inl.h"
32 :
33 : using namespace js;
34 : using namespace js::jit;
35 :
36 : using JS::GenericNaN;
37 : using JS::ToInt32;
38 :
39 : using mozilla::CheckedUint32;
40 :
41 : template <typename Source> void
42 178 : MacroAssembler::guardTypeSet(const Source& address, const TypeSet* types, BarrierKind kind,
43 : Register scratch, Label* miss)
44 : {
45 178 : MOZ_ASSERT(kind == BarrierKind::TypeTagOnly || kind == BarrierKind::TypeSet);
46 178 : MOZ_ASSERT(!types->unknown());
47 :
48 205 : Label matched;
49 : TypeSet::Type tests[8] = {
50 : TypeSet::Int32Type(),
51 : TypeSet::UndefinedType(),
52 : TypeSet::BooleanType(),
53 : TypeSet::StringType(),
54 : TypeSet::SymbolType(),
55 : TypeSet::NullType(),
56 : TypeSet::MagicArgType(),
57 : TypeSet::AnyObjectType()
58 178 : };
59 :
60 : // The double type also implies Int32, so replace the int32 test
61 : // with the double one.
62 178 : if (types->hasType(TypeSet::DoubleType())) {
63 1 : MOZ_ASSERT(types->hasType(TypeSet::Int32Type()));
64 1 : tests[0] = TypeSet::DoubleType();
65 : }
66 :
67 178 : Register tag = extractTag(address, scratch);
68 :
69 : // Emit all typed tests.
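: // Branches are emitted one iteration late: lastBranch holds the most
: // recent test so that, after the loop, the final branch can either be
: // emitted as-is or inverted and retargeted at |miss| when no object
: // checks follow.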
70 178 : BranchType lastBranch;
71 1602 : for (size_t i = 0; i < mozilla::ArrayLength(tests); i++) {
72 1424 : if (!types->hasType(tests[i]))
73 1261 : continue;
74 :
75 163 : if (lastBranch.isInitialized())
76 25 : lastBranch.emit(*this);
77 163 : lastBranch = BranchType(Equal, tag, tests[i], &matched);
78 : }
79 :
80 : // If this is the last check, invert the last branch.
81 178 : if (types->hasType(TypeSet::AnyObjectType()) || !types->getObjectCount()) {
82 151 : if (!lastBranch.isInitialized()) {
83 13 : jump(miss);
84 13 : return;
85 : }
86 :
87 138 : lastBranch.invertCondition();
88 138 : lastBranch.relink(miss);
89 138 : lastBranch.emit(*this);
90 :
91 138 : bind(&matched);
92 138 : return;
93 : }
94 :
95 27 : if (lastBranch.isInitialized())
96 0 : lastBranch.emit(*this);
97 :
98 : // Test specific objects.
99 27 : MOZ_ASSERT(scratch != InvalidReg);
100 27 : branchTestObject(NotEqual, tag, miss);
101 27 : if (kind != BarrierKind::TypeTagOnly) {
102 27 : Register obj = extractObject(address, scratch);
103 27 : guardObjectType(obj, types, scratch, miss);
104 : } else {
105 : #ifdef DEBUG
106 0 : Label fail;
107 0 : Register obj = extractObject(address, scratch);
108 0 : guardObjectType(obj, types, scratch, &fail);
109 0 : jump(&matched);
110 0 : bind(&fail);
111 :
112 0 : if (obj == scratch)
113 0 : extractObject(address, scratch);
114 0 : guardTypeSetMightBeIncomplete(types, obj, scratch, &matched);
115 :
116 0 : assumeUnreachable("Unexpected object type");
117 : #endif
118 : }
119 :
120 27 : bind(&matched);
121 : }
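: // A typical call site looks roughly like this (a sketch; the register
: // and label names are illustrative, not from any particular caller):
: //
: //   Label miss;
: //   masm.guardTypeSet(address, types, BarrierKind::TypeSet, scratch, &miss);
: //   ... guarded fast path ...
: //   masm.bind(&miss);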
122 :
123 : template <typename TypeSet>
124 : void
125 155 : MacroAssembler::guardTypeSetMightBeIncomplete(TypeSet* types, Register obj, Register scratch, Label* label)
126 : {
127 : // Type set guards might miss when an object's group changes. In this case
128 : // either its old group's properties will become unknown, or it will change
129 : // to a native object with an original unboxed group. Jump to label if this
130 : // might have happened for the input object.
131 :
132 155 : if (types->unknownObject()) {
133 4 : jump(label);
134 4 : return;
135 : }
136 :
137 151 : loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
138 151 : load32(Address(scratch, ObjectGroup::offsetOfFlags()), scratch);
139 151 : and32(Imm32(OBJECT_FLAG_ADDENDUM_MASK), scratch);
140 151 : branch32(Assembler::Equal,
141 : scratch, Imm32(ObjectGroup::addendumOriginalUnboxedGroupValue()), label);
142 :
143 211 : for (size_t i = 0; i < types->getObjectCount(); i++) {
144 60 : if (JSObject* singleton = types->getSingletonNoBarrier(i)) {
145 5 : movePtr(ImmGCPtr(singleton), scratch);
146 5 : loadPtr(Address(scratch, JSObject::offsetOfGroup()), scratch);
147 55 : } else if (ObjectGroup* group = types->getGroupNoBarrier(i)) {
148 55 : movePtr(ImmGCPtr(group), scratch);
149 : } else {
150 0 : continue;
151 : }
152 60 : branchTest32(Assembler::NonZero, Address(scratch, ObjectGroup::offsetOfFlags()),
153 : Imm32(OBJECT_FLAG_UNKNOWN_PROPERTIES), label);
154 : }
155 : }
156 :
157 : void
158 91 : MacroAssembler::guardObjectType(Register obj, const TypeSet* types,
159 : Register scratch, Label* miss)
160 : {
161 91 : MOZ_ASSERT(!types->unknown());
162 91 : MOZ_ASSERT(!types->hasType(TypeSet::AnyObjectType()));
163 91 : MOZ_ASSERT_IF(types->getObjectCount() > 0, scratch != InvalidReg);
164 :
165 : // Note: this method elides read barriers on values read from type sets, as
166 : // this may be called off thread during Ion compilation. This is
167 : // safe to do as the final JitCode object will be allocated during the
168 : // incremental GC (or the compilation canceled before we start sweeping),
169 : // see CodeGenerator::link. Other callers should use TypeSet::readBarrier
170 : // to trigger the barrier on the contents of type sets passed in here.
171 182 : Label matched;
172 :
173 91 : BranchGCPtr lastBranch;
174 91 : MOZ_ASSERT(!lastBranch.isInitialized());
175 91 : bool hasObjectGroups = false;
176 91 : unsigned count = types->getObjectCount();
177 186 : for (unsigned i = 0; i < count; i++) {
178 95 : if (!types->getSingletonNoBarrier(i)) {
179 90 : hasObjectGroups = hasObjectGroups || types->getGroupNoBarrier(i);
180 90 : continue;
181 : }
182 :
183 5 : if (lastBranch.isInitialized()) {
184 0 : comment("emit GC pointer checks");
185 0 : lastBranch.emit(*this);
186 : }
187 :
188 5 : JSObject* object = types->getSingletonNoBarrier(i);
189 5 : lastBranch = BranchGCPtr(Equal, obj, ImmGCPtr(object), &matched);
190 : }
191 :
192 91 : if (hasObjectGroups) {
193 86 : comment("has object groups");
194 : // We may overwrite the obj register below, so emit any pending
195 : // singleton branch now, while obj still holds its original value.
196 : // A group branch definitely follows, so there is no need to
197 : // invert this one.
198 86 : if (lastBranch.isInitialized())
199 0 : lastBranch.emit(*this);
200 86 : lastBranch = BranchGCPtr();
201 :
202 : // Note: Some platforms give the same register for obj and scratch,
203 : // so once scratch has been written, obj must not be used again!
204 86 : loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
205 :
206 176 : for (unsigned i = 0; i < count; i++) {
207 90 : if (!types->getGroupNoBarrier(i))
208 0 : continue;
209 :
210 90 : if (lastBranch.isInitialized())
211 4 : lastBranch.emit(*this);
212 :
213 90 : ObjectGroup* group = types->getGroupNoBarrier(i);
214 90 : lastBranch = BranchGCPtr(Equal, scratch, ImmGCPtr(group), &matched);
215 : }
216 : }
217 :
218 91 : if (!lastBranch.isInitialized()) {
219 0 : jump(miss);
220 0 : return;
221 : }
222 :
223 91 : lastBranch.invertCondition();
224 91 : lastBranch.relink(miss);
225 91 : lastBranch.emit(*this);
226 :
227 91 : bind(&matched);
228 : }
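: // For a type set {singleton S, group G1, group G2} the emitted chain
: // looks roughly like this sketch:
: //   cmp obj, S         ; equal -> matched
: //   load obj->group -> scratch
: //   cmp scratch, G1    ; equal -> matched
: //   cmp scratch, G2    ; not equal -> miss
: // matched: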
229 :
230 : template void MacroAssembler::guardTypeSet(const Address& address, const TypeSet* types,
231 : BarrierKind kind, Register scratch, Label* miss);
232 : template void MacroAssembler::guardTypeSet(const ValueOperand& value, const TypeSet* types,
233 : BarrierKind kind, Register scratch, Label* miss);
234 : template void MacroAssembler::guardTypeSet(const TypedOrValueRegister& value, const TypeSet* types,
235 : BarrierKind kind, Register scratch, Label* miss);
236 :
237 : template void MacroAssembler::guardTypeSetMightBeIncomplete(const TemporaryTypeSet* types,
238 : Register obj, Register scratch,
239 : Label* label);
240 :
241 : template<typename S, typename T>
242 : static void
243 0 : StoreToTypedFloatArray(MacroAssembler& masm, int arrayType, const S& value, const T& dest,
244 : unsigned numElems)
245 : {
246 0 : switch (arrayType) {
247 : case Scalar::Float32:
248 0 : masm.storeFloat32(value, dest);
249 0 : break;
250 : case Scalar::Float64:
251 0 : masm.storeDouble(value, dest);
252 0 : break;
253 : case Scalar::Float32x4:
254 0 : switch (numElems) {
255 : case 1:
256 0 : masm.storeFloat32(value, dest);
257 0 : break;
258 : case 2:
259 0 : masm.storeDouble(value, dest);
260 0 : break;
261 : case 3:
262 0 : masm.storeFloat32x3(value, dest);
263 0 : break;
264 : case 4:
265 0 : masm.storeUnalignedSimd128Float(value, dest);
266 0 : break;
267 0 : default: MOZ_CRASH("unexpected number of elements in simd write");
268 : }
269 0 : break;
270 : case Scalar::Int32x4:
271 0 : switch (numElems) {
272 : case 1:
273 0 : masm.storeInt32x1(value, dest);
274 0 : break;
275 : case 2:
276 0 : masm.storeInt32x2(value, dest);
277 0 : break;
278 : case 3:
279 0 : masm.storeInt32x3(value, dest);
280 0 : break;
281 : case 4:
282 0 : masm.storeUnalignedSimd128Int(value, dest);
283 0 : break;
284 0 : default: MOZ_CRASH("unexpected number of elements in simd write");
285 : }
286 0 : break;
287 : case Scalar::Int8x16:
288 0 : MOZ_ASSERT(numElems == 16, "unexpected partial store");
289 0 : masm.storeUnalignedSimd128Int(value, dest);
290 0 : break;
291 : case Scalar::Int16x8:
292 0 : MOZ_ASSERT(numElems == 8, "unexpected partial store");
293 0 : masm.storeUnalignedSimd128Int(value, dest);
294 0 : break;
295 : default:
296 0 : MOZ_CRASH("Invalid typed array type");
297 : }
298 0 : }
299 :
300 : void
301 0 : MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value,
302 : const BaseIndex& dest, unsigned numElems)
303 : {
304 0 : StoreToTypedFloatArray(*this, arrayType, value, dest, numElems);
305 0 : }
306 : void
307 0 : MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value,
308 : const Address& dest, unsigned numElems)
309 : {
310 0 : StoreToTypedFloatArray(*this, arrayType, value, dest, numElems);
311 0 : }
312 :
313 : template<typename T>
314 : void
315 0 : MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src, AnyRegister dest, Register temp,
316 : Label* fail, bool canonicalizeDoubles, unsigned numElems)
317 : {
318 0 : switch (arrayType) {
319 : case Scalar::Int8:
320 0 : load8SignExtend(src, dest.gpr());
321 0 : break;
322 : case Scalar::Uint8:
323 : case Scalar::Uint8Clamped:
324 0 : load8ZeroExtend(src, dest.gpr());
325 0 : break;
326 : case Scalar::Int16:
327 0 : load16SignExtend(src, dest.gpr());
328 0 : break;
329 : case Scalar::Uint16:
330 0 : load16ZeroExtend(src, dest.gpr());
331 0 : break;
332 : case Scalar::Int32:
333 0 : load32(src, dest.gpr());
334 0 : break;
335 : case Scalar::Uint32:
336 0 : if (dest.isFloat()) {
337 0 : load32(src, temp);
338 0 : convertUInt32ToDouble(temp, dest.fpu());
339 : } else {
340 0 : load32(src, dest.gpr());
341 :
342 : // Bail out if the value doesn't fit into a signed int32 value. This
343 : // is what allows MLoadUnboxedScalar to have a type() of
344 : // MIRType::Int32 for UInt32 array loads.
345 0 : branchTest32(Assembler::Signed, dest.gpr(), dest.gpr(), fail);
346 : }
347 0 : break;
348 : case Scalar::Float32:
349 0 : loadFloat32(src, dest.fpu());
350 0 : canonicalizeFloat(dest.fpu());
351 0 : break;
352 : case Scalar::Float64:
353 0 : loadDouble(src, dest.fpu());
354 0 : if (canonicalizeDoubles)
355 0 : canonicalizeDouble(dest.fpu());
356 0 : break;
357 : case Scalar::Int32x4:
358 0 : switch (numElems) {
359 : case 1:
360 0 : loadInt32x1(src, dest.fpu());
361 0 : break;
362 : case 2:
363 0 : loadInt32x2(src, dest.fpu());
364 0 : break;
365 : case 3:
366 0 : loadInt32x3(src, dest.fpu());
367 0 : break;
368 : case 4:
369 0 : loadUnalignedSimd128Int(src, dest.fpu());
370 0 : break;
371 0 : default: MOZ_CRASH("unexpected number of elements in SIMD load");
372 : }
373 0 : break;
374 : case Scalar::Float32x4:
375 0 : switch (numElems) {
376 : case 1:
377 0 : loadFloat32(src, dest.fpu());
378 0 : break;
379 : case 2:
380 0 : loadDouble(src, dest.fpu());
381 0 : break;
382 : case 3:
383 0 : loadFloat32x3(src, dest.fpu());
384 0 : break;
385 : case 4:
386 0 : loadUnalignedSimd128Float(src, dest.fpu());
387 0 : break;
388 0 : default: MOZ_CRASH("unexpected number of elements in SIMD load");
389 : }
390 0 : break;
391 : case Scalar::Int8x16:
392 0 : MOZ_ASSERT(numElems == 16, "unexpected partial load");
393 0 : loadUnalignedSimd128Int(src, dest.fpu());
394 0 : break;
395 : case Scalar::Int16x8:
396 0 : MOZ_ASSERT(numElems == 8, "unexpected partial load");
397 0 : loadUnalignedSimd128Int(src, dest.fpu());
398 0 : break;
399 : default:
400 0 : MOZ_CRASH("Invalid typed array type");
401 : }
402 0 : }
403 :
404 : template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const Address& src, AnyRegister dest,
405 : Register temp, Label* fail, bool canonicalizeDoubles,
406 : unsigned numElems);
407 : template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const BaseIndex& src, AnyRegister dest,
408 : Register temp, Label* fail, bool canonicalizeDoubles,
409 : unsigned numElems);
410 :
411 : template<typename T>
412 : void
413 0 : MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src, const ValueOperand& dest,
414 : bool allowDouble, Register temp, Label* fail)
415 : {
416 0 : switch (arrayType) {
417 : case Scalar::Int8:
418 : case Scalar::Uint8:
419 : case Scalar::Uint8Clamped:
420 : case Scalar::Int16:
421 : case Scalar::Uint16:
422 : case Scalar::Int32:
423 0 : loadFromTypedArray(arrayType, src, AnyRegister(dest.scratchReg()), InvalidReg, nullptr);
424 0 : tagValue(JSVAL_TYPE_INT32, dest.scratchReg(), dest);
425 0 : break;
426 : case Scalar::Uint32:
427 : // Don't clobber dest when we could fail; use temp instead.
428 0 : load32(src, temp);
429 0 : if (allowDouble) {
430 : // If the value fits in an int32, store an int32 type tag.
431 : // Else, convert the value to double and box it.
432 0 : Label done, isDouble;
433 0 : branchTest32(Assembler::Signed, temp, temp, &isDouble);
434 : {
435 0 : tagValue(JSVAL_TYPE_INT32, temp, dest);
436 0 : jump(&done);
437 : }
438 0 : bind(&isDouble);
439 : {
440 0 : convertUInt32ToDouble(temp, ScratchDoubleReg);
441 0 : boxDouble(ScratchDoubleReg, dest);
442 : }
443 0 : bind(&done);
444 : } else {
445 : // Bail out if the value does not fit in an int32.
446 0 : branchTest32(Assembler::Signed, temp, temp, fail);
447 0 : tagValue(JSVAL_TYPE_INT32, temp, dest);
448 : }
449 0 : break;
450 : case Scalar::Float32:
451 0 : loadFromTypedArray(arrayType, src, AnyRegister(ScratchFloat32Reg), dest.scratchReg(),
452 : nullptr);
453 0 : convertFloat32ToDouble(ScratchFloat32Reg, ScratchDoubleReg);
454 0 : boxDouble(ScratchDoubleReg, dest);
455 0 : break;
456 : case Scalar::Float64:
457 0 : loadFromTypedArray(arrayType, src, AnyRegister(ScratchDoubleReg), dest.scratchReg(),
458 : nullptr);
459 0 : boxDouble(ScratchDoubleReg, dest);
460 0 : break;
461 : default:
462 0 : MOZ_CRASH("Invalid typed array type");
463 : }
464 0 : }
465 :
466 : template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const Address& src, const ValueOperand& dest,
467 : bool allowDouble, Register temp, Label* fail);
468 : template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const BaseIndex& src, const ValueOperand& dest,
469 : bool allowDouble, Register temp, Label* fail);
470 :
471 : template <typename T>
472 : void
473 3 : MacroAssembler::loadUnboxedProperty(T address, JSValueType type, TypedOrValueRegister output)
474 : {
475 3 : switch (type) {
476 : case JSVAL_TYPE_INT32: {
477 : // Handle loading an int32 into a double reg.
478 0 : if (output.type() == MIRType::Double) {
479 0 : convertInt32ToDouble(address, output.typedReg().fpu());
480 0 : break;
481 : }
482 : MOZ_FALLTHROUGH;
483 : }
484 :
485 : case JSVAL_TYPE_BOOLEAN:
486 : case JSVAL_TYPE_STRING: {
487 2 : Register outReg;
488 2 : if (output.hasValue()) {
489 2 : outReg = output.valueReg().scratchReg();
490 : } else {
491 0 : MOZ_ASSERT(output.type() == MIRTypeFromValueType(type));
492 0 : outReg = output.typedReg().gpr();
493 : }
494 :
495 2 : switch (type) {
496 : case JSVAL_TYPE_BOOLEAN:
497 1 : load8ZeroExtend(address, outReg);
498 1 : break;
499 : case JSVAL_TYPE_INT32:
500 0 : load32(address, outReg);
501 0 : break;
502 : case JSVAL_TYPE_STRING:
503 1 : loadPtr(address, outReg);
504 1 : break;
505 : default:
506 0 : MOZ_CRASH();
507 : }
508 :
509 2 : if (output.hasValue())
510 2 : tagValue(type, outReg, output.valueReg());
511 2 : break;
512 : }
513 :
514 : case JSVAL_TYPE_OBJECT:
515 1 : if (output.hasValue()) {
516 1 : Register scratch = output.valueReg().scratchReg();
517 1 : loadPtr(address, scratch);
518 :
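: // Unboxed object slots store null as a null pointer, so a zero word
: // must be reboxed as the null Value here.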
519 2 : Label notNull, done;
520 1 : branchPtr(Assembler::NotEqual, scratch, ImmWord(0), &notNull);
521 :
522 1 : moveValue(NullValue(), output.valueReg());
523 1 : jump(&done);
524 :
525 1 : bind(&notNull);
526 1 : tagValue(JSVAL_TYPE_OBJECT, scratch, output.valueReg());
527 :
528 1 : bind(&done);
529 : } else {
530 : // Reading null is not possible here, as otherwise the result
531 : // would be a value (either because null has been read before or
532 : // because there is a barrier).
533 0 : Register reg = output.typedReg().gpr();
534 0 : loadPtr(address, reg);
535 : #ifdef DEBUG
536 0 : Label ok;
537 0 : branchTestPtr(Assembler::NonZero, reg, reg, &ok);
538 0 : assumeUnreachable("Null not possible");
539 0 : bind(&ok);
540 : #endif
541 : }
542 1 : break;
543 :
544 : case JSVAL_TYPE_DOUBLE:
545 : // Note: doubles in unboxed objects are not accessed through other
546 : // views and do not need canonicalization.
547 0 : if (output.hasValue())
548 0 : loadValue(address, output.valueReg());
549 : else
550 0 : loadDouble(address, output.typedReg().fpu());
551 0 : break;
552 :
553 : default:
554 0 : MOZ_CRASH();
555 : }
556 3 : }
557 :
558 : template void
559 : MacroAssembler::loadUnboxedProperty(Address address, JSValueType type,
560 : TypedOrValueRegister output);
561 :
562 : template void
563 : MacroAssembler::loadUnboxedProperty(BaseIndex address, JSValueType type,
564 : TypedOrValueRegister output);
565 :
566 : static void
567 0 : StoreUnboxedFailure(MacroAssembler& masm, Label* failure)
568 : {
569 : // Storing a value to an unboxed property is a fallible operation and
570 : // the caller must provide a failure label if a particular unboxed store
571 : // might fail. Sometimes, however, a store that cannot succeed (such as
572 : // storing a string to an int32 property) will be marked as infallible.
573 : // This can only happen if the code involved is unreachable.
574 0 : if (failure)
575 0 : masm.jump(failure);
576 : else
577 0 : masm.assumeUnreachable("Incompatible write to unboxed property");
578 0 : }
579 :
580 : template <typename T>
581 : void
582 10 : MacroAssembler::storeUnboxedProperty(T address, JSValueType type,
583 : const ConstantOrRegister& value, Label* failure)
584 : {
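: // Each case follows the same pattern: a matching constant is stored
: // directly, a matching typed register likewise, and a boxed Value is
: // type-tested (when a failure label exists) before its payload is
: // stored unboxed; the double case additionally accepts int32 input
: // and converts it.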
585 10 : switch (type) {
586 : case JSVAL_TYPE_BOOLEAN:
587 3 : if (value.constant()) {
588 0 : if (value.value().isBoolean())
589 0 : store8(Imm32(value.value().toBoolean()), address);
590 : else
591 0 : StoreUnboxedFailure(*this, failure);
592 3 : } else if (value.reg().hasTyped()) {
593 0 : if (value.reg().type() == MIRType::Boolean)
594 0 : store8(value.reg().typedReg().gpr(), address);
595 : else
596 0 : StoreUnboxedFailure(*this, failure);
597 : } else {
598 3 : if (failure)
599 0 : branchTestBoolean(Assembler::NotEqual, value.reg().valueReg(), failure);
600 3 : storeUnboxedPayload(value.reg().valueReg(), address, /* width = */ 1);
601 : }
602 3 : break;
603 :
604 : case JSVAL_TYPE_INT32:
605 0 : if (value.constant()) {
606 0 : if (value.value().isInt32())
607 0 : store32(Imm32(value.value().toInt32()), address);
608 : else
609 0 : StoreUnboxedFailure(*this, failure);
610 0 : } else if (value.reg().hasTyped()) {
611 0 : if (value.reg().type() == MIRType::Int32)
612 0 : store32(value.reg().typedReg().gpr(), address);
613 : else
614 0 : StoreUnboxedFailure(*this, failure);
615 : } else {
616 0 : if (failure)
617 0 : branchTestInt32(Assembler::NotEqual, value.reg().valueReg(), failure);
618 0 : storeUnboxedPayload(value.reg().valueReg(), address, /* width = */ 4);
619 : }
620 0 : break;
621 :
622 : case JSVAL_TYPE_DOUBLE:
623 0 : if (value.constant()) {
624 0 : if (value.value().isNumber()) {
625 0 : loadConstantDouble(value.value().toNumber(), ScratchDoubleReg);
626 0 : storeDouble(ScratchDoubleReg, address);
627 : } else {
628 0 : StoreUnboxedFailure(*this, failure);
629 : }
630 0 : } else if (value.reg().hasTyped()) {
631 0 : if (value.reg().type() == MIRType::Int32) {
632 0 : convertInt32ToDouble(value.reg().typedReg().gpr(), ScratchDoubleReg);
633 0 : storeDouble(ScratchDoubleReg, address);
634 0 : } else if (value.reg().type() == MIRType::Double) {
635 0 : storeDouble(value.reg().typedReg().fpu(), address);
636 : } else {
637 0 : StoreUnboxedFailure(*this, failure);
638 : }
639 : } else {
640 0 : ValueOperand reg = value.reg().valueReg();
641 0 : Label notInt32, end;
642 0 : branchTestInt32(Assembler::NotEqual, reg, &notInt32);
643 0 : int32ValueToDouble(reg, ScratchDoubleReg);
644 0 : storeDouble(ScratchDoubleReg, address);
645 0 : jump(&end);
646 0 : bind(&notInt32);
647 0 : if (failure)
648 0 : branchTestDouble(Assembler::NotEqual, reg, failure);
649 0 : storeValue(reg, address);
650 0 : bind(&end);
651 : }
652 0 : break;
653 :
654 : case JSVAL_TYPE_OBJECT:
655 4 : if (value.constant()) {
656 0 : if (value.value().isObjectOrNull())
657 0 : storePtr(ImmGCPtr(value.value().toObjectOrNull()), address);
658 : else
659 0 : StoreUnboxedFailure(*this, failure);
660 4 : } else if (value.reg().hasTyped()) {
661 0 : MOZ_ASSERT(value.reg().type() != MIRType::Null);
662 0 : if (value.reg().type() == MIRType::Object)
663 0 : storePtr(value.reg().typedReg().gpr(), address);
664 : else
665 0 : StoreUnboxedFailure(*this, failure);
666 : } else {
667 4 : if (failure) {
668 0 : Label ok;
669 0 : branchTestNull(Assembler::Equal, value.reg().valueReg(), &ok);
670 0 : branchTestObject(Assembler::NotEqual, value.reg().valueReg(), failure);
671 0 : bind(&ok);
672 : }
673 4 : storeUnboxedPayload(value.reg().valueReg(), address, /* width = */ sizeof(uintptr_t));
674 : }
675 4 : break;
676 :
677 : case JSVAL_TYPE_STRING:
678 3 : if (value.constant()) {
679 0 : if (value.value().isString())
680 0 : storePtr(ImmGCPtr(value.value().toString()), address);
681 : else
682 0 : StoreUnboxedFailure(*this, failure);
683 3 : } else if (value.reg().hasTyped()) {
684 0 : if (value.reg().type() == MIRType::String)
685 0 : storePtr(value.reg().typedReg().gpr(), address);
686 : else
687 0 : StoreUnboxedFailure(*this, failure);
688 : } else {
689 3 : if (failure)
690 0 : branchTestString(Assembler::NotEqual, value.reg().valueReg(), failure);
691 3 : storeUnboxedPayload(value.reg().valueReg(), address, /* width = */ sizeof(uintptr_t));
692 : }
693 3 : break;
694 :
695 : default:
696 0 : MOZ_CRASH();
697 : }
698 10 : }
699 :
700 : template void
701 : MacroAssembler::storeUnboxedProperty(Address address, JSValueType type,
702 : const ConstantOrRegister& value, Label* failure);
703 :
704 : template void
705 : MacroAssembler::storeUnboxedProperty(BaseIndex address, JSValueType type,
706 : const ConstantOrRegister& value, Label* failure);
707 :
708 : void
709 0 : MacroAssembler::checkUnboxedArrayCapacity(Register obj, const RegisterOrInt32Constant& index,
710 : Register temp, Label* failure)
711 : {
712 0 : Address initLengthAddr(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength());
713 0 : Address lengthAddr(obj, UnboxedArrayObject::offsetOfLength());
714 :
715 0 : Label capacityIsIndex, done;
716 0 : load32(initLengthAddr, temp);
717 0 : branchTest32(Assembler::NonZero, temp, Imm32(UnboxedArrayObject::CapacityMask), &capacityIsIndex);
718 0 : branch32(Assembler::BelowOrEqual, lengthAddr, index, failure);
719 0 : jump(&done);
720 0 : bind(&capacityIsIndex);
721 :
722 : // Do a partial shift so that we can get an absolute offset from the base
723 : // of CapacityArray to use.
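: // Shifting right by (CapacityShift - 2) leaves the capacity index
: // pre-multiplied by 4 (the entry size asserted below); the and32 then
: // clears the two residual low bits, yielding a byte offset into
: // CapacityArray.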
724 : JS_STATIC_ASSERT(sizeof(UnboxedArrayObject::CapacityArray[0]) == 4);
725 0 : rshiftPtr(Imm32(UnboxedArrayObject::CapacityShift - 2), temp);
726 0 : and32(Imm32(~0x3), temp);
727 :
728 0 : addPtr(ImmPtr(&UnboxedArrayObject::CapacityArray), temp);
729 0 : branch32(Assembler::BelowOrEqual, Address(temp, 0), index, failure);
730 0 : bind(&done);
731 0 : }
732 :
733 : // Inlined version of gc::CheckAllocatorState that checks the bare essentials
734 : // and bails for anything that cannot be handled with our jit allocators.
735 : void
736 115 : MacroAssembler::checkAllocatorState(Label* fail)
737 : {
738 : // Don't execute the inline path if we are tracing allocations,
739 : // or when the memory profiler is enabled.
740 115 : if (js::gc::TraceEnabled() || MemProfiler::enabled())
741 0 : jump(fail);
742 :
743 : #ifdef JS_GC_ZEAL
744 : // Don't execute the inline path if gc zeal or tracing are active.
745 230 : branch32(Assembler::NotEqual,
746 230 : AbsoluteAddress(GetJitContext()->runtime->addressOfGCZealModeBits()), Imm32(0),
747 115 : fail);
748 : #endif
749 :
750 : // Don't execute the inline path if the compartment has an object metadata callback,
751 : // as the metadata to use for the object may vary between executions of the op.
752 115 : if (GetJitContext()->compartment->hasAllocationMetadataBuilder())
753 0 : jump(fail);
754 115 : }
755 :
756 : // Inline version of ShouldNurseryAllocate.
757 : bool
758 95 : MacroAssembler::shouldNurseryAllocate(gc::AllocKind allocKind, gc::InitialHeap initialHeap)
759 : {
760 : // Note that Ion elides barriers on writes to objects known to be in the
761 : // nursery, so any allocation that can be made into the nursery must be made
762 : // into the nursery, even if the nursery is disabled. At runtime these will
763 : // take the out-of-line path, which is required to insert a barrier for the
764 : // initializing writes.
765 95 : return IsNurseryAllocable(allocKind) && initialHeap != gc::TenuredHeap;
766 : }
767 :
768 : // Inline version of Nursery::allocateObject. If the object has dynamic slots,
769 : // this fills in the slots_ pointer.
770 : void
771 95 : MacroAssembler::nurseryAllocate(Register result, Register temp, gc::AllocKind allocKind,
772 : size_t nDynamicSlots, gc::InitialHeap initialHeap, Label* fail)
773 : {
774 95 : MOZ_ASSERT(IsNurseryAllocable(allocKind));
775 95 : MOZ_ASSERT(initialHeap != gc::TenuredHeap);
776 :
777 : // We still need to allocate in the nursery, per the comment in
778 : // shouldNurseryAllocate; however, we need to insert into the
779 : // mallocedBuffers set, so bail to do the nursery allocation in the
780 : // interpreter.
781 95 : if (nDynamicSlots >= Nursery::MaxNurseryBufferSize / sizeof(Value)) {
782 0 : jump(fail);
783 0 : return;
784 : }
785 :
786 : // No explicit check for nursery.isEnabled() is needed, as the comparison
787 : // with the nursery's end will always fail in such cases.
788 95 : CompileZone* zone = GetJitContext()->compartment->zone();
789 95 : int thingSize = int(gc::Arena::thingSize(allocKind));
790 95 : int totalSize = thingSize + nDynamicSlots * sizeof(HeapSlot);
791 95 : MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);
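: // Bump allocation: result gets the current nursery position and temp
: // the position advanced by totalSize; fail if that passes the nursery
: // end, otherwise commit temp as the new position.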
792 95 : loadPtr(AbsoluteAddress(zone->addressOfNurseryPosition()), result);
793 95 : computeEffectiveAddress(Address(result, totalSize), temp);
794 95 : branchPtr(Assembler::Below, AbsoluteAddress(zone->addressOfNurseryCurrentEnd()), temp, fail);
795 95 : storePtr(temp, AbsoluteAddress(zone->addressOfNurseryPosition()));
796 :
797 95 : if (nDynamicSlots) {
798 0 : computeEffectiveAddress(Address(result, thingSize), temp);
799 0 : storePtr(temp, Address(result, NativeObject::offsetOfSlots()));
800 : }
801 : }
802 :
803 : // Inlined version of FreeSpan::allocate. This does not fill in slots_.
804 : void
805 20 : MacroAssembler::freeListAllocate(Register result, Register temp, gc::AllocKind allocKind, Label* fail)
806 : {
807 20 : CompileZone* zone = GetJitContext()->compartment->zone();
808 20 : int thingSize = int(gc::Arena::thingSize(allocKind));
809 :
810 40 : Label fallback;
811 40 : Label success;
812 :
813 : // Load the first and last offsets of |zone|'s free list for |allocKind|.
814 : // If there is no room remaining in the span, fall back to get the next one.
815 20 : loadPtr(AbsoluteAddress(zone->addressOfFreeList(allocKind)), temp);
816 20 : load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfFirst()), result);
817 20 : load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfLast()), temp);
818 20 : branch32(Assembler::AboveOrEqual, result, temp, &fallback);
819 :
820 : // Bump the offset for the next allocation.
821 20 : add32(Imm32(thingSize), result);
822 20 : loadPtr(AbsoluteAddress(zone->addressOfFreeList(allocKind)), temp);
823 20 : store16(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
824 20 : sub32(Imm32(thingSize), result);
825 20 : addPtr(temp, result); // Turn the offset into a pointer.
826 20 : jump(&success);
827 :
828 20 : bind(&fallback);
829 : // If there are no free spans left, we bail to finish the allocation. The
830 : // interpreter will call the GC allocator to set up a new arena to allocate
831 : // from, after which we can resume allocating in the jit.
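: // A |first| offset of zero denotes an empty span, so bail in that case.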
832 20 : branchTest32(Assembler::Zero, result, result, fail);
833 20 : loadPtr(AbsoluteAddress(zone->addressOfFreeList(allocKind)), temp);
834 20 : addPtr(temp, result); // Turn the offset into a pointer.
835 20 : Push(result);
836 : // Update the free list to point to the next span (which may be empty).
837 20 : load32(Address(result, 0), result);
838 20 : store32(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
839 20 : Pop(result);
840 :
841 20 : bind(&success);
842 20 : }
843 :
844 : void
845 0 : MacroAssembler::callMallocStub(size_t nbytes, Register result, Label* fail)
846 : {
847 : // This register must match the one in JitRuntime::generateMallocStub.
848 0 : const Register regNBytes = CallTempReg0;
849 :
850 0 : MOZ_ASSERT(nbytes > 0);
851 0 : MOZ_ASSERT(nbytes <= INT32_MAX);
852 :
853 0 : if (regNBytes != result)
854 0 : push(regNBytes);
855 0 : move32(Imm32(nbytes), regNBytes);
856 0 : call(GetJitContext()->runtime->jitRuntime()->mallocStub());
857 0 : if (regNBytes != result) {
858 0 : movePtr(regNBytes, result);
859 0 : pop(regNBytes);
860 : }
861 0 : branchTest32(Assembler::Zero, result, result, fail);
862 0 : }
863 :
864 : void
865 0 : MacroAssembler::callFreeStub(Register slots)
866 : {
867 : // This register must match the one in JitRuntime::generateFreeStub.
868 0 : const Register regSlots = CallTempReg0;
869 :
870 0 : push(regSlots);
871 0 : movePtr(slots, regSlots);
872 0 : call(GetJitContext()->runtime->jitRuntime()->freeStub());
873 0 : pop(regSlots);
874 0 : }
875 :
876 : // Inlined equivalent of gc::AllocateObject, without failure case handling.
877 : void
878 95 : MacroAssembler::allocateObject(Register result, Register temp, gc::AllocKind allocKind,
879 : uint32_t nDynamicSlots, gc::InitialHeap initialHeap, Label* fail)
880 : {
881 95 : MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
882 :
883 95 : checkAllocatorState(fail);
884 :
885 95 : if (shouldNurseryAllocate(allocKind, initialHeap))
886 190 : return nurseryAllocate(result, temp, allocKind, nDynamicSlots, initialHeap, fail);
887 :
888 0 : if (!nDynamicSlots)
889 0 : return freeListAllocate(result, temp, allocKind, fail);
890 :
891 0 : callMallocStub(nDynamicSlots * sizeof(GCPtrValue), temp, fail);
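: // temp now holds the malloced slots. Keep it live (on the stack)
: // across the GC-thing allocation below, and free it again if that
: // allocation fails.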
892 :
893 0 : Label failAlloc;
894 0 : Label success;
895 :
896 0 : push(temp);
897 0 : freeListAllocate(result, temp, allocKind, &failAlloc);
898 :
899 0 : pop(temp);
900 0 : storePtr(temp, Address(result, NativeObject::offsetOfSlots()));
901 :
902 0 : jump(&success);
903 :
904 0 : bind(&failAlloc);
905 0 : pop(temp);
906 0 : callFreeStub(temp);
907 0 : jump(fail);
908 :
909 0 : bind(&success);
910 : }
911 :
912 : void
913 95 : MacroAssembler::createGCObject(Register obj, Register temp, JSObject* templateObj,
914 : gc::InitialHeap initialHeap, Label* fail, bool initContents,
915 : bool convertDoubleElements)
916 : {
917 95 : gc::AllocKind allocKind = templateObj->asTenured().getAllocKind();
918 95 : MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
919 :
920 95 : uint32_t nDynamicSlots = 0;
921 95 : if (templateObj->isNative()) {
922 75 : nDynamicSlots = templateObj->as<NativeObject>().numDynamicSlots();
923 :
924 : // Arrays with copy on write elements do not need fixed space for an
925 : // elements header. The template object, which owns the original
926 : // elements, might have another allocation kind.
927 75 : if (templateObj->as<NativeObject>().denseElementsAreCopyOnWrite())
928 0 : allocKind = gc::AllocKind::OBJECT0_BACKGROUND;
929 : }
930 :
931 95 : allocateObject(obj, temp, allocKind, nDynamicSlots, initialHeap, fail);
932 95 : initGCThing(obj, temp, templateObj, initContents, convertDoubleElements);
933 95 : }
934 :
935 :
936 : // Inlined equivalent of gc::AllocateNonObject, without failure case handling.
937 : // Non-object allocation does not need to worry about slots, so can take a
938 : // simpler path.
939 : void
940 20 : MacroAssembler::allocateNonObject(Register result, Register temp, gc::AllocKind allocKind, Label* fail)
941 : {
942 20 : checkAllocatorState(fail);
943 20 : freeListAllocate(result, temp, allocKind, fail);
944 20 : }
945 :
946 : void
947 12 : MacroAssembler::newGCString(Register result, Register temp, Label* fail)
948 : {
949 12 : allocateNonObject(result, temp, js::gc::AllocKind::STRING, fail);
950 12 : }
951 :
952 : void
953 8 : MacroAssembler::newGCFatInlineString(Register result, Register temp, Label* fail)
954 : {
955 8 : allocateNonObject(result, temp, js::gc::AllocKind::FAT_INLINE_STRING, fail);
956 8 : }
957 :
958 : void
959 32 : MacroAssembler::copySlotsFromTemplate(Register obj, const NativeObject* templateObj,
960 : uint32_t start, uint32_t end)
961 : {
962 32 : uint32_t nfixed = Min(templateObj->numFixedSlotsForCompilation(), end);
963 34 : for (unsigned i = start; i < nfixed; i++)
964 2 : storeValue(templateObj->getFixedSlot(i), Address(obj, NativeObject::getFixedSlotOffset(i)));
965 32 : }
966 :
967 : void
968 62 : MacroAssembler::fillSlotsWithConstantValue(Address base, Register temp,
969 : uint32_t start, uint32_t end, const Value& v)
970 : {
971 62 : MOZ_ASSERT(v.isUndefined() || IsUninitializedLexical(v));
972 :
973 62 : if (start >= end)
974 31 : return;
975 :
976 : #ifdef JS_NUNBOX32
977 : // We only have a single spare register, so do the initialization as two
978 : // strided writes of the tag and body.
979 : Address addr = base;
980 : move32(Imm32(v.toNunboxPayload()), temp);
981 : for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtrValue))
982 : store32(temp, ToPayload(addr));
983 :
984 : addr = base;
985 : move32(Imm32(v.toNunboxTag()), temp);
986 : for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtrValue))
987 : store32(temp, ToType(addr));
988 : #else
989 31 : moveValue(v, temp);
990 119 : for (uint32_t i = start; i < end; ++i, base.offset += sizeof(GCPtrValue))
991 88 : storePtr(temp, base);
992 : #endif
993 : }
994 :
995 : void
996 31 : MacroAssembler::fillSlotsWithUndefined(Address base, Register temp, uint32_t start, uint32_t end)
997 : {
998 31 : fillSlotsWithConstantValue(base, temp, start, end, UndefinedValue());
999 31 : }
1000 :
1001 : void
1002 31 : MacroAssembler::fillSlotsWithUninitialized(Address base, Register temp, uint32_t start, uint32_t end)
1003 : {
1004 31 : fillSlotsWithConstantValue(base, temp, start, end, MagicValue(JS_UNINITIALIZED_LEXICAL));
1005 31 : }
1006 :
1007 : static void
1008 32 : FindStartOfUninitializedAndUndefinedSlots(NativeObject* templateObj, uint32_t nslots,
1009 : uint32_t* startOfUninitialized,
1010 : uint32_t* startOfUndefined)
1011 : {
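: // Scan backwards: first over the trailing run of undefined slots, then
: // over any run of uninitialized-lexical slots immediately before it.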
1012 32 : MOZ_ASSERT(nslots == templateObj->lastProperty()->slotSpan(templateObj->getClass()));
1013 32 : MOZ_ASSERT(nslots > 0);
1014 :
1015 32 : uint32_t first = nslots;
1016 212 : for (; first != 0; --first) {
1017 92 : if (templateObj->getSlot(first - 1) != UndefinedValue())
1018 2 : break;
1019 : }
1020 32 : *startOfUndefined = first;
1021 :
1022 32 : if (first != 0 && IsUninitializedLexical(templateObj->getSlot(first - 1))) {
1023 0 : for (; first != 0; --first) {
1024 0 : if (!IsUninitializedLexical(templateObj->getSlot(first - 1)))
1025 0 : break;
1026 : }
1027 0 : *startOfUninitialized = first;
1028 : } else {
1029 32 : *startOfUninitialized = *startOfUndefined;
1030 : }
1031 32 : }
1032 :
1033 : static void
1034 0 : AllocateObjectBufferWithInit(JSContext* cx, TypedArrayObject* obj, int32_t count)
1035 : {
1036 0 : JS::AutoCheckCannotGC nogc(cx);
1037 :
1038 0 : obj->initPrivate(nullptr);
1039 :
1040 : // Negative numbers or zero will bail out to the slow path, which in turn will raise
1041 : // an invalid argument exception or create a correct object with zero elements.
1042 0 : if (count <= 0 || uint32_t(count) >= INT32_MAX / obj->bytesPerElement()) {
1043 0 : obj->setFixedSlot(TypedArrayObject::LENGTH_SLOT, Int32Value(0));
1044 0 : return;
1045 : }
1046 :
1047 0 : obj->setFixedSlot(TypedArrayObject::LENGTH_SLOT, Int32Value(count));
1048 : size_t nbytes;
1049 :
1050 0 : switch (obj->type()) {
1051 : #define CREATE_TYPED_ARRAY(T, N) \
1052 : case Scalar::N: \
1053 : MOZ_ALWAYS_TRUE(js::CalculateAllocSize<T>(count, &nbytes)); \
1054 : break;
1055 0 : JS_FOR_EACH_TYPED_ARRAY(CREATE_TYPED_ARRAY)
1056 : #undef CREATE_TYPED_ARRAY
1057 : default:
1058 0 : MOZ_CRASH("Unsupported TypedArray type");
1059 : }
1060 :
1061 0 : MOZ_ASSERT((CheckedUint32(nbytes) + sizeof(Value)).isValid());
1062 :
1063 0 : nbytes = JS_ROUNDUP(nbytes, sizeof(Value));
1064 0 : void* buf = cx->nursery().allocateBuffer(obj, nbytes);
1065 0 : if (buf) {
1066 0 : obj->initPrivate(buf);
1067 0 : memset(buf, 0, nbytes);
1068 : }
1069 : }
1070 :
1071 : void
1072 0 : MacroAssembler::initTypedArraySlots(Register obj, Register temp, Register lengthReg,
1073 : LiveRegisterSet liveRegs, Label* fail,
1074 : TypedArrayObject* templateObj, TypedArrayLength lengthKind)
1075 : {
1076 0 : MOZ_ASSERT(templateObj->hasPrivate());
1077 0 : MOZ_ASSERT(!templateObj->hasBuffer());
1078 :
1079 0 : size_t dataSlotOffset = TypedArrayObject::dataOffset();
1080 0 : size_t dataOffset = TypedArrayObject::dataOffset() + sizeof(HeapSlot);
1081 :
1082 : static_assert(TypedArrayObject::FIXED_DATA_START == TypedArrayObject::DATA_SLOT + 1,
1083 : "fixed inline element data assumed to begin after the data slot");
1084 :
1085 : // Initialise data elements to zero.
1086 0 : int32_t length = templateObj->length();
1087 0 : size_t nbytes = length * templateObj->bytesPerElement();
1088 :
1089 0 : if (lengthKind == TypedArrayLength::Fixed && dataOffset + nbytes <= JSObject::MAX_BYTE_SIZE) {
1090 0 : MOZ_ASSERT(dataOffset + nbytes <= templateObj->tenuredSizeOfThis());
1091 :
1092 : // Store data elements inside the remaining JSObject slots.
1093 0 : computeEffectiveAddress(Address(obj, dataOffset), temp);
1094 0 : storePtr(temp, Address(obj, dataSlotOffset));
1095 :
1096 : // Write enough zero pointers into fixed data to zero every
1097 : // element. (This zeroes past the end of a byte count that's
1098 : // not a multiple of pointer size. That's okay, because fixed
1099 : // data is a count of 8-byte HeapSlots (i.e. <= pointer size),
1100 : // and we won't inline unless the desired memory fits in that
1101 : // space.)
1102 : static_assert(sizeof(HeapSlot) == 8, "Assumed 8 bytes alignment");
1103 :
1104 0 : size_t numZeroPointers = ((nbytes + 7) & ~0x7) / sizeof(char *);
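: // e.g. on a 64-bit target, nbytes == 13 rounds up to 16 bytes and
: // emits two pointer-sized zero stores.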
1105 0 : for (size_t i = 0; i < numZeroPointers; i++)
1106 0 : storePtr(ImmWord(0), Address(obj, dataOffset + i * sizeof(char *)));
1107 : #ifdef DEBUG
1108 0 : if (nbytes == 0)
1109 0 : store8(Imm32(TypedArrayObject::ZeroLengthArrayData), Address(obj, dataSlotOffset));
1110 : #endif
1111 : } else {
1112 0 : if (lengthKind == TypedArrayLength::Fixed)
1113 0 : move32(Imm32(length), lengthReg);
1114 :
1115 : // Allocate a buffer on the heap to store the data elements.
1116 0 : liveRegs.addUnchecked(temp);
1117 0 : liveRegs.addUnchecked(obj);
1118 0 : liveRegs.addUnchecked(lengthReg);
1119 0 : PushRegsInMask(liveRegs);
1120 0 : setupUnalignedABICall(temp);
1121 0 : loadJSContext(temp);
1122 0 : passABIArg(temp);
1123 0 : passABIArg(obj);
1124 0 : passABIArg(lengthReg);
1125 0 : callWithABI(JS_FUNC_TO_DATA_PTR(void*, AllocateObjectBufferWithInit));
1126 0 : PopRegsInMask(liveRegs);
1127 :
1128 : // Fail when the data elements pointer is null.
1129 0 : branchPtr(Assembler::Equal, Address(obj, dataSlotOffset), ImmWord(0), fail);
1130 : }
1131 0 : }
1132 :
1133 : void
1134 71 : MacroAssembler::initGCSlots(Register obj, Register temp, NativeObject* templateObj,
1135 : bool initContents)
1136 : {
1137 : // Slots of non-array objects are required to be initialized.
1138 : // Use the values currently in the template object.
1139 71 : uint32_t nslots = templateObj->lastProperty()->slotSpan(templateObj->getClass());
1140 71 : if (nslots == 0)
1141 39 : return;
1142 :
1143 32 : uint32_t nfixed = templateObj->numUsedFixedSlots();
1144 32 : uint32_t ndynamic = templateObj->numDynamicSlots();
1145 :
1146 : // Attempt to group slot writes such that we minimize the amount of
1147 : // duplicated data we need to embed in code and load into registers. In
1148 : // general, most template object slots will be undefined except for any
1149 : // reserved slots. Since reserved slots come first, we split the object
1150 : // logically into independent non-UndefinedValue writes to the head and
1151 : // duplicated writes of UndefinedValue to the tail. For the majority of
1152 : // objects, the "tail" will be the entire slot range.
1153 : //
1154 : // The template object may be a CallObject, in which case we need to
1155 : // account for uninitialized lexical slots as well as undefined
1156 : // slots. Uninitialized lexical slots appear in CallObjects if the function
1157 : // has parameter expressions, in which case closed-over parameters have
1158 : // TDZ. Uninitialized slots come before undefined slots in CallObjects.
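: // The slot span is thus partitioned as:
: //   [0, startOfUninitialized)                 values copied from the template
: //   [startOfUninitialized, startOfUndefined)  JS_UNINITIALIZED_LEXICAL
: //   [startOfUndefined, nslots)                undefined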
1159 32 : uint32_t startOfUninitialized = nslots;
1160 32 : uint32_t startOfUndefined = nslots;
1161 : FindStartOfUninitializedAndUndefinedSlots(templateObj, nslots,
1162 32 : &startOfUninitialized, &startOfUndefined);
1163 32 : MOZ_ASSERT(startOfUninitialized <= nfixed); // Reserved slots must be fixed.
1164 32 : MOZ_ASSERT(startOfUndefined >= startOfUninitialized);
1165 32 : MOZ_ASSERT_IF(!templateObj->is<CallObject>(), startOfUninitialized == startOfUndefined);
1166 :
1167 : // Copy over any preserved reserved slots.
1168 32 : copySlotsFromTemplate(obj, templateObj, 0, startOfUninitialized);
1169 :
1170 : // Fill the rest of the fixed slots with undefined and uninitialized.
1171 32 : if (initContents) {
1172 31 : size_t offset = NativeObject::getFixedSlotOffset(startOfUninitialized);
1173 93 : fillSlotsWithUninitialized(Address(obj, offset), temp,
1174 62 : startOfUninitialized, Min(startOfUndefined, nfixed));
1175 :
1176 31 : offset = NativeObject::getFixedSlotOffset(startOfUndefined);
1177 62 : fillSlotsWithUndefined(Address(obj, offset), temp,
1178 62 : startOfUndefined, nfixed);
1179 : }
1180 :
1181 32 : if (ndynamic) {
1182 : // We are short one register to do this elegantly. Borrow the obj
1183 : // register briefly for our slots base address.
1184 0 : push(obj);
1185 0 : loadPtr(Address(obj, NativeObject::offsetOfSlots()), obj);
1186 :
1187 : // Fill uninitialized slots if necessary. Otherwise initialize all
1188 : // slots to undefined.
1189 0 : if (startOfUndefined > nfixed) {
1190 0 : MOZ_ASSERT(startOfUninitialized != startOfUndefined);
1191 0 : fillSlotsWithUninitialized(Address(obj, 0), temp, 0, startOfUndefined - nfixed);
1192 0 : size_t offset = (startOfUndefined - nfixed) * sizeof(Value);
1193 0 : fillSlotsWithUndefined(Address(obj, offset), temp, startOfUndefined - nfixed, ndynamic);
1194 : } else {
1195 0 : fillSlotsWithUndefined(Address(obj, 0), temp, 0, ndynamic);
1196 : }
1197 :
1198 0 : pop(obj);
1199 : }
1200 : }
1201 :
1202 : void
1203 95 : MacroAssembler::initGCThing(Register obj, Register temp, JSObject* templateObj,
1204 : bool initContents, bool convertDoubleElements)
1205 : {
1206 : // Fast initialization of an empty object returned by allocateObject().
1207 :
1208 95 : storePtr(ImmGCPtr(templateObj->group()), Address(obj, JSObject::offsetOfGroup()));
1209 :
1210 95 : if (Shape* shape = templateObj->maybeShape())
1211 75 : storePtr(ImmGCPtr(shape), Address(obj, ShapedObject::offsetOfShape()));
1212 :
1213 95 : MOZ_ASSERT_IF(convertDoubleElements, templateObj->is<ArrayObject>());
1214 :
1215 95 : if (templateObj->isNative()) {
1216 75 : NativeObject* ntemplate = &templateObj->as<NativeObject>();
1217 75 : MOZ_ASSERT_IF(!ntemplate->denseElementsAreCopyOnWrite(), !ntemplate->hasDynamicElements());
1218 :
1219 : // If the object has dynamic slots, the slots member has already been
1220 : // filled in.
1221 75 : if (!ntemplate->hasDynamicSlots())
1222 75 : storePtr(ImmPtr(nullptr), Address(obj, NativeObject::offsetOfSlots()));
1223 :
1224 75 : if (ntemplate->denseElementsAreCopyOnWrite()) {
1225 0 : storePtr(ImmPtr((const Value*) ntemplate->getDenseElements()),
1226 0 : Address(obj, NativeObject::offsetOfElements()));
1227 75 : } else if (ntemplate->is<ArrayObject>()) {
1228 4 : int elementsOffset = NativeObject::offsetOfFixedElements();
1229 :
1230 4 : computeEffectiveAddress(Address(obj, elementsOffset), temp);
1231 4 : storePtr(temp, Address(obj, NativeObject::offsetOfElements()));
1232 :
1233 : // Fill in the elements header.
1234 8 : store32(Imm32(ntemplate->getDenseCapacity()),
1235 12 : Address(obj, elementsOffset + ObjectElements::offsetOfCapacity()));
1236 8 : store32(Imm32(ntemplate->getDenseInitializedLength()),
1237 12 : Address(obj, elementsOffset + ObjectElements::offsetOfInitializedLength()));
1238 8 : store32(Imm32(ntemplate->as<ArrayObject>().length()),
1239 12 : Address(obj, elementsOffset + ObjectElements::offsetOfLength()));
1240 8 : store32(Imm32(convertDoubleElements
1241 : ? ObjectElements::CONVERT_DOUBLE_ELEMENTS
1242 : : 0),
1243 12 : Address(obj, elementsOffset + ObjectElements::offsetOfFlags()));
1244 4 : MOZ_ASSERT(!ntemplate->hasPrivate());
1245 71 : } else if (ntemplate->is<ArgumentsObject>()) {
1246 : // The caller will initialize the reserved slots.
1247 0 : MOZ_ASSERT(!initContents);
1248 0 : MOZ_ASSERT(!ntemplate->hasPrivate());
1249 0 : storePtr(ImmPtr(emptyObjectElements), Address(obj, NativeObject::offsetOfElements()));
1250 : } else {
1251 : // If the target type could be a TypedArray that maps shared memory
1252 : // then this would need to store emptyObjectElementsShared in that case.
1253 71 : MOZ_ASSERT(!ntemplate->isSharedMemory());
1254 :
1255 71 : storePtr(ImmPtr(emptyObjectElements), Address(obj, NativeObject::offsetOfElements()));
1256 :
1257 71 : initGCSlots(obj, temp, ntemplate, initContents);
1258 :
1259 71 : if (ntemplate->hasPrivate() && !ntemplate->is<TypedArrayObject>()) {
1260 0 : uint32_t nfixed = ntemplate->numFixedSlotsForCompilation();
1261 0 : Address privateSlot(obj, NativeObject::getPrivateDataOffset(nfixed));
1262 0 : if (ntemplate->is<RegExpObject>()) {
1263 : // RegExpObject stores a GC thing (RegExpShared*) in its
1264 : // private slot, so we have to use ImmGCPtr.
1265 0 : RegExpObject* regexp = &ntemplate->as<RegExpObject>();
1266 0 : MOZ_ASSERT(regexp->hasShared());
1267 0 : MOZ_ASSERT(ntemplate->getPrivate() == regexp->sharedRef().get());
1268 0 : storePtr(ImmGCPtr(regexp->sharedRef().get()), privateSlot);
1269 : } else {
1270 0 : storePtr(ImmPtr(ntemplate->getPrivate()), privateSlot);
1271 : }
1272 : }
1273 : }
1274 20 : } else if (templateObj->is<InlineTypedObject>()) {
1275 0 : JS::AutoAssertNoGC nogc; // off-thread, so cannot GC
1276 0 : size_t nbytes = templateObj->as<InlineTypedObject>().size();
1277 0 : const uint8_t* memory = templateObj->as<InlineTypedObject>().inlineTypedMem(nogc);
1278 :
1279 : // Memcpy the contents of the template object to the new object.
1280 0 : size_t offset = 0;
1281 0 : while (nbytes) {
1282 0 : uintptr_t value = *(uintptr_t*)(memory + offset);
1283 0 : storePtr(ImmWord(value),
1284 0 : Address(obj, InlineTypedObject::offsetOfDataStart() + offset));
1285 0 : nbytes = (nbytes < sizeof(uintptr_t)) ? 0 : nbytes - sizeof(uintptr_t);
1286 0 : offset += sizeof(uintptr_t);
1287 : }
1288 20 : } else if (templateObj->is<UnboxedPlainObject>()) {
1289 20 : storePtr(ImmWord(0), Address(obj, UnboxedPlainObject::offsetOfExpando()));
1290 20 : if (initContents)
1291 20 : initUnboxedObjectContents(obj, &templateObj->as<UnboxedPlainObject>());
1292 0 : } else if (templateObj->is<UnboxedArrayObject>()) {
1293 0 : MOZ_ASSERT(templateObj->as<UnboxedArrayObject>().hasInlineElements());
1294 0 : int elementsOffset = UnboxedArrayObject::offsetOfInlineElements();
1295 0 : computeEffectiveAddress(Address(obj, elementsOffset), temp);
1296 0 : storePtr(temp, Address(obj, UnboxedArrayObject::offsetOfElements()));
1297 0 : store32(Imm32(templateObj->as<UnboxedArrayObject>().length()),
1298 0 : Address(obj, UnboxedArrayObject::offsetOfLength()));
1299 0 : uint32_t capacityIndex = templateObj->as<UnboxedArrayObject>().capacityIndex();
1300 0 : store32(Imm32(capacityIndex << UnboxedArrayObject::CapacityShift),
1301 0 : Address(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength()));
1302 : } else {
1303 0 : MOZ_CRASH("Unknown object");
1304 : }
1305 :
1306 : #ifdef JS_GC_TRACE
1307 : RegisterSet regs = RegisterSet::Volatile();
1308 : PushRegsInMask(regs);
1309 : regs.takeUnchecked(obj);
1310 : Register temp = regs.takeAnyGeneral();
1311 :
1312 : setupUnalignedABICall(temp);
1313 : passABIArg(obj);
1314 : movePtr(ImmGCPtr(templateObj->type()), temp);
1315 : passABIArg(temp);
1316 : callWithABI(JS_FUNC_TO_DATA_PTR(void*, js::gc::TraceCreateObject));
1317 :
1318 : PopRegsInMask(RegisterSet::Volatile());
1319 : #endif
1320 95 : }
1321 :
1322 : void
1323 20 : MacroAssembler::initUnboxedObjectContents(Register object, UnboxedPlainObject* templateObject)
1324 : {
1325 20 : const UnboxedLayout& layout = templateObject->layoutDontCheckGeneration();
1326 :
1327 : // Initialize reference fields of the object, per UnboxedPlainObject::create.
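: // The trace list is laid out as -1-terminated runs of offsets: string
: // fields first (initialized to the empty atom), then object fields
: // (initialized to null); the assert below checks that the trailing
: // run of Value fields is empty.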
1328 20 : if (const int32_t* list = layout.traceList()) {
1329 32 : while (*list != -1) {
1330 12 : storePtr(ImmGCPtr(GetJitContext()->runtime->names().empty),
1331 12 : Address(object, UnboxedPlainObject::offsetOfData() + *list));
1332 6 : list++;
1333 : }
1334 20 : list++;
1335 60 : while (*list != -1) {
1336 40 : storePtr(ImmWord(0),
1337 40 : Address(object, UnboxedPlainObject::offsetOfData() + *list));
1338 20 : list++;
1339 : }
1340 : // Unboxed objects don't have Values to initialize.
1341 20 : MOZ_ASSERT(*(list + 1) == -1);
1342 : }
1343 20 : }
1344 :
1345 : void
1346 11 : MacroAssembler::compareStrings(JSOp op, Register left, Register right, Register result,
1347 : Label* fail)
1348 : {
1349 11 : MOZ_ASSERT(IsEqualityOp(op));
1350 :
1351 22 : Label done;
1352 22 : Label notPointerEqual;
1353 : // Fast path for identical strings.
1354 11 : branchPtr(Assembler::NotEqual, left, right, &notPointerEqual);
1355 11 : move32(Imm32(op == JSOP_EQ || op == JSOP_STRICTEQ), result);
1356 11 : jump(&done);
1357 :
1358 11 : bind(&notPointerEqual);
1359 :
1360 22 : Label notAtom;
1361 : // Optimize the equality operation to a pointer compare for two atoms.
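: // Atoms are interned, so two distinct atom pointers always denote
: // distinct strings.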
1362 11 : Imm32 atomBit(JSString::ATOM_BIT);
1363 11 : branchTest32(Assembler::Zero, Address(left, JSString::offsetOfFlags()), atomBit, &notAtom);
1364 11 : branchTest32(Assembler::Zero, Address(right, JSString::offsetOfFlags()), atomBit, &notAtom);
1365 :
1366 11 : cmpPtrSet(JSOpToCondition(MCompare::Compare_String, op), left, right, result);
1367 11 : jump(&done);
1368 :
1369 11 : bind(&notAtom);
1370 : // Strings of different length can never be equal, so equal lengths force the slow path.
1371 11 : loadStringLength(left, result);
1372 11 : branch32(Assembler::Equal, Address(right, JSString::offsetOfLength()), result, fail);
1373 11 : move32(Imm32(op == JSOP_NE || op == JSOP_STRICTNE), result);
1374 :
1375 11 : bind(&done);
1376 11 : }
1377 :
1378 : void
1379 30 : MacroAssembler::loadStringChars(Register str, Register dest)
1380 : {
1381 60 : Label isInline, done;
1382 60 : branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
1383 30 : Imm32(JSString::INLINE_CHARS_BIT), &isInline);
1384 :
1385 30 : loadPtr(Address(str, JSString::offsetOfNonInlineChars()), dest);
1386 30 : jump(&done);
1387 :
1388 30 : bind(&isInline);
1389 30 : computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()), dest);
1390 :
1391 30 : bind(&done);
1392 30 : }
1393 :
1394 : void
1395 3 : MacroAssembler::loadStringChar(Register str, Register index, Register output, Label* fail)
1396 : {
1397 3 : MOZ_ASSERT(str != output);
1398 3 : MOZ_ASSERT(index != output);
1399 :
1400 3 : movePtr(str, output);
1401 :
1402 : // This follows JSString::getChar.
1403 6 : Label notRope;
1404 3 : branchIfNotRope(str, &notRope);
1405 :
1406 : // Load leftChild.
1407 3 : loadPtr(Address(str, JSRope::offsetOfLeft()), output);
1408 :
1409 : // Check if the index is contained in the leftChild.
1410 : // Todo: Handle index in the rightChild.
1411 3 : branch32(Assembler::BelowOrEqual, Address(output, JSString::offsetOfLength()), index, fail);
1412 :
1413 : // If the left side is another rope, give up.
1414 3 : branchIfRope(output, fail);
1415 :
1416 3 : bind(&notRope);
1417 :
1418 6 : Label isLatin1, done;
1419 : // We have to check the left/right side for ropes,
1420 : // because a TwoByte rope might have a Latin1 child.
1421 3 : branchLatin1String(output, &isLatin1);
1422 :
1423 3 : loadStringChars(output, output);
1424 3 : load16ZeroExtend(BaseIndex(output, index, TimesTwo), output);
1425 3 : jump(&done);
1426 :
1427 3 : bind(&isLatin1);
1428 3 : loadStringChars(output, output);
1429 3 : load8ZeroExtend(BaseIndex(output, index, TimesOne), output);
1430 :
1431 3 : bind(&done);
1432 3 : }
1433 :
1434 : void
1435 2 : MacroAssembler::loadStringIndexValue(Register str, Register dest, Label* fail)
1436 : {
1437 2 : MOZ_ASSERT(str != dest);
1438 :
1439 2 : load32(Address(str, JSString::offsetOfFlags()), dest);
1440 :
1441 : // Does not have a cached index value.
1442 2 : branchTest32(Assembler::Zero, dest, Imm32(JSString::INDEX_VALUE_BIT), fail);
1443 :
1444 : // Extract the index.
1445 2 : rshift32(Imm32(JSString::INDEX_VALUE_SHIFT), dest);
1446 2 : }
1447 :
1448 : void
1449 3 : MacroAssembler::typeOfObject(Register obj, Register scratch, Label* slow,
1450 : Label* isObject, Label* isCallable, Label* isUndefined)
1451 : {
1452 3 : loadObjClass(obj, scratch);
1453 :
1454 : // Proxies can emulate undefined and have complex isCallable behavior.
1455 3 : branchTestClassIsProxy(true, scratch, slow);
1456 :
1457 : // JSFunctions are always callable.
1458 3 : branchPtr(Assembler::Equal, scratch, ImmPtr(&JSFunction::class_), isCallable);
1459 :
1460 : // Objects that emulate undefined.
1461 3 : Address flags(scratch, Class::offsetOfFlags());
1462 3 : branchTest32(Assembler::NonZero, flags, Imm32(JSCLASS_EMULATES_UNDEFINED), isUndefined);
1463 :
1464 : // Handle classes with a call hook.
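: // A class with no cOps, or with cOps but no call hook, is a plain
: // object; anything remaining is callable.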
1465 6 : branchPtr(Assembler::Equal, Address(scratch, offsetof(js::Class, cOps)), ImmPtr(nullptr),
1466 3 : isObject);
1467 :
1468 3 : loadPtr(Address(scratch, offsetof(js::Class, cOps)), scratch);
1469 6 : branchPtr(Assembler::Equal, Address(scratch, offsetof(js::ClassOps, call)), ImmPtr(nullptr),
1470 3 : isObject);
1471 :
1472 3 : jump(isCallable);
1473 3 : }
1474 :
1475 : void
1476 3522 : MacroAssembler::loadJSContext(Register dest)
1477 : {
1478 3522 : CompileCompartment* compartment = GetJitContext()->compartment;
1479 3522 : if (compartment->zone()->isAtomsZone()) {
1480 : // If we are in the atoms zone then we are generating a runtime wide
1481 : // trampoline which can run in any zone. Load the context which is
1482 : // currently running using cooperative scheduling in the runtime.
1483 : // (This will need to be fixed when we have preemptive scheduling,
1484 : // bug 1323066).
1485 1064 : loadPtr(AbsoluteAddress(GetJitContext()->runtime->addressOfActiveJSContext()), dest);
1486 : } else {
1487 : // If we are in a specific zone then the current context will be stored
1488 : // in the containing zone group.
1489 2458 : loadPtr(AbsoluteAddress(GetJitContext()->compartment->zone()->addressOfJSContext()), dest);
1490 : }
1491 3522 : }
1492 :
1493 : void
1494 13 : MacroAssembler::guardGroupHasUnanalyzedNewScript(Register group, Register scratch, Label* fail)
1495 : {
1496 26 : Label noNewScript;
1497 13 : load32(Address(group, ObjectGroup::offsetOfFlags()), scratch);
1498 13 : and32(Imm32(OBJECT_FLAG_ADDENDUM_MASK), scratch);
1499 26 : branch32(Assembler::NotEqual, scratch,
1500 : Imm32(uint32_t(ObjectGroup::Addendum_NewScript) << OBJECT_FLAG_ADDENDUM_SHIFT),
1501 13 : &noNewScript);
1502 :
1503 : // Guard that group->newScript()->preliminaryObjects is non-null.
1504 13 : loadPtr(Address(group, ObjectGroup::offsetOfAddendum()), scratch);
1505 26 : branchPtr(Assembler::Equal,
1506 26 : Address(scratch, TypeNewScript::offsetOfPreliminaryObjects()),
1507 13 : ImmWord(0), fail);
1508 :
1509 13 : bind(&noNewScript);
1510 13 : }
1511 :
1512 : static void
1513 0 : BailoutReportOverRecursed(JSContext* cx)
1514 : {
1515 0 : ReportOverRecursed(cx);
1516 0 : }
1517 :
1518 : void
1519 4 : MacroAssembler::generateBailoutTail(Register scratch, Register bailoutInfo)
1520 : {
1521 4 : loadJSContext(scratch);
1522 4 : enterExitFrame(scratch, scratch);
1523 :
1524 8 : Label baseline;
1525 :
1526 : // The return value from Bailout is tagged as:
1527 : // - 0x0: done (enter baseline)
1528 : // - 0x1: error (handle exception)
1529 : // - 0x2: overrecursed
1530 : JS_STATIC_ASSERT(BAILOUT_RETURN_OK == 0);
1531 : JS_STATIC_ASSERT(BAILOUT_RETURN_FATAL_ERROR == 1);
1532 : JS_STATIC_ASSERT(BAILOUT_RETURN_OVERRECURSED == 2);
1533 :
1534 4 : branch32(Equal, ReturnReg, Imm32(BAILOUT_RETURN_OK), &baseline);
1535 4 : branch32(Equal, ReturnReg, Imm32(BAILOUT_RETURN_FATAL_ERROR), exceptionLabel());
1536 :
1537 : // Fall-through: overrecursed.
1538 : {
1539 4 : loadJSContext(ReturnReg);
1540 4 : setupUnalignedABICall(scratch);
1541 4 : passABIArg(ReturnReg);
1542 4 : callWithABI(JS_FUNC_TO_DATA_PTR(void*, BailoutReportOverRecursed));
1543 4 : jump(exceptionLabel());
1544 : }
1545 :
1546 4 : bind(&baseline);
1547 : {
1548 : // Prepare a register set for use in this case.
1549 4 : AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
1550 4 : MOZ_ASSERT(!regs.has(getStackPointer()));
1551 4 : regs.take(bailoutInfo);
1552 :
1553 : // Reset SP to the point where clobbering starts.
1554 4 : loadStackPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, incomingStack)));
1555 :
1556 4 : Register copyCur = regs.takeAny();
1557 4 : Register copyEnd = regs.takeAny();
1558 4 : Register temp = regs.takeAny();
1559 :
1560 : // Copy data onto stack.
1561 4 : loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackTop)), copyCur);
1562 4 : loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackBottom)), copyEnd);
1563 : {
1564 8 : Label copyLoop;
1565 8 : Label endOfCopy;
1566 4 : bind(&copyLoop);
1567 4 : branchPtr(Assembler::BelowOrEqual, copyCur, copyEnd, &endOfCopy);
1568 4 : subPtr(Imm32(4), copyCur);
1569 4 : subFromStackPtr(Imm32(4));
1570 4 : load32(Address(copyCur, 0), temp);
1571 4 : store32(temp, Address(getStackPointer(), 0));
1572 4 : jump(&copyLoop);
1573 4 : bind(&endOfCopy);
1574 : }
1575 :
1576 : // Enter exit frame for the FinishBailoutToBaseline call.
1577 4 : loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)), temp);
1578 4 : load32(Address(temp, BaselineFrame::reverseOffsetOfFrameSize()), temp);
1579 4 : makeFrameDescriptor(temp, JitFrame_BaselineJS, ExitFrameLayout::Size());
1580 4 : push(temp);
1581 4 : push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
1582 : // No GC things to mark on the stack, so push a bare token.
1583 4 : loadJSContext(scratch);
1584 4 : enterFakeExitFrame(scratch, scratch, ExitFrameLayoutBareToken);
1585 :
1586 : // If monitorStub is non-null, handle resumeAddr appropriately.
1587 8 : Label noMonitor;
1588 8 : Label done;
1589 8 : branchPtr(Assembler::Equal,
1590 8 : Address(bailoutInfo, offsetof(BaselineBailoutInfo, monitorStub)),
1591 : ImmPtr(nullptr),
1592 4 : &noMonitor);
1593 :
1594 : //
1595 : // Resuming into a monitoring stub chain.
1596 : //
1597 : {
1598 : // Save needed values onto stack temporarily.
1599 4 : pushValue(Address(bailoutInfo, offsetof(BaselineBailoutInfo, valueR0)));
1600 4 : push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)));
1601 4 : push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
1602 4 : push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, monitorStub)));
1603 :
1604 : // Call a stub to free allocated memory and create arguments objects.
1605 4 : setupUnalignedABICall(temp);
1606 4 : passABIArg(bailoutInfo);
1607 4 : callWithABI(JS_FUNC_TO_DATA_PTR(void*, FinishBailoutToBaseline));
1608 4 : branchTest32(Zero, ReturnReg, ReturnReg, exceptionLabel());
1609 :
1610 : // Restore values where they need to be and resume execution.
1611 4 : AllocatableGeneralRegisterSet enterMonRegs(GeneralRegisterSet::All());
1612 4 : enterMonRegs.take(R0);
1613 4 : enterMonRegs.take(ICStubReg);
1614 4 : enterMonRegs.take(BaselineFrameReg);
1615 4 : enterMonRegs.takeUnchecked(ICTailCallReg);
1616 :
1617 4 : pop(ICStubReg);
1618 4 : pop(ICTailCallReg);
1619 4 : pop(BaselineFrameReg);
1620 4 : popValue(R0);
1621 :
1622 : // Discard exit frame.
1623 4 : addToStackPtr(Imm32(ExitFrameLayout::SizeWithFooter()));
1624 :
1625 : #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
1626 4 : push(ICTailCallReg);
1627 : #endif
1628 4 : jump(Address(ICStubReg, ICStub::offsetOfStubCode()));
1629 : }
1630 :
1631 : //
1632 : // Resuming into main jitcode.
1633 : //
1634 4 : bind(&noMonitor);
1635 : {
1636 : // Save needed values onto stack temporarily.
1637 4 : pushValue(Address(bailoutInfo, offsetof(BaselineBailoutInfo, valueR0)));
1638 4 : pushValue(Address(bailoutInfo, offsetof(BaselineBailoutInfo, valueR1)));
1639 4 : push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)));
1640 4 : push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
1641 :
1642 : // Call a stub to free allocated memory and create arguments objects.
1643 4 : setupUnalignedABICall(temp);
1644 4 : passABIArg(bailoutInfo);
1645 4 : callWithABI(JS_FUNC_TO_DATA_PTR(void*, FinishBailoutToBaseline));
1646 4 : branchTest32(Zero, ReturnReg, ReturnReg, exceptionLabel());
1647 :
1648 : // Restore values where they need to be and resume execution.
1649 4 : AllocatableGeneralRegisterSet enterRegs(GeneralRegisterSet::All());
1650 4 : enterRegs.take(R0);
1651 4 : enterRegs.take(R1);
1652 4 : enterRegs.take(BaselineFrameReg);
1653 4 : Register jitcodeReg = enterRegs.takeAny();
1654 :
1655 4 : pop(jitcodeReg);
1656 4 : pop(BaselineFrameReg);
1657 4 : popValue(R1);
1658 4 : popValue(R0);
1659 :
1660 : // Discard exit frame.
1661 4 : addToStackPtr(Imm32(ExitFrameLayout::SizeWithFooter()));
1662 :
1663 4 : jump(jitcodeReg);
1664 : }
1665 : }
1666 4 : }
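:
: // Illustrative stack picture at the FinishBailoutToBaseline call in the
: // monitored case above (inferred from the pushes; not an authoritative
: // layout):
: //
: //   higher addresses
: //   [ copied baseline frame data ]
: //   [ frame descriptor ] [ resumeAddr ] [ exit frame footer (bare token) ]
: //   [ valueR0 ] [ resumeFramePtr ] [ resumeAddr ] [ monitorStub ]  <- sp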
1667 :
1668 : void
1669 80 : MacroAssembler::loadBaselineOrIonRaw(Register script, Register dest, Label* failure)
1670 : {
1671 80 : loadPtr(Address(script, JSScript::offsetOfBaselineOrIonRaw()), dest);
1672 80 : if (failure)
1673 75 : branchTestPtr(Assembler::Zero, dest, dest, failure);
1674 80 : }
1675 :
1676 : void
1677 7 : MacroAssembler::loadBaselineOrIonNoArgCheck(Register script, Register dest, Label* failure)
1678 : {
1679 7 : loadPtr(Address(script, JSScript::offsetOfBaselineOrIonSkipArgCheck()), dest);
1680 7 : if (failure)
1681 7 : branchTestPtr(Assembler::Zero, dest, dest, failure);
1682 7 : }
1683 :
1684 : void
1685 2577 : MacroAssembler::loadBaselineFramePtr(Register framePtr, Register dest)
1686 : {
1687 2577 : if (framePtr != dest)
1688 2368 : movePtr(framePtr, dest);
1689 2577 : subPtr(Imm32(BaselineFrame::Size()), dest);
1690 2577 : }
1691 :
1692 : void
1693 1119 : MacroAssembler::handleFailure()
1694 : {
1695 : // Re-entry code is irrelevant because the exception will leave the
1696 : // running function and never come back.
1697 1119 : JitCode* excTail = GetJitContext()->runtime->jitRuntime()->getExceptionTail();
1698 1119 : jump(excTail);
1699 1119 : }
1700 :
1701 : #ifdef DEBUG
1702 : static void
1703 0 : AssumeUnreachable_(const char* output) {
1704 0 : MOZ_ReportAssertionFailure(output, __FILE__, __LINE__);
1705 0 : }
1706 : #endif
1707 :
1708 : void
1709 8120 : MacroAssembler::assumeUnreachable(const char* output)
1710 : {
1711 : #ifdef DEBUG
1712 8120 : if (!IsCompilingWasm()) {
1713 8120 : AllocatableRegisterSet regs(RegisterSet::Volatile());
1714 8120 : LiveRegisterSet save(regs.asLiveSet());
1715 8120 : PushRegsInMask(save);
1716 8120 : Register temp = regs.takeAnyGeneral();
1717 :
1718 8120 : setupUnalignedABICall(temp);
1719 8120 : movePtr(ImmPtr(output), temp);
1720 8120 : passABIArg(temp);
1721 8120 : callWithABI(JS_FUNC_TO_DATA_PTR(void*, AssumeUnreachable_));
1722 :
1723 8120 : PopRegsInMask(save);
1724 : }
1725 : #endif
1726 :
1727 8120 : breakpoint();
1728 8120 : }
1729 :
1730 : template<typename T>
1731 : void
1732 0 : MacroAssembler::assertTestInt32(Condition cond, const T& value, const char* output)
1733 : {
1734 : #ifdef DEBUG
1735 0 : Label ok;
1736 0 : branchTestInt32(cond, value, &ok);
1737 0 : assumeUnreachable(output);
1738 0 : bind(&ok);
1739 : #endif
1740 0 : }
1741 :
1742 : template void MacroAssembler::assertTestInt32(Condition, const Address&, const char*);
1743 :
1744 : static void
1745 0 : Printf0_(const char* output) {
1746 : // Use stderr instead of stdout because this is only used for debug
1747 : // output. stderr is less likely to interfere with the program's normal
1748 : // output, and it's always unbuffered.
1749 0 : fprintf(stderr, "%s", output);
1750 0 : }
1751 :
1752 : void
1753 0 : MacroAssembler::printf(const char* output)
1754 : {
1755 0 : AllocatableRegisterSet regs(RegisterSet::Volatile());
1756 0 : LiveRegisterSet save(regs.asLiveSet());
1757 0 : PushRegsInMask(save);
1758 :
1759 0 : Register temp = regs.takeAnyGeneral();
1760 :
1761 0 : setupUnalignedABICall(temp);
1762 0 : movePtr(ImmPtr(output), temp);
1763 0 : passABIArg(temp);
1764 0 : callWithABI(JS_FUNC_TO_DATA_PTR(void*, Printf0_));
1765 :
1766 0 : PopRegsInMask(save);
1767 0 : }
1768 :
1769 : static void
1770 0 : Printf1_(const char* output, uintptr_t value) {
1771 0 : AutoEnterOOMUnsafeRegion oomUnsafe;
1772 0 : js::UniqueChars line = JS_sprintf_append(nullptr, output, value);
1773 0 : if (!line)
1774 0 : oomUnsafe.crash("OOM at masm.printf");
1775 0 : fprintf(stderr, "%s", line.get());
1776 0 : }
1777 :
1778 : void
1779 0 : MacroAssembler::printf(const char* output, Register value)
1780 : {
1781 0 : AllocatableRegisterSet regs(RegisterSet::Volatile());
1782 0 : LiveRegisterSet save(regs.asLiveSet());
1783 0 : PushRegsInMask(save);
1784 :
1785 0 : regs.takeUnchecked(value);
1786 :
1787 0 : Register temp = regs.takeAnyGeneral();
1788 :
1789 0 : setupUnalignedABICall(temp);
1790 0 : movePtr(ImmPtr(output), temp);
1791 0 : passABIArg(temp);
1792 0 : passABIArg(value);
1793 0 : callWithABI(JS_FUNC_TO_DATA_PTR(void*, Printf1_));
1794 :
1795 0 : PopRegsInMask(save);
1796 0 : }
1797 :
1798 : #ifdef JS_TRACE_LOGGING
1799 : void
1800 632 : MacroAssembler::tracelogStartId(Register logger, uint32_t textId, bool force)
1801 : {
1802 632 : if (!force && !TraceLogTextIdEnabled(textId))
1803 0 : return;
1804 :
1805 632 : AllocatableRegisterSet regs(RegisterSet::Volatile());
1806 632 : LiveRegisterSet save(regs.asLiveSet());
1807 632 : PushRegsInMask(save);
1808 632 : regs.takeUnchecked(logger);
1809 :
1810 632 : Register temp = regs.takeAnyGeneral();
1811 :
1812 632 : setupUnalignedABICall(temp);
1813 632 : passABIArg(logger);
1814 632 : move32(Imm32(textId), temp);
1815 632 : passABIArg(temp);
1816 632 : callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogStartEventPrivate));
1817 :
1818 632 : PopRegsInMask(save);
1819 : }
1820 :
1821 : void
1822 0 : MacroAssembler::tracelogStartId(Register logger, Register textId)
1823 : {
1824 0 : AllocatableRegisterSet regs(RegisterSet::Volatile());
1825 0 : LiveRegisterSet save(regs.asLiveSet());
1826 0 : PushRegsInMask(save);
1827 0 : regs.takeUnchecked(logger);
1828 0 : regs.takeUnchecked(textId);
1829 :
1830 0 : Register temp = regs.takeAnyGeneral();
1831 :
1832 0 : setupUnalignedABICall(temp);
1833 0 : passABIArg(logger);
1834 0 : passABIArg(textId);
1835 0 : callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogStartEventPrivate));
1836 :
1837 0 : PopRegsInMask(save);
1838 0 : }
1839 :
1840 : void
1841 632 : MacroAssembler::tracelogStartEvent(Register logger, Register event)
1842 : {
1843 632 : void (&TraceLogFunc)(TraceLoggerThread*, const TraceLoggerEvent&) = TraceLogStartEvent;
1844 :
1845 632 : AllocatableRegisterSet regs(RegisterSet::Volatile());
1846 632 : LiveRegisterSet save(regs.asLiveSet());
1847 632 : PushRegsInMask(save);
1848 632 : regs.takeUnchecked(logger);
1849 632 : regs.takeUnchecked(event);
1850 :
1851 632 : Register temp = regs.takeAnyGeneral();
1852 :
1853 632 : setupUnalignedABICall(temp);
1854 632 : passABIArg(logger);
1855 632 : passABIArg(event);
1856 632 : callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogFunc));
1857 :
1858 632 : PopRegsInMask(save);
1859 632 : }
1860 :
1861 : void
1862 1254 : MacroAssembler::tracelogStopId(Register logger, uint32_t textId, bool force)
1863 : {
1864 1254 : if (!force && !TraceLogTextIdEnabled(textId))
1865 0 : return;
1866 :
1867 1254 : AllocatableRegisterSet regs(RegisterSet::Volatile());
1868 1254 : LiveRegisterSet save(regs.asLiveSet());
1869 1254 : PushRegsInMask(save);
1870 1254 : regs.takeUnchecked(logger);
1871 :
1872 1254 : Register temp = regs.takeAnyGeneral();
1873 :
1874 1254 : setupUnalignedABICall(temp);
1875 1254 : passABIArg(logger);
1876 1254 : move32(Imm32(textId), temp);
1877 1254 : passABIArg(temp);
1878 :
1879 1254 : callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogStopEventPrivate));
1880 :
1881 1254 : PopRegsInMask(save);
1882 : }
1883 :
1884 : void
1885 0 : MacroAssembler::tracelogStopId(Register logger, Register textId)
1886 : {
1887 0 : AllocatableRegisterSet regs(RegisterSet::Volatile());
1888 0 : LiveRegisterSet save(regs.asLiveSet());
1889 0 : PushRegsInMask(save);
1890 0 : regs.takeUnchecked(logger);
1891 0 : regs.takeUnchecked(textId);
1892 :
1893 0 : Register temp = regs.takeAnyGeneral();
1894 :
1895 0 : setupUnalignedABICall(temp);
1896 0 : passABIArg(logger);
1897 0 : passABIArg(textId);
1898 0 : callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogStopEventPrivate));
1899 :
1900 0 : PopRegsInMask(save);
1901 0 : }
1902 : #endif
1903 :
1904 : void
1905 0 : MacroAssembler::convertInt32ValueToDouble(const Address& address, Register scratch, Label* done)
1906 : {
1907 0 : branchTestInt32(Assembler::NotEqual, address, done);
1908 0 : unboxInt32(address, scratch);
1909 0 : convertInt32ToDouble(scratch, ScratchDoubleReg);
1910 0 : storeDouble(ScratchDoubleReg, address);
1911 0 : }
1912 :
1913 : void
1914 7 : MacroAssembler::convertInt32ValueToDouble(ValueOperand val)
1915 : {
1916 14 : Label done;
1917 7 : branchTestInt32(Assembler::NotEqual, val, &done);
1918 7 : unboxInt32(val, val.scratchReg());
1919 7 : convertInt32ToDouble(val.scratchReg(), ScratchDoubleReg);
1920 7 : boxDouble(ScratchDoubleReg, val);
1921 7 : bind(&done);
1922 7 : }
1923 :
1924 : void
1925 0 : MacroAssembler::convertValueToFloatingPoint(ValueOperand value, FloatRegister output,
1926 : Label* fail, MIRType outputType)
1927 : {
1928 0 : Register tag = splitTagForTest(value);
1929 :
1930 0 : Label isDouble, isInt32, isBool, isNull, done;
1931 :
1932 0 : branchTestDouble(Assembler::Equal, tag, &isDouble);
1933 0 : branchTestInt32(Assembler::Equal, tag, &isInt32);
1934 0 : branchTestBoolean(Assembler::Equal, tag, &isBool);
1935 0 : branchTestNull(Assembler::Equal, tag, &isNull);
1936 0 : branchTestUndefined(Assembler::NotEqual, tag, fail);
1937 :
1938 : // fall-through: undefined
1939 0 : loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output, outputType);
1940 0 : jump(&done);
1941 :
1942 0 : bind(&isNull);
1943 0 : loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
1944 0 : jump(&done);
1945 :
1946 0 : bind(&isBool);
1947 0 : boolValueToFloatingPoint(value, output, outputType);
1948 0 : jump(&done);
1949 :
1950 0 : bind(&isInt32);
1951 0 : int32ValueToFloatingPoint(value, output, outputType);
1952 0 : jump(&done);
1953 :
1954 0 : bind(&isDouble);
1955 0 : FloatRegister tmp = output;
1956 0 : if (outputType == MIRType::Float32 && hasMultiAlias())
1957 0 : tmp = ScratchDoubleReg;
1958 :
1959 0 : unboxDouble(value, tmp);
1960 0 : if (outputType == MIRType::Float32)
1961 0 : convertDoubleToFloat32(tmp, output);
1962 :
1963 0 : bind(&done);
1964 0 : }
1965 :
1966 : bool
1967 0 : MacroAssembler::convertValueToFloatingPoint(JSContext* cx, const Value& v, FloatRegister output,
1968 : Label* fail, MIRType outputType)
1969 : {
1970 0 : if (v.isNumber() || v.isString()) {
1971 : double d;
1972 0 : if (v.isNumber())
1973 0 : d = v.toNumber();
1974 0 : else if (!StringToNumber(cx, v.toString(), &d))
1975 0 : return false;
1976 :
1977 0 : loadConstantFloatingPoint(d, (float)d, output, outputType);
1978 0 : return true;
1979 : }
1980 :
1981 0 : if (v.isBoolean()) {
1982 0 : if (v.toBoolean())
1983 0 : loadConstantFloatingPoint(1.0, 1.0f, output, outputType);
1984 : else
1985 0 : loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
1986 0 : return true;
1987 : }
1988 :
1989 0 : if (v.isNull()) {
1990 0 : loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
1991 0 : return true;
1992 : }
1993 :
1994 0 : if (v.isUndefined()) {
1995 0 : loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output, outputType);
1996 0 : return true;
1997 : }
1998 :
1999 0 : MOZ_ASSERT(v.isObject() || v.isSymbol());
2000 0 : jump(fail);
2001 0 : return true;
2002 : }
2003 :
2004 : bool
2005 0 : MacroAssembler::convertConstantOrRegisterToFloatingPoint(JSContext* cx,
2006 : const ConstantOrRegister& src,
2007 : FloatRegister output, Label* fail,
2008 : MIRType outputType)
2009 : {
2010 0 : if (src.constant())
2011 0 : return convertValueToFloatingPoint(cx, src.value(), output, fail, outputType);
2012 :
2013 0 : convertTypedOrValueToFloatingPoint(src.reg(), output, fail, outputType);
2014 0 : return true;
2015 : }
2016 :
2017 : void
2018 0 : MacroAssembler::convertTypedOrValueToFloatingPoint(TypedOrValueRegister src, FloatRegister output,
2019 : Label* fail, MIRType outputType)
2020 : {
2021 0 : MOZ_ASSERT(IsFloatingPointType(outputType));
2022 :
2023 0 : if (src.hasValue()) {
2024 0 : convertValueToFloatingPoint(src.valueReg(), output, fail, outputType);
2025 0 : return;
2026 : }
2027 :
2028 0 : bool outputIsDouble = outputType == MIRType::Double;
2029 0 : switch (src.type()) {
2030 : case MIRType::Null:
2031 0 : loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
2032 0 : break;
2033 : case MIRType::Boolean:
2034 : case MIRType::Int32:
2035 0 : convertInt32ToFloatingPoint(src.typedReg().gpr(), output, outputType);
2036 0 : break;
2037 : case MIRType::Float32:
2038 0 : if (outputIsDouble) {
2039 0 : convertFloat32ToDouble(src.typedReg().fpu(), output);
2040 : } else {
2041 0 : if (src.typedReg().fpu() != output)
2042 0 : moveFloat32(src.typedReg().fpu(), output);
2043 : }
2044 0 : break;
2045 : case MIRType::Double:
2046 0 : if (outputIsDouble) {
2047 0 : if (src.typedReg().fpu() != output)
2048 0 : moveDouble(src.typedReg().fpu(), output);
2049 : } else {
2050 0 : convertDoubleToFloat32(src.typedReg().fpu(), output);
2051 : }
2052 0 : break;
2053 : case MIRType::Object:
2054 : case MIRType::String:
2055 : case MIRType::Symbol:
2056 0 : jump(fail);
2057 0 : break;
2058 : case MIRType::Undefined:
2059 0 : loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output, outputType);
2060 0 : break;
2061 : default:
2062 0 : MOZ_CRASH("Bad MIRType");
2063 : }
2064 : }
2065 :
2066 : void
2067 1 : MacroAssembler::outOfLineTruncateSlow(FloatRegister src, Register dest, bool widenFloatToDouble,
2068 : bool compilingWasm, wasm::BytecodeOffset callOffset)
2069 : {
2070 : #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
2071 : defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
2072 : if (widenFloatToDouble) {
2073 : convertFloat32ToDouble(src, ScratchDoubleReg);
2074 : src = ScratchDoubleReg;
2075 : }
2076 : #elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
2077 1 : FloatRegister srcSingle;
2078 1 : if (widenFloatToDouble) {
2079 0 : MOZ_ASSERT(src.isSingle());
2080 0 : srcSingle = src;
2081 0 : src = src.asDouble();
2082 0 : Push(srcSingle);
2083 0 : convertFloat32ToDouble(srcSingle, src);
2084 : }
2085 : #else
2086 : // Also see below
2087 : MOZ_CRASH("MacroAssembler platform hook: outOfLineTruncateSlow");
2088 : #endif
2089 :
2090 1 : MOZ_ASSERT(src.isDouble());
2091 :
2092 1 : if (compilingWasm) {
2093 0 : setupWasmABICall();
2094 0 : passABIArg(src, MoveOp::DOUBLE);
2095 0 : callWithABI(callOffset, wasm::SymbolicAddress::ToInt32);
2096 : } else {
2097 1 : setupUnalignedABICall(dest);
2098 1 : passABIArg(src, MoveOp::DOUBLE);
2099 1 : callWithABI(mozilla::BitwiseCast<void*, int32_t(*)(double)>(JS::ToInt32));
2100 : }
2101 1 : storeCallWordResult(dest);
2102 :
2103 : #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
2104 : defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
2105 : // Nothing
2106 : #elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
2107 1 : if (widenFloatToDouble)
2108 0 : Pop(srcSingle);
2109 : #else
2110 : MOZ_CRASH("MacroAssembler platform hook: outOfLineTruncateSlow");
2111 : #endif
2112 1 : }
2113 :
2114 : void
2115 0 : MacroAssembler::convertDoubleToInt(FloatRegister src, Register output, FloatRegister temp,
2116 : Label* truncateFail, Label* fail,
2117 : IntConversionBehavior behavior)
2118 : {
2119 0 : switch (behavior) {
2120 : case IntConversion_Normal:
2121 : case IntConversion_NegativeZeroCheck:
2122 0 : convertDoubleToInt32(src, output, fail, behavior == IntConversion_NegativeZeroCheck);
2123 0 : break;
2124 : case IntConversion_Truncate:
2125 0 : branchTruncateDoubleMaybeModUint32(src, output, truncateFail ? truncateFail : fail);
2126 0 : break;
2127 : case IntConversion_ClampToUint8:
2128 : // Clamping clobbers the input register, so use a temp.
2129 0 : moveDouble(src, temp);
2130 0 : clampDoubleToUint8(temp, output);
2131 0 : break;
2132 : }
2133 0 : }
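:
: // Behavior summary with example inputs (Truncate is assumed to follow
: // ECMAScript ToInt32, and ClampToUint8 the canvas-style clamping; values
: // here are illustrative):
: //
: //   IntConversion_Normal:            1.5 -> fail;   -0.0 -> 0
: //   IntConversion_NegativeZeroCheck: 1.0 -> 1;      -0.0 -> fail
: //   IntConversion_Truncate:          4294967296.0 -> 0 (modulo 2^32)
: //   IntConversion_ClampToUint8:      300.0 -> 255;  -5.0 -> 0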
2134 :
2135 : void
2136 2 : MacroAssembler::convertValueToInt(ValueOperand value, MDefinition* maybeInput,
2137 : Label* handleStringEntry, Label* handleStringRejoin,
2138 : Label* truncateDoubleSlow,
2139 : Register stringReg, FloatRegister temp, Register output,
2140 : Label* fail, IntConversionBehavior behavior,
2141 : IntConversionInputKind conversion)
2142 : {
2143 2 : Register tag = splitTagForTest(value);
2144 1 : bool handleStrings = (behavior == IntConversion_Truncate ||
2145 1 : behavior == IntConversion_ClampToUint8) &&
2146 2 : handleStringEntry &&
2147 2 : handleStringRejoin;
2148 :
2149 2 : MOZ_ASSERT_IF(handleStrings, conversion == IntConversion_Any);
2150 :
2151 4 : Label done, isInt32, isBool, isDouble, isNull, isString;
2152 :
2153 2 : maybeBranchTestType(MIRType::Int32, maybeInput, tag, &isInt32);
2154 2 : if (conversion == IntConversion_Any || conversion == IntConversion_NumbersOrBoolsOnly)
2155 1 : maybeBranchTestType(MIRType::Boolean, maybeInput, tag, &isBool);
2156 2 : maybeBranchTestType(MIRType::Double, maybeInput, tag, &isDouble);
2157 :
2158 2 : if (conversion == IntConversion_Any) {
2159 : // If we are not truncating, we fail for anything that's not
2160 : // null. Otherwise we might be able to handle strings and objects.
2161 1 : switch (behavior) {
2162 : case IntConversion_Normal:
2163 : case IntConversion_NegativeZeroCheck:
2164 0 : branchTestNull(Assembler::NotEqual, tag, fail);
2165 0 : break;
2166 :
2167 : case IntConversion_Truncate:
2168 : case IntConversion_ClampToUint8:
2169 1 : maybeBranchTestType(MIRType::Null, maybeInput, tag, &isNull);
2170 1 : if (handleStrings)
2171 0 : maybeBranchTestType(MIRType::String, maybeInput, tag, &isString);
2172 1 : maybeBranchTestType(MIRType::Object, maybeInput, tag, fail);
2173 1 : branchTestUndefined(Assembler::NotEqual, tag, fail);
2174 1 : break;
2175 : }
2176 : } else {
2177 1 : jump(fail);
2178 : }
2179 :
2180 : // The value is null or undefined in truncation contexts; just emit 0.
2181 2 : if (isNull.used())
2182 0 : bind(&isNull);
2183 2 : mov(ImmWord(0), output);
2184 2 : jump(&done);
2185 :
2186 : // Try converting a string into a double, then jump to the double case.
2187 2 : if (handleStrings) {
2188 0 : bind(&isString);
2189 0 : unboxString(value, stringReg);
2190 0 : jump(handleStringEntry);
2191 : }
2192 :
2193 : // Try converting double into integer.
2194 2 : if (isDouble.used() || handleStrings) {
2195 0 : if (isDouble.used()) {
2196 0 : bind(&isDouble);
2197 0 : unboxDouble(value, temp);
2198 : }
2199 :
2200 0 : if (handleStrings)
2201 0 : bind(handleStringRejoin);
2202 :
2203 0 : convertDoubleToInt(temp, output, temp, truncateDoubleSlow, fail, behavior);
2204 0 : jump(&done);
2205 : }
2206 :
2207 : // Just unbox a bool, the result is 0 or 1.
2208 2 : if (isBool.used()) {
2209 0 : bind(&isBool);
2210 0 : unboxBoolean(value, output);
2211 0 : jump(&done);
2212 : }
2213 :
2214 : // Integers can be unboxed.
2215 2 : if (isInt32.used()) {
2216 1 : bind(&isInt32);
2217 1 : unboxInt32(value, output);
2218 1 : if (behavior == IntConversion_ClampToUint8)
2219 0 : clampIntToUint8(output);
2220 : }
2221 :
2222 2 : bind(&done);
2223 2 : }
2224 :
2225 : bool
2226 0 : MacroAssembler::convertValueToInt(JSContext* cx, const Value& v, Register output, Label* fail,
2227 : IntConversionBehavior behavior)
2228 : {
2229 0 : bool handleStrings = (behavior == IntConversion_Truncate ||
2230 0 : behavior == IntConversion_ClampToUint8);
2231 :
2232 0 : if (v.isNumber() || (handleStrings && v.isString())) {
2233 : double d;
2234 0 : if (v.isNumber())
2235 0 : d = v.toNumber();
2236 0 : else if (!StringToNumber(cx, v.toString(), &d))
2237 0 : return false;
2238 :
2239 0 : switch (behavior) {
2240 : case IntConversion_Normal:
2241 : case IntConversion_NegativeZeroCheck: {
2242 : // -0 is checked anyway if we have a constant value.
2243 : int i;
2244 0 : if (mozilla::NumberIsInt32(d, &i))
2245 0 : move32(Imm32(i), output);
2246 : else
2247 0 : jump(fail);
2248 0 : break;
2249 : }
2250 : case IntConversion_Truncate:
2251 0 : move32(Imm32(ToInt32(d)), output);
2252 0 : break;
2253 : case IntConversion_ClampToUint8:
2254 0 : move32(Imm32(ClampDoubleToUint8(d)), output);
2255 0 : break;
2256 : }
2257 :
2258 0 : return true;
2259 : }
2260 :
2261 0 : if (v.isBoolean()) {
2262 0 : move32(Imm32(v.toBoolean() ? 1 : 0), output);
2263 0 : return true;
2264 : }
2265 :
2266 0 : if (v.isNull() || v.isUndefined()) {
2267 0 : move32(Imm32(0), output);
2268 0 : return true;
2269 : }
2270 :
2271 0 : MOZ_ASSERT(v.isObject() || v.isSymbol());
2272 :
2273 0 : jump(fail);
2274 0 : return true;
2275 : }
2276 :
2277 : bool
2278 0 : MacroAssembler::convertConstantOrRegisterToInt(JSContext* cx,
2279 : const ConstantOrRegister& src,
2280 : FloatRegister temp, Register output,
2281 : Label* fail, IntConversionBehavior behavior)
2282 : {
2283 0 : if (src.constant())
2284 0 : return convertValueToInt(cx, src.value(), output, fail, behavior);
2285 :
2286 0 : convertTypedOrValueToInt(src.reg(), temp, output, fail, behavior);
2287 0 : return true;
2288 : }
2289 :
2290 : void
2291 0 : MacroAssembler::convertTypedOrValueToInt(TypedOrValueRegister src, FloatRegister temp,
2292 : Register output, Label* fail,
2293 : IntConversionBehavior behavior)
2294 : {
2295 0 : if (src.hasValue()) {
2296 0 : convertValueToInt(src.valueReg(), temp, output, fail, behavior);
2297 0 : return;
2298 : }
2299 :
2300 0 : switch (src.type()) {
2301 : case MIRType::Undefined:
2302 : case MIRType::Null:
2303 0 : move32(Imm32(0), output);
2304 0 : break;
2305 : case MIRType::Boolean:
2306 : case MIRType::Int32:
2307 0 : if (src.typedReg().gpr() != output)
2308 0 : move32(src.typedReg().gpr(), output);
2309 0 : if (src.type() == MIRType::Int32 && behavior == IntConversion_ClampToUint8)
2310 0 : clampIntToUint8(output);
2311 0 : break;
2312 : case MIRType::Double:
2313 0 : convertDoubleToInt(src.typedReg().fpu(), output, temp, nullptr, fail, behavior);
2314 0 : break;
2315 : case MIRType::Float32:
2316 : // Conversion to Double simplifies implementation at the expense of performance.
2317 0 : convertFloat32ToDouble(src.typedReg().fpu(), temp);
2318 0 : convertDoubleToInt(temp, output, temp, nullptr, fail, behavior);
2319 0 : break;
2320 : case MIRType::String:
2321 : case MIRType::Symbol:
2322 : case MIRType::Object:
2323 0 : jump(fail);
2324 0 : break;
2325 : default:
2326 0 : MOZ_CRASH("Bad MIRType");
2327 : }
2328 : }
2329 :
2330 : void
2331 4499 : MacroAssembler::finish()
2332 : {
2333 4499 : if (failureLabel_.used()) {
2334 1119 : bind(&failureLabel_);
2335 1119 : handleFailure();
2336 : }
2337 :
2338 4499 : MacroAssemblerSpecific::finish();
2339 4499 : }
2340 :
2341 : void
2342 4499 : MacroAssembler::link(JitCode* code)
2343 : {
2344 4499 : MOZ_ASSERT(!oom());
2345 4499 : linkSelfReference(code);
2346 4499 : linkProfilerCallSites(code);
2347 4499 : }
2348 :
2349 13225 : MacroAssembler::AutoProfilerCallInstrumentation::AutoProfilerCallInstrumentation(
2350 : MacroAssembler& masm
2351 13225 : MOZ_GUARD_OBJECT_NOTIFIER_PARAM_IN_IMPL)
2352 : {
2353 13225 : MOZ_GUARD_OBJECT_NOTIFIER_INIT;
2354 13225 : if (!masm.emitProfilingInstrumentation_)
2355 13225 : return;
2356 :
2357 0 : Register reg = CallTempReg0;
2358 0 : Register reg2 = CallTempReg1;
2359 0 : masm.push(reg);
2360 0 : masm.push(reg2);
2361 :
2362 0 : CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), reg);
2363 0 : masm.loadJSContext(reg2);
2364 0 : masm.loadPtr(Address(reg2, offsetof(JSContext, profilingActivation_)), reg2);
2365 0 : masm.storePtr(reg, Address(reg2, JitActivation::offsetOfLastProfilingCallSite()));
2366 :
2367 0 : masm.appendProfilerCallSite(label);
2368 :
2369 0 : masm.pop(reg2);
2370 0 : masm.pop(reg);
2371 : }
2372 :
2373 : void
2374 4499 : MacroAssembler::linkProfilerCallSites(JitCode* code)
2375 : {
2376 4499 : for (size_t i = 0; i < profilerCallSites_.length(); i++) {
2377 0 : CodeOffset offset = profilerCallSites_[i];
2378 0 : CodeLocationLabel location(code, offset);
2379 0 : PatchDataWithValueCheck(location, ImmPtr(location.raw()), ImmPtr((void*)-1));
2380 : }
2381 4499 : }
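:
: // The ImmPtr((void*)-1) passed above is the expected old value: it matches
: // the uintptr_t(-1) placeholder written by movWithPatch in
: // AutoProfilerCallInstrumentation, so each patch is checked against the
: // placeholder before being overwritten with the real call-site address.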
2382 :
2383 : void
2384 55 : MacroAssembler::alignJitStackBasedOnNArgs(Register nargs)
2385 : {
2386 : if (JitStackValueAlignment == 1)
2387 : return;
2388 :
2389 : // A JitFrameLayout is composed of the following:
2390 : // [padding?] [argN] .. [arg1] [this] [[argc] [callee] [descr] [raddr]]
2391 : //
2392 : // We want to ensure that the |raddr| address is aligned.
2393 : // Which implies that we want to ensure that |this| is aligned.
2394 : static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
2395 : "No need to consider the JitFrameLayout for aligning the stack");
2396 :
2397 : // Which implies that |argN| is aligned if |nargs| is even, and offset by
2398 : // |sizeof(Value)| if |nargs| is odd.
2399 : MOZ_ASSERT(JitStackValueAlignment == 2);
2400 :
2401 : // Thus the |padding| is offset by |sizeof(Value)| if |nargs| is even, and
2402 : // aligned if |nargs| is odd.
2403 :
2404 : // if (nargs % 2 == 0) {
2405 : // if (sp % JitStackAlignment == 0)
2406 : // sp -= sizeof(Value);
2407 : // MOZ_ASSERT(sp % JitStackAlignment == JitStackAlignment - sizeof(Value));
2408 : // } else {
2409 : // sp = sp & ~(JitStackAlignment - 1);
2410 : // }
2411 110 : Label odd, end;
2412 55 : Label* maybeAssert = &end;
2413 : #ifdef DEBUG
2414 110 : Label assert;
2415 55 : maybeAssert = &assert;
2416 : #endif
2417 55 : assertStackAlignment(sizeof(Value), 0);
2418 55 : branchTestPtr(Assembler::NonZero, nargs, Imm32(1), &odd);
2419 55 : branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), maybeAssert);
2420 55 : subFromStackPtr(Imm32(sizeof(Value)));
2421 : #ifdef DEBUG
2422 55 : bind(&assert);
2423 : #endif
2424 55 : assertStackAlignment(JitStackAlignment, sizeof(Value));
2425 55 : jump(&end);
2426 55 : bind(&odd);
2427 55 : andToStackPtr(Imm32(~(JitStackAlignment - 1)));
2428 55 : bind(&end);
2429 : }
2430 :
2431 : void
2432 11 : MacroAssembler::alignJitStackBasedOnNArgs(uint32_t nargs)
2433 : {
2434 : if (JitStackValueAlignment == 1)
2435 : return;
2436 :
2437 : // A JitFrameLayout is composed of the following:
2438 : // [padding?] [argN] .. [arg1] [this] [[argc] [callee] [descr] [raddr]]
2439 : //
2440 : // We want to ensure that the |raddr| address is aligned.
2441 : // Which implies that we want to ensure that |this| is aligned.
2442 : static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
2443 : "No need to consider the JitFrameLayout for aligning the stack");
2444 :
2445 : // Which implies that |argN| is aligned if |nargs| is even, and offset by
2446 : // |sizeof(Value)| if |nargs| is odd.
2447 : MOZ_ASSERT(JitStackValueAlignment == 2);
2448 :
2449 : // Thus the |padding| is offset by |sizeof(Value)| if |nargs| is even, and
2450 : // aligned if |nargs| is odd.
2451 :
2452 11 : assertStackAlignment(sizeof(Value), 0);
2453 11 : if (nargs % 2 == 0) {
2454 20 : Label end;
2455 10 : branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), &end);
2456 10 : subFromStackPtr(Imm32(sizeof(Value)));
2457 10 : bind(&end);
2458 10 : assertStackAlignment(JitStackAlignment, sizeof(Value));
2459 : } else {
2460 1 : andToStackPtr(Imm32(~(JitStackAlignment - 1)));
2461 : }
2462 : }
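:
: // Worked example, assuming JitStackAlignment == 16 and sizeof(Value) == 8
: // (i.e. JitStackValueAlignment == 2, as on 64-bit targets):
: //   nargs == 2 (even): if sp is 16-byte aligned, push 8 bytes of padding so
: //     |this| ends up aligned and |raddr| stays aligned after the pushes;
: //   nargs == 3 (odd):  just round down with sp &= ~15.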
2463 :
2464 : // ===============================================================
2465 :
2466 32 : MacroAssembler::MacroAssembler(JSContext* cx, IonScript* ion,
2467 32 : JSScript* script, jsbytecode* pc)
2468 : : framePushed_(0),
2469 : #ifdef DEBUG
2470 : inCall_(false),
2471 : #endif
2472 32 : emitProfilingInstrumentation_(false)
2473 : {
2474 32 : constructRoot(cx);
2475 32 : jitContext_.emplace(cx, (js::jit::TempAllocator*)nullptr);
2476 32 : alloc_.emplace(cx);
2477 32 : moveResolver_.setAllocator(*jitContext_->temp);
2478 : #if defined(JS_CODEGEN_ARM)
2479 : initWithAllocator();
2480 : m_buffer.id = GetJitContext()->getNextAssemblerId();
2481 : #elif defined(JS_CODEGEN_ARM64)
2482 : initWithAllocator();
2483 : armbuffer_.id = GetJitContext()->getNextAssemblerId();
2484 : #endif
2485 32 : if (ion) {
2486 0 : setFramePushed(ion->frameSize());
2487 0 : if (pc && cx->runtime()->geckoProfiler().enabled())
2488 0 : enableProfilingInstrumentation();
2489 : }
2490 32 : }
2491 :
2492 : MacroAssembler::AfterICSaveLive
2493 0 : MacroAssembler::icSaveLive(LiveRegisterSet& liveRegs)
2494 : {
2495 0 : PushRegsInMask(liveRegs);
2496 0 : AfterICSaveLive aic(framePushed());
2497 0 : alignFrameForICArguments(aic);
2498 0 : return aic;
2499 : }
2500 :
2501 : bool
2502 0 : MacroAssembler::icBuildOOLFakeExitFrame(void* fakeReturnAddr, AfterICSaveLive& aic)
2503 : {
2504 0 : return buildOOLFakeExitFrame(fakeReturnAddr);
2505 : }
2506 :
2507 : bool
2508 0 : MacroAssembler::icBuildOOLFakeExitFrame(void* fakeReturnAddr, AutoSaveLiveRegisters& save)
2509 : {
2510 0 : return buildOOLFakeExitFrame(fakeReturnAddr);
2511 : }
2512 :
2513 : void
2514 0 : MacroAssembler::icRestoreLive(LiveRegisterSet& liveRegs, AfterICSaveLive& aic)
2515 : {
2516 0 : restoreFrameAlignmentForICArguments(aic);
2517 0 : MOZ_ASSERT(framePushed() == aic.initialStack);
2518 0 : PopRegsInMask(liveRegs);
2519 0 : }
2520 :
2521 : #ifndef JS_CODEGEN_ARM64
2522 : void
2523 0 : MacroAssembler::subFromStackPtr(Register reg)
2524 : {
2525 0 : subPtr(reg, getStackPointer());
2526 0 : }
2527 : #endif // JS_CODEGEN_ARM64
2528 :
2529 : //{{{ check_macroassembler_style
2530 : // ===============================================================
2531 : // Stack manipulation functions.
2532 :
2533 : void
2534 92 : MacroAssembler::PushRegsInMask(LiveGeneralRegisterSet set)
2535 : {
2536 92 : PushRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
2537 92 : }
2538 :
2539 : void
2540 11216 : MacroAssembler::PopRegsInMask(LiveRegisterSet set)
2541 : {
2542 11216 : PopRegsInMaskIgnore(set, LiveRegisterSet());
2543 11216 : }
2544 :
2545 : void
2546 92 : MacroAssembler::PopRegsInMask(LiveGeneralRegisterSet set)
2547 : {
2548 92 : PopRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
2549 92 : }
2550 :
2551 : void
2552 0 : MacroAssembler::Push(jsid id, Register scratchReg)
2553 : {
2554 0 : if (JSID_IS_GCTHING(id)) {
2555 : // If we're pushing a gcthing, then we can't just push the tagged jsid
2556 : // value since the GC won't have any idea that the push instruction
2557 : // carries a reference to a gcthing. Need to unpack the pointer,
2558 : // push it using ImmGCPtr, and then rematerialize the id at runtime.
2559 :
2560 0 : if (JSID_IS_STRING(id)) {
2561 0 : JSString* str = JSID_TO_STRING(id);
2562 0 : MOZ_ASSERT(((size_t)str & JSID_TYPE_MASK) == 0);
2563 : MOZ_ASSERT(JSID_TYPE_STRING == 0x0);
2564 0 : Push(ImmGCPtr(str));
2565 : } else {
2566 0 : MOZ_ASSERT(JSID_IS_SYMBOL(id));
2567 0 : JS::Symbol* sym = JSID_TO_SYMBOL(id);
2568 0 : movePtr(ImmGCPtr(sym), scratchReg);
2569 0 : orPtr(Imm32(JSID_TYPE_SYMBOL), scratchReg);
2570 0 : Push(scratchReg);
2571 : }
2572 : } else {
2573 0 : Push(ImmWord(JSID_BITS(id)));
2574 : }
2575 0 : }
2576 :
2577 : void
2578 104 : MacroAssembler::Push(TypedOrValueRegister v)
2579 : {
2580 104 : if (v.hasValue()) {
2581 5 : Push(v.valueReg());
2582 99 : } else if (IsFloatingPointType(v.type())) {
2583 0 : FloatRegister reg = v.typedReg().fpu();
2584 0 : if (v.type() == MIRType::Float32) {
2585 0 : convertFloat32ToDouble(reg, ScratchDoubleReg);
2586 0 : reg = ScratchDoubleReg;
2587 : }
2588 0 : Push(reg);
2589 : } else {
2590 99 : Push(ValueTypeFromMIRType(v.type()), v.typedReg().gpr());
2591 : }
2592 104 : }
2593 :
2594 : void
2595 39 : MacroAssembler::Push(const ConstantOrRegister& v)
2596 : {
2597 39 : if (v.constant())
2598 19 : Push(v.value());
2599 : else
2600 20 : Push(v.reg());
2601 39 : }
2602 :
2603 : void
2604 557 : MacroAssembler::Push(const ValueOperand& val)
2605 : {
2606 557 : pushValue(val);
2607 557 : framePushed_ += sizeof(Value);
2608 557 : }
2609 :
2610 : void
2611 300 : MacroAssembler::Push(const Value& val)
2612 : {
2613 300 : pushValue(val);
2614 300 : framePushed_ += sizeof(Value);
2615 300 : }
2616 :
2617 : void
2618 99 : MacroAssembler::Push(JSValueType type, Register reg)
2619 : {
2620 99 : pushValue(type, reg);
2621 99 : framePushed_ += sizeof(Value);
2622 99 : }
2623 :
2624 : void
2625 0 : MacroAssembler::PushValue(const Address& addr)
2626 : {
2627 0 : MOZ_ASSERT(addr.base != getStackPointer());
2628 0 : pushValue(addr);
2629 0 : framePushed_ += sizeof(Value);
2630 0 : }
2631 :
2632 : void
2633 268 : MacroAssembler::PushEmptyRooted(VMFunction::RootType rootType)
2634 : {
2635 268 : switch (rootType) {
2636 : case VMFunction::RootNone:
2637 0 : MOZ_CRASH("Handle must have root type");
2638 : case VMFunction::RootObject:
2639 : case VMFunction::RootString:
2640 : case VMFunction::RootFunction:
2641 : case VMFunction::RootCell:
2642 0 : Push(ImmPtr(nullptr));
2643 0 : break;
2644 : case VMFunction::RootValue:
2645 268 : Push(UndefinedValue());
2646 268 : break;
2647 : case VMFunction::RootId:
2648 0 : Push(ImmWord(JSID_BITS(JSID_VOID)));
2649 0 : break;
2650 : }
2651 268 : }
2652 :
2653 : void
2654 268 : MacroAssembler::popRooted(VMFunction::RootType rootType, Register cellReg,
2655 : const ValueOperand& valueReg)
2656 : {
2657 268 : switch (rootType) {
2658 : case VMFunction::RootNone:
2659 0 : MOZ_CRASH("Handle must have root type");
2660 : case VMFunction::RootObject:
2661 : case VMFunction::RootString:
2662 : case VMFunction::RootFunction:
2663 : case VMFunction::RootCell:
2664 : case VMFunction::RootId:
2665 0 : Pop(cellReg);
2666 0 : break;
2667 : case VMFunction::RootValue:
2668 268 : Pop(valueReg);
2669 268 : break;
2670 : }
2671 268 : }
2672 :
2673 : void
2674 52 : MacroAssembler::adjustStack(int amount)
2675 : {
2676 52 : if (amount > 0)
2677 43 : freeStack(amount);
2678 9 : else if (amount < 0)
2679 0 : reserveStack(-amount);
2680 52 : }
2681 :
2682 : void
2683 39301 : MacroAssembler::freeStack(uint32_t amount)
2684 : {
2685 39301 : MOZ_ASSERT(amount <= framePushed_);
2686 39301 : if (amount)
2687 25502 : addToStackPtr(Imm32(amount));
2688 39300 : framePushed_ -= amount;
2689 39300 : }
2690 :
2691 : void
2692 0 : MacroAssembler::freeStack(Register amount)
2693 : {
2694 0 : addToStackPtr(amount);
2695 0 : }
2696 :
2697 : // ===============================================================
2698 : // ABI function calls.
2699 :
2700 : void
2701 13023 : MacroAssembler::setupABICall()
2702 : {
2703 : #ifdef DEBUG
2704 13023 : MOZ_ASSERT(!inCall_);
2705 13023 : inCall_ = true;
2706 : #endif
2707 :
2708 : #ifdef JS_SIMULATOR
2709 : signature_ = 0;
2710 : #endif
2711 :
2712 : // Reinitialize the ABIArg generator.
2713 13023 : abiArgs_ = ABIArgGenerator();
2714 :
2715 : #if defined(JS_CODEGEN_ARM)
2716 : // On ARM, we need to know what ABI we are using, either in the
2717 : // simulator, or based on the configure flags.
2718 : #if defined(JS_SIMULATOR_ARM)
2719 : abiArgs_.setUseHardFp(UseHardFpABI());
2720 : #elif defined(JS_CODEGEN_ARM_HARDFP)
2721 : abiArgs_.setUseHardFp(true);
2722 : #else
2723 : abiArgs_.setUseHardFp(false);
2724 : #endif
2725 : #endif
2726 :
2727 : #if defined(JS_CODEGEN_MIPS32)
2728 : // On MIPS, the system ABI uses general register pairs to encode double
2729 : // arguments, after one or two integer-like arguments. Unfortunately, the
2730 : // Lowering phase cannot express this at the moment, so we enforce the
2731 : // system ABI here.
2732 : abiArgs_.enforceO32ABI();
2733 : #endif
2734 13023 : }
2735 :
2736 : void
2737 0 : MacroAssembler::setupWasmABICall()
2738 : {
2739 0 : MOZ_ASSERT(IsCompilingWasm(), "non-wasm should use setupAlignedABICall");
2740 0 : setupABICall();
2741 :
2742 : #if defined(JS_CODEGEN_ARM)
2743 : // The builtin thunk does the FP -> GPR moving on soft-FP, so
2744 : // use hard fp unconditionally.
2745 : abiArgs_.setUseHardFp(true);
2746 : #endif
2747 0 : dynamicAlignment_ = false;
2748 0 : }
2749 :
2750 : void
2751 0 : MacroAssembler::setupAlignedABICall()
2752 : {
2753 0 : MOZ_ASSERT(!IsCompilingWasm(), "wasm should use setupWasmABICall");
2754 0 : setupABICall();
2755 0 : dynamicAlignment_ = false;
2756 :
2757 : #if defined(JS_CODEGEN_ARM64)
2758 : MOZ_CRASH("Not supported on arm64");
2759 : #endif
2760 0 : }
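:
: // Typical call sequence built on these helpers, as used throughout this
: // file:
: //
: //   setupUnalignedABICall(scratch);
: //   passABIArg(arg0);
: //   passABIArg(arg1);
: //   callWithABI(JS_FUNC_TO_DATA_PTR(void*, SomeCFunction));
: //
: // (SomeCFunction and the args are placeholders; see the concrete uses
: // above.)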
2761 :
2762 : void
2763 19673 : MacroAssembler::passABIArg(const MoveOperand& from, MoveOp::Type type)
2764 : {
2765 19673 : MOZ_ASSERT(inCall_);
2766 19673 : appendSignatureType(type);
2767 :
2768 19673 : ABIArg arg;
2769 19673 : switch (type) {
2770 : case MoveOp::FLOAT32:
2771 0 : arg = abiArgs_.next(MIRType::Float32);
2772 0 : break;
2773 : case MoveOp::DOUBLE:
2774 5 : arg = abiArgs_.next(MIRType::Double);
2775 5 : break;
2776 : case MoveOp::GENERAL:
2777 19668 : arg = abiArgs_.next(MIRType::Pointer);
2778 19668 : break;
2779 : default:
2780 0 : MOZ_CRASH("Unexpected argument type");
2781 : }
2782 :
2783 19673 : MoveOperand to(*this, arg);
2784 19673 : if (from == to)
2785 2190 : return;
2786 :
2787 18578 : if (oom())
2788 0 : return;
2789 18578 : propagateOOM(moveResolver_.addMove(from, to, type));
2790 : }
2791 :
2792 : void
2793 12946 : MacroAssembler::callWithABINoProfiler(void* fun, MoveOp::Type result)
2794 : {
2795 12946 : appendSignatureType(result);
2796 : #ifdef JS_SIMULATOR
2797 : fun = Simulator::RedirectNativeFunction(fun, signature());
2798 : #endif
2799 :
2800 : uint32_t stackAdjust;
2801 12946 : callWithABIPre(&stackAdjust);
2802 12946 : call(ImmPtr(fun));
2803 12946 : callWithABIPost(stackAdjust, result);
2804 12946 : }
2805 :
2806 : void
2807 0 : MacroAssembler::callWithABI(wasm::BytecodeOffset callOffset, wasm::SymbolicAddress imm,
2808 : MoveOp::Type result)
2809 : {
2810 0 : MOZ_ASSERT(wasm::NeedsBuiltinThunk(imm));
2811 :
2812 : // We clobber WasmTlsReg below in the loadWasmTlsRegFromFrame(), but Ion
2813 : // assumes it is non-volatile, so preserve it manually.
2814 0 : Push(WasmTlsReg);
2815 :
2816 : uint32_t stackAdjust;
2817 0 : callWithABIPre(&stackAdjust, /* callFromWasm = */ true);
2818 :
2819 : // The TLS register is used in builtin thunks and must be set per the ABI:
2820 : // reload it after passing arguments, which might have used it at spill
2821 : // points when placing arguments.
2822 0 : loadWasmTlsRegFromFrame();
2823 :
2824 0 : call(wasm::CallSiteDesc(callOffset.bytecodeOffset, wasm::CallSite::Symbolic), imm);
2825 0 : callWithABIPost(stackAdjust, result, /* callFromWasm = */ true);
2826 :
2827 0 : Pop(WasmTlsReg);
2828 0 : }
2829 :
2830 : // ===============================================================
2831 : // Exit frame footer.
2832 :
2833 : void
2834 1138 : MacroAssembler::linkExitFrame(Register cxreg, Register scratch)
2835 : {
2836 1138 : loadPtr(Address(cxreg, JSContext::offsetOfActivation()), scratch);
2837 1138 : storeStackPtr(Address(scratch, JitActivation::offsetOfExitFP()));
2838 1138 : }
2839 :
2840 : void
2841 4499 : MacroAssembler::linkSelfReference(JitCode* code)
2842 : {
2843 : // If this code can transition to C++ code and witness a GC, then we need to store
2844 : // the JitCode onto the stack in order to GC it correctly. selfReferencePatch_
2845 : // should be unset if the code never needed to push its JitCode*.
2846 4499 : if (hasSelfReference()) {
2847 2088 : PatchDataWithValueCheck(CodeLocationLabel(code, selfReferencePatch_),
2848 : ImmPtr(code),
2849 1044 : ImmPtr((void*)-1));
2850 : }
2851 4499 : }
2852 :
2853 : // ===============================================================
2854 : // Branch functions
2855 :
2856 : void
2857 4 : MacroAssembler::branchIfNotInterpretedConstructor(Register fun, Register scratch, Label* label)
2858 : {
2859 : // 16-bit loads are slow, and unaligned 32-bit loads may be too, so
2860 : // perform an aligned 32-bit load and adjust the bitmask accordingly.
2861 4 : MOZ_ASSERT(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0);
2862 4 : MOZ_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);
2863 :
2864 : // First, ensure it's a scripted function.
2865 4 : load32(Address(fun, JSFunction::offsetOfNargs()), scratch);
2866 4 : int32_t bits = IMM32_16ADJ(JSFunction::INTERPRETED);
2867 4 : branchTest32(Assembler::Zero, scratch, Imm32(bits), label);
2868 :
2869 : // Check if the CONSTRUCTOR bit is set.
2870 4 : bits = IMM32_16ADJ(JSFunction::CONSTRUCTOR);
2871 4 : branchTest32(Assembler::Zero, scratch, Imm32(bits), label);
2872 4 : }
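:
: // IMM32_16ADJ is assumed to shift the 16-bit flags value into the half of
: // the 32-bit word where the aligned load above placed it (e.g. (X) << 16 on
: // little-endian targets); this is inferred from the offset asserts, not
: // from the macro's definition.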
2873 :
2874 : void
2875 7 : MacroAssembler::maybeBranchTestType(MIRType type, MDefinition* maybeDef, Register tag, Label* label)
2876 : {
2877 7 : if (!maybeDef || maybeDef->mightBeType(type)) {
2878 1 : switch (type) {
2879 : case MIRType::Null:
2880 0 : branchTestNull(Equal, tag, label);
2881 0 : break;
2882 : case MIRType::Boolean:
2883 0 : branchTestBoolean(Equal, tag, label);
2884 0 : break;
2885 : case MIRType::Int32:
2886 1 : branchTestInt32(Equal, tag, label);
2887 1 : break;
2888 : case MIRType::Double:
2889 0 : branchTestDouble(Equal, tag, label);
2890 0 : break;
2891 : case MIRType::String:
2892 0 : branchTestString(Equal, tag, label);
2893 0 : break;
2894 : case MIRType::Symbol:
2895 0 : branchTestSymbol(Equal, tag, label);
2896 0 : break;
2897 : case MIRType::Object:
2898 0 : branchTestObject(Equal, tag, label);
2899 0 : break;
2900 : default:
2901 0 : MOZ_CRASH("Unsupported type");
2902 : }
2903 : }
2904 7 : }
2905 :
2906 : void
2907 0 : MacroAssembler::wasmCallImport(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee)
2908 : {
2909 : // Load the callee, before the caller's registers are clobbered.
2910 0 : uint32_t globalDataOffset = callee.importGlobalDataOffset();
2911 0 : loadWasmGlobalPtr(globalDataOffset + offsetof(wasm::FuncImportTls, code), ABINonArgReg0);
2912 :
2913 0 : MOZ_ASSERT(ABINonArgReg0 != WasmTlsReg, "by constraint");
2914 :
2915 : // Switch to the callee's TLS and pinned registers and make the call.
2916 0 : loadWasmGlobalPtr(globalDataOffset + offsetof(wasm::FuncImportTls, tls), WasmTlsReg);
2917 0 : loadWasmPinnedRegsFromTls();
2918 :
2919 0 : call(desc, ABINonArgReg0);
2920 0 : }
2921 :
2922 : void
2923 0 : MacroAssembler::wasmCallBuiltinInstanceMethod(const wasm::CallSiteDesc& desc,
2924 : const ABIArg& instanceArg,
2925 : wasm::SymbolicAddress builtin)
2926 : {
2927 0 : MOZ_ASSERT(instanceArg != ABIArg());
2928 :
2929 0 : if (instanceArg.kind() == ABIArg::GPR) {
2930 0 : loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, instance)), instanceArg.gpr());
2931 0 : } else if (instanceArg.kind() == ABIArg::Stack) {
2932 : // Safe to use ABINonArgReg0 since it's the last thing before the call.
2933 0 : Register scratch = ABINonArgReg0;
2934 0 : loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, instance)), scratch);
2935 0 : storePtr(scratch, Address(getStackPointer(), instanceArg.offsetFromArgBase()));
2936 : } else {
2937 0 : MOZ_CRASH("Unknown abi passing style for pointer");
2938 : }
2939 :
2940 0 : call(desc, builtin);
2941 0 : }
2942 :
2943 : void
2944 0 : MacroAssembler::wasmCallIndirect(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee,
2945 : bool needsBoundsCheck)
2946 : {
2947 0 : Register scratch = WasmTableCallScratchReg;
2948 0 : Register index = WasmTableCallIndexReg;
2949 :
2950 0 : if (callee.which() == wasm::CalleeDesc::AsmJSTable) {
2951 : // asm.js tables require no signature check, have had their index masked
2952 : // into range (so they need no bounds check), and cannot be external.
2953 0 : loadWasmGlobalPtr(callee.tableBaseGlobalDataOffset(), scratch);
2954 0 : loadPtr(BaseIndex(scratch, index, ScalePointer), scratch);
2955 0 : call(desc, scratch);
2956 0 : return;
2957 : }
2958 :
2959 0 : MOZ_ASSERT(callee.which() == wasm::CalleeDesc::WasmTable);
2960 :
2961 : // Write the sig-id into the ABI sig-id register.
2962 0 : wasm::SigIdDesc sigId = callee.wasmTableSigId();
2963 0 : switch (sigId.kind()) {
2964 : case wasm::SigIdDesc::Kind::Global:
2965 0 : loadWasmGlobalPtr(sigId.globalDataOffset(), WasmTableCallSigReg);
2966 0 : break;
2967 : case wasm::SigIdDesc::Kind::Immediate:
2968 0 : move32(Imm32(sigId.immediate()), WasmTableCallSigReg);
2969 0 : break;
2970 : case wasm::SigIdDesc::Kind::None:
2971 0 : break;
2972 : }
2973 :
2974 0 : wasm::BytecodeOffset trapOffset(desc.lineOrBytecode());
2975 :
2976 : // WebAssembly throws if the index is out-of-bounds.
2977 0 : if (needsBoundsCheck) {
2978 0 : loadWasmGlobalPtr(callee.tableLengthGlobalDataOffset(), scratch);
2979 :
2980 0 : wasm::TrapDesc oobTrap(trapOffset, wasm::Trap::OutOfBounds, framePushed());
2981 0 : branch32(Assembler::Condition::AboveOrEqual, index, scratch, oobTrap);
2982 : }
2983 :
2984 : // Load the base pointer of the table.
2985 0 : loadWasmGlobalPtr(callee.tableBaseGlobalDataOffset(), scratch);
2986 :
2987 : // Load the callee from the table.
2988 0 : wasm::TrapDesc nullTrap(trapOffset, wasm::Trap::IndirectCallToNull, framePushed());
2989 0 : if (callee.wasmTableIsExternal()) {
2990 : static_assert(sizeof(wasm::ExternalTableElem) == 8 || sizeof(wasm::ExternalTableElem) == 16,
2991 : "elements of external tables are two words");
2992 : if (sizeof(wasm::ExternalTableElem) == 8) {
2993 : computeEffectiveAddress(BaseIndex(scratch, index, TimesEight), scratch);
2994 : } else {
2995 0 : lshift32(Imm32(4), index);
2996 0 : addPtr(index, scratch);
2997 : }
2998 :
2999 0 : loadPtr(Address(scratch, offsetof(wasm::ExternalTableElem, tls)), WasmTlsReg);
3000 0 : branchTest32(Assembler::Zero, WasmTlsReg, WasmTlsReg, nullTrap);
3001 :
3002 0 : loadWasmPinnedRegsFromTls();
3003 :
3004 0 : loadPtr(Address(scratch, offsetof(wasm::ExternalTableElem, code)), scratch);
3005 : } else {
3006 0 : loadPtr(BaseIndex(scratch, index, ScalePointer), scratch);
3007 0 : branchTest32(Assembler::Zero, scratch, scratch, nullTrap);
3008 : }
3009 :
3010 0 : call(desc, scratch);
3011 : }
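:
: // Assumed shape of an external table entry, inferred from the offsetof uses
: // and the static_assert above (two words per element; the field order is a
: // guess, not taken from the actual wasm::ExternalTableElem definition):
: //
: //   struct ExternalTableElem { void* code; wasm::TlsData* tls; };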
3012 :
3013 : void
3014 0 : MacroAssembler::wasmEmitTrapOutOfLineCode()
3015 : {
3016 0 : for (const wasm::TrapSite& site : trapSites()) {
3017 : // Trap out-of-line codes are created for two kinds of trap sites:
3018 : // - jumps, which are bound directly to the trap out-of-line path
3019 : // - memory accesses, which can fault and then have control transferred
3020 : // to the out-of-line path directly via signal handler setting pc
3021 0 : switch (site.kind) {
3022 : case wasm::TrapSite::Jump: {
3023 0 : RepatchLabel jump;
3024 0 : jump.use(site.codeOffset);
3025 0 : bind(&jump);
3026 0 : break;
3027 : }
3028 : case wasm::TrapSite::MemoryAccess: {
3029 0 : append(wasm::MemoryAccess(site.codeOffset, currentOffset()));
3030 0 : break;
3031 : }
3032 : }
3033 :
3034 0 : if (site.trap == wasm::Trap::IndirectCallBadSig) {
3035 : // The indirect call bad-signature trap is a special case for two
3036 : // reasons:
3037 : // - the check happens in the very first instructions of the
3038 : // prologue, before the stack frame has been set up, which messes
3039 : // up everything (stack depth computations, unwinding)
3040 : // - the check happens in the callee while the trap should be
3041 : // reported at the caller's call_indirect
3042 : // To solve both problems at once, the out-of-line path (far) jumps
3043 : // directly to the trap exit stub. This takes advantage of the fact
3044 : // that there is already a CallSite for call_indirect and the
3045 : // current pre-prologue stack/register state.
3046 0 : append(wasm::TrapFarJump(site.trap, farJumpWithPatch()));
3047 : } else {
3048 : // Inherit the frame depth of the trap site. This value is captured
3049 : // by the wasm::CallSite to allow unwinding this frame.
3050 0 : setFramePushed(site.framePushed);
3051 :
3052 : // Align the stack for a nullary call.
3053 0 : size_t alreadyPushed = sizeof(wasm::Frame) + framePushed();
3054 0 : size_t toPush = ABIArgGenerator().stackBytesConsumedSoFar();
3055 0 : if (size_t dec = StackDecrementForCall(ABIStackAlignment, alreadyPushed, toPush))
3056 0 : reserveStack(dec);
3057 :
3058 : // To call the trap handler function, we must have the WasmTlsReg
3059 : // filled since this is the normal calling ABI. To avoid requiring
3060 : // every trapping operation to have the TLS register filled for the
3061 : // rare case that it takes a trap, we restore it from the frame on
3062 : // the out-of-line path. However, there are millions of out-of-line
3063 : // paths (viz. for loads/stores), so the load is factored out into
3064 : // the shared FarJumpIsland generated by patchCallSites.
3065 :
3066 : // Call the trap's exit, using the bytecode offset of the trap site.
3067 : // Note that this code is inside the same CodeRange::Function as the
3068 : // trap site so it's as if the trapping instruction called the
3069 : // trap-handling function. The frame iterator knows to skip the trap
3070 : // exit's frame so that unwinding begins at the frame and offset of
3071 : // the trapping instruction.
3072 0 : wasm::CallSiteDesc desc(site.bytecodeOffset, wasm::CallSiteDesc::TrapExit);
3073 0 : call(desc, site.trap);
3074 : }
3075 :
3076 : #ifdef DEBUG
3077 : // Traps do not return, so no need to freeStack().
3078 0 : breakpoint();
3079 : #endif
3080 : }
3081 :
3082 : // Ensure that the return address of the last emitted call above is always
3083 : // within this function's CodeRange which is necessary for the stack
3084 : // iterator to find the right CodeRange while walking the stack.
3085 0 : breakpoint();
3086 :
3087 0 : clearTrapSites();
3088 0 : }
3089 :
3090 : void
3091 0 : MacroAssembler::wasmAssertNonExitInvariants(Register activation)
3092 : {
3093 : #ifdef DEBUG
3094 : // WasmActivation.exitFP should be null when outside any exit frame.
3095 0 : Label ok;
3096 0 : Address exitFP(activation, WasmActivation::offsetOfExitFP());
3097 0 : branchPtr(Assembler::Equal, exitFP, ImmWord(0), &ok);
3098 0 : breakpoint();
3099 0 : bind(&ok);
3100 : #endif
3101 0 : }
3102 :
3103 : void
3104 0 : MacroAssembler::wasmEmitStackCheck(Register sp, Register scratch, Label* onOverflow)
3105 : {
3106 0 : loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, addressOfContext)), scratch);
3107 0 : loadPtr(Address(scratch, 0), scratch);
3108 : branchPtr(Assembler::AboveOrEqual,
3109 0 : Address(scratch, offsetof(JSContext, jitStackLimitNoInterrupt)),
3110 : sp,
3111 0 : onOverflow);
3112 0 : }
3113 :
3114 : //}}} check_macroassembler_style
3115 :
3116 : void
3117 0 : MacroAssembler::loadWasmTlsRegFromFrame(Register dest)
3118 : {
3119 0 : loadPtr(Address(getStackPointer(), framePushed() + offsetof(wasm::Frame, tls)), dest);
3120 0 : }
3121 :
3122 : void
3123 163 : MacroAssembler::BranchType::emit(MacroAssembler& masm)
3124 : {
3125 163 : MOZ_ASSERT(isInitialized());
3126 163 : MIRType mirType = MIRType::None;
3127 :
3128 163 : if (type_.isPrimitive()) {
3129 152 : if (type_.isMagicArguments())
3130 0 : mirType = MIRType::MagicOptimizedArguments;
3131 : else
3132 152 : mirType = MIRTypeFromValueType(type_.primitive());
3133 11 : } else if (type_.isAnyObject()) {
3134 11 : mirType = MIRType::Object;
3135 : } else {
3136 0 : MOZ_CRASH("Unknown conversion to mirtype");
3137 : }
3138 :
3139 163 : if (mirType == MIRType::Double)
3140 1 : masm.branchTestNumber(cond(), reg(), jump());
3141 : else
3142 162 : masm.branchTestMIRType(cond(), reg(), mirType, jump());
3143 163 : }
3144 :
3145 : void
3146 95 : MacroAssembler::BranchGCPtr::emit(MacroAssembler& masm)
3147 : {
3148 95 : MOZ_ASSERT(isInitialized());
3149 95 : masm.branchPtr(cond(), reg(), ptr_, jump());
3150 95 : }
3151 :
3152 : void
3153 0 : MacroAssembler::debugAssertIsObject(const ValueOperand& val)
3154 : {
3155 : #ifdef DEBUG
3156 0 : Label ok;
3157 0 : branchTestObject(Assembler::Equal, val, &ok);
3158 0 : assumeUnreachable("Expected an object!");
3159 0 : bind(&ok);
3160 : #endif
3161 0 : }
3162 :
3163 : namespace js {
3164 : namespace jit {
3165 :
3166 : #ifdef DEBUG
3167 : template <class RegisterType>
3168 25605 : AutoGenericRegisterScope<RegisterType>::AutoGenericRegisterScope(MacroAssembler& masm, RegisterType reg)
3169 25605 : : RegisterType(reg), masm_(masm)
3170 : {
3171 25605 : masm.debugTrackedRegisters_.add(reg);
3172 25604 : }
3173 :
3174 : template AutoGenericRegisterScope<Register>::AutoGenericRegisterScope(MacroAssembler& masm, Register reg);
3175 : template AutoGenericRegisterScope<FloatRegister>::AutoGenericRegisterScope(MacroAssembler& masm, FloatRegister reg);
3176 : #endif // DEBUG
3177 :
3178 : #ifdef DEBUG
3179 : template <class RegisterType>
3180 25605 : AutoGenericRegisterScope<RegisterType>::~AutoGenericRegisterScope()
3181 : {
3182 25605 : const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
3183 25605 : masm_.debugTrackedRegisters_.take(reg);
3184 25605 : }
3185 :
3186 : template AutoGenericRegisterScope<Register>::~AutoGenericRegisterScope();
3187 : template AutoGenericRegisterScope<FloatRegister>::~AutoGenericRegisterScope();
3188 : #endif // DEBUG
3189 :
3190 : } // namespace jit
3191 : } // namespace js
|