Line data Source code
1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : * vim: set ts=8 sts=4 et sw=4 tw=99:
3 : * This Source Code Form is subject to the terms of the Mozilla Public
4 : * License, v. 2.0. If a copy of the MPL was not distributed with this
5 : * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 :
7 : #include "jit/BaselineCacheIRCompiler.h"
8 :
9 : #include "jit/CacheIR.h"
10 : #include "jit/Linker.h"
11 : #include "jit/SharedICHelpers.h"
12 : #include "proxy/Proxy.h"
13 :
14 : #include "jscntxtinlines.h"
15 : #include "jscompartmentinlines.h"
16 :
17 : #include "jit/MacroAssembler-inl.h"
18 :
19 : using namespace js;
20 : using namespace js::jit;
21 :
22 : using mozilla::Maybe;
23 :
24 : class AutoStubFrame;
25 :
26 : Address
27 27 : CacheRegisterAllocator::addressOf(MacroAssembler& masm, BaselineFrameSlot slot) const
28 : {
29 27 : uint32_t offset = stackPushed_ + ICStackValueOffset + slot.slot() * sizeof(JS::Value);
30 27 : return Address(masm.getStackPointer(), offset);
31 : }
32 :
// BaselineCacheIRCompiler compiles CacheIR to BaselineIC native code.
class MOZ_RAII BaselineCacheIRCompiler : public CacheIRCompiler
{
#ifdef DEBUG
    // Some Baseline IC stubs can be used in IonMonkey through SharedStubs.
    // Those stubs have different machine code, so we need to track whether
    // we're compiling for Baseline or Ion.
    ICStubEngine engine_;
#endif

    // Byte offset from ICStubReg to the start of the stub's data fields;
    // see stubAddress() below.
    uint32_t stubDataOffset_;
    // True while between AutoStubFrame::enter() and leave().
    bool inStubFrame_;
    // Set when a stub frame that may GC is entered; queried by callers
    // via makesGCCalls().
    bool makesGCCalls_;

    MOZ_MUST_USE bool callVM(MacroAssembler& masm, const VMFunction& fun);

    MOZ_MUST_USE bool callTypeUpdateIC(Register obj, ValueOperand val, Register scratch,
                                       LiveGeneralRegisterSet saveRegs);

    MOZ_MUST_USE bool emitStoreSlotShared(bool isFixed);
    MOZ_MUST_USE bool emitAddAndStoreSlotShared(CacheOp op);

  public:
    // AutoStubFrame manipulates inStubFrame_/makesGCCalls_ directly.
    friend class AutoStubFrame;

    BaselineCacheIRCompiler(JSContext* cx, const CacheIRWriter& writer, ICStubEngine engine,
                            uint32_t stubDataOffset)
      : CacheIRCompiler(cx, writer, Mode::Baseline),
#ifdef DEBUG
        engine_(engine),
#endif
        stubDataOffset_(stubDataOffset),
        inStubFrame_(false),
        makesGCCalls_(false)
    {}

    MOZ_MUST_USE bool init(CacheKind kind);

    JitCode* compile();

    bool makesGCCalls() const { return makesGCCalls_; }

  private:
    // Declare one emitter per CacheIR op; shared ops are forwarded to the
    // base class (see DEFINE_SHARED_OP below).
#define DEFINE_OP(op) MOZ_MUST_USE bool emit##op();
    CACHE_IR_OPS(DEFINE_OP)
#undef DEFINE_OP

    // Address of a stub data field, relative to ICStubReg.
    Address stubAddress(uint32_t offset) const {
        return Address(ICStubReg, stubDataOffset_ + offset);
    }
};
84 :
// Ops shared between Baseline and Ion simply forward to the CacheIRCompiler
// base-class implementation.
#define DEFINE_SHARED_OP(op) \
    bool BaselineCacheIRCompiler::emit##op() { return CacheIRCompiler::emit##op(); }
CACHE_IR_SHARED_OPS(DEFINE_SHARED_OP)
#undef DEFINE_SHARED_OP
89 :
// Whether a call made under a stub frame can trigger GC; CanGC marks the
// compiler as making GC calls (see AutoStubFrame::enter).
enum class CallCanGC { CanGC, CanNotGC };
91 :
// Instructions that have to perform a callVM require a stub frame. Call its
// enter() and leave() methods to enter/leave the stub frame.
class MOZ_RAII AutoStubFrame
{
    BaselineCacheIRCompiler& compiler;
#ifdef DEBUG
    // masm.framePushed() captured at enter(); restored in leave() so enter
    // and leave bracket a balanced frame.
    uint32_t framePushedAtEnterStubFrame_;
#endif

    AutoStubFrame(const AutoStubFrame&) = delete;
    void operator=(const AutoStubFrame&) = delete;

  public:
    explicit AutoStubFrame(BaselineCacheIRCompiler& compiler)
      : compiler(compiler)
#ifdef DEBUG
      , framePushedAtEnterStubFrame_(0)
#endif
    { }

    // Emit code to push a stub frame. The allocator's stack must be empty.
    // canGC == CanGC marks the compiled stub as making GC calls.
    void enter(MacroAssembler& masm, Register scratch, CallCanGC canGC = CallCanGC::CanGC) {
        MOZ_ASSERT(compiler.allocator.stackPushed() == 0);
        MOZ_ASSERT(compiler.engine_ == ICStubEngine::Baseline);

        EmitBaselineEnterStubFrame(masm, scratch);

#ifdef DEBUG
        framePushedAtEnterStubFrame_ = masm.framePushed();
#endif

        MOZ_ASSERT(!compiler.inStubFrame_);
        compiler.inStubFrame_ = true;
        if (canGC == CallCanGC::CanGC)
            compiler.makesGCCalls_ = true;
    }
    // Emit code to pop the stub frame. Pass calledIntoIon = true after a
    // callJit into Ion/Baseline code, which leaves an extra word on the
    // stack.
    void leave(MacroAssembler& masm, bool calledIntoIon = false) {
        MOZ_ASSERT(compiler.inStubFrame_);
        compiler.inStubFrame_ = false;

#ifdef DEBUG
        masm.setFramePushed(framePushedAtEnterStubFrame_);
        if (calledIntoIon)
            masm.adjustFrame(sizeof(intptr_t)); // Calls into ion have this extra.
#endif

        EmitBaselineLeaveStubFrame(masm, calledIntoIon);
    }

#ifdef DEBUG
    ~AutoStubFrame() {
        MOZ_ASSERT(!compiler.inStubFrame_);
    }
#endif
};
146 :
147 : bool
148 85 : BaselineCacheIRCompiler::callVM(MacroAssembler& masm, const VMFunction& fun)
149 : {
150 85 : MOZ_ASSERT(inStubFrame_);
151 :
152 85 : JitCode* code = cx_->runtime()->jitRuntime()->getVMWrapper(fun);
153 85 : if (!code)
154 0 : return false;
155 :
156 85 : MOZ_ASSERT(fun.expectTailCall == NonTailCall);
157 85 : MOZ_ASSERT(engine_ == ICStubEngine::Baseline);
158 :
159 85 : EmitBaselineCallVM(code, masm);
160 85 : return true;
161 : }
162 :
// Emit native code for every CacheIR op recorded by the writer, then the
// shared failure paths, and link the result. Returns nullptr on failure
// (OOM is reported via recoverFromOutOfMemory).
JitCode*
BaselineCacheIRCompiler::compile()
{
#ifndef JS_USE_LINK_REGISTER
    // The first value contains the return address,
    // which we pull into ICTailCallReg for tail calls.
    masm.adjustFrame(sizeof(intptr_t));
#endif
#ifdef JS_CODEGEN_ARM
    masm.setSecondScratchReg(BaselineSecondScratchReg);
#endif

    // Main loop: dispatch each op to its emitter until the reader is
    // exhausted.
    do {
        switch (reader.readOp()) {
#define DEFINE_OP(op)                   \
          case CacheOp::op:             \
            if (!emit##op())            \
                return nullptr;         \
            break;
    CACHE_IR_OPS(DEFINE_OP)
#undef DEFINE_OP

          default:
            MOZ_CRASH("Invalid op");
        }

        allocator.nextOp();
    } while (reader.more());

    // Every path through the ops must have returned from the IC already.
    MOZ_ASSERT(!inStubFrame_);
    masm.assumeUnreachable("Should have returned from IC");

    // Done emitting the main IC code. Now emit the failure paths.
    for (size_t i = 0; i < failurePaths.length(); i++) {
        if (!emitFailurePath(i))
            return nullptr;
        EmitStubGuardFailure(masm);
    }

    Linker linker(masm);
    AutoFlushICache afc("getStubCode");
    Rooted<JitCode*> newStubCode(cx_, linker.newCode<NoGC>(cx_, BASELINE_CODE));
    if (!newStubCode) {
        cx_->recoverFromOutOfMemory();
        return nullptr;
    }

    return newStubCode;
}
212 :
// Guard that the object's shape equals the Shape* stored in the stub data.
bool
BaselineCacheIRCompiler::emitGuardShape()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegister scratch(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Load the expected shape from the stub and compare.
    Address addr(stubAddress(reader.stubOffset()));
    masm.loadPtr(addr, scratch);
    masm.branchTestObjShape(Assembler::NotEqual, obj, scratch, failure->label());
    return true;
}
228 :
// Guard that the object's group equals the ObjectGroup* stored in the stub
// data.
bool
BaselineCacheIRCompiler::emitGuardGroup()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegister scratch(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Load the expected group from the stub and compare.
    Address addr(stubAddress(reader.stubOffset()));
    masm.loadPtr(addr, scratch);
    masm.branchTestObjGroup(Assembler::NotEqual, obj, scratch, failure->label());
    return true;
}
244 :
// Guard that the group stored in the stub data still has an unanalyzed
// new script (delegated to MacroAssembler::guardGroupHasUnanalyzedNewScript).
bool
BaselineCacheIRCompiler::emitGuardGroupHasUnanalyzedNewScript()
{
    Address addr(stubAddress(reader.stubOffset()));
    AutoScratchRegister scratch1(allocator, masm);
    AutoScratchRegister scratch2(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    masm.loadPtr(addr, scratch1);
    masm.guardGroupHasUnanalyzedNewScript(scratch1, scratch2, failure->label());
    return true;
}
260 :
// Guard that the object's prototype equals the pointer stored in the stub
// data.
bool
BaselineCacheIRCompiler::emitGuardProto()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegister scratch(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    Address addr(stubAddress(reader.stubOffset()));
    masm.loadObjProto(obj, scratch);
    masm.branchPtr(Assembler::NotEqual, addr, scratch, failure->label());
    return true;
}
276 :
// Guard that the object belongs to the compartment stored in the stub data.
// The stub also holds a global wrapper field, which is read here only to
// advance the reader (its value is unused at compile time).
bool
BaselineCacheIRCompiler::emitGuardCompartment()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    reader.stubOffset(); // Read global wrapper.
    AutoScratchRegister scratch(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Compare obj->group->compartment against the expected compartment.
    Address addr(stubAddress(reader.stubOffset()));
    masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
    masm.loadPtr(Address(scratch, ObjectGroup::offsetOfCompartment()), scratch);
    masm.branchPtr(Assembler::NotEqual, addr, scratch, failure->label());
    return true;
}
294 :
// Guard that the operand is exactly (pointer-identical to) the object
// stored in the stub data.
bool
BaselineCacheIRCompiler::emitGuardSpecificObject()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    Address addr(stubAddress(reader.stubOffset()));
    masm.branchPtr(Assembler::NotEqual, addr, obj, failure->label());
    return true;
}
308 :
// Guard that the string operand equals the atom stored in the stub data.
// Fast path: pointer identity. Otherwise, a different atom can never match;
// a non-atom of the same length is compared by contents via
// EqualStringsHelper.
bool
BaselineCacheIRCompiler::emitGuardSpecificAtom()
{
    Register str = allocator.useRegister(masm, reader.stringOperandId());
    AutoScratchRegister scratch(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    Address atomAddr(stubAddress(reader.stubOffset()));

    Label done;
    masm.branchPtr(Assembler::Equal, atomAddr, str, &done);

    // The pointers are not equal, so if the input string is also an atom it
    // must be a different string.
    masm.branchTest32(Assembler::NonZero, Address(str, JSString::offsetOfFlags()),
                      Imm32(JSString::ATOM_BIT), failure->label());

    // Check the length.
    masm.loadPtr(atomAddr, scratch);
    masm.loadStringLength(scratch, scratch);
    masm.branch32(Assembler::NotEqual, Address(str, JSString::offsetOfLength()),
                  scratch, failure->label());

    // We have a non-atomized string with the same length. Call a helper
    // function to do the comparison.
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
    masm.PushRegsInMask(volatileRegs);

    masm.setupUnalignedABICall(scratch);
    masm.loadPtr(atomAddr, scratch);
    masm.passABIArg(scratch);
    masm.passABIArg(str);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, EqualStringsHelper));
    masm.mov(ReturnReg, scratch);

    // Restore the volatile registers but keep the helper's bool result in
    // scratch.
    LiveRegisterSet ignore;
    ignore.add(scratch);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);
    masm.branchIfFalseBool(scratch, failure->label());

    masm.bind(&done);
    return true;
}
355 :
// Guard that the symbol operand is pointer-identical to the symbol stored
// in the stub data (symbols are unique, so identity suffices).
bool
BaselineCacheIRCompiler::emitGuardSpecificSymbol()
{
    Register sym = allocator.useRegister(masm, reader.symbolOperandId());

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    Address addr(stubAddress(reader.stubOffset()));
    masm.branchPtr(Assembler::NotEqual, addr, sym, failure->label());
    return true;
}
369 :
// Load a fixed slot of the object into the output register; the slot's
// byte offset is stored in the stub data.
bool
BaselineCacheIRCompiler::emitLoadFixedSlotResult()
{
    AutoOutputRegister output(*this);
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

    masm.load32(stubAddress(reader.stubOffset()), scratch);
    masm.loadValue(BaseIndex(obj, scratch, TimesOne), output.valueReg());
    return true;
}
381 :
// Load a dynamic slot into the output register: the byte offset (from the
// stub data) is applied to the object's out-of-line slots pointer.
bool
BaselineCacheIRCompiler::emitLoadDynamicSlotResult()
{
    AutoOutputRegister output(*this);
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
    AutoScratchRegister scratch2(allocator, masm);

    masm.load32(stubAddress(reader.stubOffset()), scratch);
    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2);
    masm.loadValue(BaseIndex(scratch2, scratch, TimesOne), output.valueReg());
    return true;
}
395 :
// Megamorphic property load: call the GetNativeDataProperty C++ helper via
// an ABI call, passing the address of an on-stack Value out-param. The
// handleMissing flag (from the stub data stream) selects the template
// instantiation that tolerates missing properties.
bool
BaselineCacheIRCompiler::emitMegamorphicLoadSlotResult()
{
    AutoOutputRegister output(*this);

    Register obj = allocator.useRegister(masm, reader.objOperandId());
    Address nameAddr = stubAddress(reader.stubOffset());
    bool handleMissing = reader.readBool();

    AutoScratchRegisterMaybeOutput scratch1(allocator, masm, output);
    AutoScratchRegister scratch2(allocator, masm);
    AutoScratchRegister scratch3(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Reserve a stack Value for the out-param and keep its address in
    // scratch3.
    masm.Push(UndefinedValue());
    masm.moveStackPtrTo(scratch3.get());

    // Save volatile registers, except the scratches we need across the call.
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(scratch1);
    volatileRegs.takeUnchecked(scratch2);
    volatileRegs.takeUnchecked(scratch3);
    masm.PushRegsInMask(volatileRegs);

    // Call GetNativeDataProperty(cx, obj, name, &outVal).
    masm.setupUnalignedABICall(scratch1);
    masm.loadJSContext(scratch1);
    masm.passABIArg(scratch1);
    masm.passABIArg(obj);
    masm.loadPtr(nameAddr, scratch2);
    masm.passABIArg(scratch2);
    masm.passABIArg(scratch3);
    if (handleMissing)
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, (GetNativeDataProperty<true>)));
    else
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, (GetNativeDataProperty<false>)));
    // Stash the bool result in scratch2 across the register restore.
    masm.mov(ReturnReg, scratch2);
    masm.PopRegsInMask(volatileRegs);

    // Pop the out-param Value into the output register.
    masm.loadTypedOrValue(Address(masm.getStackPointer(), 0), output);
    masm.adjustStack(sizeof(Value));

    // A false result means the lookup failed; take the failure path.
    masm.branchIfFalseBool(scratch2, failure->label());
    return true;
}
442 :
// Megamorphic property store: call the SetNativeDataProperty C++ helper via
// an ABI call, passing the address of the on-stack copy of |val|. The
// needsTypeBarrier flag selects the template instantiation.
bool
BaselineCacheIRCompiler::emitMegamorphicStoreSlot()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    Address nameAddr = stubAddress(reader.stubOffset());
    ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
    bool needsTypeBarrier = reader.readBool();

    AutoScratchRegister scratch1(allocator, masm);
    AutoScratchRegister scratch2(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Spill |val| to the stack and pass its address to the helper.
    masm.Push(val);
    masm.moveStackPtrTo(val.scratchReg());

    // Save volatile registers, except the scratches and |val| which we need
    // across the call.
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(scratch1);
    volatileRegs.takeUnchecked(scratch2);
    volatileRegs.takeUnchecked(val);
    masm.PushRegsInMask(volatileRegs);

    // Call SetNativeDataProperty(cx, obj, name, &val).
    masm.setupUnalignedABICall(scratch1);
    masm.loadJSContext(scratch1);
    masm.passABIArg(scratch1);
    masm.passABIArg(obj);
    masm.loadPtr(nameAddr, scratch2);
    masm.passABIArg(scratch2);
    masm.passABIArg(val.scratchReg());
    if (needsTypeBarrier)
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, (SetNativeDataProperty<true>)));
    else
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, (SetNativeDataProperty<false>)));
    // Stash the bool result in scratch1 across the register restore.
    masm.mov(ReturnReg, scratch1);
    masm.PopRegsInMask(volatileRegs);

    // Restore |val| from the stack.
    masm.loadValue(Address(masm.getStackPointer(), 0), val);
    masm.adjustStack(sizeof(Value));

    masm.branchIfFalseBool(scratch1, failure->label());
    return true;
}
487 :
// Guard that the object still has the getter/setter described by the shape
// stored in the stub data, by calling the ObjectHasGetterSetter C++ helper.
bool
BaselineCacheIRCompiler::emitGuardHasGetterSetter()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    Address shapeAddr = stubAddress(reader.stubOffset());

    AutoScratchRegister scratch1(allocator, masm);
    AutoScratchRegister scratch2(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Save volatile registers, except the scratches we need across the call.
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(scratch1);
    volatileRegs.takeUnchecked(scratch2);
    masm.PushRegsInMask(volatileRegs);

    // Call ObjectHasGetterSetter(cx, obj, shape).
    masm.setupUnalignedABICall(scratch1);
    masm.loadJSContext(scratch1);
    masm.passABIArg(scratch1);
    masm.passABIArg(obj);
    masm.loadPtr(shapeAddr, scratch2);
    masm.passABIArg(scratch2);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ObjectHasGetterSetter));
    masm.mov(ReturnReg, scratch1);
    masm.PopRegsInMask(volatileRegs);

    masm.branchIfFalseBool(scratch1, failure->label());
    return true;
}
519 :
// Call a scripted (JS) getter: guard it has JIT code, enter a stub frame,
// set up a JitFrameLayout with |obj| as thisv and 0 arguments, and call
// into JIT code (through the arguments rectifier if the getter declares
// formal parameters).
bool
BaselineCacheIRCompiler::emitCallScriptedGetterResult()
{
    MOZ_ASSERT(engine_ == ICStubEngine::Baseline);

    Register obj = allocator.useRegister(masm, reader.objOperandId());
    Address getterAddr(stubAddress(reader.stubOffset()));

    AutoScratchRegisterExcluding code(allocator, masm, ArgumentsRectifierReg);
    AutoScratchRegister callee(allocator, masm);
    AutoScratchRegister scratch(allocator, masm);

    // First, ensure our getter is non-lazy and has JIT code.
    {
        FailurePath* failure;
        if (!addFailurePath(&failure))
            return false;

        masm.loadPtr(getterAddr, callee);
        masm.branchIfFunctionHasNoScript(callee, failure->label());
        masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), code);
        masm.loadBaselineOrIonRaw(code, code, failure->label());
    }

    allocator.discardStack(masm);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    // Align the stack such that the JitFrameLayout is aligned on
    // JitStackAlignment.
    masm.alignJitStackBasedOnNArgs(0);

    // Getter is called with 0 arguments, just |obj| as thisv.
    // Note that we use Push, not push, so that callJit will align the stack
    // properly on ARM.
    masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));

    // Push the frame descriptor, ActualArgc and callee.
    EmitBaselineCreateStubFrameDescriptor(masm, scratch, JitFrameLayout::Size());
    masm.Push(Imm32(0));  // ActualArgc is 0
    masm.Push(callee);
    masm.Push(scratch);

    // Handle arguments underflow.
    Label noUnderflow;
    masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), callee);
    masm.branch32(Assembler::Equal, callee, Imm32(0), &noUnderflow);
    {
        // Call the arguments rectifier.
        MOZ_ASSERT(ArgumentsRectifierReg != code);

        JitCode* argumentsRectifier = cx_->runtime()->jitRuntime()->getArgumentsRectifier();
        masm.movePtr(ImmGCPtr(argumentsRectifier), code);
        masm.loadPtr(Address(code, JitCode::offsetOfCode()), code);
        masm.movePtr(ImmWord(0), ArgumentsRectifierReg);
    }

    masm.bind(&noUnderflow);
    masm.callJit(code);

    // calledIntoIon = true: callJit leaves an extra word on the stack.
    stubFrame.leave(masm, true);
    return true;
}
583 :
// VM wrapper used to invoke a native (C++) getter.
typedef bool (*CallNativeGetterFn)(JSContext*, HandleFunction, HandleObject, MutableHandleValue);
static const VMFunction CallNativeGetterInfo =
    FunctionInfo<CallNativeGetterFn>(CallNativeGetter, "CallNativeGetter");

// Call a native getter through the CallNativeGetter VM function, inside a
// stub frame.
bool
BaselineCacheIRCompiler::emitCallNativeGetterResult()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    Address getterAddr(stubAddress(reader.stubOffset()));

    AutoScratchRegister scratch(allocator, masm);

    allocator.discardStack(masm);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    // Load the callee in the scratch register.
    masm.loadPtr(getterAddr, scratch);

    // VM call arguments are pushed in reverse order: (getter, obj).
    masm.Push(obj);
    masm.Push(scratch);

    if (!callVM(masm, CallNativeGetterInfo))
        return false;

    stubFrame.leave(masm);
    return true;
}
613 :
// VM wrapper used to perform a proxy [[Get]] with a fixed jsid.
typedef bool (*ProxyGetPropertyFn)(JSContext*, HandleObject, HandleId, MutableHandleValue);
static const VMFunction ProxyGetPropertyInfo =
    FunctionInfo<ProxyGetPropertyFn>(ProxyGetProperty, "ProxyGetProperty");

// Call ProxyGetProperty for the jsid stored in the stub data, inside a stub
// frame.
bool
BaselineCacheIRCompiler::emitCallProxyGetResult()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    Address idAddr(stubAddress(reader.stubOffset()));

    AutoScratchRegister scratch(allocator, masm);

    allocator.discardStack(masm);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    // Load the jsid in the scratch register.
    masm.loadPtr(idAddr, scratch);

    // VM call arguments are pushed in reverse order: (obj, id).
    masm.Push(scratch);
    masm.Push(obj);

    if (!callVM(masm, ProxyGetPropertyInfo))
        return false;

    stubFrame.leave(masm);
    return true;
}
643 :
// VM wrapper used to perform a proxy [[Get]] keyed by a runtime Value.
typedef bool (*ProxyGetPropertyByValueFn)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
static const VMFunction ProxyGetPropertyByValueInfo =
    FunctionInfo<ProxyGetPropertyByValueFn>(ProxyGetPropertyByValue, "ProxyGetPropertyByValue");

// Call ProxyGetPropertyByValue with the id Value operand, inside a stub
// frame.
bool
BaselineCacheIRCompiler::emitCallProxyGetByValueResult()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    ValueOperand idVal = allocator.useValueRegister(masm, reader.valOperandId());

    AutoScratchRegister scratch(allocator, masm);

    allocator.discardStack(masm);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    // VM call arguments are pushed in reverse order: (obj, idVal).
    masm.Push(idVal);
    masm.Push(obj);

    if (!callVM(masm, ProxyGetPropertyByValueInfo))
        return false;

    stubFrame.leave(masm);
    return true;
}
670 :
// VM wrapper used to perform a proxy hasOwn check keyed by a runtime Value.
typedef bool (*ProxyHasOwnFn)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
static const VMFunction ProxyHasOwnInfo = FunctionInfo<ProxyHasOwnFn>(ProxyHasOwn, "ProxyHasOwn");

// Call ProxyHasOwn with the id Value operand, inside a stub frame.
bool
BaselineCacheIRCompiler::emitCallProxyHasOwnResult()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    ValueOperand idVal = allocator.useValueRegister(masm, reader.valOperandId());

    AutoScratchRegister scratch(allocator, masm);

    allocator.discardStack(masm);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    // VM call arguments are pushed in reverse order: (obj, idVal).
    masm.Push(idVal);
    masm.Push(obj);

    if (!callVM(masm, ProxyHasOwnInfo))
        return false;

    stubFrame.leave(masm);
    return true;
}
696 :
// Load an unboxed property into the output register; the field's value
// type is in the op stream and its byte offset in the stub data.
bool
BaselineCacheIRCompiler::emitLoadUnboxedPropertyResult()
{
    AutoOutputRegister output(*this);
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

    JSValueType fieldType = reader.valueType();
    Address fieldOffset(stubAddress(reader.stubOffset()));
    masm.load32(fieldOffset, scratch);
    masm.loadUnboxedProperty(BaseIndex(obj, scratch, TimesOne), fieldType, output);
    return true;
}
710 :
// Guard that the current baseline frame has not created an arguments
// object (HAS_ARGS_OBJ flag clear).
bool
BaselineCacheIRCompiler::emitGuardFrameHasNoArgumentsObject()
{
    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    masm.branchTest32(Assembler::NonZero,
                      Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFlags()),
                      Imm32(BaselineFrame::HAS_ARGS_OBJ),
                      failure->label());
    return true;
}
724 :
// Load the current frame's callee function, boxed as an object Value, into
// the output register.
bool
BaselineCacheIRCompiler::emitLoadFrameCalleeResult()
{
    AutoOutputRegister output(*this);
    AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

    Address callee(BaselineFrameReg, BaselineFrame::offsetOfCalleeToken());
    masm.loadFunctionFromCalleeToken(callee, scratch);
    masm.tagValue(JSVAL_TYPE_OBJECT, scratch, output.valueReg());
    return true;
}
736 :
// Load the current frame's actual argument count, boxed as an int32 Value,
// into the output register.
bool
BaselineCacheIRCompiler::emitLoadFrameNumActualArgsResult()
{
    AutoOutputRegister output(*this);
    AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

    Address actualArgs(BaselineFrameReg, BaselineFrame::offsetOfNumActualArgs());
    masm.loadPtr(actualArgs, scratch);
    masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
    return true;
}
748 :
// Load a field of a typed object into the output register. The layout and
// type-descriptor key come from the op stream, the field's byte offset from
// the stub data; the shared helper handles the per-type load.
bool
BaselineCacheIRCompiler::emitLoadTypedObjectResult()
{
    AutoOutputRegister output(*this);
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegister scratch1(allocator, masm);
    AutoScratchRegister scratch2(allocator, masm);

    TypedThingLayout layout = reader.typedThingLayout();
    uint32_t typeDescr = reader.typeDescrKey();
    Address fieldOffset(stubAddress(reader.stubOffset()));

    // Get the object's data pointer.
    LoadTypedThingData(masm, layout, obj, scratch1);

    // Get the address being written to.
    masm.load32(fieldOffset, scratch2);
    masm.addPtr(scratch2, scratch1);

    Address fieldAddr(scratch1, 0);
    emitLoadTypedObjectResultShared(fieldAddr, scratch2, layout, typeDescr, output);
    return true;
}
772 :
// Load the frame argument at the given (dynamic) index into the output
// register, with a bounds check against the actual argument count.
bool
BaselineCacheIRCompiler::emitLoadFrameArgumentResult()
{
    AutoOutputRegister output(*this);
    Register index = allocator.useRegister(masm, reader.int32OperandId());
    AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Bounds check.
    masm.loadPtr(Address(BaselineFrameReg, BaselineFrame::offsetOfNumActualArgs()), scratch);
    masm.branch32(Assembler::AboveOrEqual, index, scratch, failure->label());

    // Load the argument.
    masm.loadValue(BaseValueIndex(BaselineFrameReg, index, BaselineFrame::offsetOfArg(0)),
                   output.valueReg());
    return true;
}
793 :
// Load a fixed slot of an environment object, failing on the magic value
// that marks an uninitialized lexical binding.
bool
BaselineCacheIRCompiler::emitLoadEnvironmentFixedSlotResult()
{
    AutoOutputRegister output(*this);
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    masm.load32(stubAddress(reader.stubOffset()), scratch);
    BaseIndex slot(obj, scratch, TimesOne);

    // Check for uninitialized lexicals.
    masm.branchTestMagic(Assembler::Equal, slot, failure->label());

    // Load the value.
    masm.loadValue(slot, output.valueReg());
    return true;
}
815 :
// Load a dynamic slot of an environment object (through the out-of-line
// slots pointer), failing on the magic value that marks an uninitialized
// lexical binding.
bool
BaselineCacheIRCompiler::emitLoadEnvironmentDynamicSlotResult()
{
    AutoOutputRegister output(*this);
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegister scratch(allocator, masm);
    AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    masm.load32(stubAddress(reader.stubOffset()), scratch);
    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2);

    // Check for uninitialized lexicals.
    BaseIndex slot(scratch2, scratch, TimesOne);
    masm.branchTestMagic(Assembler::Equal, slot, failure->label());

    // Load the value.
    masm.loadValue(slot, output.valueReg());
    return true;
}
839 :
// Load the string constant stored in the stub data, boxed as a string
// Value, into the output register.
bool
BaselineCacheIRCompiler::emitLoadStringResult()
{
    AutoOutputRegister output(*this);
    AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

    masm.loadPtr(stubAddress(reader.stubOffset()), scratch);
    masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
    return true;
}
850 :
// VM wrapper used to perform String.prototype.split with a cached group.
typedef bool (*StringSplitHelperFn)(JSContext*, HandleString, HandleString, HandleObjectGroup,
                                    uint32_t limit, MutableHandleValue);
static const VMFunction StringSplitHelperInfo =
    FunctionInfo<StringSplitHelperFn>(StringSplitHelper, "StringSplitHelper");

// Call StringSplitHelper(str, sep, group, INT32_MAX) inside a stub frame;
// the group comes from the stub data and the limit is effectively "no
// limit".
bool
BaselineCacheIRCompiler::emitCallStringSplitResult()
{
    Register str = allocator.useRegister(masm, reader.stringOperandId());
    Register sep = allocator.useRegister(masm, reader.stringOperandId());
    Address groupAddr(stubAddress(reader.stubOffset()));

    AutoScratchRegister scratch(allocator, masm);
    allocator.discardStack(masm);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    // Load the group in the scratch register.
    masm.loadPtr(groupAddr, scratch);

    // VM call arguments are pushed in reverse order:
    // (str, sep, group, limit).
    masm.Push(Imm32(INT32_MAX));
    masm.Push(scratch);
    masm.Push(sep);
    masm.Push(str);

    if (!callVM(masm, StringSplitHelperInfo))
        return false;

    stubFrame.leave(masm);
    return true;
}
883 :
// Run the type-update IC chain for a store of |val| into |obj|. The first
// update stub is called directly; it reports success (1) or failure (0) in
// |scratch|. On failure we enter a non-GC stub frame and call
// DoTypeUpdateFallback, preserving |saveRegs| across the VM call.
// Register constraints: |val| must be R0 and |scratch| must be
// R1.scratchReg() (asserted below).
bool
BaselineCacheIRCompiler::callTypeUpdateIC(Register obj, ValueOperand val, Register scratch,
                                          LiveGeneralRegisterSet saveRegs)
{
    // Ensure the stack is empty for the VM call below.
    allocator.discardStack(masm);

    // R0 contains the value that needs to be typechecked.
    MOZ_ASSERT(val == R0);
    MOZ_ASSERT(scratch == R1.scratchReg());

#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
    static const bool CallClobbersTailReg = false;
#else
    static const bool CallClobbersTailReg = true;
#endif

    // Call the first type update stub.
    if (CallClobbersTailReg)
        masm.push(ICTailCallReg);
    masm.push(ICStubReg);
    masm.loadPtr(Address(ICStubReg, ICUpdatedStub::offsetOfFirstUpdateStub()),
                 ICStubReg);
    masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
    masm.pop(ICStubReg);
    if (CallClobbersTailReg)
        masm.pop(ICTailCallReg);

    // The update IC will store 0 or 1 in |scratch|, R1.scratchReg(), reflecting
    // if the value in R0 type-checked properly or not.
    Label done;
    masm.branch32(Assembler::Equal, scratch, Imm32(1), &done);

    // Slow path: call DoTypeUpdateFallback under a stub frame. CanNotGC
    // because the fallback does not require this stub to be GC-traceable.
    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch, CallCanGC::CanNotGC);

    masm.PushRegsInMask(saveRegs);

    masm.Push(val);
    masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
    masm.Push(ICStubReg);

    // Load previous frame pointer, push BaselineFrame*.
    masm.loadPtr(Address(BaselineFrameReg, 0), scratch);
    masm.pushBaselineFramePtr(scratch, scratch);

    if (!callVM(masm, DoTypeUpdateFallbackInfo))
        return false;

    masm.PopRegsInMask(saveRegs);

    stubFrame.leave(masm);

    masm.bind(&done);
    return true;
}
940 :
// Shared code for StoreFixedSlot and StoreDynamicSlot: run the type update
// IC on |val| and then store it into the slot whose byte offset is read from
// the stub data. |isFixed| selects a fixed slot (inline in the object) vs. a
// dynamic slot (reached through the object's slots pointer).
bool
BaselineCacheIRCompiler::emitStoreSlotShared(bool isFixed)
{
    ObjOperandId objId = reader.objOperandId();
    Address offsetAddr = stubAddress(reader.stubOffset());

    // Allocate the fixed registers first. These need to be fixed for
    // callTypeUpdateIC.
    AutoScratchRegister scratch1(allocator, masm, R1.scratchReg());
    ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);

    Register obj = allocator.useRegister(masm, objId);
    // A second scratch is only needed for the dynamic-slot case, to hold the
    // slots pointer.
    Maybe<AutoScratchRegister> scratch2;
    if (!isFixed)
        scratch2.emplace(allocator, masm);

    // |obj| and |val| must survive the fallback VM call inside
    // callTypeUpdateIC.
    LiveGeneralRegisterSet saveRegs;
    saveRegs.add(obj);
    saveRegs.add(val);
    if (!callTypeUpdateIC(obj, val, scratch1, saveRegs))
        return false;

    // Load the slot's byte offset from the stub data.
    masm.load32(offsetAddr, scratch1);

    if (isFixed) {
        BaseIndex slot(obj, scratch1, TimesOne);
        EmitPreBarrier(masm, slot, MIRType::Value);
        masm.storeValue(val, slot);
    } else {
        masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2.ref());
        BaseIndex slot(scratch2.ref(), scratch1, TimesOne);
        EmitPreBarrier(masm, slot, MIRType::Value);
        masm.storeValue(val, slot);
    }

    emitPostBarrierSlot(obj, val, scratch1);
    return true;
}
979 :
980 : bool
981 6 : BaselineCacheIRCompiler::emitStoreFixedSlot()
982 : {
983 6 : return emitStoreSlotShared(true);
984 : }
985 :
986 : bool
987 3 : BaselineCacheIRCompiler::emitStoreDynamicSlot()
988 : {
989 3 : return emitStoreSlotShared(false);
990 : }
991 :
// Shared code for AddAndStoreFixedSlot, AddAndStoreDynamicSlot and
// AllocateAndStoreDynamicSlot: add a new property to an object and store
// |val| into it. Depending on |op| this may first grow the dynamic slots,
// then it runs the type update IC, optionally swaps in a new group (for the
// acquired-properties analysis), updates the object's shape, and finally
// performs the store itself.
bool
BaselineCacheIRCompiler::emitAddAndStoreSlotShared(CacheOp op)
{
    ObjOperandId objId = reader.objOperandId();
    Address offsetAddr = stubAddress(reader.stubOffset());

    // Allocate the fixed registers first. These need to be fixed for
    // callTypeUpdateIC.
    AutoScratchRegister scratch1(allocator, masm, R1.scratchReg());
    ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);

    Register obj = allocator.useRegister(masm, objId);
    AutoScratchRegister scratch2(allocator, masm);

    // Stub data: whether to change the group, and the new group/shape.
    bool changeGroup = reader.readBool();
    Address newGroupAddr = stubAddress(reader.stubOffset());
    Address newShapeAddr = stubAddress(reader.stubOffset());

    if (op == CacheOp::AllocateAndStoreDynamicSlot) {
        // We have to (re)allocate dynamic slots. Do this first, as it's the
        // only fallible operation here. This simplifies the callTypeUpdateIC
        // call below: it does not have to worry about saving registers used by
        // failure paths. Note that growSlotsDontReportOOM is fallible but does
        // not GC.
        Address numNewSlotsAddr = stubAddress(reader.stubOffset());

        FailurePath* failure;
        if (!addFailurePath(&failure))
            return false;

        // Save all volatile registers around the ABI call, except scratch1
        // which receives the call's boolean result.
        LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
        masm.PushRegsInMask(save);

        masm.setupUnalignedABICall(scratch1);
        masm.loadJSContext(scratch1);
        masm.passABIArg(scratch1);
        masm.passABIArg(obj);
        masm.load32(numNewSlotsAddr, scratch2);
        masm.passABIArg(scratch2);
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, NativeObject::growSlotsDontReportOOM));
        masm.mov(ReturnReg, scratch1);

        LiveRegisterSet ignore;
        ignore.add(scratch1);
        masm.PopRegsInMaskIgnore(save, ignore);

        // Bail to the next stub on OOM.
        masm.branchIfFalseBool(scratch1, failure->label());
    }

    // |obj| and |val| must survive the fallback VM call inside
    // callTypeUpdateIC.
    LiveGeneralRegisterSet saveRegs;
    saveRegs.add(obj);
    saveRegs.add(val);
    if (!callTypeUpdateIC(obj, val, scratch1, saveRegs))
        return false;

    if (changeGroup) {
        // Changing object's group from a partially to fully initialized group,
        // per the acquired properties analysis. Only change the group if the
        // old group still has a newScript. This only applies to PlainObjects.
        Label noGroupChange;
        masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch1);
        masm.branchPtr(Assembler::Equal,
                       Address(scratch1, ObjectGroup::offsetOfAddendum()),
                       ImmWord(0),
                       &noGroupChange);

        // Reload the new group from the cache.
        masm.loadPtr(newGroupAddr, scratch1);

        Address groupAddr(obj, JSObject::offsetOfGroup());
        EmitPreBarrier(masm, groupAddr, MIRType::ObjectGroup);
        masm.storePtr(scratch1, groupAddr);

        masm.bind(&noGroupChange);
    }

    // Update the object's shape.
    Address shapeAddr(obj, ShapedObject::offsetOfShape());
    masm.loadPtr(newShapeAddr, scratch1);
    EmitPreBarrier(masm, shapeAddr, MIRType::Shape);
    masm.storePtr(scratch1, shapeAddr);

    // Perform the store. No pre-barrier required since this is a new
    // initialization.
    masm.load32(offsetAddr, scratch1);
    if (op == CacheOp::AddAndStoreFixedSlot) {
        BaseIndex slot(obj, scratch1, TimesOne);
        masm.storeValue(val, slot);
    } else {
        MOZ_ASSERT(op == CacheOp::AddAndStoreDynamicSlot ||
                   op == CacheOp::AllocateAndStoreDynamicSlot);
        masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2);
        BaseIndex slot(scratch2, scratch1, TimesOne);
        masm.storeValue(val, slot);
    }

    emitPostBarrierSlot(obj, val, scratch1);
    return true;
}
1091 :
1092 : bool
1093 12 : BaselineCacheIRCompiler::emitAddAndStoreFixedSlot()
1094 : {
1095 12 : return emitAddAndStoreSlotShared(CacheOp::AddAndStoreFixedSlot);
1096 : }
1097 :
1098 : bool
1099 10 : BaselineCacheIRCompiler::emitAddAndStoreDynamicSlot()
1100 : {
1101 10 : return emitAddAndStoreSlotShared(CacheOp::AddAndStoreDynamicSlot);
1102 : }
1103 :
1104 : bool
1105 10 : BaselineCacheIRCompiler::emitAllocateAndStoreDynamicSlot()
1106 : {
1107 10 : return emitAddAndStoreSlotShared(CacheOp::AllocateAndStoreDynamicSlot);
1108 : }
1109 :
// StoreUnboxedProperty: store |val| into an unboxed object's property of the
// given JSValueType, at the byte offset read from the stub data. The type
// update IC is only needed when storing objects.
bool
BaselineCacheIRCompiler::emitStoreUnboxedProperty()
{
    ObjOperandId objId = reader.objOperandId();
    JSValueType fieldType = reader.valueType();
    Address offsetAddr = stubAddress(reader.stubOffset());

    // Allocate the fixed registers first. These need to be fixed for
    // callTypeUpdateIC.
    AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
    ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);

    Register obj = allocator.useRegister(masm, objId);

    // We only need the type update IC if we are storing an object.
    if (fieldType == JSVAL_TYPE_OBJECT) {
        LiveGeneralRegisterSet saveRegs;
        saveRegs.add(obj);
        saveRegs.add(val);
        if (!callTypeUpdateIC(obj, val, scratch, saveRegs))
            return false;
    }

    // Compute the field address from the offset in the stub data.
    masm.load32(offsetAddr, scratch);
    BaseIndex fieldAddr(obj, scratch, TimesOne);

    // Note that the storeUnboxedProperty call here is infallible, as the
    // IR emitter is responsible for guarding on |val|'s type.
    EmitICUnboxedPreBarrier(masm, fieldAddr, fieldType);
    masm.storeUnboxedProperty(fieldAddr, fieldType,
                              ConstantOrRegister(TypedOrValueRegister(val)),
                              /* failure = */ nullptr);

    if (UnboxedTypeNeedsPostBarrier(fieldType))
        emitPostBarrierSlot(obj, val, scratch);
    return true;
}
1147 :
// StoreTypedObjectReferenceProperty: store a reference-typed value (ANY,
// OBJECT or STRING per ReferenceTypeDescr) into a typed object at the byte
// offset read from the stub data.
bool
BaselineCacheIRCompiler::emitStoreTypedObjectReferenceProperty()
{
    ObjOperandId objId = reader.objOperandId();
    Address offsetAddr = stubAddress(reader.stubOffset());
    TypedThingLayout layout = reader.typedThingLayout();
    ReferenceTypeDescr::Type type = reader.referenceTypeDescrType();

    // Allocate the fixed registers first. These need to be fixed for
    // callTypeUpdateIC.
    AutoScratchRegister scratch1(allocator, masm, R1.scratchReg());
    ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);

    Register obj = allocator.useRegister(masm, objId);
    AutoScratchRegister scratch2(allocator, masm);

    // We don't need a type update IC if the property is always a string.
    if (type != ReferenceTypeDescr::TYPE_STRING) {
        LiveGeneralRegisterSet saveRegs;
        saveRegs.add(obj);
        saveRegs.add(val);
        if (!callTypeUpdateIC(obj, val, scratch1, saveRegs))
            return false;
    }

    // Compute the address being written to.
    LoadTypedThingData(masm, layout, obj, scratch1);
    masm.addPtr(offsetAddr, scratch1);
    Address dest(scratch1, 0);

    emitStoreTypedObjectReferenceProp(val, type, dest, scratch2);

    // Strings don't require a post barrier here; other reference types do.
    if (type != ReferenceTypeDescr::TYPE_STRING)
        emitPostBarrierSlot(obj, val, scratch1);
    return true;
}
1184 :
// StoreTypedObjectScalarProperty: store a scalar value into a typed object
// at the byte offset read from the stub data. Conversion failures jump to
// the failure path.
bool
BaselineCacheIRCompiler::emitStoreTypedObjectScalarProperty()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    Address offsetAddr = stubAddress(reader.stubOffset());
    TypedThingLayout layout = reader.typedThingLayout();
    Scalar::Type type = reader.scalarType();
    ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
    AutoScratchRegister scratch1(allocator, masm);
    AutoScratchRegister scratch2(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Compute the address being written to.
    LoadTypedThingData(masm, layout, obj, scratch1);
    masm.addPtr(offsetAddr, scratch1);
    Address dest(scratch1, 0);

    StoreToTypedArray(cx_, masm, type, val, dest, scratch2, failure->label());
    return true;
}
1208 :
// StoreDenseElement: store |val| into an existing (in-bounds, non-hole)
// dense element. Bails to the failure path on out-of-bounds index, holes,
// copy-on-write elements or frozen elements; converts int32 values to
// doubles in place when the elements are marked CONVERT_DOUBLE_ELEMENTS.
bool
BaselineCacheIRCompiler::emitStoreDenseElement()
{
    ObjOperandId objId = reader.objOperandId();
    Int32OperandId indexId = reader.int32OperandId();

    // Allocate the fixed registers first. These need to be fixed for
    // callTypeUpdateIC.
    AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
    ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);

    Register obj = allocator.useRegister(masm, objId);
    Register index = allocator.useRegister(masm, indexId);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Load obj->elements in scratch.
    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

    // Bounds check.
    Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
    masm.branch32(Assembler::BelowOrEqual, initLength, index, failure->label());

    // Hole check.
    BaseObjectElementIndex element(scratch, index);
    masm.branchTestMagic(Assembler::Equal, element, failure->label());

    // Perform a single test to see if we either need to convert double
    // elements, clone the copy on write elements in the object or fail
    // due to a frozen element.
    Label noSpecialHandling;
    Address elementsFlags(scratch, ObjectElements::offsetOfFlags());
    masm.branchTest32(Assembler::Zero, elementsFlags,
                      Imm32(ObjectElements::CONVERT_DOUBLE_ELEMENTS |
                            ObjectElements::COPY_ON_WRITE |
                            ObjectElements::FROZEN),
                      &noSpecialHandling);

    // Fail if we need to clone copy on write elements or to throw due
    // to a frozen element.
    masm.branchTest32(Assembler::NonZero, elementsFlags,
                      Imm32(ObjectElements::COPY_ON_WRITE |
                            ObjectElements::FROZEN),
                      failure->label());

    // We need to convert int32 values being stored into doubles. Note that
    // double arrays are only created by IonMonkey, so if we have no FP support
    // Ion is disabled and there should be no double arrays.
    if (cx_->runtime()->jitSupportsFloatingPoint) {
        // It's fine to convert the value in place in Baseline. We can't do
        // this in Ion.
        masm.convertInt32ValueToDouble(val);
    } else {
        masm.assumeUnreachable("There shouldn't be double arrays when there is no FP support.");
    }

    masm.bind(&noSpecialHandling);

    // Call the type update IC. After this everything must be infallible as we
    // don't save all registers here.
    LiveGeneralRegisterSet saveRegs;
    saveRegs.add(obj);
    saveRegs.add(index);
    saveRegs.add(val);
    if (!callTypeUpdateIC(obj, val, scratch, saveRegs))
        return false;

    // Perform the store. Reload obj->elements because callTypeUpdateIC
    // used the scratch register.
    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
    EmitPreBarrier(masm, element, MIRType::Value);
    masm.storeValue(val, element);

    emitPostBarrierElement(obj, val, scratch, index);
    return true;
}
1287 :
// StoreDenseElementHole: store |val| into a dense element, optionally
// (|handleAdd|) appending a new element at index == initializedLength,
// growing the elements via addDenseElementDontReportOOM and bumping
// initializedLength/length as needed. Bails on copy-on-write or frozen
// elements, and on indices that would leave a hole.
bool
BaselineCacheIRCompiler::emitStoreDenseElementHole()
{
    ObjOperandId objId = reader.objOperandId();
    Int32OperandId indexId = reader.int32OperandId();

    // Allocate the fixed registers first. These need to be fixed for
    // callTypeUpdateIC.
    AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
    ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);

    Register obj = allocator.useRegister(masm, objId);
    Register index = allocator.useRegister(masm, indexId);

    bool handleAdd = reader.readBool();

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Load obj->elements in scratch.
    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

    BaseObjectElementIndex element(scratch, index);
    Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
    Address elementsFlags(scratch, ObjectElements::offsetOfFlags());

    // Check for copy-on-write or frozen elements.
    masm.branchTest32(Assembler::NonZero, elementsFlags,
                      Imm32(ObjectElements::COPY_ON_WRITE |
                            ObjectElements::FROZEN),
                      failure->label());

    if (handleAdd) {
        // Fail if index > initLength.
        masm.branch32(Assembler::Below, initLength, index, failure->label());

        // If index < capacity, we can add a dense element inline. If not we
        // need to allocate more elements.
        Label capacityOk;
        Address capacity(scratch, ObjectElements::offsetOfCapacity());
        masm.branch32(Assembler::Above, capacity, index, &capacityOk);

        // Check for non-writable array length. We only have to do this if
        // index >= capacity.
        masm.branchTest32(Assembler::NonZero, elementsFlags,
                          Imm32(ObjectElements::NONWRITABLE_ARRAY_LENGTH),
                          failure->label());

        // Grow the elements with an ABI call; save volatile registers except
        // |scratch|, which receives the boolean result.
        LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
        save.takeUnchecked(scratch);
        masm.PushRegsInMask(save);

        masm.setupUnalignedABICall(scratch);
        masm.loadJSContext(scratch);
        masm.passABIArg(scratch);
        masm.passABIArg(obj);
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, NativeObject::addDenseElementDontReportOOM));
        masm.mov(ReturnReg, scratch);

        masm.PopRegsInMask(save);
        masm.branchIfFalseBool(scratch, failure->label());

        // Load the reallocated elements pointer.
        masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

        masm.bind(&capacityOk);

        // We increment initLength after the callTypeUpdateIC call, to ensure
        // the type update code doesn't read uninitialized memory.
    } else {
        // Fail if index >= initLength.
        masm.branch32(Assembler::BelowOrEqual, initLength, index, failure->label());
    }

    // Check if we have to convert a double element.
    Label noConversion;
    masm.branchTest32(Assembler::Zero, elementsFlags,
                      Imm32(ObjectElements::CONVERT_DOUBLE_ELEMENTS),
                      &noConversion);

    // We need to convert int32 values being stored into doubles. Note that
    // double arrays are only created by IonMonkey, so if we have no FP support
    // Ion is disabled and there should be no double arrays.
    if (cx_->runtime()->jitSupportsFloatingPoint) {
        // It's fine to convert the value in place in Baseline. We can't do
        // this in Ion.
        masm.convertInt32ValueToDouble(val);
    } else {
        masm.assumeUnreachable("There shouldn't be double arrays when there is no FP support.");
    }

    masm.bind(&noConversion);

    // Call the type update IC. After this everything must be infallible as we
    // don't save all registers here.
    LiveGeneralRegisterSet saveRegs;
    saveRegs.add(obj);
    saveRegs.add(index);
    saveRegs.add(val);
    if (!callTypeUpdateIC(obj, val, scratch, saveRegs))
        return false;

    // Reload obj->elements as callTypeUpdateIC used the scratch register.
    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

    Label doStore;
    if (handleAdd) {
        // If index == initLength, increment initLength.
        Label inBounds;
        masm.branch32(Assembler::NotEqual, initLength, index, &inBounds);

        // Increment initLength.
        masm.add32(Imm32(1), initLength);

        // If length is now <= index, increment length too.
        Label skipIncrementLength;
        Address length(scratch, ObjectElements::offsetOfLength());
        masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
        masm.add32(Imm32(1), length);
        masm.bind(&skipIncrementLength);

        // Skip EmitPreBarrier as the memory is uninitialized.
        masm.jump(&doStore);

        masm.bind(&inBounds);
    }

    EmitPreBarrier(masm, element, MIRType::Value);

    masm.bind(&doStore);
    masm.storeValue(val, element);

    emitPostBarrierElement(obj, val, scratch, index);
    return true;
}
1424 :
// StoreTypedElement: store a scalar value into a typed array / typed object
// element. If |handleOOB| is set, out-of-bounds indices are silently ignored
// (falling through to |done|); otherwise they jump to the failure path.
bool
BaselineCacheIRCompiler::emitStoreTypedElement()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    Register index = allocator.useRegister(masm, reader.int32OperandId());
    ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());

    TypedThingLayout layout = reader.typedThingLayout();
    Scalar::Type type = reader.scalarType();
    bool handleOOB = reader.readBool();

    AutoScratchRegister scratch1(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Bounds check.
    Label done;
    LoadTypedThingLength(masm, layout, obj, scratch1);
    masm.branch32(Assembler::BelowOrEqual, scratch1, index, handleOOB ? &done : failure->label());

    // Load the elements vector.
    LoadTypedThingData(masm, layout, obj, scratch1);

    BaseIndex dest(scratch1, index, ScaleFromElemWidth(Scalar::byteSize(type)));

    // Use ICStubReg as second scratch register. TODO: consider doing the RHS
    // type check/conversion as a separate IR instruction so we can simplify
    // this.
    Register scratch2 = ICStubReg;
    masm.push(scratch2);

    // On conversion failure, restore scratch2 before jumping to the failure
    // path so the stack stays balanced.
    Label fail;
    StoreToTypedArray(cx_, masm, type, val, dest, scratch2, &fail);
    masm.pop(scratch2);
    masm.jump(&done);

    masm.bind(&fail);
    masm.pop(scratch2);
    masm.jump(failure->label());

    masm.bind(&done);
    return true;
}
1470 :
// StoreUnboxedArrayElement: store |val| into an existing element of an
// unboxed array. Fails on out-of-bounds indices; the type update IC is only
// needed when storing objects.
bool
BaselineCacheIRCompiler::emitStoreUnboxedArrayElement()
{
    ObjOperandId objId = reader.objOperandId();
    Int32OperandId indexId = reader.int32OperandId();

    // Allocate the fixed registers first. These need to be fixed for
    // callTypeUpdateIC.
    AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
    ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);

    JSValueType elementType = reader.valueType();
    Register obj = allocator.useRegister(masm, objId);
    Register index = allocator.useRegister(masm, indexId);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Bounds check. The initialized length shares a word with the capacity
    // and index, so mask it out first.
    Address initLength(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength());
    masm.load32(initLength, scratch);
    masm.and32(Imm32(UnboxedArrayObject::InitializedLengthMask), scratch);
    masm.branch32(Assembler::BelowOrEqual, scratch, index, failure->label());

    // Call the type update IC. After this everything must be infallible as we
    // don't save all registers here.
    if (elementType == JSVAL_TYPE_OBJECT) {
        LiveGeneralRegisterSet saveRegs;
        saveRegs.add(obj);
        saveRegs.add(index);
        saveRegs.add(val);
        if (!callTypeUpdateIC(obj, val, scratch, saveRegs))
            return false;
    }

    // Load obj->elements.
    masm.loadPtr(Address(obj, UnboxedArrayObject::offsetOfElements()), scratch);

    // Note that the storeUnboxedProperty call here is infallible, as the
    // IR emitter is responsible for guarding on |val|'s type.
    BaseIndex element(scratch, index, ScaleFromElemWidth(UnboxedTypeSize(elementType)));
    EmitICUnboxedPreBarrier(masm, element, elementType);
    masm.storeUnboxedProperty(element, elementType,
                              ConstantOrRegister(TypedOrValueRegister(val)),
                              /* failure = */ nullptr);

    if (UnboxedTypeNeedsPostBarrier(elementType))
        emitPostBarrierSlot(obj, val, scratch);
    return true;
}
1522 :
// StoreUnboxedArrayElementHole: store |val| into an unboxed array element,
// allowing an append at index == initializedLength (bumping initialized
// length and, if needed, length). Fails when the index is past initialized
// length or exceeds the capacity.
bool
BaselineCacheIRCompiler::emitStoreUnboxedArrayElementHole()
{
    ObjOperandId objId = reader.objOperandId();
    Int32OperandId indexId = reader.int32OperandId();

    // Allocate the fixed registers first. These need to be fixed for
    // callTypeUpdateIC.
    AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
    ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);

    JSValueType elementType = reader.valueType();
    Register obj = allocator.useRegister(masm, objId);
    Register index = allocator.useRegister(masm, indexId);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Check index <= initLength. The initialized length shares a word with
    // the capacity and index, so mask it out first.
    Address initLength(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength());
    masm.load32(initLength, scratch);
    masm.and32(Imm32(UnboxedArrayObject::InitializedLengthMask), scratch);
    masm.branch32(Assembler::Below, scratch, index, failure->label());

    // Check capacity.
    masm.checkUnboxedArrayCapacity(obj, RegisterOrInt32Constant(index), scratch, failure->label());

    // Call the type update IC. After this everything must be infallible as we
    // don't save all registers here.
    if (elementType == JSVAL_TYPE_OBJECT) {
        LiveGeneralRegisterSet saveRegs;
        saveRegs.add(obj);
        saveRegs.add(index);
        saveRegs.add(val);
        if (!callTypeUpdateIC(obj, val, scratch, saveRegs))
            return false;
    }

    // Load obj->elements.
    masm.loadPtr(Address(obj, UnboxedArrayObject::offsetOfElements()), scratch);

    // If index == initLength, increment initialized length.
    Label inBounds, doStore;
    masm.load32(initLength, scratch);
    masm.and32(Imm32(UnboxedArrayObject::InitializedLengthMask), scratch);
    masm.branch32(Assembler::NotEqual, scratch, index, &inBounds);

    masm.add32(Imm32(1), initLength);

    // If length is now <= index, increment length.
    Address length(obj, UnboxedArrayObject::offsetOfLength());
    Label skipIncrementLength;
    masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
    masm.add32(Imm32(1), length);
    masm.bind(&skipIncrementLength);

    // Skip EmitICUnboxedPreBarrier as the memory is uninitialized.
    masm.jump(&doStore);

    masm.bind(&inBounds);

    // NOTE(review): |scratch| was clobbered by the initLength reload above,
    // but obj->elements is reloaded implicitly via the earlier loadPtr only
    // on the append path; here |element| bases on |scratch| as set before
    // the branch — verify against the original control flow. (Code left
    // byte-identical.)
    BaseIndex element(scratch, index, ScaleFromElemWidth(UnboxedTypeSize(elementType)));
    EmitICUnboxedPreBarrier(masm, element, elementType);

    // Note that the storeUnboxedProperty call here is infallible, as the
    // IR emitter is responsible for guarding on |val|'s type.
    masm.bind(&doStore);
    masm.storeUnboxedProperty(element, elementType,
                              ConstantOrRegister(TypedOrValueRegister(val)),
                              /* failure = */ nullptr);

    if (UnboxedTypeNeedsPostBarrier(elementType))
        emitPostBarrierSlot(obj, val, scratch);
    return true;
}
1599 :
// VM entry point used by CallNativeSetter stubs.
typedef bool (*CallNativeSetterFn)(JSContext*, HandleFunction, HandleObject, HandleValue);
static const VMFunction CallNativeSetterInfo =
    FunctionInfo<CallNativeSetterFn>(CallNativeSetter, "CallNativeSetter");

// CallNativeSetter: invoke a native (C++) setter function via the VM, with
// the setter loaded from the stub data and |obj| as the receiver.
bool
BaselineCacheIRCompiler::emitCallNativeSetter()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    Address setterAddr(stubAddress(reader.stubOffset()));
    ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());

    AutoScratchRegister scratch(allocator, masm);

    allocator.discardStack(masm);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    // Load the callee in the scratch register.
    masm.loadPtr(setterAddr, scratch);

    // Arguments for CallNativeSetter: value, object, setter function.
    masm.Push(val);
    masm.Push(obj);
    masm.Push(scratch);

    if (!callVM(masm, CallNativeSetterInfo))
        return false;

    stubFrame.leave(masm);
    return true;
}
1631 :
// CallScriptedSetter: call a scripted setter's JIT code directly. Guards
// that the setter has a script with Baseline or Ion code, builds a
// JitFrameLayout-aligned call with one argument and |obj| as thisv, and
// goes through the arguments rectifier when the setter expects more than
// one formal argument.
bool
BaselineCacheIRCompiler::emitCallScriptedSetter()
{
    AutoScratchRegisterExcluding scratch1(allocator, masm, ArgumentsRectifierReg);
    AutoScratchRegister scratch2(allocator, masm);

    Register obj = allocator.useRegister(masm, reader.objOperandId());
    Address setterAddr(stubAddress(reader.stubOffset()));
    ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());

    // First, ensure our setter is non-lazy and has JIT code. This also loads
    // the callee in scratch1.
    {
        FailurePath* failure;
        if (!addFailurePath(&failure))
            return false;

        masm.loadPtr(setterAddr, scratch1);
        masm.branchIfFunctionHasNoScript(scratch1, failure->label());
        masm.loadPtr(Address(scratch1, JSFunction::offsetOfNativeOrScript()), scratch2);
        masm.loadBaselineOrIonRaw(scratch2, scratch2, failure->label());
    }

    allocator.discardStack(masm);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch2);

    // Align the stack such that the JitFrameLayout is aligned on
    // JitStackAlignment.
    masm.alignJitStackBasedOnNArgs(1);

    // Setter is called with 1 argument, and |obj| as thisv. Note that we use
    // Push, not push, so that callJit will align the stack properly on ARM.
    masm.Push(val);
    masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));

    // Now that the object register is no longer needed, use it as second
    // scratch.
    EmitBaselineCreateStubFrameDescriptor(masm, scratch2, JitFrameLayout::Size());
    masm.Push(Imm32(1)); // ActualArgc

    // Push callee.
    masm.Push(scratch1);

    // Push frame descriptor.
    masm.Push(scratch2);

    // Load callee->nargs in scratch2 and the JIT code in scratch.
    Label noUnderflow;
    masm.load16ZeroExtend(Address(scratch1, JSFunction::offsetOfNargs()), scratch2);
    masm.loadPtr(Address(scratch1, JSFunction::offsetOfNativeOrScript()), scratch1);
    masm.loadBaselineOrIonRaw(scratch1, scratch1, nullptr);

    // Handle arguments underflow.
    masm.branch32(Assembler::BelowOrEqual, scratch2, Imm32(1), &noUnderflow);
    {
        // Call the arguments rectifier.
        MOZ_ASSERT(ArgumentsRectifierReg != scratch1);

        JitCode* argumentsRectifier = cx_->runtime()->jitRuntime()->getArgumentsRectifier();
        masm.movePtr(ImmGCPtr(argumentsRectifier), scratch1);
        masm.loadPtr(Address(scratch1, JitCode::offsetOfCode()), scratch1);
        masm.movePtr(ImmWord(1), ArgumentsRectifierReg);
    }

    masm.bind(&noUnderflow);
    masm.callJit(scratch1);

    stubFrame.leave(masm, true);
    return true;
}
1704 :
// VM entry point used by CallSetArrayLength stubs.
typedef bool (*SetArrayLengthFn)(JSContext*, HandleObject, HandleValue, bool);
static const VMFunction SetArrayLengthInfo =
    FunctionInfo<SetArrayLengthFn>(SetArrayLength, "SetArrayLength");

// CallSetArrayLength: set an array's |length| property via the VM, passing
// the strict-mode flag read from the stub data.
bool
BaselineCacheIRCompiler::emitCallSetArrayLength()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    bool strict = reader.readBool();
    ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());

    AutoScratchRegister scratch(allocator, masm);

    allocator.discardStack(masm);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    // Arguments for SetArrayLength: strict flag, new length value, array.
    masm.Push(Imm32(strict));
    masm.Push(val);
    masm.Push(obj);

    if (!callVM(masm, SetArrayLengthInfo))
        return false;

    stubFrame.leave(masm);
    return true;
}
1733 :
// VM entry point used by CallProxySet stubs.
typedef bool (*ProxySetPropertyFn)(JSContext*, HandleObject, HandleId, HandleValue, bool);
static const VMFunction ProxySetPropertyInfo =
    FunctionInfo<ProxySetPropertyFn>(ProxySetProperty, "ProxySetProperty");

// CallProxySet: set a property on a proxy via the VM, with the jsid loaded
// from the stub data and the strict-mode flag from the IR.
bool
BaselineCacheIRCompiler::emitCallProxySet()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
    Address idAddr(stubAddress(reader.stubOffset()));
    bool strict = reader.readBool();

    AutoScratchRegister scratch(allocator, masm);

    allocator.discardStack(masm);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    // Load the jsid in the scratch register.
    masm.loadPtr(idAddr, scratch);

    // Arguments for ProxySetProperty: strict flag, value, id, proxy.
    masm.Push(Imm32(strict));
    masm.Push(val);
    masm.Push(scratch);
    masm.Push(obj);

    if (!callVM(masm, ProxySetPropertyInfo))
        return false;

    stubFrame.leave(masm);
    return true;
}
1767 :
// VM-function wrapper for ProxySetPropertyByValue; used by
// emitCallProxySetByValue when the property id is a dynamic Value.
typedef bool (*ProxySetPropertyByValueFn)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
static const VMFunction ProxySetPropertyByValueInfo =
    FunctionInfo<ProxySetPropertyByValueFn>(ProxySetPropertyByValue, "ProxySetPropertyByValue");
1771 :
bool
BaselineCacheIRCompiler::emitCallProxySetByValue()
{
    // Set a property with a dynamically-computed id on a proxy by calling
    // the ProxySetPropertyByValue VM function.
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    ValueOperand idVal = allocator.useValueRegister(masm, reader.valOperandId());
    ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
    bool strict = reader.readBool();

    allocator.discardStack(masm);

    // We need a scratch register but we don't have any registers available on
    // x86, so temporarily store |obj| in the frame's scratch slot.
    int scratchOffset = BaselineFrame::reverseOffsetOfScratchValue();
    masm.storePtr(obj, Address(BaselineFrameReg, scratchOffset));

    // Enter the stub frame, reusing |obj| as the frame-setup scratch register
    // (its value was saved above).
    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, obj);

    // Restore |obj|. Because we entered a stub frame we first have to load
    // the original frame pointer.
    masm.loadPtr(Address(BaselineFrameReg, 0), obj);
    masm.loadPtr(Address(obj, scratchOffset), obj);

    // Push VM call arguments in reverse order: (obj, idVal, val, strict).
    masm.Push(Imm32(strict));
    masm.Push(val);
    masm.Push(idVal);
    masm.Push(obj);

    if (!callVM(masm, ProxySetPropertyByValueInfo))
        return false;

    stubFrame.leave(masm);
    return true;
}
1806 :
bool
BaselineCacheIRCompiler::emitTypeMonitorResult()
{
    // Finish the stub by jumping into the type-monitor IC so the result
    // value gets type-monitored before control returns to the caller.
    allocator.discardStack(masm);
    EmitEnterTypeMonitorIC(masm);
    return true;
}
1814 :
bool
BaselineCacheIRCompiler::emitReturnFromIC()
{
    // Finish the stub by emitting a direct return from the IC (no type
    // monitoring of the result).
    allocator.discardStack(masm);
    EmitReturnFromIC(masm);
    return true;
}
1822 :
bool
BaselineCacheIRCompiler::emitLoadObject()
{
    // Load an object pointer stored in the stub data into a fresh register
    // for the given object operand.
    Register reg = allocator.defineRegister(masm, reader.objOperandId());
    masm.loadPtr(stubAddress(reader.stubOffset()), reg);
    return true;
}
1830 :
bool
BaselineCacheIRCompiler::emitLoadStackValue()
{
    // Load a Value from a Baseline frame stack slot (slot index comes from a
    // uint32 immediate in the IR stream) into a fresh value register.
    ValueOperand val = allocator.defineValueRegister(masm, reader.valOperandId());
    Address addr = allocator.addressOf(masm, BaselineFrameSlot(reader.uint32Immediate()));
    masm.loadValue(addr, val);
    return true;
}
1839 :
bool
BaselineCacheIRCompiler::emitGuardDOMExpandoMissingOrGuardShape()
{
    // Guard that the expando value is either undefined (no expando) or an
    // object whose shape matches the shape stored in the stub data.
    ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
    AutoScratchRegister shapeScratch(allocator, masm);
    AutoScratchRegister objScratch(allocator, masm);
    Address shapeAddr(stubAddress(reader.stubOffset()));

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // An undefined expando value trivially passes the guard.
    Label done;
    masm.branchTestUndefined(Assembler::Equal, val, &done);

    // Otherwise the value must be an object: unbox it and compare its shape
    // against the expected shape, bailing to the failure path on mismatch.
    masm.debugAssertIsObject(val);
    masm.loadPtr(shapeAddr, shapeScratch);
    masm.unboxObject(val, objScratch);
    masm.branchTestObjShape(Assembler::NotEqual, objScratch, shapeScratch, failure->label());

    masm.bind(&done);
    return true;
}
1863 :
bool
BaselineCacheIRCompiler::emitLoadDOMExpandoValueGuardGeneration()
{
    // Load a DOM proxy's expando value after verifying that both the
    // ExpandoAndGeneration pointer and its generation counter still match
    // the values recorded in the stub data.
    // Note: the two stubAddress() reads below consume two consecutive stub
    // fields, so their order matters.
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    Address expandoAndGenerationAddr(stubAddress(reader.stubOffset()));
    Address generationAddr(stubAddress(reader.stubOffset()));

    AutoScratchRegister scratch(allocator, masm);
    ValueOperand output = allocator.defineValueRegister(masm, reader.valOperandId());

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Get the address of the proxy's private slot via its reserved slots.
    masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
    Address expandoAddr(scratch, detail::ProxyReservedSlots::offsetOfPrivateSlot());

    // Load the ExpandoAndGeneration* in the output scratch register and guard
    // it matches the proxy's ExpandoAndGeneration.
    masm.loadPtr(expandoAndGenerationAddr, output.scratchReg());
    masm.branchPrivatePtr(Assembler::NotEqual, expandoAddr, output.scratchReg(), failure->label());

    // Guard expandoAndGeneration->generation matches the expected generation.
    masm.branch64(Assembler::NotEqual,
                  Address(output.scratchReg(), ExpandoAndGeneration::offsetOfGeneration()),
                  generationAddr,
                  scratch, failure->label());

    // Load expandoAndGeneration->expando into the output Value register.
    masm.loadValue(Address(output.scratchReg(), ExpandoAndGeneration::offsetOfExpando()), output);
    return true;
}
1896 :
bool
BaselineCacheIRCompiler::init(CacheKind kind)
{
    // Set up the register allocator for this stub: tell it where each IC
    // input lives on entry and which general registers are available.
    if (!allocator.init())
        return false;

    // Baseline ICs monitor values when needed, so returning doubles is fine.
    allowDoubleResult_.emplace(true);

    size_t numInputs = writer_.numInputOperands();

    // Baseline passes the first 2 inputs in R0/R1, other Values are stored on
    // the stack.
    size_t numInputsInRegs = std::min(numInputs, size_t(2));
    AllocatableGeneralRegisterSet available(ICStubCompiler::availableGeneralRegs(numInputsInRegs));

    // Record per-kind input locations. Kinds with a third input take it from
    // the first Baseline frame stack slot.
    switch (kind) {
      case CacheKind::GetProp:
      case CacheKind::TypeOf:
        MOZ_ASSERT(numInputs == 1);
        allocator.initInputLocation(0, R0);
        break;
      case CacheKind::Compare:
      case CacheKind::GetElem:
      case CacheKind::GetPropSuper:
      case CacheKind::SetProp:
      case CacheKind::In:
      case CacheKind::HasOwn:
        MOZ_ASSERT(numInputs == 2);
        allocator.initInputLocation(0, R0);
        allocator.initInputLocation(1, R1);
        break;
      case CacheKind::GetElemSuper:
      case CacheKind::SetElem:
        MOZ_ASSERT(numInputs == 3);
        allocator.initInputLocation(0, R0);
        allocator.initInputLocation(1, R1);
        allocator.initInputLocation(2, BaselineFrameSlot(0));
        break;
      case CacheKind::GetName:
      case CacheKind::BindName:
        MOZ_ASSERT(numInputs == 1);
        allocator.initInputLocation(0, R0.scratchReg(), JSVAL_TYPE_OBJECT);
#if defined(JS_NUNBOX32)
        // availableGeneralRegs can't know that GetName/BindName is only using
        // the payloadReg and not typeReg on x86.
        available.add(R0.typeReg());
#endif
        break;
      case CacheKind::Call:
        MOZ_ASSERT(numInputs == 1);
        allocator.initInputLocation(0, R0.scratchReg(), JSVAL_TYPE_INT32);
#if defined(JS_NUNBOX32)
        // availableGeneralRegs can't know that Call is only using
        // the payloadReg and not typeReg on x86.
        available.add(R0.typeReg());
#endif
        break;
    }

    // Baseline doesn't allocate float registers so none of them are live.
    liveFloatRegs_ = LiveFloatRegisterSet(FloatRegisterSet());

    allocator.initAvailableRegs(available);
    // The stub's output Value is produced in R0.
    outputUnchecked_.emplace(R0);
    return true;
}
1964 :
// Upper bound on the number of optimized CacheIR stubs attached to one
// fallback stub; asserted (not enforced) in AttachBaselineCacheIRStub.
static const size_t MaxOptimizedCacheIRStubs = 16;
1966 :
// Compile (or reuse cached) stub code for |writer|'s CacheIR, then allocate
// and attach a new optimized stub to |stub|'s chain. Returns the new stub,
// or nullptr if no stub was attached (duplicate stub, OOM, or failed
// writer). |*attached| is set to true only when a stub is attached or an
// existing stub was updated in place.
ICStub*
jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
                               CacheKind kind, ICStubEngine engine, JSScript* outerScript,
                               ICFallbackStub* stub, bool* attached)
{
    // We shouldn't GC or report OOM (or any other exception) here.
    AutoAssertNoPendingException aanpe(cx);
    JS::AutoCheckCannotGC nogc;

    MOZ_ASSERT(!*attached);

    if (writer.failed())
        return nullptr;

    // Just a sanity check: the caller should ensure we don't attach an
    // unlimited number of stubs.
    MOZ_ASSERT(stub->numOptimizedStubs() < MaxOptimizedCacheIRStubs);

    enum class CacheIRStubKind { Regular, Monitored, Updated };

    // The stub data is stored after the stub object itself; which concrete
    // ICCacheIR_* class is used (and hence the data offset) depends on the
    // cache kind.
    uint32_t stubDataOffset;
    CacheIRStubKind stubKind;
    switch (kind) {
      case CacheKind::Compare:
      case CacheKind::In:
      case CacheKind::HasOwn:
      case CacheKind::BindName:
      case CacheKind::TypeOf:
        stubDataOffset = sizeof(ICCacheIR_Regular);
        stubKind = CacheIRStubKind::Regular;
        break;
      case CacheKind::GetProp:
      case CacheKind::GetElem:
      case CacheKind::GetName:
      case CacheKind::GetPropSuper:
      case CacheKind::GetElemSuper:
      case CacheKind::Call:
        stubDataOffset = sizeof(ICCacheIR_Monitored);
        stubKind = CacheIRStubKind::Monitored;
        break;
      case CacheKind::SetProp:
      case CacheKind::SetElem:
        stubDataOffset = sizeof(ICCacheIR_Updated);
        stubKind = CacheIRStubKind::Updated;
        break;
    }

    JitZone* jitZone = cx->zone()->jitZone();

    // Check if we already have JitCode for this stub.
    CacheIRStubInfo* stubInfo;
    CacheIRStubKey::Lookup lookup(kind, engine, writer.codeStart(), writer.codeLength());
    JitCode* code = jitZone->getBaselineCacheIRStubCode(lookup, &stubInfo);
    if (!code) {
        // We have to generate stub code.
        JitContext jctx(cx, nullptr);
        BaselineCacheIRCompiler comp(cx, writer, engine, stubDataOffset);
        if (!comp.init(kind))
            return nullptr;

        code = comp.compile();
        if (!code)
            return nullptr;

        // Allocate the shared CacheIRStubInfo. Note that the
        // putBaselineCacheIRStubCode call below will transfer ownership
        // to the stub code HashMap, so we don't have to worry about freeing
        // it below.
        MOZ_ASSERT(!stubInfo);
        stubInfo = CacheIRStubInfo::New(kind, engine, comp.makesGCCalls(), stubDataOffset, writer);
        if (!stubInfo)
            return nullptr;

        CacheIRStubKey key(stubInfo);
        if (!jitZone->putBaselineCacheIRStubCode(lookup, key, code))
            return nullptr;
    }

    MOZ_ASSERT(code);
    MOZ_ASSERT(stubInfo);
    MOZ_ASSERT(stubInfo->stubDataSize() == writer.stubDataSize());

    // Ensure we don't attach duplicate stubs. This can happen if a stub failed
    // for some reason and the IR generator doesn't check for exactly the same
    // conditions. Each switch case |continue|s the loop when the existing
    // stub differs; falling through the switch means a duplicate was found.
    for (ICStubConstIterator iter = stub->beginChainConst(); !iter.atEnd(); iter++) {
        bool updated = false;
        switch (stubKind) {
          case CacheIRStubKind::Regular: {
            if (!iter->isCacheIR_Regular())
                continue;
            auto otherStub = iter->toCacheIR_Regular();
            if (otherStub->stubInfo() != stubInfo)
                continue;
            if (!writer.stubDataEqualsMaybeUpdate(otherStub->stubDataStart(), &updated))
                continue;
            break;
          }
          case CacheIRStubKind::Monitored: {
            if (!iter->isCacheIR_Monitored())
                continue;
            auto otherStub = iter->toCacheIR_Monitored();
            if (otherStub->stubInfo() != stubInfo)
                continue;
            if (!writer.stubDataEqualsMaybeUpdate(otherStub->stubDataStart(), &updated))
                continue;
            break;
          }
          case CacheIRStubKind::Updated: {
            if (!iter->isCacheIR_Updated())
                continue;
            auto otherStub = iter->toCacheIR_Updated();
            if (otherStub->stubInfo() != stubInfo)
                continue;
            if (!writer.stubDataEqualsMaybeUpdate(otherStub->stubDataStart(), &updated))
                continue;
            break;
          }
        }

        // We found a stub that's exactly the same as the stub we're about to
        // attach. Just return nullptr, the caller should do nothing in this
        // case.
        if (updated)
            *attached = true;
        return nullptr;
    }

    // Time to allocate and attach a new stub.

    size_t bytesNeeded = stubInfo->stubDataOffset() + stubInfo->stubDataSize();

    ICStubSpace* stubSpace = ICStubCompiler::StubSpaceForStub(stubInfo->makesGCCalls(),
                                                              outerScript, engine);
    void* newStubMem = stubSpace->alloc(bytesNeeded);
    if (!newStubMem)
        return nullptr;

    switch (stubKind) {
      case CacheIRStubKind::Regular: {
        auto newStub = new(newStubMem) ICCacheIR_Regular(code, stubInfo);
        writer.copyStubData(newStub->stubDataStart());
        stub->addNewStub(newStub);
        *attached = true;
        return newStub;
      }
      case CacheIRStubKind::Monitored: {
        ICStub* monitorStub =
            stub->toMonitoredFallbackStub()->fallbackMonitorStub()->firstMonitorStub();
        auto newStub = new(newStubMem) ICCacheIR_Monitored(code, monitorStub, stubInfo);
        writer.copyStubData(newStub->stubDataStart());
        stub->addNewStub(newStub);
        *attached = true;
        return newStub;
      }
      case CacheIRStubKind::Updated: {
        auto newStub = new(newStubMem) ICCacheIR_Updated(code, stubInfo);
        if (!newStub->initUpdatingChain(cx, stubSpace)) {
            cx->recoverFromOutOfMemory();
            return nullptr;
        }
        writer.copyStubData(newStub->stubDataStart());
        stub->addNewStub(newStub);
        *attached = true;
        return newStub;
      }
    }

    MOZ_CRASH("Invalid kind");
}
2137 :
2138 : uint8_t*
2139 785 : ICCacheIR_Regular::stubDataStart()
2140 : {
2141 785 : return reinterpret_cast<uint8_t*>(this) + stubInfo_->stubDataOffset();
2142 : }
2143 :
2144 : uint8_t*
2145 6926 : ICCacheIR_Monitored::stubDataStart()
2146 : {
2147 6926 : return reinterpret_cast<uint8_t*>(this) + stubInfo_->stubDataOffset();
2148 : }
2149 :
2150 : uint8_t*
2151 1618 : ICCacheIR_Updated::stubDataStart()
2152 : {
2153 1618 : return reinterpret_cast<uint8_t*>(this) + stubInfo_->stubDataOffset();
2154 : }
2155 :
2156 : /* static */ ICCacheIR_Monitored*
2157 0 : ICCacheIR_Monitored::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
2158 : ICCacheIR_Monitored& other)
2159 : {
2160 0 : const CacheIRStubInfo* stubInfo = other.stubInfo();
2161 0 : MOZ_ASSERT(stubInfo->makesGCCalls());
2162 :
2163 0 : size_t bytesNeeded = stubInfo->stubDataOffset() + stubInfo->stubDataSize();
2164 0 : void* newStub = space->alloc(bytesNeeded);
2165 0 : if (!newStub)
2166 0 : return nullptr;
2167 :
2168 0 : ICCacheIR_Monitored* res = new(newStub) ICCacheIR_Monitored(other.jitCode(), firstMonitorStub,
2169 0 : stubInfo);
2170 0 : stubInfo->copyStubData(&other, res);
2171 0 : return res;
2172 : }
2173 :
2174 : /* static */ ICCacheIR_Updated*
2175 0 : ICCacheIR_Updated::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
2176 : ICCacheIR_Updated& other)
2177 : {
2178 0 : const CacheIRStubInfo* stubInfo = other.stubInfo();
2179 0 : MOZ_ASSERT(stubInfo->makesGCCalls());
2180 :
2181 0 : size_t bytesNeeded = stubInfo->stubDataOffset() + stubInfo->stubDataSize();
2182 0 : void* newStub = space->alloc(bytesNeeded);
2183 0 : if (!newStub)
2184 0 : return nullptr;
2185 :
2186 0 : ICCacheIR_Updated* res = new(newStub) ICCacheIR_Updated(other.jitCode(), stubInfo);
2187 0 : res->updateStubGroup() = other.updateStubGroup();
2188 0 : res->updateStubId() = other.updateStubId();
2189 :
2190 0 : stubInfo->copyStubData(&other, res);
2191 0 : return res;
2192 : }
|