Line data Source code
1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : * vim: set ts=8 sts=4 et sw=4 tw=99:
3 : * This Source Code Form is subject to the terms of the Mozilla Public
4 : * License, v. 2.0. If a copy of the MPL was not distributed with this
5 : * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 :
7 : #include "jit/BaselineDebugModeOSR.h"
8 :
9 : #include "mozilla/DebugOnly.h"
10 : #include "mozilla/SizePrintfMacros.h"
11 :
12 : #include "jit/BaselineIC.h"
13 : #include "jit/JitcodeMap.h"
14 : #include "jit/Linker.h"
15 : #include "jit/PerfSpewer.h"
16 :
17 : #include "jit/JitFrames-inl.h"
18 : #include "jit/MacroAssembler-inl.h"
19 : #include "vm/Stack-inl.h"
20 :
21 : using namespace js;
22 : using namespace js::jit;
23 :
24 : using mozilla::DebugOnly;
25 :
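// Records everything needed to recompile one on-stack script when toggling
// debug mode: the script, its current (old) BaselineScript and IC stub so the
// recompile can be rolled back or the stub cloned, the pc offset the frame is
// paused at, and the kind of VM entry it was paused in.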
26 : struct DebugModeOSREntry
27 : {
28 : JSScript* script;
29 : BaselineScript* oldBaselineScript;
30 : ICStub* oldStub;
31 : ICStub* newStub;
32 : BaselineDebugModeOSRInfo* recompInfo;
33 : uint32_t pcOffset;
34 : ICEntry::Kind frameKind;
35 :
36 0 : explicit DebugModeOSREntry(JSScript* script)
37 0 : : script(script),
38 0 : oldBaselineScript(script->baselineScript()),
39 : oldStub(nullptr),
40 : newStub(nullptr),
41 : recompInfo(nullptr),
42 : pcOffset(uint32_t(-1)),
43 0 : frameKind(ICEntry::Kind_Invalid)
44 0 : { }
45 :
46 0 : DebugModeOSREntry(JSScript* script, uint32_t pcOffset)
47 0 : : script(script),
48 0 : oldBaselineScript(script->baselineScript()),
49 : oldStub(nullptr),
50 : newStub(nullptr),
51 : recompInfo(nullptr),
52 : pcOffset(pcOffset),
53 0 : frameKind(ICEntry::Kind_Invalid)
54 0 : { }
55 :
56 0 : DebugModeOSREntry(JSScript* script, const ICEntry& icEntry)
57 0 : : script(script),
58 0 : oldBaselineScript(script->baselineScript()),
59 : oldStub(nullptr),
60 : newStub(nullptr),
61 : recompInfo(nullptr),
62 0 : pcOffset(icEntry.pcOffset()),
63 0 : frameKind(icEntry.kind())
64 : {
65 : #ifdef DEBUG
66 0 : MOZ_ASSERT(pcOffset == icEntry.pcOffset());
67 0 : MOZ_ASSERT(frameKind == icEntry.kind());
68 : #endif
69 0 : }
70 :
71 0 : DebugModeOSREntry(JSScript* script, BaselineDebugModeOSRInfo* info)
72 0 : : script(script),
73 0 : oldBaselineScript(script->baselineScript()),
74 : oldStub(nullptr),
75 : newStub(nullptr),
76 : recompInfo(nullptr),
77 0 : pcOffset(script->pcToOffset(info->pc)),
78 0 : frameKind(info->frameKind)
79 : {
80 : #ifdef DEBUG
81 0 : MOZ_ASSERT(pcOffset == script->pcToOffset(info->pc));
82 0 : MOZ_ASSERT(frameKind == info->frameKind);
83 : #endif
84 0 : }
85 :
86 0 : DebugModeOSREntry(DebugModeOSREntry&& other)
87 0 : : script(other.script),
88 0 : oldBaselineScript(other.oldBaselineScript),
89 0 : oldStub(other.oldStub),
90 0 : newStub(other.newStub),
91 0 : recompInfo(other.recompInfo ? other.takeRecompInfo() : nullptr),
92 0 : pcOffset(other.pcOffset),
93 0 : frameKind(other.frameKind)
94 0 : { }
95 :
96 0 : ~DebugModeOSREntry() {
97 : // Note that this is nulled out when the recompInfo is taken by the
98 : // frame. The frame then has the responsibility of freeing the
99 : // recompInfo.
100 0 : js_delete(recompInfo);
101 0 : }
102 :
103 0 : bool needsRecompileInfo() const {
104 0 : return frameKind == ICEntry::Kind_CallVM ||
105 0 : frameKind == ICEntry::Kind_WarmupCounter ||
106 0 : frameKind == ICEntry::Kind_StackCheck ||
107 0 : frameKind == ICEntry::Kind_EarlyStackCheck ||
108 0 : frameKind == ICEntry::Kind_DebugTrap ||
109 0 : frameKind == ICEntry::Kind_DebugPrologue ||
110 0 : frameKind == ICEntry::Kind_DebugEpilogue;
111 : }
112 :
113 0 : bool recompiled() const {
114 0 : return oldBaselineScript != script->baselineScript();
115 : }
116 :
117 0 : BaselineDebugModeOSRInfo* takeRecompInfo() {
118 0 : MOZ_ASSERT(needsRecompileInfo() && recompInfo);
119 0 : BaselineDebugModeOSRInfo* tmp = recompInfo;
120 0 : recompInfo = nullptr;
121 0 : return tmp;
122 : }
123 :
124 0 : bool allocateRecompileInfo(JSContext* cx) {
125 0 : MOZ_ASSERT(script);
126 0 : MOZ_ASSERT(needsRecompileInfo());
127 :
128 : // If we are returning to a frame which needs a continuation fixer,
129 : // allocate the recompile info up front so that the patching function
130 : // is infallible.
131 0 : jsbytecode* pc = script->offsetToPC(pcOffset);
132 :
133 : // XXX: Work around compiler error disallowing using bitfields
134 : // with the template magic of new_.
135 0 : ICEntry::Kind kind = frameKind;
136 0 : recompInfo = cx->new_<BaselineDebugModeOSRInfo>(pc, kind);
137 0 : return !!recompInfo;
138 : }
139 :
140 0 : ICFallbackStub* fallbackStub() const {
141 0 : MOZ_ASSERT(script);
142 0 : MOZ_ASSERT(oldStub);
143 0 : return script->baselineScript()->icEntryFromPCOffset(pcOffset).fallbackStub();
144 : }
145 : };
146 :
147 : typedef Vector<DebugModeOSREntry> DebugModeOSREntryVector;
148 :
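// Iterates over an entry vector but visits each script at most once, so that
// per-script work (invalidation, destroying the old BaselineScript) is not
// repeated for scripts that appear on the stack more than once.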
149 : class UniqueScriptOSREntryIter
150 : {
151 : const DebugModeOSREntryVector& entries_;
152 : size_t index_;
153 :
154 : public:
155 0 : explicit UniqueScriptOSREntryIter(const DebugModeOSREntryVector& entries)
156 0 : : entries_(entries),
157 0 : index_(0)
158 0 : { }
159 :
160 0 : bool done() {
161 0 : return index_ == entries_.length();
162 : }
163 :
164 0 : const DebugModeOSREntry& entry() {
165 0 : MOZ_ASSERT(!done());
166 0 : return entries_[index_];
167 : }
168 :
169 0 : UniqueScriptOSREntryIter& operator++() {
170 0 : MOZ_ASSERT(!done());
171 0 : while (++index_ < entries_.length()) {
172 0 : bool unique = true;
173 0 : for (size_t i = 0; i < index_; i++) {
174 0 : if (entries_[i].script == entries_[index_].script) {
175 0 : unique = false;
176 0 : break;
177 : }
178 : }
179 0 : if (unique)
180 0 : break;
181 : }
182 0 : return *this;
183 : }
184 : };
185 :
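// Walk a JIT activation and append a DebugModeOSREntry for every baseline
// frame (and every inlined Ion frame) whose script the debugger needs to
// recompile or invalidate, remembering the ICStub of any baseline stub frame
// sitting above a baseline frame so it can be cloned later.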
186 : static bool
187 0 : CollectJitStackScripts(JSContext* cx, const Debugger::ExecutionObservableSet& obs,
188 : const ActivationIterator& activation, DebugModeOSREntryVector& entries)
189 : {
190 0 : ICStub* prevFrameStubPtr = nullptr;
191 0 : bool needsRecompileHandler = false;
192 0 : for (JitFrameIterator iter(activation); !iter.done(); ++iter) {
193 0 : switch (iter.type()) {
194 : case JitFrame_BaselineJS: {
195 0 : JSScript* script = iter.script();
196 :
197 0 : if (!obs.shouldRecompileOrInvalidate(script)) {
198 0 : prevFrameStubPtr = nullptr;
199 0 : break;
200 : }
201 :
202 0 : BaselineFrame* frame = iter.baselineFrame();
203 :
204 0 : if (BaselineDebugModeOSRInfo* info = frame->getDebugModeOSRInfo()) {
205 : // If patching a previously patched yet unpopped frame, we can
206 : // use the BaselineDebugModeOSRInfo on the frame directly to
207 : // patch. Indeed, we cannot use iter.returnAddressToFp(), as
208 : // it points into the debug mode OSR handler and cannot be
209 : // used to look up a corresponding ICEntry.
210 : //
211 : // See cases F and G in PatchBaselineFramesForDebugMode.
212 0 : if (!entries.append(DebugModeOSREntry(script, info)))
213 0 : return false;
214 0 : } else if (frame->isHandlingException()) {
215 : // We are in the middle of handling an exception and the frame
216 : // must have an override pc.
217 0 : uint32_t offset = script->pcToOffset(frame->overridePc());
218 0 : if (!entries.append(DebugModeOSREntry(script, offset)))
219 0 : return false;
220 : } else {
221 : // The frame must be settled on a pc with an ICEntry.
222 0 : uint8_t* retAddr = iter.returnAddressToFp();
223 0 : BaselineICEntry& icEntry = script->baselineScript()->icEntryFromReturnAddress(retAddr);
224 0 : if (!entries.append(DebugModeOSREntry(script, icEntry)))
225 0 : return false;
226 : }
227 :
228 0 : if (entries.back().needsRecompileInfo()) {
229 0 : if (!entries.back().allocateRecompileInfo(cx))
230 0 : return false;
231 :
232 0 : needsRecompileHandler |= true;
233 : }
234 0 : entries.back().oldStub = prevFrameStubPtr;
235 0 : prevFrameStubPtr = nullptr;
236 0 : break;
237 : }
238 :
239 : case JitFrame_BaselineStub:
240 : prevFrameStubPtr =
241 0 : reinterpret_cast<BaselineStubFrameLayout*>(iter.fp())->maybeStubPtr();
242 0 : break;
243 :
244 : case JitFrame_IonJS: {
245 0 : InlineFrameIterator inlineIter(cx, &iter);
246 : while (true) {
247 0 : if (obs.shouldRecompileOrInvalidate(inlineIter.script())) {
248 0 : if (!entries.append(DebugModeOSREntry(inlineIter.script())))
249 0 : return false;
250 : }
251 0 : if (!inlineIter.more())
252 0 : break;
253 0 : ++inlineIter;
254 : }
255 0 : break;
256 : }
257 :
258 : default:;
259 : }
260 : }
261 :
262 : // Initialize the on-stack recompile handler, which may fail, so that
263 : // patching the stack is infallible.
264 0 : if (needsRecompileHandler) {
265 0 : JitRuntime* rt = cx->runtime()->jitRuntime();
266 0 : if (!rt->getBaselineDebugModeOSRHandlerAddress(cx, true))
267 0 : return false;
268 : }
269 :
270 0 : return true;
271 : }
272 :
273 : static bool
274 0 : CollectInterpreterStackScripts(JSContext* cx, const Debugger::ExecutionObservableSet& obs,
275 : const ActivationIterator& activation,
276 : DebugModeOSREntryVector& entries)
277 : {
278 : // Also collect scripts from interpreter frames that have a BaselineScript
279 : // or IonScript. These do not need to be patched, but do need to be invalidated
280 : // and recompiled.
281 0 : InterpreterActivation* act = activation.activation()->asInterpreter();
282 0 : for (InterpreterFrameIterator iter(act); !iter.done(); ++iter) {
283 0 : JSScript* script = iter.frame()->script();
284 0 : if (obs.shouldRecompileOrInvalidate(script)) {
285 0 : if (!entries.append(DebugModeOSREntry(iter.frame()->script())))
286 0 : return false;
287 : }
288 : }
289 0 : return true;
290 : }
291 :
292 : #ifdef JS_JITSPEW
293 : static const char*
294 0 : ICEntryKindToString(ICEntry::Kind kind)
295 : {
296 0 : switch (kind) {
297 : case ICEntry::Kind_Op:
298 0 : return "IC";
299 : case ICEntry::Kind_NonOp:
300 0 : return "non-op IC";
301 : case ICEntry::Kind_CallVM:
302 0 : return "callVM";
303 : case ICEntry::Kind_WarmupCounter:
304 0 : return "warmup counter";
305 : case ICEntry::Kind_StackCheck:
306 0 : return "stack check";
307 : case ICEntry::Kind_EarlyStackCheck:
308 0 : return "early stack check";
309 : case ICEntry::Kind_DebugTrap:
310 0 : return "debug trap";
311 : case ICEntry::Kind_DebugPrologue:
312 0 : return "debug prologue";
313 : case ICEntry::Kind_DebugEpilogue:
314 0 : return "debug epilogue";
315 : default:
316 0 : MOZ_CRASH("bad ICEntry kind");
317 : }
318 : }
319 : #endif // JS_JITSPEW
320 :
321 : static void
322 0 : SpewPatchBaselineFrame(uint8_t* oldReturnAddress, uint8_t* newReturnAddress,
323 : JSScript* script, ICEntry::Kind frameKind, jsbytecode* pc)
324 : {
325 0 : JitSpew(JitSpew_BaselineDebugModeOSR,
326 : "Patch return %p -> %p on BaselineJS frame (%s:%" PRIuSIZE ") from %s at %s",
327 : oldReturnAddress, newReturnAddress, script->filename(), script->lineno(),
328 0 : ICEntryKindToString(frameKind), CodeName[(JSOp)*pc]);
329 0 : }
330 :
331 : static void
332 0 : SpewPatchBaselineFrameFromExceptionHandler(uint8_t* oldReturnAddress, uint8_t* newReturnAddress,
333 : JSScript* script, jsbytecode* pc)
334 : {
335 0 : JitSpew(JitSpew_BaselineDebugModeOSR,
336 : "Patch return %p -> %p on BaselineJS frame (%s:%" PRIuSIZE ") from exception handler at %s",
337 : oldReturnAddress, newReturnAddress, script->filename(), script->lineno(),
338 0 : CodeName[(JSOp)*pc]);
339 0 : }
340 :
341 : static void
342 0 : SpewPatchStubFrame(ICStub* oldStub, ICStub* newStub)
343 : {
344 0 : JitSpew(JitSpew_BaselineDebugModeOSR,
345 : "Patch stub %p -> %p on BaselineStub frame (%s)",
346 0 : oldStub, newStub, newStub ? ICStub::KindString(newStub->kind()) : "exception handler");
347 0 : }
348 :
349 : static void
350 0 : PatchBaselineFramesForDebugMode(JSContext* cx, const CooperatingContext& target,
351 : const Debugger::ExecutionObservableSet& obs,
352 : const ActivationIterator& activation,
353 : DebugModeOSREntryVector& entries, size_t* start)
354 : {
355 : //
356 : // Recompile Patching Overview
357 : //
358 : // When toggling debug mode with live baseline scripts on the stack, we
359 : // could have entered the VM via the following ways from the baseline
360 : // script.
361 : //
362 : // Off to On:
363 : // A. From a "can call" stub.
364 : // B. From a VM call.
365 : // H. From inside HandleExceptionBaseline.
366 : // I. From inside the interrupt handler via the prologue stack check.
367 : // J. From the warmup counter in the prologue.
368 : //
369 : // On to Off:
370 : // - All the ways above.
371 : // C. From the debug trap handler.
372 : // D. From the debug prologue.
373 : // E. From the debug epilogue.
374 : //
375 : // Cycles (On to Off to On)+ or (Off to On to Off)+:
376 : // F. Undo cases B, C, D, E, I or J above on previously patched yet unpopped
377 : // frames.
378 : //
379 : // In general, we patch the return address from the VM call to return to a
380 : // "continuation fixer" to fix up machine state (registers and stack
381 : // state). Specifics on what needs to be done are documented below.
382 : //
383 :
384 0 : CommonFrameLayout* prev = nullptr;
385 0 : size_t entryIndex = *start;
386 :
387 0 : for (JitFrameIterator iter(activation); !iter.done(); ++iter) {
388 0 : switch (iter.type()) {
389 : case JitFrame_BaselineJS: {
390 : // If the script wasn't recompiled or is not observed, there's
391 : // nothing to patch.
392 0 : if (!obs.shouldRecompileOrInvalidate(iter.script()))
393 0 : break;
394 :
395 0 : DebugModeOSREntry& entry = entries[entryIndex];
396 :
397 0 : if (!entry.recompiled()) {
398 0 : entryIndex++;
399 0 : break;
400 : }
401 :
402 0 : JSScript* script = entry.script;
403 0 : uint32_t pcOffset = entry.pcOffset;
404 0 : jsbytecode* pc = script->offsetToPC(pcOffset);
405 :
406 0 : MOZ_ASSERT(script == iter.script());
407 0 : MOZ_ASSERT(pcOffset < script->length());
408 :
409 0 : BaselineScript* bl = script->baselineScript();
410 0 : ICEntry::Kind kind = entry.frameKind;
411 :
412 0 : if (kind == ICEntry::Kind_Op) {
413 : // Case A above.
414 : //
415 : // Patching these cases needs to patch both the stub frame and
416 : // the baseline frame. The stub frame is patched below. For
417 : // the baseline frame here, we resume right after the IC
418 : // returns.
419 : //
420 : // Since we're using the same IC stub code, we can resume
421 : // directly to the IC resume address.
422 0 : uint8_t* retAddr = bl->returnAddressForIC(bl->icEntryFromPCOffset(pcOffset));
423 0 : SpewPatchBaselineFrame(prev->returnAddress(), retAddr, script, kind, pc);
424 0 : DebugModeOSRVolatileJitFrameIterator::forwardLiveIterators(
425 0 : target, prev->returnAddress(), retAddr);
426 0 : prev->setReturnAddress(retAddr);
427 0 : entryIndex++;
428 0 : break;
429 : }
430 :
431 0 : if (kind == ICEntry::Kind_Invalid) {
432 : // Case H above.
433 : //
434 : // We are recompiling on-stack scripts from inside the
435 : // exception handler, by way of an onExceptionUnwind
436 : // invocation, on a pc without an ICEntry. This means the
437 : // frame must have an override pc.
438 : //
439 : // If profiling is off, patch the resume address to nullptr,
440 : // to ensure the old address is not used anywhere.
441 : //
442 : // If profiling is on, JitProfilingFrameIterator requires a
443 : // valid return address.
444 0 : MOZ_ASSERT(iter.baselineFrame()->isHandlingException());
445 0 : MOZ_ASSERT(iter.baselineFrame()->overridePc() == pc);
446 : uint8_t* retAddr;
447 0 : if (cx->runtime()->geckoProfiler().enabled())
448 0 : retAddr = bl->nativeCodeForPC(script, pc);
449 : else
450 0 : retAddr = nullptr;
451 0 : SpewPatchBaselineFrameFromExceptionHandler(prev->returnAddress(), retAddr,
452 0 : script, pc);
453 0 : DebugModeOSRVolatileJitFrameIterator::forwardLiveIterators(
454 0 : target, prev->returnAddress(), retAddr);
455 0 : prev->setReturnAddress(retAddr);
456 0 : entryIndex++;
457 0 : break;
458 : }
459 :
460 : // Case F above.
461 : //
462 : // We undo a previous recompile by handling cases B, C, D, E, I or J
463 : // like normal, except that we retrieve the pc information via
464 : // the previous OSR debug info stashed on the frame.
465 0 : BaselineDebugModeOSRInfo* info = iter.baselineFrame()->getDebugModeOSRInfo();
466 0 : if (info) {
467 0 : MOZ_ASSERT(info->pc == pc);
468 0 : MOZ_ASSERT(info->frameKind == kind);
469 0 : MOZ_ASSERT(kind == ICEntry::Kind_CallVM ||
470 : kind == ICEntry::Kind_WarmupCounter ||
471 : kind == ICEntry::Kind_StackCheck ||
472 : kind == ICEntry::Kind_EarlyStackCheck ||
473 : kind == ICEntry::Kind_DebugTrap ||
474 : kind == ICEntry::Kind_DebugPrologue ||
475 : kind == ICEntry::Kind_DebugEpilogue);
476 :
477 : // We will have allocated a new recompile info, so delete the
478 : // existing one.
479 0 : iter.baselineFrame()->deleteDebugModeOSRInfo();
480 : }
481 :
482 : // The RecompileInfo must already be allocated so that this
483 : // function may be infallible.
484 0 : BaselineDebugModeOSRInfo* recompInfo = entry.takeRecompInfo();
485 :
486 : bool popFrameReg;
487 0 : switch (kind) {
488 : case ICEntry::Kind_CallVM: {
489 : // Case B above.
490 : //
491 : // Patching returns from a VM call. After fixing up the
492 : // continuation for unsynced values (the frame register is
493 : // popped by the callVM trampoline), we resume at the
494 : // return-from-callVM address. The assumption here is that all
495 : // callVMs which can trigger debug mode OSR are the *only*
496 : // callVMs generated for their respective pc locations in the
497 : // baseline JIT code.
498 0 : BaselineICEntry& callVMEntry = bl->callVMEntryFromPCOffset(pcOffset);
499 0 : recompInfo->resumeAddr = bl->returnAddressForIC(callVMEntry);
500 0 : popFrameReg = false;
501 0 : break;
502 : }
503 :
504 : case ICEntry::Kind_WarmupCounter: {
505 : // Case J above.
506 : //
507 : // Patching mechanism is identical to a CallVM. This is
508 : // handled specially only because the warmup counter VM call is
509 : // part of the prologue, and not tied to an opcode.
510 0 : BaselineICEntry& warmupCountEntry = bl->warmupCountICEntry();
511 0 : recompInfo->resumeAddr = bl->returnAddressForIC(warmupCountEntry);
512 0 : popFrameReg = false;
513 0 : break;
514 : }
515 :
516 : case ICEntry::Kind_StackCheck:
517 : case ICEntry::Kind_EarlyStackCheck: {
518 : // Case I above.
519 : //
520 : // Patching mechanism is identical to a CallVM. This is
521 : // handled specially only because the stack check VM call is
522 : // part of the prologue, and not tied to an opcode.
523 0 : bool earlyCheck = kind == ICEntry::Kind_EarlyStackCheck;
524 0 : BaselineICEntry& stackCheckEntry = bl->stackCheckICEntry(earlyCheck);
525 0 : recompInfo->resumeAddr = bl->returnAddressForIC(stackCheckEntry);
526 0 : popFrameReg = false;
527 0 : break;
528 : }
529 :
530 : case ICEntry::Kind_DebugTrap:
531 : // Case C above.
532 : //
533 : // Debug traps are emitted before each op, so we resume at the
534 : // same op. Calling debug trap handlers is done via a toggled
535 : // call to a thunk (DebugTrapHandler) that takes care of tearing
536 : // down its own stub frame so we don't need to worry about
537 : // popping the frame reg.
538 0 : recompInfo->resumeAddr = bl->nativeCodeForPC(script, pc, &recompInfo->slotInfo);
539 0 : popFrameReg = false;
540 0 : break;
541 :
542 : case ICEntry::Kind_DebugPrologue:
543 : // Case D above.
544 : //
545 : // We patch a jump directly to the right place in the prologue
546 : // after popping the frame reg and checking for forced return.
547 0 : recompInfo->resumeAddr = bl->postDebugPrologueAddr();
548 0 : popFrameReg = true;
549 0 : break;
550 :
551 : default:
552 : // Case E above.
553 : //
554 : // We patch a jump directly to the epilogue after popping the
555 : // frame reg and checking for forced return.
556 0 : MOZ_ASSERT(kind == ICEntry::Kind_DebugEpilogue);
557 0 : recompInfo->resumeAddr = bl->epilogueEntryAddr();
558 0 : popFrameReg = true;
559 0 : break;
560 : }
561 :
562 0 : SpewPatchBaselineFrame(prev->returnAddress(), recompInfo->resumeAddr,
563 0 : script, kind, recompInfo->pc);
564 :
565 : // The recompile handler must already be created so that this
566 : // function may be infallible.
567 0 : JitRuntime* rt = cx->runtime()->jitRuntime();
568 0 : void* handlerAddr = rt->getBaselineDebugModeOSRHandlerAddress(cx, popFrameReg);
569 0 : MOZ_ASSERT(handlerAddr);
570 :
571 0 : prev->setReturnAddress(reinterpret_cast<uint8_t*>(handlerAddr));
572 0 : iter.baselineFrame()->setDebugModeOSRInfo(recompInfo);
573 0 : iter.baselineFrame()->setOverridePc(recompInfo->pc);
574 :
575 0 : entryIndex++;
576 0 : break;
577 : }
578 :
579 : case JitFrame_BaselineStub: {
580 0 : JitFrameIterator prev(iter);
581 0 : ++prev;
582 0 : BaselineFrame* prevFrame = prev.baselineFrame();
583 0 : if (!obs.shouldRecompileOrInvalidate(prevFrame->script()))
584 0 : break;
585 :
586 0 : DebugModeOSREntry& entry = entries[entryIndex];
587 :
588 : // If the script wasn't recompiled, there's nothing to patch.
589 0 : if (!entry.recompiled())
590 0 : break;
591 :
592 : BaselineStubFrameLayout* layout =
593 0 : reinterpret_cast<BaselineStubFrameLayout*>(iter.fp());
594 0 : MOZ_ASSERT(layout->maybeStubPtr() == entry.oldStub);
595 :
596 : // Patch baseline stub frames for case A above.
597 : //
598 : // We need to patch the stub frame to point to an ICStub belonging
599 : // to the recompiled baseline script. These stubs are allocated up
600 : // front in CloneOldBaselineStub. They share the same JitCode as
601 : // the old baseline script's stubs, so we don't need to patch the
602 : // exit frame's return address.
603 : //
604 : // Subtlety here: the debug trap handler of case C above pushes a
605 : // stub frame with a null stub pointer. This handler will exist
606 : // across recompiling the script, so we don't patch anything for
607 : // such stub frames. We will return to that handler, which takes
608 : // care of cleaning up the stub frame.
609 : //
610 : // Note that for stub pointers that are already on the C stack
611 : // (i.e. fallback calls), we need to check for recompilation using
612 : // DebugModeOSRVolatileStub.
613 0 : if (layout->maybeStubPtr()) {
614 0 : MOZ_ASSERT(entry.newStub || prevFrame->isHandlingException());
615 0 : SpewPatchStubFrame(entry.oldStub, entry.newStub);
616 0 : layout->setStubPtr(entry.newStub);
617 : }
618 :
619 0 : break;
620 : }
621 :
622 : case JitFrame_IonJS: {
623 : // Nothing to patch.
624 0 : InlineFrameIterator inlineIter(cx, &iter);
625 : while (true) {
626 0 : if (obs.shouldRecompileOrInvalidate(inlineIter.script()))
627 0 : entryIndex++;
628 0 : if (!inlineIter.more())
629 0 : break;
630 0 : ++inlineIter;
631 : }
632 0 : break;
633 : }
634 :
635 : default:;
636 : }
637 :
638 0 : prev = iter.current();
639 : }
640 :
641 0 : *start = entryIndex;
642 0 : }
643 :
644 : static void
645 0 : SkipInterpreterFrameEntries(const Debugger::ExecutionObservableSet& obs,
646 : const ActivationIterator& activation,
647 : DebugModeOSREntryVector& entries, size_t* start)
648 : {
649 0 : size_t entryIndex = *start;
650 :
651 : // Skip interpreter frames, which do not need patching.
652 0 : InterpreterActivation* act = activation.activation()->asInterpreter();
653 0 : for (InterpreterFrameIterator iter(act); !iter.done(); ++iter) {
654 0 : if (obs.shouldRecompileOrInvalidate(iter.frame()->script()))
655 0 : entryIndex++;
656 : }
657 :
658 0 : *start = entryIndex;
659 0 : }
660 :
661 : static bool
662 0 : RecompileBaselineScriptForDebugMode(JSContext* cx, JSScript* script,
663 : Debugger::IsObserving observing)
664 : {
665 0 : BaselineScript* oldBaselineScript = script->baselineScript();
666 :
667 : // If a script is on the stack multiple times, it may have already
668 : // been recompiled.
669 0 : if (oldBaselineScript->hasDebugInstrumentation() == observing)
670 0 : return true;
671 :
672 0 : JitSpew(JitSpew_BaselineDebugModeOSR, "Recompiling (%s:%" PRIuSIZE ") for %s",
673 0 : script->filename(), script->lineno(), observing ? "DEBUGGING" : "NORMAL EXECUTION");
674 :
675 0 : script->setBaselineScript(cx->runtime(), nullptr);
676 :
677 0 : MethodStatus status = BaselineCompile(cx, script, /* forceDebugMode = */ observing);
678 0 : if (status != Method_Compiled) {
679 : // We will only fail to recompile for debug mode due to OOM. Restore
680 : // the old baseline script in case something doesn't properly
681 : // propagate OOM.
682 0 : MOZ_ASSERT(status == Method_Error);
683 0 : script->setBaselineScript(cx->runtime(), oldBaselineScript);
684 0 : return false;
685 : }
686 :
687 : // Don't destroy the old baseline script yet, since if we fail any of the
688 : // recompiles we need to rollback all the old baseline scripts.
689 0 : MOZ_ASSERT(script->baselineScript()->hasDebugInstrumentation() == observing);
690 0 : return true;
691 : }
692 :
693 : #define PATCHABLE_ICSTUB_KIND_LIST(_) \
694 : _(CacheIR_Monitored) \
695 : _(CacheIR_Updated) \
696 : _(Call_Scripted) \
697 : _(Call_AnyScripted) \
698 : _(Call_Native) \
699 : _(Call_ClassHook) \
700 : _(Call_ScriptedApplyArray) \
701 : _(Call_ScriptedApplyArguments) \
702 : _(Call_ScriptedFunCall)
703 :
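// If a baseline stub frame is on the stack for a recompiled script, clone the
// ICStub it points to into the recompiled script's IC chain so the frame can
// be patched to use a stub owned by the new BaselineScript. Fallback stubs
// are reused rather than cloned.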
704 : static bool
705 0 : CloneOldBaselineStub(JSContext* cx, DebugModeOSREntryVector& entries, size_t entryIndex)
706 : {
707 0 : DebugModeOSREntry& entry = entries[entryIndex];
708 0 : if (!entry.oldStub)
709 0 : return true;
710 :
711 0 : ICStub* oldStub = entry.oldStub;
712 0 : MOZ_ASSERT(oldStub->makesGCCalls());
713 :
714 : // If this script was not recompiled (because it already had the correct
715 : // debug instrumentation), don't clone to avoid attaching duplicate stubs.
716 0 : if (!entry.recompiled()) {
717 0 : entry.newStub = nullptr;
718 0 : return true;
719 : }
720 :
721 0 : if (entry.frameKind == ICEntry::Kind_Invalid) {
722 : // The exception handler can modify the frame's override pc while
723 : // unwinding scopes. This is fine, but if we have a stub frame, the
724 : // code below will get confused: the entry's pcOffset doesn't match the
725 : // stub that's still on the stack. To prevent that, we just set the new
726 : // stub to nullptr as we will never return to this stub frame anyway.
727 0 : entry.newStub = nullptr;
728 0 : return true;
729 : }
730 :
731 : // Get the new fallback stub from the recompiled baseline script.
732 0 : ICFallbackStub* fallbackStub = entry.fallbackStub();
733 :
734 : // We don't need to clone fallback stubs, as they are guaranteed to
735 : // exist. Furthermore, their JitCode is cached and should be the same even
736 : // across the recompile.
737 0 : if (oldStub->isFallback()) {
738 0 : MOZ_ASSERT(oldStub->jitCode() == fallbackStub->jitCode());
739 0 : entry.newStub = fallbackStub;
740 0 : return true;
741 : }
742 :
743 : // Check if we have already cloned the stub on a younger frame. Ignore
744 : // frames that entered the exception handler (entries[i].newStub is nullptr
745 : // in that case, see above).
746 0 : for (size_t i = 0; i < entryIndex; i++) {
747 0 : if (oldStub == entries[i].oldStub && entries[i].frameKind != ICEntry::Kind_Invalid) {
748 0 : MOZ_ASSERT(entries[i].newStub);
749 0 : entry.newStub = entries[i].newStub;
750 0 : return true;
751 : }
752 : }
753 :
754 : // Some stubs are monitored; if so, get the first stub in the monitor
755 : // chain from the new fallback stub.
756 : ICStub* firstMonitorStub;
757 0 : if (fallbackStub->isMonitoredFallback()) {
758 0 : ICMonitoredFallbackStub* monitored = fallbackStub->toMonitoredFallbackStub();
759 0 : firstMonitorStub = monitored->fallbackMonitorStub()->firstMonitorStub();
760 : } else {
761 0 : firstMonitorStub = nullptr;
762 : }
763 0 : ICStubSpace* stubSpace = ICStubCompiler::StubSpaceForStub(oldStub->makesGCCalls(), entry.script,
764 0 : ICStubCompiler::Engine::Baseline);
765 :
766 : // Clone the existing stub into the recompiled IC.
767 : //
768 : // Note that since JitCode is a GC thing, cloning an ICStub with the same
769 : // JitCode ensures it won't be collected.
770 0 : switch (oldStub->kind()) {
771 : #define CASE_KIND(kindName) \
772 : case ICStub::kindName: \
773 : entry.newStub = IC##kindName::Clone(cx, stubSpace, firstMonitorStub, \
774 : *oldStub->to##kindName()); \
775 : break;
776 0 : PATCHABLE_ICSTUB_KIND_LIST(CASE_KIND)
777 : #undef CASE_KIND
778 :
779 : default:
780 0 : MOZ_CRASH("Bad stub kind");
781 : }
782 :
783 0 : if (!entry.newStub)
784 0 : return false;
785 :
786 0 : fallbackStub->addNewStub(entry.newStub);
787 0 : return true;
788 : }
789 :
790 : static bool
791 0 : InvalidateScriptsInZone(JSContext* cx, Zone* zone, const Vector<DebugModeOSREntry>& entries)
792 : {
793 0 : RecompileInfoVector invalid;
794 0 : for (UniqueScriptOSREntryIter iter(entries); !iter.done(); ++iter) {
795 0 : JSScript* script = iter.entry().script;
796 0 : if (script->compartment()->zone() != zone)
797 0 : continue;
798 :
799 0 : if (script->hasIonScript()) {
800 0 : if (!invalid.append(script->ionScript()->recompileInfo())) {
801 0 : ReportOutOfMemory(cx);
802 0 : return false;
803 : }
804 : }
805 :
806 : // Cancel off-thread Ion compile for anything that has a
807 : // BaselineScript. If we relied on the call to Invalidate below to
808 : // cancel off-thread Ion compiles, only those with existing IonScripts
809 : // would be cancelled.
810 0 : if (script->hasBaselineScript())
811 0 : CancelOffThreadIonCompile(script);
812 : }
813 :
814 : // No need to cancel off-thread Ion compiles again, we already did it
815 : // above.
816 0 : Invalidate(zone->types, cx->runtime()->defaultFreeOp(), invalid,
817 0 : /* resetUses = */ true, /* cancelOffThread = */ false);
818 0 : return true;
819 : }
820 :
821 : static void
822 0 : UndoRecompileBaselineScriptsForDebugMode(JSContext* cx,
823 : const DebugModeOSREntryVector& entries)
824 : {
825 : // In case of failure, roll back the entire set of active scripts so that
826 : // we don't have to patch return addresses on the stack.
827 0 : for (UniqueScriptOSREntryIter iter(entries); !iter.done(); ++iter) {
828 0 : const DebugModeOSREntry& entry = iter.entry();
829 0 : JSScript* script = entry.script;
830 0 : BaselineScript* baselineScript = script->baselineScript();
831 0 : if (entry.recompiled()) {
832 0 : script->setBaselineScript(cx->runtime(), entry.oldBaselineScript);
833 0 : BaselineScript::Destroy(cx->runtime()->defaultFreeOp(), baselineScript);
834 : }
835 : }
836 0 : }
837 :
838 : bool
839 0 : jit::RecompileOnStackBaselineScriptsForDebugMode(JSContext* cx,
840 : const Debugger::ExecutionObservableSet& obs,
841 : Debugger::IsObserving observing)
842 : {
843 : // First recompile the active scripts on the stack and patch the live
844 : // frames.
845 0 : Vector<DebugModeOSREntry> entries(cx);
846 :
847 0 : for (const CooperatingContext& target : cx->runtime()->cooperatingContexts()) {
848 0 : for (ActivationIterator iter(cx, target); !iter.done(); ++iter) {
849 0 : if (iter->isJit()) {
850 0 : if (!CollectJitStackScripts(cx, obs, iter, entries))
851 0 : return false;
852 0 : } else if (iter->isInterpreter()) {
853 0 : if (!CollectInterpreterStackScripts(cx, obs, iter, entries))
854 0 : return false;
855 : }
856 : }
857 : }
858 :
859 0 : if (entries.empty())
860 0 : return true;
861 :
862 : // When the profiler is enabled, we need to have suppressed sampling,
863 : // since the baseline jit scripts are in a state of flux.
864 0 : MOZ_ASSERT(!cx->isProfilerSamplingEnabled());
865 :
866 : // Invalidate all scripts we are recompiling.
867 0 : if (Zone* zone = obs.singleZone()) {
868 0 : if (!InvalidateScriptsInZone(cx, zone, entries))
869 0 : return false;
870 : } else {
871 : typedef Debugger::ExecutionObservableSet::ZoneRange ZoneRange;
872 0 : for (ZoneRange r = obs.zones()->all(); !r.empty(); r.popFront()) {
873 0 : if (!InvalidateScriptsInZone(cx, r.front(), entries))
874 0 : return false;
875 : }
876 : }
877 :
878 : // Try to recompile all the scripts. If we encounter an error, we need to
879 : // roll back as if none of the compilations happened, so that we don't
880 : // crash.
881 0 : for (size_t i = 0; i < entries.length(); i++) {
882 0 : JSScript* script = entries[i].script;
883 0 : AutoCompartment ac(cx, script);
884 0 : if (!RecompileBaselineScriptForDebugMode(cx, script, observing) ||
885 0 : !CloneOldBaselineStub(cx, entries, i))
886 : {
887 0 : UndoRecompileBaselineScriptsForDebugMode(cx, entries);
888 0 : return false;
889 : }
890 : }
891 :
892 : // If all recompiles succeeded, destroy the old baseline scripts and patch
893 : // the live frames.
894 : //
895 : // After this point the function must be infallible.
896 :
897 0 : for (UniqueScriptOSREntryIter iter(entries); !iter.done(); ++iter) {
898 0 : const DebugModeOSREntry& entry = iter.entry();
899 0 : if (entry.recompiled())
900 0 : BaselineScript::Destroy(cx->runtime()->defaultFreeOp(), entry.oldBaselineScript);
901 : }
902 :
903 0 : size_t processed = 0;
904 0 : for (const CooperatingContext& target : cx->runtime()->cooperatingContexts()) {
905 0 : for (ActivationIterator iter(cx, target); !iter.done(); ++iter) {
906 0 : if (iter->isJit())
907 0 : PatchBaselineFramesForDebugMode(cx, target, obs, iter, entries, &processed);
908 0 : else if (iter->isInterpreter())
909 0 : SkipInterpreterFrameEntries(obs, iter, entries, &processed);
910 : }
911 : }
912 0 : MOZ_ASSERT(processed == entries.length());
913 :
914 0 : return true;
915 : }
916 :
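// Record a stack value into the slot (R0, R1, or ignored) that the resume
// point expects, and count it in stackAdjust so the OSR handler knows how
// many now-unneeded stack slots to pop before resuming.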
917 : void
918 0 : BaselineDebugModeOSRInfo::popValueInto(PCMappingSlotInfo::SlotLocation loc, Value* vp)
919 : {
920 0 : switch (loc) {
921 : case PCMappingSlotInfo::SlotInR0:
922 0 : valueR0 = vp[stackAdjust];
923 0 : break;
924 : case PCMappingSlotInfo::SlotInR1:
925 0 : valueR1 = vp[stackAdjust];
926 0 : break;
927 : case PCMappingSlotInfo::SlotIgnore:
928 0 : break;
929 : default:
930 0 : MOZ_CRASH("Bad slot location");
931 : }
932 :
933 0 : stackAdjust++;
934 0 : }
935 :
936 : static inline bool
937 0 : HasForcedReturn(BaselineDebugModeOSRInfo* info, bool rv)
938 : {
939 0 : ICEntry::Kind kind = info->frameKind;
940 :
941 : // The debug epilogue always checks its resumption value, so we don't need
942 : // to check rv.
943 0 : if (kind == ICEntry::Kind_DebugEpilogue)
944 0 : return true;
945 :
946 : // |rv| is the value in ReturnReg. If true, in the case of the prologue,
947 : // it means a forced return.
948 0 : if (kind == ICEntry::Kind_DebugPrologue)
949 0 : return rv;
950 :
951 : // N.B. The debug trap handler handles its own forced return, so no
952 : // need to deal with it here.
953 0 : return false;
954 : }
955 :
956 : static inline bool
957 0 : IsReturningFromCallVM(BaselineDebugModeOSRInfo* info)
958 : {
959 : // Keep this in sync with EmitBranchIsReturningFromCallVM.
960 : //
961 : // The stack check entries are returns from a callVM, but have a special
962 : // kind because they do not exist in a 1-1 relationship with a pc offset.
963 0 : return info->frameKind == ICEntry::Kind_CallVM ||
964 0 : info->frameKind == ICEntry::Kind_WarmupCounter ||
965 0 : info->frameKind == ICEntry::Kind_StackCheck ||
966 0 : info->frameKind == ICEntry::Kind_EarlyStackCheck;
967 : }
968 :
969 : static void
970 0 : EmitBranchICEntryKind(MacroAssembler& masm, Register entry, ICEntry::Kind kind, Label* label)
971 : {
972 0 : masm.branch32(MacroAssembler::Equal,
973 0 : Address(entry, offsetof(BaselineDebugModeOSRInfo, frameKind)),
974 0 : Imm32(kind), label);
975 0 : }
976 :
977 : static void
978 0 : EmitBranchIsReturningFromCallVM(MacroAssembler& masm, Register entry, Label* label)
979 : {
980 : // Keep this in sync with IsReturningFromCallVM.
981 0 : EmitBranchICEntryKind(masm, entry, ICEntry::Kind_CallVM, label);
982 0 : EmitBranchICEntryKind(masm, entry, ICEntry::Kind_WarmupCounter, label);
983 0 : EmitBranchICEntryKind(masm, entry, ICEntry::Kind_StackCheck, label);
984 0 : EmitBranchICEntryKind(masm, entry, ICEntry::Kind_EarlyStackCheck, label);
985 0 : }
986 :
987 : static void
988 0 : SyncBaselineDebugModeOSRInfo(BaselineFrame* frame, Value* vp, bool rv)
989 : {
990 0 : BaselineDebugModeOSRInfo* info = frame->debugModeOSRInfo();
991 0 : MOZ_ASSERT(info);
992 0 : MOZ_ASSERT(frame->script()->baselineScript()->containsCodeAddress(info->resumeAddr));
993 :
994 0 : if (HasForcedReturn(info, rv)) {
995 : // Load the frame's rval and overwrite the resume address to go to the
996 : // epilogue.
997 0 : MOZ_ASSERT(R0 == JSReturnOperand);
998 0 : info->valueR0 = frame->returnValue();
999 0 : info->resumeAddr = frame->script()->baselineScript()->epilogueEntryAddr();
1000 0 : return;
1001 : }
1002 :
1003 : // Read stack values and make sure R0 and R1 have the right values if we
1004 : // aren't returning from a callVM.
1005 : //
1006 : // In the case of returning from a callVM, we don't need to restore R0 and
1007 : // R1 ourself since we'll return into code that does it if needed.
1008 0 : if (!IsReturningFromCallVM(info)) {
1009 0 : unsigned numUnsynced = info->slotInfo.numUnsynced();
1010 0 : MOZ_ASSERT(numUnsynced <= 2);
1011 0 : if (numUnsynced > 0)
1012 0 : info->popValueInto(info->slotInfo.topSlotLocation(), vp);
1013 0 : if (numUnsynced > 1)
1014 0 : info->popValueInto(info->slotInfo.nextSlotLocation(), vp);
1015 : }
1016 :
1017 : // Scale stackAdjust.
1018 0 : info->stackAdjust *= sizeof(Value);
1019 : }
1020 :
1021 : static void
1022 0 : FinishBaselineDebugModeOSR(BaselineFrame* frame)
1023 : {
1024 0 : frame->deleteDebugModeOSRInfo();
1025 :
1026 : // We will return to JIT code now so we have to clear the override pc.
1027 0 : frame->clearOverridePc();
1028 0 : }
1029 :
1030 : void
1031 1993 : BaselineFrame::deleteDebugModeOSRInfo()
1032 : {
1033 1993 : js_delete(getDebugModeOSRInfo());
1034 1993 : flags_ &= ~HAS_DEBUG_MODE_OSR_INFO;
1035 1993 : }
1036 :
1037 : JitCode*
1038 0 : JitRuntime::getBaselineDebugModeOSRHandler(JSContext* cx)
1039 : {
1040 0 : if (!baselineDebugModeOSRHandler_) {
1041 0 : AutoLockForExclusiveAccess lock(cx);
1042 0 : AutoAtomsCompartment ac(cx, lock);
1043 : uint32_t offset;
1044 0 : if (JitCode* code = generateBaselineDebugModeOSRHandler(cx, &offset)) {
1045 0 : baselineDebugModeOSRHandler_ = code;
1046 0 : baselineDebugModeOSRHandlerNoFrameRegPopAddr_ = code->raw() + offset;
1047 : }
1048 : }
1049 :
1050 0 : return baselineDebugModeOSRHandler_;
1051 : }
1052 :
1053 : void*
1054 0 : JitRuntime::getBaselineDebugModeOSRHandlerAddress(JSContext* cx, bool popFrameReg)
1055 : {
1056 0 : if (!getBaselineDebugModeOSRHandler(cx))
1057 0 : return nullptr;
1058 : return popFrameReg
1059 0 : ? baselineDebugModeOSRHandler_->raw()
1060 0 : : baselineDebugModeOSRHandlerNoFrameRegPopAddr_.ref();
1061 : }
1062 :
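// Emit the tail of the debug mode OSR handler: stash the resume address and
// the values to restore, free the BaselineDebugModeOSRInfo by calling
// FinishBaselineDebugModeOSR, then restore R0/R1 (or ReturnReg when returning
// from a callVM) and jump to the resume address.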
1063 : static void
1064 0 : EmitBaselineDebugModeOSRHandlerTail(MacroAssembler& masm, Register temp, bool returnFromCallVM)
1065 : {
1066 : // Save real return address on the stack temporarily.
1067 : //
1068 : // If we're returning from a callVM, we don't need to worry about R0 and
1069 : // R1 but do need to propagate the original ReturnReg value. Otherwise we
1070 : // need to worry about R0 and R1 but can clobber ReturnReg. Indeed, on
1071 : // x86, R1 contains ReturnReg.
1072 0 : if (returnFromCallVM) {
1073 0 : masm.push(ReturnReg);
1074 : } else {
1075 0 : masm.pushValue(Address(temp, offsetof(BaselineDebugModeOSRInfo, valueR0)));
1076 0 : masm.pushValue(Address(temp, offsetof(BaselineDebugModeOSRInfo, valueR1)));
1077 : }
1078 0 : masm.push(BaselineFrameReg);
1079 0 : masm.push(Address(temp, offsetof(BaselineDebugModeOSRInfo, resumeAddr)));
1080 :
1081 : // Call a stub to free the allocated info.
1082 0 : masm.setupUnalignedABICall(temp);
1083 0 : masm.loadBaselineFramePtr(BaselineFrameReg, temp);
1084 0 : masm.passABIArg(temp);
1085 0 : masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, FinishBaselineDebugModeOSR));
1086 :
1087 : // Restore saved values.
1088 0 : AllocatableGeneralRegisterSet jumpRegs(GeneralRegisterSet::All());
1089 0 : if (returnFromCallVM) {
1090 0 : jumpRegs.take(ReturnReg);
1091 : } else {
1092 0 : jumpRegs.take(R0);
1093 0 : jumpRegs.take(R1);
1094 : }
1095 0 : jumpRegs.take(BaselineFrameReg);
1096 0 : Register target = jumpRegs.takeAny();
1097 :
1098 0 : masm.pop(target);
1099 0 : masm.pop(BaselineFrameReg);
1100 0 : if (returnFromCallVM) {
1101 0 : masm.pop(ReturnReg);
1102 : } else {
1103 0 : masm.popValue(R1);
1104 0 : masm.popValue(R0);
1105 : }
1106 :
1107 0 : masm.jump(target);
1108 0 : }
1109 :
1110 : JitCode*
1111 0 : JitRuntime::generateBaselineDebugModeOSRHandler(JSContext* cx, uint32_t* noFrameRegPopOffsetOut)
1112 : {
1113 0 : MacroAssembler masm(cx);
1114 :
1115 0 : AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
1116 0 : regs.take(BaselineFrameReg);
1117 0 : regs.take(ReturnReg);
1118 0 : Register temp = regs.takeAny();
1119 0 : Register syncedStackStart = regs.takeAny();
1120 :
1121 : // Pop the frame reg.
1122 0 : masm.pop(BaselineFrameReg);
1123 :
1124 : // Not all patched baseline frames are returning from a situation where
1125 : // the frame reg is already fixed up.
1126 0 : CodeOffset noFrameRegPopOffset(masm.currentOffset());
1127 :
1128 : // Record the stack pointer for syncing.
1129 0 : masm.moveStackPtrTo(syncedStackStart);
1130 0 : masm.push(ReturnReg);
1131 0 : masm.push(BaselineFrameReg);
1132 :
1133 : // Call a stub to fully initialize the info.
1134 0 : masm.setupUnalignedABICall(temp);
1135 0 : masm.loadBaselineFramePtr(BaselineFrameReg, temp);
1136 0 : masm.passABIArg(temp);
1137 0 : masm.passABIArg(syncedStackStart);
1138 0 : masm.passABIArg(ReturnReg);
1139 0 : masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, SyncBaselineDebugModeOSRInfo));
1140 :
1141 : // Discard stack values depending on how many were unsynced, as we always
1142 : // have a fully synced stack in the recompile handler. We arrive here via
1143 : // a callVM, and prepareCallVM in BaselineCompiler always fully syncs the
1144 : // stack.
1145 0 : masm.pop(BaselineFrameReg);
1146 0 : masm.pop(ReturnReg);
1147 0 : masm.loadPtr(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfScratchValue()), temp);
1148 0 : masm.addToStackPtr(Address(temp, offsetof(BaselineDebugModeOSRInfo, stackAdjust)));
1149 :
1150 : // Emit two tails for the case of returning from a callVM and all other
1151 : // cases, as the state we need to restore differs depending on the case.
1152 0 : Label returnFromCallVM, end;
1153 0 : EmitBranchIsReturningFromCallVM(masm, temp, &returnFromCallVM);
1154 :
1155 0 : EmitBaselineDebugModeOSRHandlerTail(masm, temp, /* returnFromCallVM = */ false);
1156 0 : masm.jump(&end);
1157 0 : masm.bind(&returnFromCallVM);
1158 0 : EmitBaselineDebugModeOSRHandlerTail(masm, temp, /* returnFromCallVM = */ true);
1159 0 : masm.bind(&end);
1160 :
1161 0 : Linker linker(masm);
1162 0 : AutoFlushICache afc("BaselineDebugModeOSRHandler");
1163 0 : JitCode* code = linker.newCode<NoGC>(cx, OTHER_CODE);
1164 0 : if (!code)
1165 0 : return nullptr;
1166 :
1167 0 : *noFrameRegPopOffsetOut = noFrameRegPopOffset.offset();
1168 :
1169 : #ifdef JS_ION_PERF
1170 : writePerfSpewerJitCodeProfile(code, "BaselineDebugModeOSRHandler");
1171 : #endif
1172 :
1173 0 : return code;
1174 : }
1175 :
1176 : /* static */ void
1177 0 : DebugModeOSRVolatileJitFrameIterator::forwardLiveIterators(const CooperatingContext& cx,
1178 : uint8_t* oldAddr, uint8_t* newAddr)
1179 : {
1180 : DebugModeOSRVolatileJitFrameIterator* iter;
1181 0 : for (iter = cx.context()->liveVolatileJitFrameIterators_; iter; iter = iter->prev) {
1182 0 : if (iter->returnAddressToFp_ == oldAddr)
1183 0 : iter->returnAddressToFp_ = newAddr;
1184 : }
1185 0 : }