Line data Source code
1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : * vim: set ts=8 sts=4 et sw=4 tw=99:
3 : * This Source Code Form is subject to the terms of the Mozilla Public
4 : * License, v. 2.0. If a copy of the MPL was not distributed with this
5 : * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 :
7 : #include "jit/x86-shared/MoveEmitter-x86-shared.h"
8 :
9 : #include "jit/MacroAssembler-inl.h"
10 :
11 : using namespace js;
12 : using namespace js::jit;
13 :
14 : using mozilla::Maybe;
15 :
16 13452 : MoveEmitterX86::MoveEmitterX86(MacroAssembler& masm)
17 : : inCycle_(false),
18 : masm(masm),
19 13452 : pushedAtCycle_(-1)
20 : {
21 13452 : pushedAtStart_ = masm.framePushed();
22 13452 : }
23 :
24 : // Examine the cycle in moves starting at position i. Determine whether it's a
25 : // simple cycle of register-to-register moves all in a single register class, and
26 : // return the number of swaps implementing it, or size_t(-1) if it can't be optimized.
27 : size_t
28 5 : MoveEmitterX86::characterizeCycle(const MoveResolver& moves, size_t i,
29 : bool* allGeneralRegs, bool* allFloatRegs)
30 : {
31 5 : size_t swapCount = 0;
32 :
33 10 : for (size_t j = i; ; j++) {
34 10 : const MoveOp& move = moves.getMove(j);
35 :
36 : // If it isn't a cycle of registers of the same kind, we won't be able
37 : // to optimize it.
38 10 : if (!move.to().isGeneralReg())
39 0 : *allGeneralRegs = false;
40 10 : if (!move.to().isFloatReg())
41 10 : *allFloatRegs = false;
42 10 : if (!*allGeneralRegs && !*allFloatRegs)
43 0 : return -1;
44 :
45 : // Stop iterating when we see the last one.
46 10 : if (j != i && move.isCycleEnd())
47 5 : break;
48 :
49 : // Check that this move is actually part of the cycle. This is
50 : // over-conservative when there are multiple reads from the same source,
51 : // but that's expected to be rare.
52 5 : if (move.from() != moves.getMove(j + 1).to()) {
53 0 : *allGeneralRegs = false;
54 0 : *allFloatRegs = false;
55 0 : return -1;
56 : }
57 :
58 5 : swapCount++;
59 5 : }
60 :
61 : // Check that the last move cycles back to the first move.
62 5 : const MoveOp& move = moves.getMove(i + swapCount);
63 5 : if (move.from() != moves.getMove(i).to()) {
64 0 : *allGeneralRegs = false;
65 0 : *allFloatRegs = false;
66 0 : return -1;
67 : }
68 :
69 5 : return swapCount;
70 : }
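As a standalone illustration (not SpiderMonkey code; register names are hypothetical): given the chaining checks above, a three-register cycle the resolver orders as A -> B, C -> A, B -> C passes characterizeCycle with swapCount == 2, and the xchg loop in maybeEmitOptimizedCycle below resolves it with two adjacent swaps of the destinations. A minimal sketch of that resolution:

#include <cassert>
#include <utility>

int main() {
    // Cycle as ordered by the resolver: A -> B, C -> A, B -> C (swapCount == 2).
    int A = 1, B = 2, C = 3;
    std::swap(B, A);   // xchg of the first two destinations: B now holds the old A
    std::swap(A, C);   // xchg of the next two destinations: rotation complete
    assert(B == 1 && A == 3 && C == 2);   // each destination holds its source's old value
    return 0;
}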
71 :
72 : // If we can emit optimized code for the cycle in moves starting at position i,
73 : // do so, and return true.
74 : bool
75 5 : MoveEmitterX86::maybeEmitOptimizedCycle(const MoveResolver& moves, size_t i,
76 : bool allGeneralRegs, bool allFloatRegs, size_t swapCount)
77 : {
78 5 : if (allGeneralRegs && swapCount <= 2) {
79 : // Use x86's swap-integer-registers instruction if we only have a few
80 : // swaps. (x86 also has a swap between registers and memory but it's
81 : // slow.)
82 10 : for (size_t k = 0; k < swapCount; k++)
83 5 : masm.xchg(moves.getMove(i + k).to().reg(), moves.getMove(i + k + 1).to().reg());
84 5 : return true;
85 : }
86 :
87 0 : if (allFloatRegs && swapCount == 1) {
88 : // There's no xchg for xmm registers, but if we only need a single swap,
89 : // it's cheap to do an XOR swap.
90 0 : FloatRegister a = moves.getMove(i).to().floatReg();
91 0 : FloatRegister b = moves.getMove(i + 1).to().floatReg();
92 0 : masm.vxorpd(a, b, b);
93 0 : masm.vxorpd(b, a, a);
94 0 : masm.vxorpd(a, b, b);
95 0 : return true;
96 : }
97 :
98 0 : return false;
99 : }
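For the single-swap float case above, the three vxorpd instructions perform the classic XOR swap, so no scratch xmm register is needed. A minimal integer sketch of the same identity (standalone, not jit code; the per-instruction mapping assumes the assembler's destination-last operand order):

#include <cassert>
#include <cstdint>

int main() {
    uint64_t a = 0x1111, b = 0x2222;
    b ^= a;   // b = a ^ b                     (first vxorpd)
    a ^= b;   // a = a ^ (a ^ b) == old b      (second vxorpd)
    b ^= a;   // b = (a ^ b) ^ old b == old a  (third vxorpd)
    assert(a == 0x2222 && b == 0x1111);
    return 0;
}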
100 :
101 : void
102 13452 : MoveEmitterX86::emit(const MoveResolver& moves)
103 : {
104 : #if defined(JS_CODEGEN_X86) && defined(DEBUG)
105 : // Clobber any scratch register we have, to make regalloc bugs more visible.
106 : if (scratchRegister_.isSome())
107 : masm.mov(ImmWord(0xdeadbeef), scratchRegister_.value());
108 : #endif
109 :
110 32581 : for (size_t i = 0; i < moves.numMoves(); i++) {
111 : #if defined(JS_CODEGEN_X86) && defined(DEBUG)
112 : if (!scratchRegister_.isSome()) {
113 : Maybe<Register> reg = findScratchRegister(moves, i);
114 : if (reg.isSome())
115 : masm.mov(ImmWord(0xdeadbeef), reg.value());
116 : }
117 : #endif
118 :
119 19129 : const MoveOp& move = moves.getMove(i);
120 19129 : const MoveOperand& from = move.from();
121 19129 : const MoveOperand& to = move.to();
122 :
123 19129 : if (move.isCycleEnd()) {
124 0 : MOZ_ASSERT(inCycle_);
125 0 : completeCycle(to, move.type());
126 0 : inCycle_ = false;
127 0 : continue;
128 : }
129 :
130 19129 : if (move.isCycleBegin()) {
131 5 : MOZ_ASSERT(!inCycle_);
132 :
133 : // Characterize the cycle.
134 5 : bool allGeneralRegs = true, allFloatRegs = true;
135 5 : size_t swapCount = characterizeCycle(moves, i, &allGeneralRegs, &allFloatRegs);
136 :
137 : // Attempt to optimize it to avoid using the stack.
138 5 : if (maybeEmitOptimizedCycle(moves, i, allGeneralRegs, allFloatRegs, swapCount)) {
139 5 : i += swapCount;
140 5 : continue;
141 : }
142 :
143 : // Otherwise use the stack.
144 0 : breakCycle(to, move.endCycleType());
145 0 : inCycle_ = true;
146 : }
147 :
148 : // A normal move which is not part of a cycle.
149 19124 : switch (move.type()) {
150 : case MoveOp::FLOAT32:
151 0 : emitFloat32Move(from, to);
152 0 : break;
153 : case MoveOp::DOUBLE:
154 4 : emitDoubleMove(from, to);
155 4 : break;
156 : case MoveOp::INT32:
157 165 : emitInt32Move(from, to, moves, i);
158 165 : break;
159 : case MoveOp::GENERAL:
160 18955 : emitGeneralMove(from, to, moves, i);
161 18955 : break;
162 : case MoveOp::SIMD128INT:
163 0 : emitSimd128IntMove(from, to);
164 0 : break;
165 : case MoveOp::SIMD128FLOAT:
166 0 : emitSimd128FloatMove(from, to);
167 0 : break;
168 : default:
169 0 : MOZ_CRASH("Unexpected move type");
170 : }
171 : }
172 13452 : }
173 :
174 26904 : MoveEmitterX86::~MoveEmitterX86()
175 : {
176 13452 : assertDone();
177 13452 : }
178 :
179 : Address
180 0 : MoveEmitterX86::cycleSlot()
181 : {
182 0 : if (pushedAtCycle_ == -1) {
183 : // Reserve stack for cycle resolution
184 0 : masm.reserveStack(Simd128DataSize);
185 0 : pushedAtCycle_ = masm.framePushed();
186 : }
187 :
188 0 : return Address(StackPointer, masm.framePushed() - pushedAtCycle_);
189 : }
190 :
191 : Address
192 2202 : MoveEmitterX86::toAddress(const MoveOperand& operand) const
193 : {
194 2202 : if (operand.base() != StackPointer)
195 1644 : return Address(operand.base(), operand.disp());
196 :
197 558 : MOZ_ASSERT(operand.disp() >= 0);
198 :
199 : // Otherwise, the stack offset may need to be adjusted.
200 558 : return Address(StackPointer, operand.disp() + (masm.framePushed() - pushedAtStart_));
201 : }
202 :
203 : // Warning: do not use the resulting operand with pop instructions, since they
204 : // compute the effective destination address after altering the stack pointer.
205 : // Use toPopOperand if an Operand is needed for a pop.
206 : Operand
207 18046 : MoveEmitterX86::toOperand(const MoveOperand& operand) const
208 : {
209 18046 : if (operand.isMemoryOrEffectiveAddress())
210 1118 : return Operand(toAddress(operand));
211 16928 : if (operand.isGeneralReg())
212 16928 : return Operand(operand.reg());
213 :
214 0 : MOZ_ASSERT(operand.isFloatReg());
215 0 : return Operand(operand.floatReg());
216 : }
217 :
218 : // This is the same as toOperand except that it computes an Operand suitable for
219 : // use in a pop.
220 : Operand
221 0 : MoveEmitterX86::toPopOperand(const MoveOperand& operand) const
222 : {
223 0 : if (operand.isMemory()) {
224 0 : if (operand.base() != StackPointer)
225 0 : return Operand(operand.base(), operand.disp());
226 :
227 0 : MOZ_ASSERT(operand.disp() >= 0);
228 :
229 : // Otherwise, the stack offset may need to be adjusted.
230 :              // Note the adjustment by one stack slot here, to account for the fact
231 :              // that pop computes its effective address after incrementing the stack pointer.
232 0 : return Operand(StackPointer,
233 0 : operand.disp() + (masm.framePushed() - sizeof(void*) - pushedAtStart_));
234 : }
235 0 : if (operand.isGeneralReg())
236 0 : return Operand(operand.reg());
237 :
238 0 : MOZ_ASSERT(operand.isFloatReg());
239 0 : return Operand(operand.floatReg());
240 : }
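The displacement arithmetic in toAddress and toPopOperand can be checked with concrete numbers. A standalone sketch with hypothetical values, assuming a 64-bit target where sizeof(void*) == 8:

#include <cassert>
#include <cstdint>

int main() {
    int32_t disp = 16;             // displacement the resolver recorded against the stack pointer
    uint32_t pushedAtStart = 64;   // framePushed() when the emitter was constructed
    uint32_t framePushed = 72;     // framePushed() now, after one extra 8-byte push

    // toAddress(): the slot that used to be [rsp + 16] is now [rsp + 24].
    int32_t adjusted = disp + int32_t(framePushed - pushedAtStart);
    assert(adjusted == 24);

    // toPopOperand(): pop bumps rsp by sizeof(void*) before computing its effective
    // address, so one word is subtracted back out; the resulting [rsp + 16] names the
    // same slot once the pop's implicit +8 is applied.
    int32_t popAdjusted = disp + int32_t(framePushed - sizeof(void*) - pushedAtStart);
    assert(popAdjusted == 16);
    return 0;
}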
241 :
242 : void
243 0 : MoveEmitterX86::breakCycle(const MoveOperand& to, MoveOp::Type type)
244 : {
245 :              // There is a cycle of the form:
246 : // (A -> B)
247 : // (B -> A)
248 : //
249 : // This case handles (A -> B), which we reach first. We save B, then allow
250 : // the original move to continue.
251 0 : switch (type) {
252 : case MoveOp::SIMD128INT:
253 0 : if (to.isMemory()) {
254 0 : ScratchSimd128Scope scratch(masm);
255 0 : masm.loadAlignedSimd128Int(toAddress(to), scratch);
256 0 : masm.storeAlignedSimd128Int(scratch, cycleSlot());
257 : } else {
258 0 : masm.storeAlignedSimd128Int(to.floatReg(), cycleSlot());
259 : }
260 0 : break;
261 : case MoveOp::SIMD128FLOAT:
262 0 : if (to.isMemory()) {
263 0 : ScratchSimd128Scope scratch(masm);
264 0 : masm.loadAlignedSimd128Float(toAddress(to), scratch);
265 0 : masm.storeAlignedSimd128Float(scratch, cycleSlot());
266 : } else {
267 0 : masm.storeAlignedSimd128Float(to.floatReg(), cycleSlot());
268 : }
269 0 : break;
270 : case MoveOp::FLOAT32:
271 0 : if (to.isMemory()) {
272 0 : ScratchFloat32Scope scratch(masm);
273 0 : masm.loadFloat32(toAddress(to), scratch);
274 0 : masm.storeFloat32(scratch, cycleSlot());
275 : } else {
276 0 : masm.storeFloat32(to.floatReg(), cycleSlot());
277 : }
278 0 : break;
279 : case MoveOp::DOUBLE:
280 0 : if (to.isMemory()) {
281 0 : ScratchDoubleScope scratch(masm);
282 0 : masm.loadDouble(toAddress(to), scratch);
283 0 : masm.storeDouble(scratch, cycleSlot());
284 : } else {
285 0 : masm.storeDouble(to.floatReg(), cycleSlot());
286 : }
287 0 : break;
288 : case MoveOp::INT32:
289 : #ifdef JS_CODEGEN_X64
290 : // x64 can't pop to a 32-bit destination, so don't push.
291 0 : if (to.isMemory()) {
292 0 : masm.load32(toAddress(to), ScratchReg);
293 0 : masm.store32(ScratchReg, cycleSlot());
294 : } else {
295 0 : masm.store32(to.reg(), cycleSlot());
296 : }
297 0 : break;
298 : #endif
299 : case MoveOp::GENERAL:
300 0 : masm.Push(toOperand(to));
301 0 : break;
302 : default:
303 0 : MOZ_CRASH("Unexpected move type");
304 : }
305 0 : }
306 :
307 : void
308 0 : MoveEmitterX86::completeCycle(const MoveOperand& to, MoveOp::Type type)
309 : {
310 :              // There is a cycle of the form:
311 : // (A -> B)
312 : // (B -> A)
313 : //
314 : // This case handles (B -> A), which we reach last. We emit a move from the
315 :              // saved value of B to A.
316 0 : switch (type) {
317 : case MoveOp::SIMD128INT:
318 0 : MOZ_ASSERT(pushedAtCycle_ != -1);
319 0 : MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= Simd128DataSize);
320 0 : if (to.isMemory()) {
321 0 : ScratchSimd128Scope scratch(masm);
322 0 : masm.loadAlignedSimd128Int(cycleSlot(), scratch);
323 0 : masm.storeAlignedSimd128Int(scratch, toAddress(to));
324 : } else {
325 0 : masm.loadAlignedSimd128Int(cycleSlot(), to.floatReg());
326 : }
327 0 : break;
328 : case MoveOp::SIMD128FLOAT:
329 0 : MOZ_ASSERT(pushedAtCycle_ != -1);
330 0 : MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= Simd128DataSize);
331 0 : if (to.isMemory()) {
332 0 : ScratchSimd128Scope scratch(masm);
333 0 : masm.loadAlignedSimd128Float(cycleSlot(), scratch);
334 0 : masm.storeAlignedSimd128Float(scratch, toAddress(to));
335 : } else {
336 0 : masm.loadAlignedSimd128Float(cycleSlot(), to.floatReg());
337 : }
338 0 : break;
339 : case MoveOp::FLOAT32:
340 0 : MOZ_ASSERT(pushedAtCycle_ != -1);
341 0 : MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= sizeof(float));
342 0 : if (to.isMemory()) {
343 0 : ScratchFloat32Scope scratch(masm);
344 0 : masm.loadFloat32(cycleSlot(), scratch);
345 0 : masm.storeFloat32(scratch, toAddress(to));
346 : } else {
347 0 : masm.loadFloat32(cycleSlot(), to.floatReg());
348 : }
349 0 : break;
350 : case MoveOp::DOUBLE:
351 0 : MOZ_ASSERT(pushedAtCycle_ != -1);
352 0 : MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= sizeof(double));
353 0 : if (to.isMemory()) {
354 0 : ScratchDoubleScope scratch(masm);
355 0 : masm.loadDouble(cycleSlot(), scratch);
356 0 : masm.storeDouble(scratch, toAddress(to));
357 : } else {
358 0 : masm.loadDouble(cycleSlot(), to.floatReg());
359 : }
360 0 : break;
361 : case MoveOp::INT32:
362 : #ifdef JS_CODEGEN_X64
363 0 : MOZ_ASSERT(pushedAtCycle_ != -1);
364 0 : MOZ_ASSERT(pushedAtCycle_ - pushedAtStart_ >= sizeof(int32_t));
365 : // x64 can't pop to a 32-bit destination.
366 0 : if (to.isMemory()) {
367 0 : masm.load32(cycleSlot(), ScratchReg);
368 0 : masm.store32(ScratchReg, toAddress(to));
369 : } else {
370 0 : masm.load32(cycleSlot(), to.reg());
371 : }
372 0 : break;
373 : #endif
374 : case MoveOp::GENERAL:
375 0 : MOZ_ASSERT(masm.framePushed() - pushedAtStart_ >= sizeof(intptr_t));
376 0 : masm.Pop(toPopOperand(to));
377 0 : break;
378 : default:
379 0 : MOZ_CRASH("Unexpected move type");
380 : }
381 0 : }
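Taken together, breakCycle and completeCycle resolve a two-move cycle with one saved value: breakCycle stashes the destination about to be clobbered, the ordinary move runs, and completeCycle writes the stashed value into the final destination. A standalone sketch with hypothetical plain-int "registers" (not the jit API):

#include <cassert>

int main() {
    int A = 1, B = 2;       // cycle: (A -> B), (B -> A)
    int cycleSlot = B;      // breakCycle(): save B before it is overwritten
    B = A;                  // the ordinary (A -> B) move proceeds as usual
    A = cycleSlot;          // completeCycle(): restore the saved value into A
    assert(A == 2 && B == 1);
    return 0;
}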
382 :
383 : void
384 165 : MoveEmitterX86::emitInt32Move(const MoveOperand& from, const MoveOperand& to,
385 : const MoveResolver& moves, size_t i)
386 : {
387 165 : if (from.isGeneralReg()) {
388 118 : masm.move32(from.reg(), toOperand(to));
389 47 : } else if (to.isGeneralReg()) {
390 47 : MOZ_ASSERT(from.isMemory());
391 47 : masm.load32(toAddress(from), to.reg());
392 : } else {
393 : // Memory to memory gpr move.
394 0 : MOZ_ASSERT(from.isMemory());
395 0 : Maybe<Register> reg = findScratchRegister(moves, i);
396 0 : if (reg.isSome()) {
397 0 : masm.load32(toAddress(from), reg.value());
398 0 : masm.move32(reg.value(), toOperand(to));
399 : } else {
400 : // No scratch register available; bounce it off the stack.
401 0 : masm.Push(toOperand(from));
402 0 : masm.Pop(toPopOperand(to));
403 : }
404 : }
405 165 : }
406 :
407 : void
408 18955 : MoveEmitterX86::emitGeneralMove(const MoveOperand& from, const MoveOperand& to,
409 : const MoveResolver& moves, size_t i)
410 : {
411 18955 : if (from.isGeneralReg()) {
412 17122 : masm.mov(from.reg(), toOperand(to));
413 1833 : } else if (to.isGeneralReg()) {
414 1827 : MOZ_ASSERT(from.isMemoryOrEffectiveAddress());
415 1827 : if (from.isMemory())
416 1031 : masm.loadPtr(toAddress(from), to.reg());
417 : else
418 796 : masm.lea(toOperand(from), to.reg());
419 6 : } else if (from.isMemory()) {
420 : // Memory to memory gpr move.
421 4 : Maybe<Register> reg = findScratchRegister(moves, i);
422 2 : if (reg.isSome()) {
423 2 : masm.loadPtr(toAddress(from), reg.value());
424 2 : masm.mov(reg.value(), toOperand(to));
425 : } else {
426 : // No scratch register available; bounce it off the stack.
427 0 : masm.Push(toOperand(from));
428 0 : masm.Pop(toPopOperand(to));
429 : }
430 : } else {
431 : // Effective address to memory move.
432 4 : MOZ_ASSERT(from.isEffectiveAddress());
433 8 : Maybe<Register> reg = findScratchRegister(moves, i);
434 4 : if (reg.isSome()) {
435 4 : masm.lea(toOperand(from), reg.value());
436 4 : masm.mov(reg.value(), toOperand(to));
437 : } else {
438 : // This is tricky without a scratch reg. We can't do an lea. Bounce the
439 : // base register off the stack, then add the offset in place. Note that
440 : // this clobbers FLAGS!
441 0 : masm.Push(from.base());
442 0 : masm.Pop(toPopOperand(to));
443 0 : MOZ_ASSERT(to.isMemoryOrEffectiveAddress());
444 0 : masm.addPtr(Imm32(from.disp()), toAddress(to));
445 : }
446 : }
447 18955 : }
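The no-scratch fallback at the end of emitGeneralMove materializes "destination = base + disp" using only a push/pop bounce and an in-place add. A standalone sketch with hypothetical values, modeling the push/pop as a single temporary:

#include <cassert>
#include <cstdint>

int main() {
    intptr_t base = 0x1000;      // stands in for the value of from.base()
    int32_t disp = 0x20;         // stands in for from.disp()
    intptr_t dest = 0;           // stands in for the memory destination

    intptr_t stackSlot = base;   // masm.Push(from.base())
    dest = stackSlot;            // masm.Pop(toPopOperand(to))
    dest += disp;                // masm.addPtr(...) -- this add is what clobbers FLAGS
    assert(dest == 0x1020);
    return 0;
}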
448 :
449 : void
450 0 : MoveEmitterX86::emitFloat32Move(const MoveOperand& from, const MoveOperand& to)
451 : {
452 0 : MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isSingle());
453 0 : MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isSingle());
454 :
455 0 : if (from.isFloatReg()) {
456 0 : if (to.isFloatReg())
457 0 : masm.moveFloat32(from.floatReg(), to.floatReg());
458 : else
459 0 : masm.storeFloat32(from.floatReg(), toAddress(to));
460 0 : } else if (to.isFloatReg()) {
461 0 : masm.loadFloat32(toAddress(from), to.floatReg());
462 : } else {
463 : // Memory to memory move.
464 0 : MOZ_ASSERT(from.isMemory());
465 0 : ScratchFloat32Scope scratch(masm);
466 0 : masm.loadFloat32(toAddress(from), scratch);
467 0 : masm.storeFloat32(scratch, toAddress(to));
468 : }
469 0 : }
470 :
471 : void
472 4 : MoveEmitterX86::emitDoubleMove(const MoveOperand& from, const MoveOperand& to)
473 : {
474 4 : MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isDouble());
475 4 : MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isDouble());
476 :
477 4 : if (from.isFloatReg()) {
478 0 : if (to.isFloatReg())
479 0 : masm.moveDouble(from.floatReg(), to.floatReg());
480 : else
481 0 : masm.storeDouble(from.floatReg(), toAddress(to));
482 4 : } else if (to.isFloatReg()) {
483 4 : masm.loadDouble(toAddress(from), to.floatReg());
484 : } else {
485 : // Memory to memory move.
486 0 : MOZ_ASSERT(from.isMemory());
487 0 : ScratchDoubleScope scratch(masm);
488 0 : masm.loadDouble(toAddress(from), scratch);
489 0 : masm.storeDouble(scratch, toAddress(to));
490 : }
491 4 : }
492 :
493 : void
494 0 : MoveEmitterX86::emitSimd128IntMove(const MoveOperand& from, const MoveOperand& to)
495 : {
496 0 : MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isSimd128());
497 0 : MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isSimd128());
498 :
499 0 : if (from.isFloatReg()) {
500 0 : if (to.isFloatReg())
501 0 : masm.moveSimd128Int(from.floatReg(), to.floatReg());
502 : else
503 0 : masm.storeAlignedSimd128Int(from.floatReg(), toAddress(to));
504 0 : } else if (to.isFloatReg()) {
505 0 : masm.loadAlignedSimd128Int(toAddress(from), to.floatReg());
506 : } else {
507 : // Memory to memory move.
508 0 : MOZ_ASSERT(from.isMemory());
509 0 : ScratchSimd128Scope scratch(masm);
510 0 : masm.loadAlignedSimd128Int(toAddress(from), scratch);
511 0 : masm.storeAlignedSimd128Int(scratch, toAddress(to));
512 : }
513 0 : }
514 :
515 : void
516 0 : MoveEmitterX86::emitSimd128FloatMove(const MoveOperand& from, const MoveOperand& to)
517 : {
518 0 : MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg().isSimd128());
519 0 : MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg().isSimd128());
520 :
521 0 : if (from.isFloatReg()) {
522 0 : if (to.isFloatReg())
523 0 : masm.moveSimd128Float(from.floatReg(), to.floatReg());
524 : else
525 0 : masm.storeAlignedSimd128Float(from.floatReg(), toAddress(to));
526 0 : } else if (to.isFloatReg()) {
527 0 : masm.loadAlignedSimd128Float(toAddress(from), to.floatReg());
528 : } else {
529 : // Memory to memory move.
530 0 : MOZ_ASSERT(from.isMemory());
531 0 : ScratchSimd128Scope scratch(masm);
532 0 : masm.loadAlignedSimd128Float(toAddress(from), scratch);
533 0 : masm.storeAlignedSimd128Float(scratch, toAddress(to));
534 : }
535 0 : }
536 :
537 : void
538 26903 : MoveEmitterX86::assertDone()
539 : {
540 26903 : MOZ_ASSERT(!inCycle_);
541 26903 : }
542 :
543 : void
544 13452 : MoveEmitterX86::finish()
545 : {
546 13452 : assertDone();
547 :
548 13452 : masm.freeStack(masm.framePushed() - pushedAtStart_);
549 13452 : }
550 :
551 : Maybe<Register>
552 6 : MoveEmitterX86::findScratchRegister(const MoveResolver& moves, size_t initial)
553 : {
554 : #ifdef JS_CODEGEN_X86
555 : if (scratchRegister_.isSome())
556 : return scratchRegister_;
557 :
558 : // All registers are either in use by this move group or are live
559 : // afterwards. Look through the remaining moves for a register which is
560 : // clobbered before it is used, and is thus dead at this point.
561 : AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
562 : for (size_t i = initial; i < moves.numMoves(); i++) {
563 : const MoveOp& move = moves.getMove(i);
564 : if (move.from().isGeneralReg())
565 : regs.takeUnchecked(move.from().reg());
566 : else if (move.from().isMemoryOrEffectiveAddress())
567 : regs.takeUnchecked(move.from().base());
568 : if (move.to().isGeneralReg()) {
569 : if (i != initial && !move.isCycleBegin() && regs.has(move.to().reg()))
570 : return mozilla::Some(move.to().reg());
571 : regs.takeUnchecked(move.to().reg());
572 : } else if (move.to().isMemoryOrEffectiveAddress()) {
573 : regs.takeUnchecked(move.to().base());
574 : }
575 : }
576 :
577 : return mozilla::Nothing();
578 : #else
579 6 : return mozilla::Some(ScratchReg);
580 : #endif
581 : }
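On x86, findScratchRegister scans the remaining moves for a register that is overwritten before anything in the window reads it, so its current value is already dead. A standalone sketch of that scan over hypothetical integer-named registers (cycle bookkeeping and memory base registers omitted; not the MoveResolver API):

#include <cstddef>
#include <optional>
#include <set>
#include <vector>

struct SimpleMove { int from; int to; };   // registers named by small integers

std::optional<int> findDeadRegister(const std::vector<SimpleMove>& moves, size_t initial) {
    std::set<int> candidates = {0, 1, 2, 3, 4, 5, 6, 7};   // pretend register file
    for (size_t i = initial; i < moves.size(); i++) {
        candidates.erase(moves[i].from);      // still read here: not dead
        if (i != initial && candidates.count(moves[i].to))
            return moves[i].to;               // clobbered before any remaining read
        candidates.erase(moves[i].to);        // already written: don't hand it out later
    }
    return std::nullopt;                      // every register is still live somewhere
}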