Line data Source code
1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : * vim: set ts=8 sts=4 et sw=4 tw=99:
3 : * This Source Code Form is subject to the terms of the Mozilla Public
4 : * License, v. 2.0. If a copy of the MPL was not distributed with this
5 : * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 :
7 : #include "jit/RegisterAllocator.h"
8 :
9 : using namespace js;
10 : using namespace js::jit;
11 :
12 : bool
13 8 : AllocationIntegrityState::record()
14 : {
15 : // Ignore repeated record() calls.
16 8 : if (!instructions.empty())
17 0 : return true;
18 :
19 8 : if (!instructions.appendN(InstructionInfo(), graph.numInstructions()))
20 0 : return false;
21 :
22 8 : if (!virtualRegisters.appendN((LDefinition*)nullptr, graph.numVirtualRegisters()))
23 0 : return false;
24 :
25 8 : if (!blocks.reserve(graph.numBlocks()))
26 0 : return false;
27 411 : for (size_t i = 0; i < graph.numBlocks(); i++) {
28 403 : blocks.infallibleAppend(BlockInfo());
29 403 : LBlock* block = graph.getBlock(i);
30 403 : MOZ_ASSERT(block->mir()->id() == i);
31 :
32 403 : BlockInfo& blockInfo = blocks[i];
33 403 : if (!blockInfo.phis.reserve(block->numPhis()))
34 0 : return false;
35 :
36 578 : for (size_t j = 0; j < block->numPhis(); j++) {
37 175 : blockInfo.phis.infallibleAppend(InstructionInfo());
38 175 : InstructionInfo& info = blockInfo.phis[j];
39 175 : LPhi* phi = block->getPhi(j);
40 175 : MOZ_ASSERT(phi->numDefs() == 1);
41 175 : uint32_t vreg = phi->getDef(0)->virtualRegister();
42 175 : virtualRegisters[vreg] = phi->getDef(0);
43 175 : if (!info.outputs.append(*phi->getDef(0)))
44 0 : return false;
45 605 : for (size_t k = 0, kend = phi->numOperands(); k < kend; k++) {
46 430 : if (!info.inputs.append(*phi->getOperand(k)))
47 0 : return false;
48 : }
49 : }
50 :
51 1762 : for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
52 1359 : LInstruction* ins = *iter;
53 1359 : InstructionInfo& info = instructions[ins->id()];
54 :
55 1746 : for (size_t k = 0; k < ins->numTemps(); k++) {
56 387 : if (!ins->getTemp(k)->isBogusTemp()) {
57 314 : uint32_t vreg = ins->getTemp(k)->virtualRegister();
58 314 : virtualRegisters[vreg] = ins->getTemp(k);
59 : }
60 387 : if (!info.temps.append(*ins->getTemp(k)))
61 0 : return false;
62 : }
63 1932 : for (size_t k = 0; k < ins->numDefs(); k++) {
64 573 : if (!ins->getDef(k)->isBogusTemp()) {
65 573 : uint32_t vreg = ins->getDef(k)->virtualRegister();
66 573 : virtualRegisters[vreg] = ins->getDef(k);
67 : }
68 573 : if (!info.outputs.append(*ins->getDef(k)))
69 0 : return false;
70 : }
71 9259 : for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
72 7900 : if (!info.inputs.append(**alloc))
73 0 : return false;
74 : }
75 : }
76 : }
77 :
78 8 : return seen.init();
79 : }
80 :
// Verify the allocator's output against the state captured by record().
// If |populateSafepoints| is set, missing safepoint entries are added rather
// than asserted on. Returns false only on OOM; integrity violations assert.
bool
AllocationIntegrityState::check(bool populateSafepoints)
{
    // record() must have been called first.
    MOZ_ASSERT(!instructions.empty());

#ifdef JS_JITSPEW
    if (JitSpewEnabled(JitSpew_RegAlloc))
        dump();
#endif
#ifdef DEBUG
    for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
        LBlock* block = graph.getBlock(blockIndex);

        // Check that all instruction inputs and outputs have been assigned an allocation.
        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
            LInstruction* ins = *iter;

            // No input may still be an unresolved virtual-register use.
            for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next())
                MOZ_ASSERT(!alloc->isUse());

            for (size_t i = 0; i < ins->numDefs(); i++) {
                LDefinition* def = ins->getDef(i);
                MOZ_ASSERT(!def->output()->isUse());

                // A MUST_REUSE_INPUT def must share its allocation with the
                // operand it reuses (checked against the recorded policy).
                LDefinition oldDef = instructions[ins->id()].outputs[i];
                MOZ_ASSERT_IF(oldDef.policy() == LDefinition::MUST_REUSE_INPUT,
                              *def->output() == *ins->getOperand(oldDef.getReusedInput()));
            }

            for (size_t i = 0; i < ins->numTemps(); i++) {
                LDefinition* temp = ins->getTemp(i);
                MOZ_ASSERT_IF(!temp->isBogusTemp(), temp->output()->isRegister());

                LDefinition oldTemp = instructions[ins->id()].temps[i];
                MOZ_ASSERT_IF(oldTemp.policy() == LDefinition::MUST_REUSE_INPUT,
                              *temp->output() == *ins->getOperand(oldTemp.getReusedInput()));
            }
        }
    }
#endif

    // Check that the register assignment and move groups preserve the original
    // semantics of the virtual registers. Each virtual register has a single
    // write (owing to the SSA representation), but the allocation may move the
    // written value around between registers and memory locations along
    // different paths through the script.
    //
    // For each use of an allocation, follow the physical value which is read
    // backward through the script, along all paths to the value's virtual
    // register's definition.
    for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
        LBlock* block = graph.getBlock(blockIndex);
        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
            LInstruction* ins = *iter;
            const InstructionInfo& info = instructions[ins->id()];

            LSafepoint* safepoint = ins->safepoint();
            if (safepoint) {
                // Non-bogus temps must be accounted for in the safepoint.
                for (size_t i = 0; i < ins->numTemps(); i++) {
                    if (ins->getTemp(i)->isBogusTemp())
                        continue;
                    uint32_t vreg = info.temps[i].virtualRegister();
                    LAllocation* alloc = ins->getTemp(i)->output();
                    if (!checkSafepointAllocation(ins, vreg, *alloc, populateSafepoints))
                        return false;
                }
                // A call instruction's safepoint must record no live
                // registers (unless we are populating safepoints now).
                MOZ_ASSERT_IF(ins->isCall() && !populateSafepoints,
                              safepoint->liveRegs().emptyFloat() &&
                              safepoint->liveRegs().emptyGeneral());
            }

            size_t inputIndex = 0;
            for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
                LAllocation oldInput = info.inputs[inputIndex++];
                if (!oldInput.isUse())
                    continue;

                uint32_t vreg = oldInput.toUse()->virtualRegister();

                // Inputs not used at start are live across the safepoint and
                // must be reflected in it.
                if (safepoint && !oldInput.toUse()->usedAtStart()) {
                    if (!checkSafepointAllocation(ins, vreg, **alloc, populateSafepoints))
                        return false;
                }

                // Start checking at the previous instruction, in case this
                // instruction reuses its input register for an output.
                LInstructionReverseIterator riter = block->rbegin(ins);
                riter++;
                if (!checkIntegrity(block, *riter, vreg, **alloc, populateSafepoints))
                    return false;

                // Drain the cross-block worklist filled by addPredecessor(),
                // scanning each queued predecessor from its last instruction.
                while (!worklist.empty()) {
                    IntegrityItem item = worklist.popCopy();
                    if (!checkIntegrity(item.block, *item.block->rbegin(), item.vreg, item.alloc,
                                        populateSafepoints)) {
                        return false;
                    }
                }
            }
        }
    }

    return true;
}
185 :
// Walk backward from |ins| through |block| (and, via addPredecessor, into
// predecessor blocks) tracking the physical location |alloc| that should hold
// the value of |vreg|, until the vreg's defining instruction is found.
// Returns false only on OOM; mismatches assert.
bool
AllocationIntegrityState::checkIntegrity(LBlock* block, LInstruction* ins,
                                         uint32_t vreg, LAllocation alloc, bool populateSafepoints)
{
    for (LInstructionReverseIterator iter(block->rbegin(ins)); iter != block->rend(); iter++) {
        ins = *iter;

        // Follow values through assignments in move groups. All assignments in
        // a move group are considered to happen simultaneously, so stop after
        // the first matching move is found.
        if (ins->isMoveGroup()) {
            LMoveGroup* group = ins->toMoveGroup();
            for (int i = group->numMoves() - 1; i >= 0; i--) {
                if (group->getMove(i).to() == alloc) {
                    alloc = group->getMove(i).from();
                    break;
                }
            }
        }

        const InstructionInfo& info = instructions[ins->id()];

        // Make sure the physical location being tracked is not clobbered by
        // another instruction, and that if the originating vreg definition is
        // found that it is writing to the tracked location.

        for (size_t i = 0; i < ins->numDefs(); i++) {
            LDefinition* def = ins->getDef(i);
            if (def->isBogusTemp())
                continue;
            if (info.outputs[i].virtualRegister() == vreg) {
                MOZ_ASSERT(*def->output() == alloc);

                // Found the original definition, done scanning.
                return true;
            } else {
                // Any other def writing to this location would clobber the
                // tracked value.
                MOZ_ASSERT(*def->output() != alloc);
            }
        }

        // Temps must not overwrite the tracked location either.
        for (size_t i = 0; i < ins->numTemps(); i++) {
            LDefinition* temp = ins->getTemp(i);
            if (!temp->isBogusTemp())
                MOZ_ASSERT(*temp->output() != alloc);
        }

        // The value is live across this safepoint, so the safepoint must
        // account for it.
        if (ins->safepoint()) {
            if (!checkSafepointAllocation(ins, vreg, alloc, populateSafepoints))
                return false;
        }
    }

    // Phis are effectless, but change the vreg we are tracking. Check if there
    // is one which produced this vreg. We need to follow back through the phi
    // inputs as it is not guaranteed the register allocator filled in physical
    // allocations for the inputs and outputs of the phis.
    for (size_t i = 0; i < block->numPhis(); i++) {
        const InstructionInfo& info = blocks[block->mir()->id()].phis[i];
        LPhi* phi = block->getPhi(i);
        if (info.outputs[0].virtualRegister() == vreg) {
            for (size_t j = 0, jend = phi->numOperands(); j < jend; j++) {
                uint32_t newvreg = info.inputs[j].toUse()->virtualRegister();
                LBlock* predecessor = block->mir()->getPredecessor(j)->lir();
                if (!addPredecessor(predecessor, newvreg, alloc))
                    return false;
            }
            return true;
        }
    }

    // No phi which defined the vreg we are tracking, follow back through all
    // predecessors with the existing vreg.
    for (size_t i = 0, iend = block->mir()->numPredecessors(); i < iend; i++) {
        LBlock* predecessor = block->mir()->getPredecessor(i)->lir();
        if (!addPredecessor(predecessor, vreg, alloc))
            return false;
    }

    return true;
}
266 :
// Check (or, when |populateSafepoints| is set, fill in) the safepoint entry
// for the value of |vreg| stored at |alloc| across |ins|'s safepoint.
// Returns false only on OOM while populating; mismatches assert.
bool
AllocationIntegrityState::checkSafepointAllocation(LInstruction* ins,
                                                   uint32_t vreg, LAllocation alloc,
                                                   bool populateSafepoints)
{
    LSafepoint* safepoint = ins->safepoint();
    MOZ_ASSERT(safepoint);

    // Register contents do not survive a call, so there is nothing to check.
    if (ins->isCall() && alloc.isRegister())
        return true;

    if (alloc.isRegister()) {
        AnyRegister reg = alloc.toRegister();
        if (populateSafepoints)
            safepoint->addLiveRegister(reg);

        MOZ_ASSERT(safepoint->liveRegs().has(reg));
    }

    // The |this| argument slot is implicitly included in all safepoints.
    if (alloc.isArgument() && alloc.toArgument()->index() < THIS_FRAME_ARGSLOT + sizeof(Value))
        return true;

    // vregs never written in the graph fall back to GENERAL, which needs no
    // safepoint tracking.
    LDefinition::Type type = virtualRegisters[vreg]
                             ? virtualRegisters[vreg]->type()
                             : LDefinition::GENERAL;

    switch (type) {
      case LDefinition::OBJECT:
        if (populateSafepoints) {
            JitSpew(JitSpew_RegAlloc, "Safepoint object v%u i%u %s",
                    vreg, ins->id(), alloc.toString().get());
            if (!safepoint->addGcPointer(alloc))
                return false;
        }
        MOZ_ASSERT(safepoint->hasGcPointer(alloc));
        break;
      case LDefinition::SLOTS:
        if (populateSafepoints) {
            JitSpew(JitSpew_RegAlloc, "Safepoint slots v%u i%u %s",
                    vreg, ins->id(), alloc.toString().get());
            if (!safepoint->addSlotsOrElementsPointer(alloc))
                return false;
        }
        MOZ_ASSERT(safepoint->hasSlotsOrElementsPointer(alloc));
        break;
#ifdef JS_NUNBOX32
      // Do not assert that safepoint information for nunbox types is complete,
      // as if a vreg for a value's components are copied in multiple places
      // then the safepoint information may not reflect all copies. All copies
      // of payloads must be reflected, however, for generational GC.
      case LDefinition::TYPE:
        if (populateSafepoints) {
            JitSpew(JitSpew_RegAlloc, "Safepoint type v%u i%u %s",
                    vreg, ins->id(), alloc.toString().get());
            if (!safepoint->addNunboxType(vreg, alloc))
                return false;
        }
        break;
      case LDefinition::PAYLOAD:
        if (populateSafepoints) {
            JitSpew(JitSpew_RegAlloc, "Safepoint payload v%u i%u %s",
                    vreg, ins->id(), alloc.toString().get());
            if (!safepoint->addNunboxPayload(vreg, alloc))
                return false;
        }
        MOZ_ASSERT(safepoint->hasNunboxPayload(alloc));
        break;
#else
      case LDefinition::BOX:
        if (populateSafepoints) {
            JitSpew(JitSpew_RegAlloc, "Safepoint boxed value v%u i%u %s",
                    vreg, ins->id(), alloc.toString().get());
            if (!safepoint->addBoxedValue(alloc))
                return false;
        }
        MOZ_ASSERT(safepoint->hasBoxedValue(alloc));
        break;
#endif
      default:
        break;
    }

    return true;
}
352 :
353 : bool
354 12510 : AllocationIntegrityState::addPredecessor(LBlock* block, uint32_t vreg, LAllocation alloc)
355 : {
356 : // There is no need to reanalyze if we have already seen this predecessor.
357 : // We share the seen allocations across analysis of each use, as there will
358 : // likely be common ground between different uses of the same vreg.
359 12510 : IntegrityItem item;
360 12510 : item.block = block;
361 12510 : item.vreg = vreg;
362 12510 : item.alloc = alloc;
363 12510 : item.index = seen.count();
364 :
365 12510 : IntegrityItemSet::AddPtr p = seen.lookupForAdd(item);
366 12510 : if (p)
367 5630 : return true;
368 6880 : if (!seen.add(p, item))
369 0 : return false;
370 :
371 6880 : return worklist.append(item);
372 : }
373 :
// Debug-only printer: dumps the recorded (pre-allocation) state per block and
// instruction, followed by the intermediate allocations discovered by
// checkIntegrity(), to stderr.
void
AllocationIntegrityState::dump()
{
#ifdef DEBUG
    fprintf(stderr, "Register Allocation Integrity State:\n");

    for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
        LBlock* block = graph.getBlock(blockIndex);
        MBasicBlock* mir = block->mir();

        fprintf(stderr, "\nBlock %lu", static_cast<unsigned long>(blockIndex));
        for (size_t i = 0; i < mir->numSuccessors(); i++)
            fprintf(stderr, " [successor %u]", mir->getSuccessor(i)->id());
        fprintf(stderr, "\n");

        for (size_t i = 0; i < block->numPhis(); i++) {
            const InstructionInfo& info = blocks[blockIndex].phis[i];
            LPhi* phi = block->getPhi(i);
            // All phis in a block share one input/output position range,
            // spanning the first to the last phi.
            CodePosition input(block->getPhi(0)->id(), CodePosition::INPUT);
            CodePosition output(block->getPhi(block->numPhis() - 1)->id(), CodePosition::OUTPUT);

            fprintf(stderr, "[%u,%u Phi] [def %s] ",
                    input.bits(),
                    output.bits(),
                    phi->getDef(0)->toString().get());
            for (size_t j = 0; j < phi->numOperands(); j++)
                fprintf(stderr, " [use %s]", info.inputs[j].toString().get());
            fprintf(stderr, "\n");
        }

        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
            LInstruction* ins = *iter;
            const InstructionInfo& info = instructions[ins->id()];

            CodePosition input(ins->id(), CodePosition::INPUT);
            CodePosition output(ins->id(), CodePosition::OUTPUT);

            fprintf(stderr, "[");
            if (input != CodePosition::MIN)
                fprintf(stderr, "%u,%u ", input.bits(), output.bits());
            fprintf(stderr, "%s]", ins->opName());

            // Move groups only carry their moves; print those and move on.
            if (ins->isMoveGroup()) {
                LMoveGroup* group = ins->toMoveGroup();
                for (int i = group->numMoves() - 1; i >= 0; i--) {
                    fprintf(stderr, " [%s -> %s]",
                            group->getMove(i).from().toString().get(),
                            group->getMove(i).to().toString().get());
                }
                fprintf(stderr, "\n");
                continue;
            }

            for (size_t i = 0; i < ins->numDefs(); i++)
                fprintf(stderr, " [def %s]", ins->getDef(i)->toString().get());

            for (size_t i = 0; i < ins->numTemps(); i++) {
                LDefinition* temp = ins->getTemp(i);
                if (!temp->isBogusTemp())
                    fprintf(stderr, " [temp v%u %s]", info.temps[i].virtualRegister(),
                            temp->toString().get());
            }

            size_t index = 0;
            for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
                fprintf(stderr, " [use %s", info.inputs[index++].toString().get());
                if (!alloc->isConstant())
                    fprintf(stderr, " %s", alloc->toString().get());
                fprintf(stderr, "]");
            }

            fprintf(stderr, "\n");
        }
    }

    // Print discovered allocations at the ends of blocks, in the order they
    // were discovered.

    Vector<IntegrityItem, 20, SystemAllocPolicy> seenOrdered;
    if (!seenOrdered.appendN(IntegrityItem(), seen.count())) {
        fprintf(stderr, "OOM while dumping allocations\n");
        return;
    }

    // The set iterates in hash order; item.index restores discovery order.
    for (IntegrityItemSet::Enum iter(seen); !iter.empty(); iter.popFront()) {
        IntegrityItem item = iter.front();
        seenOrdered[item.index] = item;
    }

    if (!seenOrdered.empty()) {
        fprintf(stderr, "Intermediate Allocations:\n");

        for (size_t i = 0; i < seenOrdered.length(); i++) {
            IntegrityItem item = seenOrdered[i];
            fprintf(stderr, " block %u reg v%u alloc %s\n",
                    item.block->mir()->id(), item.vreg, item.alloc.toString().get());
        }
    }

    fprintf(stderr, "\n");
#endif
}
476 :
// Bounding code positions: MIN is position 0, MAX is the largest encodable
// position (UINT_MAX).
const CodePosition CodePosition::MAX(UINT_MAX);
const CodePosition CodePosition::MIN(0);
479 :
480 : bool
481 8 : RegisterAllocator::init()
482 : {
483 8 : if (!insData.init(mir, graph.numInstructions()))
484 0 : return false;
485 :
486 8 : if (!entryPositions.reserve(graph.numBlocks()) || !exitPositions.reserve(graph.numBlocks()))
487 0 : return false;
488 :
489 411 : for (size_t i = 0; i < graph.numBlocks(); i++) {
490 403 : LBlock* block = graph.getBlock(i);
491 1762 : for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++)
492 1359 : insData[ins->id()] = *ins;
493 578 : for (size_t j = 0; j < block->numPhis(); j++) {
494 175 : LPhi* phi = block->getPhi(j);
495 175 : insData[phi->id()] = phi;
496 : }
497 :
498 403 : CodePosition entry = block->numPhis() != 0
499 62 : ? CodePosition(block->getPhi(0)->id(), CodePosition::INPUT)
500 465 : : inputOf(block->firstInstructionWithId());
501 403 : CodePosition exit = outputOf(block->lastInstructionWithId());
502 :
503 403 : MOZ_ASSERT(block->mir()->id() == i);
504 403 : entryPositions.infallibleAppend(entry);
505 403 : exitPositions.infallibleAppend(exit);
506 : }
507 :
508 8 : return true;
509 : }
510 :
511 : LMoveGroup*
512 294 : RegisterAllocator::getInputMoveGroup(LInstruction* ins)
513 : {
514 294 : MOZ_ASSERT(!ins->fixReuseMoves());
515 294 : if (ins->inputMoves())
516 11 : return ins->inputMoves();
517 :
518 283 : LMoveGroup* moves = LMoveGroup::New(alloc());
519 283 : ins->setInputMoves(moves);
520 283 : ins->block()->insertBefore(ins, moves);
521 283 : return moves;
522 : }
523 :
524 : LMoveGroup*
525 0 : RegisterAllocator::getFixReuseMoveGroup(LInstruction* ins)
526 : {
527 0 : if (ins->fixReuseMoves())
528 0 : return ins->fixReuseMoves();
529 :
530 0 : LMoveGroup* moves = LMoveGroup::New(alloc());
531 0 : ins->setFixReuseMoves(moves);
532 0 : ins->block()->insertBefore(ins, moves);
533 0 : return moves;
534 : }
535 :
536 : LMoveGroup*
537 0 : RegisterAllocator::getMoveGroupAfter(LInstruction* ins)
538 : {
539 0 : if (ins->movesAfter())
540 0 : return ins->movesAfter();
541 :
542 0 : LMoveGroup* moves = LMoveGroup::New(alloc());
543 0 : ins->setMovesAfter(moves);
544 :
545 0 : ins->block()->insertAfter(ins, moves);
546 0 : return moves;
547 : }
548 :
// Debug-only printer: dumps every block's phis and instructions, with their
// code positions and current operand/def/temp allocations, to stderr.
void
RegisterAllocator::dumpInstructions()
{
#ifdef JS_JITSPEW
    fprintf(stderr, "Instructions:\n");

    for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
        LBlock* block = graph.getBlock(blockIndex);
        MBasicBlock* mir = block->mir();

        fprintf(stderr, "\nBlock %lu", static_cast<unsigned long>(blockIndex));
        for (size_t i = 0; i < mir->numSuccessors(); i++)
            fprintf(stderr, " [successor %u]", mir->getSuccessor(i)->id());
        fprintf(stderr, "\n");

        for (size_t i = 0; i < block->numPhis(); i++) {
            LPhi* phi = block->getPhi(i);

            fprintf(stderr, "[%u,%u Phi] [def %s]",
                    inputOf(phi).bits(),
                    outputOf(phi).bits(),
                    phi->getDef(0)->toString().get());
            for (size_t j = 0; j < phi->numOperands(); j++)
                fprintf(stderr, " [use %s]", phi->getOperand(j)->toString().get());
            fprintf(stderr, "\n");
        }

        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
            LInstruction* ins = *iter;

            fprintf(stderr, "[");
            // Instructions with id 0 have no code positions to print.
            if (ins->id() != 0)
                fprintf(stderr, "%u,%u ", inputOf(ins).bits(), outputOf(ins).bits());
            fprintf(stderr, "%s]", ins->opName());

            if (ins->isMoveGroup()) {
                LMoveGroup* group = ins->toMoveGroup();
                for (int i = group->numMoves() - 1; i >= 0; i--) {
                    // Use two printfs, as LAllocation::toString is not reentrant.
                    fprintf(stderr, " [%s", group->getMove(i).from().toString().get());
                    fprintf(stderr, " -> %s]", group->getMove(i).to().toString().get());
                }
                fprintf(stderr, "\n");
                continue;
            }

            for (size_t i = 0; i < ins->numDefs(); i++)
                fprintf(stderr, " [def %s]", ins->getDef(i)->toString().get());

            for (size_t i = 0; i < ins->numTemps(); i++) {
                LDefinition* temp = ins->getTemp(i);
                if (!temp->isBogusTemp())
                    fprintf(stderr, " [temp %s]", temp->toString().get());
            }

            for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
                if (!alloc->isBogus())
                    fprintf(stderr, " [use %s]", alloc->toString().get());
            }

            fprintf(stderr, "\n");
        }
    }
    fprintf(stderr, "\n");
#endif // JS_JITSPEW
}
|