Line data Source code
1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : * vim: set ts=8 sts=4 et sw=4 tw=99:
3 : * This Source Code Form is subject to the terms of the Mozilla Public
4 : * License, v. 2.0. If a copy of the MPL was not distributed with this
5 : * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 :
7 : #ifndef jit_BacktrackingAllocator_h
8 : #define jit_BacktrackingAllocator_h
9 :
10 : #include "mozilla/Array.h"
11 :
12 : #include "ds/PriorityQueue.h"
13 : #include "ds/SplayTree.h"
14 : #include "jit/RegisterAllocator.h"
15 : #include "jit/StackSlotAllocator.h"
16 :
17 : // Backtracking priority queue based register allocator based on that described
18 : // in the following blog post:
19 : //
20 : // http://blog.llvm.org/2011/09/greedy-register-allocation-in-llvm-30.html
21 :
22 : namespace js {
23 : namespace jit {
24 :
25 : class Requirement
26 : {
27 : public:
28 : enum Kind {
29 : NONE,
30 : REGISTER,
31 : FIXED,
32 : MUST_REUSE_INPUT
33 : };
34 :
35 2876 : Requirement()
36 2876 : : kind_(NONE)
37 2876 : { }
38 :
39 2608 : explicit Requirement(Kind kind)
40 2608 : : kind_(kind)
41 : {
42 : // These have dedicated constructors.
43 2608 : MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT);
44 2608 : }
45 :
46 : Requirement(Kind kind, CodePosition at)
47 : : kind_(kind),
48 : position_(at)
49 : {
50 : // These have dedicated constructors.
51 : MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT);
52 : }
53 :
54 362 : explicit Requirement(LAllocation fixed)
55 362 : : kind_(FIXED),
56 362 : allocation_(fixed)
57 : {
58 362 : MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
59 362 : }
60 :
61 : // Only useful as a hint, encodes where the fixed requirement is used to
62 : // avoid allocating a fixed register too early.
63 : Requirement(LAllocation fixed, CodePosition at)
64 : : kind_(FIXED),
65 : allocation_(fixed),
66 : position_(at)
67 : {
68 : MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
69 : }
70 :
71 : Requirement(uint32_t vreg, CodePosition at)
72 : : kind_(MUST_REUSE_INPUT),
73 : allocation_(LUse(vreg, LUse::ANY)),
74 : position_(at)
75 : { }
76 :
77 15725 : Kind kind() const {
78 15725 : return kind_;
79 : }
80 :
81 854 : LAllocation allocation() const {
82 854 : MOZ_ASSERT(!allocation_.isBogus() && !allocation_.isUse());
83 854 : return allocation_;
84 : }
85 :
86 : uint32_t virtualRegister() const {
87 : MOZ_ASSERT(allocation_.isUse());
88 : MOZ_ASSERT(kind() == MUST_REUSE_INPUT);
89 : return allocation_.toUse()->virtualRegister();
90 : }
91 :
92 : CodePosition pos() const {
93 : return position_;
94 : }
95 :
96 : int priority() const;
97 :
98 2970 : MOZ_MUST_USE bool merge(const Requirement& newRequirement) {
99 : // Merge newRequirement with any existing requirement, returning false
100 : // if the new and old requirements conflict.
101 2970 : MOZ_ASSERT(newRequirement.kind() != Requirement::MUST_REUSE_INPUT);
102 :
103 2970 : if (newRequirement.kind() == Requirement::FIXED) {
104 362 : if (kind() == Requirement::FIXED)
105 41 : return newRequirement.allocation() == allocation();
106 321 : *this = newRequirement;
107 321 : return true;
108 : }
109 :
110 2608 : MOZ_ASSERT(newRequirement.kind() == Requirement::REGISTER);
111 2608 : if (kind() == Requirement::FIXED)
112 108 : return allocation().isRegister();
113 :
114 2500 : *this = newRequirement;
115 2500 : return true;
116 : }
117 :
118 : void dump() const;
119 :
120 : private:
121 : Kind kind_;
122 : LAllocation allocation_;
123 : CodePosition position_;
124 : };
125 :
// A single use of a virtual register: the LUse from the LIR graph together
// with the code position where it occurs. Instances are chained into
// per-range lists (via InlineForwardListNode), ordered by position.
struct UsePosition : public TempObject,
                     public InlineForwardListNode<UsePosition>
{
  private:
    // Packed LUse* with a copy of the LUse::Policy value, in order to avoid
    // making cache misses while reaching out to the policy value.
    uintptr_t use_;

    void setUse(LUse* use) {
        // Assert that we can safely pack the LUse policy in the last 2 bits of
        // the LUse pointer.
        static_assert((LUse::ANY | LUse::REGISTER | LUse::FIXED | LUse::KEEPALIVE) <= 0x3,
                      "Cannot pack the LUse::Policy value on 32 bits architectures.");

        // RECOVERED_INPUT is used by snapshots and ignored when building the
        // liveness information. Thus we can safely assume that no such value
        // would be seen.
        MOZ_ASSERT(use->policy() != LUse::RECOVERED_INPUT);
        use_ = uintptr_t(use) | (use->policy() & 0x3);
    }

  public:
    // Position in the code where this use occurs.
    CodePosition pos;

    // The underlying LUse, with the packed policy bits masked off.
    LUse* use() const {
        return reinterpret_cast<LUse*>(use_ & ~0x3);
    }

    // The use's policy, read from the packed low bits so the LUse itself does
    // not need to be dereferenced (avoiding a likely cache miss).
    LUse::Policy usePolicy() const {
        LUse::Policy policy = LUse::Policy(use_ & 0x3);
        MOZ_ASSERT(use()->policy() == policy);
        return policy;
    }

    UsePosition(LUse* use, CodePosition pos) :
        pos(pos)
    {
        // Verify that the usedAtStart() flag is consistent with the
        // subposition. For now ignore fixed registers, because they
        // are handled specially around calls.
        MOZ_ASSERT_IF(!use->isFixedRegister(),
                      pos.subpos() == (use->usedAtStart()
                                       ? CodePosition::INPUT
                                       : CodePosition::OUTPUT));
        setUse(use);
    }
};
173 :
174 : typedef InlineForwardListIterator<UsePosition> UsePositionIterator;
175 :
176 : // Backtracking allocator data structures overview.
177 : //
178 : // LiveRange: A continuous range of positions where a virtual register is live.
179 : // LiveBundle: A set of LiveRanges which do not overlap.
180 : // VirtualRegister: A set of all LiveRanges used for some LDefinition.
181 : //
// The allocator first performs a liveness analysis on the LIR graph which
183 : // constructs LiveRanges for each VirtualRegister, determining where the
184 : // registers are live.
185 : //
186 : // The ranges are then bundled together according to heuristics, and placed on
187 : // the allocation queue.
188 : //
189 : // As bundles are removed from the allocation queue, we attempt to find a
190 : // physical register or stack slot allocation for all ranges in the removed
191 : // bundle, possibly evicting already-allocated bundles. See processBundle()
192 : // for details.
193 : //
194 : // If we are not able to allocate a bundle, it is split according to heuristics
195 : // into two or more smaller bundles which cover all the ranges of the original.
196 : // These smaller bundles are then allocated independently.
197 :
198 : class LiveBundle;
199 :
// A continuous interval of code positions [from, to) over which a virtual
// register is live, together with all uses of the register inside that
// interval.
class LiveRange : public TempObject
{
  public:
    // Linked lists are used to keep track of the ranges in each LiveBundle and
    // VirtualRegister. Since a LiveRange may be in two lists simultaneously, use
    // these auxiliary classes to keep things straight.
    class BundleLink : public InlineForwardListNode<BundleLink> {};
    class RegisterLink : public InlineForwardListNode<RegisterLink> {};

    typedef InlineForwardListIterator<BundleLink> BundleLinkIterator;
    typedef InlineForwardListIterator<RegisterLink> RegisterLinkIterator;

    // Links in the lists in LiveBundle and VirtualRegister.
    BundleLink bundleLink;
    RegisterLink registerLink;

    // Recover the enclosing LiveRange from an embedded link
    // (container-of via offsetof).
    static LiveRange* get(BundleLink* link) {
        return reinterpret_cast<LiveRange*>(reinterpret_cast<uint8_t*>(link) -
                                            offsetof(LiveRange, bundleLink));
    }
    static LiveRange* get(RegisterLink* link) {
        return reinterpret_cast<LiveRange*>(reinterpret_cast<uint8_t*>(link) -
                                            offsetof(LiveRange, registerLink));
    }

    // A half-open interval of code positions.
    struct Range
    {
        // The beginning of this range, inclusive.
        CodePosition from;

        // The end of this range, exclusive.
        CodePosition to;

        Range() {}

        Range(CodePosition from, CodePosition to)
          : from(from), to(to)
        {
            MOZ_ASSERT(!empty());
        }

        bool empty() {
            MOZ_ASSERT(from <= to);
            return from == to;
        }
    };

  private:
    // The virtual register this range is for, or zero if this does not have a
    // virtual register (for example, it is in the callRanges bundle).
    uint32_t vreg_;

    // The bundle containing this range, null if liveness information is being
    // constructed and we haven't started allocating bundles yet.
    LiveBundle* bundle_;

    // The code positions in this range.
    Range range_;

    // All uses of the virtual register in this range, ordered by location.
    InlineForwardList<UsePosition> uses_;

    // Whether this range contains the virtual register's definition.
    bool hasDefinition_;

    LiveRange(uint32_t vreg, Range range)
      : vreg_(vreg), bundle_(nullptr), range_(range), hasDefinition_(false)
    {
        MOZ_ASSERT(!range.empty());
    }

  public:
    // Allocate a new range; returns null on OOM.
    static LiveRange* FallibleNew(TempAllocator& alloc, uint32_t vreg,
                                  CodePosition from, CodePosition to)
    {
        return new(alloc.fallible()) LiveRange(vreg, Range(from, to));
    }

    uint32_t vreg() const {
        MOZ_ASSERT(hasVreg());
        return vreg_;
    }
    bool hasVreg() const {
        return vreg_ != 0;
    }

    LiveBundle* bundle() const {
        return bundle_;
    }

    CodePosition from() const {
        return range_.from;
    }
    CodePosition to() const {
        return range_.to;
    }
    // Whether pos falls inside [from, to).
    bool covers(CodePosition pos) const {
        return pos >= from() && pos < to();
    }

    // Whether this range wholly contains other.
    bool contains(LiveRange* other) const;

    // Intersect this range with other, returning the subranges of this
    // that are before, inside, or after other.
    void intersect(LiveRange* other, Range* pre, Range* inside, Range* post) const;

    // Whether this range has any intersection with other.
    bool intersects(LiveRange* other) const;

    UsePositionIterator usesBegin() const {
        return uses_.begin();
    }
    UsePosition* lastUse() const {
        return uses_.back();
    }
    bool hasUses() const {
        return !!usesBegin();
    }
    UsePosition* popUse() {
        return uses_.popFront();
    }

    bool hasDefinition() const {
        return hasDefinition_;
    }

    // Adjust an endpoint; the range must stay non-empty.
    void setFrom(CodePosition from) {
        range_.from = from;
        MOZ_ASSERT(!range_.empty());
    }
    void setTo(CodePosition to) {
        range_.to = to;
        MOZ_ASSERT(!range_.empty());
    }

    void setBundle(LiveBundle* bundle) {
        bundle_ = bundle;
    }

    void addUse(UsePosition* use);
    void distributeUses(LiveRange* other);

    void setHasDefinition() {
        MOZ_ASSERT(!hasDefinition_);
        hasDefinition_ = true;
    }

#ifdef JS_JITSPEW
    // Return a string describing this range.
    UniqueChars toString() const;
#endif

    // Comparator for use in range splay trees. Because ranges are half-open,
    // any two overlapping ranges compare equal (0).
    static int compare(LiveRange* v0, LiveRange* v1) {
        // LiveRange includes 'from' but excludes 'to'.
        if (v0->to() <= v1->from())
            return -1;
        if (v0->from() >= v1->to())
            return 1;
        return 0;
    }
};
363 :
364 : // Tracks information about bundles that should all be spilled to the same
365 : // physical location. At the beginning of allocation, each bundle has its own
366 : // spill set. As bundles are split, the new smaller bundles continue to use the
367 : // same spill set.
class SpillSet : public TempObject
{
    // All bundles with this spill set which have been spilled. All bundles in
    // this list will be given the same physical slot.
    Vector<LiveBundle*, 1, JitAllocPolicy> list_;

    explicit SpillSet(TempAllocator& alloc)
      : list_(alloc)
    { }

  public:
    // Allocate a fresh, empty spill set.
    static SpillSet* New(TempAllocator& alloc) {
        return new(alloc) SpillSet(alloc);
    }

    // Record a spilled bundle; returns false on OOM.
    MOZ_MUST_USE bool addSpilledBundle(LiveBundle* bundle) {
        return list_.append(bundle);
    }
    size_t numSpilledBundles() const {
        return list_.length();
    }
    LiveBundle* spilledBundle(size_t i) const {
        return list_[i];
    }

    // Assign the final physical location to every spilled bundle in the set.
    void setAllocation(LAllocation alloc);
};
395 :
396 : // A set of live ranges which are all pairwise disjoint. The register allocator
397 : // attempts to find allocations for an entire bundle, and if it fails the
398 : // bundle will be broken into smaller ones which are allocated independently.
class LiveBundle : public TempObject
{
    // Set to use if this bundle or one it is split into is spilled.
    SpillSet* spill_;

    // All the ranges in this set, ordered by location.
    InlineForwardList<LiveRange::BundleLink> ranges_;

    // Allocation to use for ranges in this set, bogus if unallocated or spilled
    // and not yet given a physical stack slot.
    LAllocation alloc_;

    // Bundle which entirely contains this one and has no register uses. This
    // may or may not be spilled by the allocator, but it can be spilled and
    // will not be split.
    LiveBundle* spillParent_;

    LiveBundle(SpillSet* spill, LiveBundle* spillParent)
      : spill_(spill), spillParent_(spillParent)
    { }

  public:
    // Allocate a new bundle; returns null on OOM.
    static LiveBundle* FallibleNew(TempAllocator& alloc, SpillSet* spill, LiveBundle* spillParent)
    {
        return new(alloc.fallible()) LiveBundle(spill, spillParent);
    }

    SpillSet* spillSet() const {
        return spill_;
    }
    void setSpillSet(SpillSet* spill) {
        spill_ = spill;
    }

    LiveRange::BundleLinkIterator rangesBegin() const {
        return ranges_.begin();
    }
    bool hasRanges() const {
        return !!rangesBegin();
    }
    LiveRange* firstRange() const {
        return LiveRange::get(*rangesBegin());
    }
    LiveRange* lastRange() const {
        return LiveRange::get(ranges_.back());
    }
    // The range in this bundle covering pos, or null if none does.
    LiveRange* rangeFor(CodePosition pos) const;
    void removeRange(LiveRange* range);
    void removeRangeAndIncrementIterator(LiveRange::BundleLinkIterator& iter) {
        ranges_.removeAndIncrement(iter);
    }
    void addRange(LiveRange* range);
    MOZ_MUST_USE bool addRange(TempAllocator& alloc, uint32_t vreg,
                               CodePosition from, CodePosition to);
    MOZ_MUST_USE bool addRangeAndDistributeUses(TempAllocator& alloc, LiveRange* oldRange,
                                                CodePosition from, CodePosition to);
    LiveRange* popFirstRange();
#ifdef DEBUG
    size_t numRanges() const;
#endif

    LAllocation allocation() const {
        return alloc_;
    }
    void setAllocation(LAllocation alloc) {
        alloc_ = alloc;
    }

    LiveBundle* spillParent() const {
        return spillParent_;
    }

#ifdef JS_JITSPEW
    // Return a string describing this bundle.
    UniqueChars toString() const;
#endif
};
476 :
477 : // Information about the allocation for a virtual register.
478 : class VirtualRegister
479 : {
480 : // Instruction which defines this register.
481 : LNode* ins_;
482 :
483 : // Definition in the instruction for this register.
484 : LDefinition* def_;
485 :
486 : // All live ranges for this register. These may overlap each other, and are
487 : // ordered by their start position.
488 : InlineForwardList<LiveRange::RegisterLink> ranges_;
489 :
490 : // Whether def_ is a temp or an output.
491 : bool isTemp_;
492 :
493 : // Whether this vreg is an input for some phi. This use is not reflected in
494 : // any range on the vreg.
495 : bool usedByPhi_;
496 :
497 : // If this register's definition is MUST_REUSE_INPUT, whether a copy must
498 : // be introduced before the definition that relaxes the policy.
499 : bool mustCopyInput_;
500 :
501 : void operator=(const VirtualRegister&) = delete;
502 : VirtualRegister(const VirtualRegister&) = delete;
503 :
504 : public:
505 1070 : explicit VirtualRegister()
506 1070 : {
507 : // Note: This class is zeroed before it is constructed.
508 1070 : }
509 :
510 1062 : void init(LNode* ins, LDefinition* def, bool isTemp) {
511 1062 : MOZ_ASSERT(!ins_);
512 1062 : ins_ = ins;
513 1062 : def_ = def;
514 1062 : isTemp_ = isTemp;
515 1062 : }
516 :
517 16864 : LNode* ins() const {
518 16864 : return ins_;
519 : }
520 26673 : LDefinition* def() const {
521 26673 : return def_;
522 : }
523 5435 : LDefinition::Type type() const {
524 5435 : return def()->type();
525 : }
526 2023 : uint32_t vreg() const {
527 2023 : return def()->virtualRegister();
528 : }
529 43113 : bool isCompatible(const AnyRegister& r) const {
530 43113 : return def_->isCompatibleReg(r);
531 : }
532 347 : bool isCompatible(const VirtualRegister& vr) const {
533 347 : return def_->isCompatibleDef(*vr.def_);
534 : }
535 1172 : bool isTemp() const {
536 1172 : return isTemp_;
537 : }
538 :
539 430 : void setUsedByPhi() {
540 430 : usedByPhi_ = true;
541 430 : }
542 111 : bool usedByPhi() {
543 111 : return usedByPhi_;
544 : }
545 :
546 4 : void setMustCopyInput() {
547 4 : mustCopyInput_ = true;
548 4 : }
549 8 : bool mustCopyInput() {
550 8 : return mustCopyInput_;
551 : }
552 :
553 36194 : LiveRange::RegisterLinkIterator rangesBegin() const {
554 36194 : return ranges_.begin();
555 : }
556 134 : LiveRange::RegisterLinkIterator rangesBegin(LiveRange* range) const {
557 134 : return ranges_.begin(&range->registerLink);
558 : }
559 2148 : bool hasRanges() const {
560 2148 : return !!rangesBegin();
561 : }
562 903 : LiveRange* firstRange() const {
563 903 : return LiveRange::get(*rangesBegin());
564 : }
565 4 : LiveRange* lastRange() const {
566 4 : return LiveRange::get(ranges_.back());
567 : }
568 : LiveRange* rangeFor(CodePosition pos, bool preferRegister = false) const;
569 : void removeRange(LiveRange* range);
570 : void addRange(LiveRange* range);
571 :
572 0 : void removeRangeAndIncrement(LiveRange::RegisterLinkIterator& iter) {
573 0 : ranges_.removeAndIncrement(iter);
574 0 : }
575 :
576 902 : LiveBundle* firstBundle() const {
577 902 : return firstRange()->bundle();
578 : }
579 :
580 : MOZ_MUST_USE bool addInitialRange(TempAllocator& alloc, CodePosition from, CodePosition to);
581 : void addInitialUse(UsePosition* use);
582 : void setInitialDefinition(CodePosition from);
583 : };
584 :
// A sequence of code positions, for telling BacktrackingAllocator::splitAt
586 : // where to split.
587 : typedef js::Vector<CodePosition, 4, SystemAllocPolicy> SplitPositionVector;
588 :
589 8 : class BacktrackingAllocator : protected RegisterAllocator
590 : {
591 : friend class C1Spewer;
592 : friend class JSONSpewer;
593 :
594 : // This flag is set when testing new allocator modifications.
595 : bool testbed;
596 :
597 : BitSet* liveIn;
598 : FixedList<VirtualRegister> vregs;
599 :
600 : // Allocation state.
601 : StackSlotAllocator stackSlotAllocator;
602 :
603 : // Priority queue element: a bundle and the associated priority.
604 : struct QueueItem
605 : {
606 : LiveBundle* bundle;
607 :
608 1438 : QueueItem(LiveBundle* bundle, size_t priority)
609 1438 : : bundle(bundle), priority_(priority)
610 1438 : {}
611 :
612 41634 : static size_t priority(const QueueItem& v) {
613 41634 : return v.priority_;
614 : }
615 :
616 : private:
617 : size_t priority_;
618 : };
619 :
620 : PriorityQueue<QueueItem, QueueItem, 0, SystemAllocPolicy> allocationQueue;
621 :
622 : typedef SplayTree<LiveRange*, LiveRange> LiveRangeSet;
623 :
624 : // Each physical register is associated with the set of ranges over which
625 : // that register is currently allocated.
626 : struct PhysicalRegister {
627 : bool allocatable;
628 : AnyRegister reg;
629 : LiveRangeSet allocations;
630 :
631 512 : PhysicalRegister() : allocatable(false) {}
632 : };
633 : mozilla::Array<PhysicalRegister, AnyRegister::Total> registers;
634 :
635 : // Ranges of code which are considered to be hot, for which good allocation
636 : // should be prioritized.
637 : LiveRangeSet hotcode;
638 :
639 : struct CallRange : public TempObject, public InlineListNode<CallRange> {
640 : LiveRange::Range range;
641 :
642 541 : CallRange(CodePosition from, CodePosition to)
643 541 : : range(from, to)
644 541 : {}
645 :
646 : // Comparator for use in splay tree.
647 6184 : static int compare(CallRange* v0, CallRange* v1) {
648 6184 : if (v0->range.to <= v1->range.from)
649 5228 : return -1;
650 956 : if (v0->range.from >= v1->range.to)
651 590 : return 1;
652 366 : return 0;
653 : }
654 : };
655 :
656 : // Ranges where all registers must be spilled due to call instructions.
657 : typedef InlineList<CallRange> CallRangeList;
658 : CallRangeList callRangesList;
659 : SplayTree<CallRange*, CallRange> callRanges;
660 :
661 : // Information about an allocated stack slot.
662 : struct SpillSlot : public TempObject, public InlineForwardListNode<SpillSlot> {
663 : LStackSlot alloc;
664 : LiveRangeSet allocated;
665 :
666 84 : SpillSlot(uint32_t slot, LifoAlloc* alloc)
667 84 : : alloc(slot), allocated(alloc)
668 84 : {}
669 : };
670 : typedef InlineForwardList<SpillSlot> SpillSlotList;
671 :
672 : // All allocated slots of each width.
673 : SpillSlotList normalSlots, doubleSlots, quadSlots;
674 :
675 : Vector<LiveBundle*, 4, SystemAllocPolicy> spilledBundles;
676 :
677 : public:
678 8 : BacktrackingAllocator(MIRGenerator* mir, LIRGenerator* lir, LIRGraph& graph, bool testbed)
679 8 : : RegisterAllocator(mir, lir, graph),
680 : testbed(testbed),
681 : liveIn(nullptr),
682 8 : callRanges(nullptr)
683 8 : { }
684 :
685 : MOZ_MUST_USE bool go();
686 :
687 : private:
688 :
689 : typedef Vector<LiveRange*, 4, SystemAllocPolicy> LiveRangeVector;
690 : typedef Vector<LiveBundle*, 4, SystemAllocPolicy> LiveBundleVector;
691 :
692 : // Liveness methods.
693 : MOZ_MUST_USE bool init();
694 : MOZ_MUST_USE bool buildLivenessInfo();
695 :
696 : MOZ_MUST_USE bool addInitialFixedRange(AnyRegister reg, CodePosition from, CodePosition to);
697 :
698 3241 : VirtualRegister& vreg(const LDefinition* def) {
699 3241 : return vregs[def->virtualRegister()];
700 : }
701 13423 : VirtualRegister& vreg(const LAllocation* alloc) {
702 13423 : MOZ_ASSERT(alloc->isUse());
703 13423 : return vregs[alloc->toUse()->virtualRegister()];
704 : }
705 :
706 : // Allocation methods.
707 : MOZ_MUST_USE bool tryMergeBundles(LiveBundle* bundle0, LiveBundle* bundle1);
708 : MOZ_MUST_USE bool tryMergeReusedRegister(VirtualRegister& def, VirtualRegister& input);
709 : MOZ_MUST_USE bool mergeAndQueueRegisters();
710 : MOZ_MUST_USE bool tryAllocateFixed(LiveBundle* bundle, Requirement requirement,
711 : bool* success, bool* pfixed, LiveBundleVector& conflicting);
712 : MOZ_MUST_USE bool tryAllocateNonFixed(LiveBundle* bundle, Requirement requirement,
713 : Requirement hint, bool* success, bool* pfixed,
714 : LiveBundleVector& conflicting);
715 : MOZ_MUST_USE bool processBundle(MIRGenerator* mir, LiveBundle* bundle);
716 : MOZ_MUST_USE bool computeRequirement(LiveBundle* bundle, Requirement *prequirement,
717 : Requirement *phint);
718 : MOZ_MUST_USE bool tryAllocateRegister(PhysicalRegister& r, LiveBundle* bundle, bool* success,
719 : bool* pfixed, LiveBundleVector& conflicting);
720 : MOZ_MUST_USE bool evictBundle(LiveBundle* bundle);
721 : MOZ_MUST_USE bool splitAndRequeueBundles(LiveBundle* bundle,
722 : const LiveBundleVector& newBundles);
723 : MOZ_MUST_USE bool spill(LiveBundle* bundle);
724 : MOZ_MUST_USE bool tryAllocatingRegistersForSpillBundles();
725 :
726 : bool isReusedInput(LUse* use, LNode* ins, bool considerCopy);
727 : bool isRegisterUse(UsePosition* use, LNode* ins, bool considerCopy = false);
728 : bool isRegisterDefinition(LiveRange* range);
729 : MOZ_MUST_USE bool pickStackSlot(SpillSet* spill);
730 : MOZ_MUST_USE bool insertAllRanges(LiveRangeSet& set, LiveBundle* bundle);
731 :
732 : // Reification methods.
733 : MOZ_MUST_USE bool pickStackSlots();
734 : MOZ_MUST_USE bool resolveControlFlow();
735 : MOZ_MUST_USE bool reifyAllocations();
736 : MOZ_MUST_USE bool populateSafepoints();
737 : MOZ_MUST_USE bool annotateMoveGroups();
738 : MOZ_MUST_USE bool deadRange(LiveRange* range);
739 : size_t findFirstNonCallSafepoint(CodePosition from);
740 : size_t findFirstSafepoint(CodePosition pos, size_t startFrom);
741 : void addLiveRegistersForRange(VirtualRegister& reg, LiveRange* range);
742 :
743 550 : MOZ_MUST_USE bool addMove(LMoveGroup* moves, LiveRange* from, LiveRange* to,
744 : LDefinition::Type type) {
745 550 : LAllocation fromAlloc = from->bundle()->allocation();
746 550 : LAllocation toAlloc = to->bundle()->allocation();
747 550 : MOZ_ASSERT(fromAlloc != toAlloc);
748 550 : return moves->add(fromAlloc, toAlloc, type);
749 : }
750 :
751 293 : MOZ_MUST_USE bool moveInput(LInstruction* ins, LiveRange* from, LiveRange* to,
752 : LDefinition::Type type) {
753 293 : if (from->bundle()->allocation() == to->bundle()->allocation())
754 3 : return true;
755 290 : LMoveGroup* moves = getInputMoveGroup(ins);
756 290 : return addMove(moves, from, to, type);
757 : }
758 :
759 0 : MOZ_MUST_USE bool moveAfter(LInstruction* ins, LiveRange* from, LiveRange* to,
760 : LDefinition::Type type) {
761 0 : if (from->bundle()->allocation() == to->bundle()->allocation())
762 0 : return true;
763 0 : LMoveGroup* moves = getMoveGroupAfter(ins);
764 0 : return addMove(moves, from, to, type);
765 : }
766 :
767 602 : MOZ_MUST_USE bool moveAtExit(LBlock* block, LiveRange* from, LiveRange* to,
768 : LDefinition::Type type) {
769 602 : if (from->bundle()->allocation() == to->bundle()->allocation())
770 541 : return true;
771 61 : LMoveGroup* moves = block->getExitMoveGroup(alloc());
772 61 : return addMove(moves, from, to, type);
773 : }
774 :
775 734 : MOZ_MUST_USE bool moveAtEntry(LBlock* block, LiveRange* from, LiveRange* to,
776 : LDefinition::Type type) {
777 734 : if (from->bundle()->allocation() == to->bundle()->allocation())
778 535 : return true;
779 199 : LMoveGroup* moves = block->getEntryMoveGroup(alloc());
780 199 : return addMove(moves, from, to, type);
781 : }
782 :
783 : // Debugging methods.
784 : void dumpAllocations();
785 :
786 : struct PrintLiveRange;
787 :
788 : bool minimalDef(LiveRange* range, LNode* ins);
789 : bool minimalUse(LiveRange* range, UsePosition* use);
790 : bool minimalBundle(LiveBundle* bundle, bool* pfixed = nullptr);
791 :
792 : // Heuristic methods.
793 :
794 : size_t computePriority(LiveBundle* bundle);
795 : size_t computeSpillWeight(LiveBundle* bundle);
796 :
797 : size_t maximumSpillWeight(const LiveBundleVector& bundles);
798 :
799 : MOZ_MUST_USE bool chooseBundleSplit(LiveBundle* bundle, bool fixed, LiveBundle* conflict);
800 :
801 : MOZ_MUST_USE bool splitAt(LiveBundle* bundle, const SplitPositionVector& splitPositions);
802 : MOZ_MUST_USE bool trySplitAcrossHotcode(LiveBundle* bundle, bool* success);
803 : MOZ_MUST_USE bool trySplitAfterLastRegisterUse(LiveBundle* bundle, LiveBundle* conflict,
804 : bool* success);
805 : MOZ_MUST_USE bool trySplitBeforeFirstRegisterUse(LiveBundle* bundle, LiveBundle* conflict,
806 : bool* success);
807 : MOZ_MUST_USE bool splitAcrossCalls(LiveBundle* bundle);
808 :
809 78 : bool compilingWasm() {
810 78 : return mir->info().compilingWasm();
811 : }
812 :
813 : void dumpVregs();
814 : };
815 :
816 : } // namespace jit
817 : } // namespace js
818 :
819 : #endif /* jit_BacktrackingAllocator_h */
|