Line data Source code
1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : * vim: set ts=8 sts=4 et sw=4 tw=99:
3 : * This Source Code Form is subject to the terms of the Mozilla Public
4 : * License, v. 2.0. If a copy of the MPL was not distributed with this
5 : * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 :
7 : #include "jit/AlignmentMaskAnalysis.h"
8 : #include "jit/MIR.h"
9 : #include "jit/MIRGraph.h"
10 :
11 : using namespace js;
12 : using namespace jit;
13 :
static bool
IsAlignmentMask(uint32_t m)
{
    // An alignment mask has the shape 11...100...0: a (possibly empty) run of
    // leading ones followed by a (possibly empty) run of trailing zeros.
    // Equivalently, the complement ~m must be of the shape 00...011...1,
    // i.e. ~m + 1 is either zero or a power of two, which the classic
    // x & (x + 1) == 0 test detects. This is the same computation as
    // (-m & ~m) == 0, since -m == ~m + 1 in unsigned arithmetic.
    uint32_t inverted = ~m;
    return (inverted & (inverted + 1)) == 0;
}
20 :
21 : static void
22 0 : AnalyzeAsmHeapAddress(MDefinition* ptr, MIRGraph& graph)
23 : {
24 : // Fold (a+i)&m to (a&m)+i, provided that this doesn't change the result,
25 : // since the users of the BitAnd include heap accesses. This will expose
26 : // the redundancy for GVN when expressions like this:
27 : // a&m
28 : // (a+1)&m,
29 : // (a+2)&m,
30 : // are transformed into this:
31 : // a&m
32 : // (a&m)+1
33 : // (a&m)+2
34 : // and it will allow the constants to be folded by the
35 : // EffectiveAddressAnalysis pass.
36 : //
37 : // Putting the add on the outside might seem like it exposes other users of
38 : // the expression to the possibility of i32 overflow, if we aren't in wasm
39 : // and they aren't naturally truncating. However, since we use MAdd::New
40 : // with MIRType::Int32, we make sure that the value is truncated, just as it
41 : // would be by the MBitAnd.
42 :
43 0 : MOZ_ASSERT(IsCompilingWasm());
44 :
45 0 : if (!ptr->isBitAnd())
46 0 : return;
47 :
48 0 : MDefinition* lhs = ptr->toBitAnd()->getOperand(0);
49 0 : MDefinition* rhs = ptr->toBitAnd()->getOperand(1);
50 0 : if (lhs->isConstant())
51 0 : mozilla::Swap(lhs, rhs);
52 0 : if (!lhs->isAdd() || !rhs->isConstant())
53 0 : return;
54 :
55 0 : MDefinition* op0 = lhs->toAdd()->getOperand(0);
56 0 : MDefinition* op1 = lhs->toAdd()->getOperand(1);
57 0 : if (op0->isConstant())
58 0 : mozilla::Swap(op0, op1);
59 0 : if (!op1->isConstant())
60 0 : return;
61 :
62 0 : uint32_t i = op1->toConstant()->toInt32();
63 0 : uint32_t m = rhs->toConstant()->toInt32();
64 0 : if (!IsAlignmentMask(m) || (i & m) != i)
65 0 : return;
66 :
67 : // The pattern was matched! Produce the replacement expression.
68 0 : MInstruction* and_ = MBitAnd::New(graph.alloc(), op0, rhs, MIRType::Int32);
69 0 : ptr->block()->insertBefore(ptr->toBitAnd(), and_);
70 0 : MInstruction* add = MAdd::New(graph.alloc(), and_, op1, MIRType::Int32);
71 0 : ptr->block()->insertBefore(ptr->toBitAnd(), add);
72 0 : ptr->replaceAllUsesWith(add);
73 0 : ptr->block()->discard(ptr->toBitAnd());
74 : }
75 :
76 : bool
77 0 : AlignmentMaskAnalysis::analyze()
78 : {
79 0 : for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
80 0 : for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
81 0 : if (!graph_.alloc().ensureBallast())
82 0 : return false;
83 :
84 : // Note that we don't check for MAsmJSCompareExchangeHeap
85 : // or MAsmJSAtomicBinopHeap, because the backend and the OOB
86 : // mechanism don't support non-zero offsets for them yet.
87 0 : if (i->isAsmJSLoadHeap())
88 0 : AnalyzeAsmHeapAddress(i->toAsmJSLoadHeap()->base(), graph_);
89 0 : else if (i->isAsmJSStoreHeap())
90 0 : AnalyzeAsmHeapAddress(i->toAsmJSStoreHeap()->base(), graph_);
91 : }
92 : }
93 0 : return true;
94 : }
|