//=- AArch64VectorByElementOpt.cpp - AArch64 vector by element inst opt pass =//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs optimization for vector by element
// SIMD instructions.
//
// Certain SIMD instructions with vector element operands are not efficient.
// Rewrite them into SIMD instructions with vector operands. This rewrite
// is driven by the latency of the instructions.
//
// Example:
//    fmla v0.4s, v1.4s, v2.s[1]
//    is rewritten into
//    dup  v3.4s, v2.s[1]
//    fmla v0.4s, v1.4s, v3.4s
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/Debug.h"
#include <map>

using namespace llvm;

#define DEBUG_TYPE "aarch64-vectorbyelement-opt"

STATISTIC(NumModifiedInstr,
          "Number of vector by element instructions modified");

#define AARCH64_VECTOR_BY_ELEMENT_OPT_NAME                                     \
  "AArch64 vector by element instruction optimization pass"

namespace {

struct AArch64VectorByElementOpt : public MachineFunctionPass {
  static char ID;

  AArch64VectorByElementOpt() : MachineFunctionPass(ID) {
    initializeAArch64VectorByElementOptPass(*PassRegistry::getPassRegistry());
  }

  const TargetInstrInfo *TII;
  MachineRegisterInfo *MRI;
  TargetSchedModel SchedModel;

  /// Based only on latency of instructions, determine if it is cost efficient
  /// to replace the instruction InstDesc by the two instructions InstDescRep1
  /// and InstDescRep2.
  /// Return true if replacement is recommended.
  bool
  shouldReplaceInstruction(MachineFunction *MF, const MCInstrDesc *InstDesc,
                           const MCInstrDesc *InstDescRep1,
                           const MCInstrDesc *InstDescRep2,
                           std::map<unsigned, bool> &VecInstElemTable) const;
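
  // For illustration, with hypothetical latencies (not taken from any real
  // schedule model): if an indexed FMLA costs 10 cycles while DUP costs 3
  // and vector FMLA costs 5, the replacement pays off since 10 > 3 + 5;
  // with costs 7, 3, and 5 it does not, since 7 <= 8.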

  /// Determine if we need to exit the vector by element instruction
  /// optimization pass early. This makes sure that targets with no need
  /// for this optimization do not spend any compile time on this pass.
  /// This check is done by comparing the latency of an indexed FMLA
  /// instruction to the latency of the DUP + the latency of a vector
  /// FMLA instruction. We do not check on other related instructions such
  /// as FMLS as we assume that if the situation shows up for one
  /// instruction, then it is likely to show up for the related ones.
  /// Return true if early exit of the pass is recommended.
  bool earlyExitVectElement(MachineFunction *MF);

  /// Check whether an equivalent DUP instruction has already been
  /// created or not.
  /// Return true when the dup instruction already exists. In this case,
  /// DestReg will point to the destination of the already created DUP.
  bool reuseDUP(MachineInstr &MI, unsigned DupOpcode, unsigned SrcReg,
                unsigned LaneNumber, unsigned *DestReg) const;
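
  // For example, in a rewritten sequence such as (a sketch, not real pass
  // output):
  //    dup  v3.4s, v2.s[1]
  //    fmul v0.4s, v1.4s, v3.4s
  //    fmul v5.4s, v4.4s, v3.4s
  // the second multiply reuses the DUP created for the first one instead of
  // materializing a second copy of the same lane.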

  /// Certain SIMD instructions with vector element operands are not efficient.
  /// Rewrite them into SIMD instructions with vector operands. This rewrite
  /// is driven by the latency of the instructions.
  /// Return true if the SIMD instruction is modified.
  bool optimizeVectElement(MachineInstr &MI,
                           std::map<unsigned, bool> *VecInstElemTable) const;

  bool runOnMachineFunction(MachineFunction &Fn) override;

  StringRef getPassName() const override {
    return AARCH64_VECTOR_BY_ELEMENT_OPT_NAME;
  }
};

char AArch64VectorByElementOpt::ID = 0;

} // end anonymous namespace

INITIALIZE_PASS(AArch64VectorByElementOpt, "aarch64-vectorbyelement-opt",
                AARCH64_VECTOR_BY_ELEMENT_OPT_NAME, false, false)

/// Based only on latency of instructions, determine if it is cost efficient
/// to replace the instruction InstDesc by the two instructions InstDescRep1
/// and InstDescRep2. Note that it is assumed in this function that an
/// instruction of type InstDesc is always replaced by the same two
/// instructions as results are cached here.
/// Return true if replacement is recommended.
bool AArch64VectorByElementOpt::shouldReplaceInstruction(
    MachineFunction *MF, const MCInstrDesc *InstDesc,
    const MCInstrDesc *InstDescRep1, const MCInstrDesc *InstDescRep2,
    std::map<unsigned, bool> &VecInstElemTable) const {
  // Check if the replacement decision is already available in the cached
  // table; if so, return it.
  if (!VecInstElemTable.empty() &&
      VecInstElemTable.find(InstDesc->getOpcode()) != VecInstElemTable.end())
    return VecInstElemTable[InstDesc->getOpcode()];

  unsigned SCIdx = InstDesc->getSchedClass();
  unsigned SCIdxRep1 = InstDescRep1->getSchedClass();
  unsigned SCIdxRep2 = InstDescRep2->getSchedClass();
  const MCSchedClassDesc *SCDesc =
      SchedModel.getMCSchedModel()->getSchedClassDesc(SCIdx);
  const MCSchedClassDesc *SCDescRep1 =
      SchedModel.getMCSchedModel()->getSchedClassDesc(SCIdxRep1);
  const MCSchedClassDesc *SCDescRep2 =
      SchedModel.getMCSchedModel()->getSchedClassDesc(SCIdxRep2);

  // If a subtarget does not define resources for any of the instructions
  // of interest, then return false for no replacement.
  if (!SCDesc->isValid() || SCDesc->isVariant() || !SCDescRep1->isValid() ||
      SCDescRep1->isVariant() || !SCDescRep2->isValid() ||
      SCDescRep2->isVariant()) {
    VecInstElemTable[InstDesc->getOpcode()] = false;
    return false;
  }
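
  // Replace only when the latency of the single indexed instruction exceeds
  // the combined latency of the two replacement instructions.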
  if (SchedModel.computeInstrLatency(InstDesc->getOpcode()) >
      SchedModel.computeInstrLatency(InstDescRep1->getOpcode()) +
          SchedModel.computeInstrLatency(InstDescRep2->getOpcode())) {
    VecInstElemTable[InstDesc->getOpcode()] = true;
    return true;
  }

  VecInstElemTable[InstDesc->getOpcode()] = false;
  return false;
}

/// Determine if we need to exit the vector by element instruction
/// optimization pass early. This makes sure that targets with no need
/// for this optimization do not spend any compile time on this pass.
/// This check is done by comparing the latency of an indexed FMLA
/// instruction to the latency of the DUP + the latency of a vector
/// FMLA instruction. We do not check on other related instructions such
/// as FMLS as we assume that if the situation shows up for one
/// instruction, then it is likely to show up for the related ones.
/// Return true if early exit of the pass is recommended.
bool AArch64VectorByElementOpt::earlyExitVectElement(MachineFunction *MF) {
  std::map<unsigned, bool> VecInstElemTable;
  const MCInstrDesc *IndexMulMCID = &TII->get(AArch64::FMLAv4i32_indexed);
  const MCInstrDesc *DupMCID = &TII->get(AArch64::DUPv4i32lane);
  const MCInstrDesc *MulMCID = &TII->get(AArch64::FMULv4f32);

  if (!shouldReplaceInstruction(MF, IndexMulMCID, DupMCID, MulMCID,
                                VecInstElemTable))
    return true;
  return false;
}

/// Check whether an equivalent DUP instruction has already been
/// created or not.
/// Return true when the dup instruction already exists. In this case,
/// DestReg will point to the destination of the already created DUP.
bool AArch64VectorByElementOpt::reuseDUP(MachineInstr &MI, unsigned DupOpcode,
                                         unsigned SrcReg, unsigned LaneNumber,
                                         unsigned *DestReg) const {
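  // Walk backward from MI to the beginning of the basic block, looking for
  // an earlier DUP of the same source register and lane whose result can be
  // reused.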
  for (MachineBasicBlock::iterator MII = MI, MIE = MI.getParent()->begin();
       MII != MIE;) {
    MII--;
    MachineInstr *CurrentMI = &*MII;

    if (CurrentMI->getOpcode() == DupOpcode &&
        CurrentMI->getNumOperands() == 3 &&
        CurrentMI->getOperand(1).getReg() == SrcReg &&
        CurrentMI->getOperand(2).getImm() == LaneNumber) {
      *DestReg = CurrentMI->getOperand(0).getReg();
      return true;
    }
  }

  return false;
}

/// Certain SIMD instructions with vector element operands are not efficient.
/// Rewrite them into SIMD instructions with vector operands. This rewrite
/// is driven by the latency of the instructions.
/// The instructions of concern are, for the time being, fmla, fmls, fmul,
/// and fmulx, and hence they are hardcoded.
///
/// Example:
///    fmla v0.4s, v1.4s, v2.s[1]
///    is rewritten into
///    dup  v3.4s, v2.s[1]      // DUP not necessary if redundant
///    fmla v0.4s, v1.4s, v3.4s
/// Return true if the SIMD instruction is modified.
bool AArch64VectorByElementOpt::optimizeVectElement(
    MachineInstr &MI, std::map<unsigned, bool> *VecInstElemTable) const {
  const MCInstrDesc *MulMCID, *DupMCID;
  const TargetRegisterClass *RC = &AArch64::FPR128RegClass;

  switch (MI.getOpcode()) {
  default:
    return false;

  // 4X32 instructions
  case AArch64::FMLAv4i32_indexed:
    DupMCID = &TII->get(AArch64::DUPv4i32lane);
    MulMCID = &TII->get(AArch64::FMLAv4f32);
    break;
  case AArch64::FMLSv4i32_indexed:
    DupMCID = &TII->get(AArch64::DUPv4i32lane);
    MulMCID = &TII->get(AArch64::FMLSv4f32);
    break;
  case AArch64::FMULXv4i32_indexed:
    DupMCID = &TII->get(AArch64::DUPv4i32lane);
    MulMCID = &TII->get(AArch64::FMULXv4f32);
    break;
  case AArch64::FMULv4i32_indexed:
    DupMCID = &TII->get(AArch64::DUPv4i32lane);
    MulMCID = &TII->get(AArch64::FMULv4f32);
    break;

  // 2X64 instructions
  case AArch64::FMLAv2i64_indexed:
    DupMCID = &TII->get(AArch64::DUPv2i64lane);
    MulMCID = &TII->get(AArch64::FMLAv2f64);
    break;
  case AArch64::FMLSv2i64_indexed:
    DupMCID = &TII->get(AArch64::DUPv2i64lane);
    MulMCID = &TII->get(AArch64::FMLSv2f64);
    break;
  case AArch64::FMULXv2i64_indexed:
    DupMCID = &TII->get(AArch64::DUPv2i64lane);
    MulMCID = &TII->get(AArch64::FMULXv2f64);
    break;
  case AArch64::FMULv2i64_indexed:
    DupMCID = &TII->get(AArch64::DUPv2i64lane);
    MulMCID = &TII->get(AArch64::FMULv2f64);
    break;

  // 2X32 instructions
  case AArch64::FMLAv2i32_indexed:
    RC = &AArch64::FPR64RegClass;
    DupMCID = &TII->get(AArch64::DUPv2i32lane);
    MulMCID = &TII->get(AArch64::FMLAv2f32);
    break;
  case AArch64::FMLSv2i32_indexed:
    RC = &AArch64::FPR64RegClass;
    DupMCID = &TII->get(AArch64::DUPv2i32lane);
    MulMCID = &TII->get(AArch64::FMLSv2f32);
    break;
  case AArch64::FMULXv2i32_indexed:
    RC = &AArch64::FPR64RegClass;
    DupMCID = &TII->get(AArch64::DUPv2i32lane);
    MulMCID = &TII->get(AArch64::FMULXv2f32);
    break;
  case AArch64::FMULv2i32_indexed:
    RC = &AArch64::FPR64RegClass;
    DupMCID = &TII->get(AArch64::DUPv2i32lane);
    MulMCID = &TII->get(AArch64::FMULv2f32);
    break;
  }

  if (!shouldReplaceInstruction(MI.getParent()->getParent(),
                                &TII->get(MI.getOpcode()), DupMCID, MulMCID,
                                *VecInstElemTable))
    return false;

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock &MBB = *MI.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Get the operands of the current SIMD arithmetic instruction.
  unsigned MulDest = MI.getOperand(0).getReg();
  unsigned SrcReg0 = MI.getOperand(1).getReg();
  unsigned Src0IsKill = getKillRegState(MI.getOperand(1).isKill());
  unsigned SrcReg1 = MI.getOperand(2).getReg();
  unsigned Src1IsKill = getKillRegState(MI.getOperand(2).isKill());
  unsigned DupDest;

  // Instructions of interest have either 4 or 5 operands: the accumulating
  // fmla/fmls forms carry an extra source register.
  if (MI.getNumOperands() == 5) {
    unsigned SrcReg2 = MI.getOperand(3).getReg();
    unsigned Src2IsKill = getKillRegState(MI.getOperand(3).isKill());
    unsigned LaneNumber = MI.getOperand(4).getImm();

    // Create a new DUP instruction. Note that if an equivalent DUP
    // instruction has already been created before, then use that one
    // instead of creating a new one.
    if (!reuseDUP(MI, DupMCID->getOpcode(), SrcReg2, LaneNumber, &DupDest)) {
      DupDest = MRI.createVirtualRegister(RC);
      BuildMI(MBB, MI, DL, *DupMCID, DupDest)
          .addReg(SrcReg2, Src2IsKill)
          .addImm(LaneNumber);
    }
    BuildMI(MBB, MI, DL, *MulMCID, MulDest)
        .addReg(SrcReg0, Src0IsKill)
        .addReg(SrcReg1, Src1IsKill)
        .addReg(DupDest, Src2IsKill);
  } else if (MI.getNumOperands() == 4) {
    unsigned LaneNumber = MI.getOperand(3).getImm();

    if (!reuseDUP(MI, DupMCID->getOpcode(), SrcReg1, LaneNumber, &DupDest)) {
      DupDest = MRI.createVirtualRegister(RC);
      BuildMI(MBB, MI, DL, *DupMCID, DupDest)
          .addReg(SrcReg1, Src1IsKill)
          .addImm(LaneNumber);
    }
    BuildMI(MBB, MI, DL, *MulMCID, MulDest)
        .addReg(SrcReg0, Src0IsKill)
        .addReg(DupDest, Src1IsKill);
  } else {
    return false;
  }

  ++NumModifiedInstr;
  return true;
}

bool AArch64VectorByElementOpt::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  TII = MF.getSubtarget().getInstrInfo();
  MRI = &MF.getRegInfo();
  const TargetSubtargetInfo &ST = MF.getSubtarget();
  const AArch64InstrInfo *AAII =
      static_cast<const AArch64InstrInfo *>(ST.getInstrInfo());
  if (!AAII)
    return false;
  SchedModel.init(ST.getSchedModel(), &ST, AAII);
  if (!SchedModel.hasInstrSchedModel())
    return false;

  // A simple check to exit this pass early for targets that do not need it.
  if (earlyExitVectElement(&MF))
    return false;

  bool Changed = false;
  std::map<unsigned, bool> VecInstElemTable;
  SmallVector<MachineInstr *, 8> RemoveMIs;

  for (MachineBasicBlock &MBB : MF) {
    for (MachineBasicBlock::iterator MII = MBB.begin(), MIE = MBB.end();
         MII != MIE;) {
      MachineInstr &MI = *MII;
      if (optimizeVectElement(MI, &VecInstElemTable)) {
        // Add MI to the list of instructions to be removed given that it
        // has been replaced.
        RemoveMIs.push_back(&MI);
        Changed = true;
      }
      ++MII;
    }
  }

  for (MachineInstr *MI : RemoveMIs)
    MI->eraseFromParent();

  return Changed;
}

/// createAArch64VectorByElementOptPass - returns an instance of the
/// vector by element optimization pass.
FunctionPass *llvm::createAArch64VectorByElementOptPass() {
  return new AArch64VectorByElementOpt();
}
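
// Note: a machine-function pass like this one is typically wired into the
// backend via the target's pass configuration (for AArch64, the addPass
// calls in AArch64TargetMachine.cpp); the exact insertion point is a detail
// of that file rather than of this one.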