//===-- Thumb2SizeReduction.cpp - Thumb2 code size reduction pass -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "Thumb2InstrInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/Function.h" // To access Function attributes
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

#define DEBUG_TYPE "t2-reduce-size"
STATISTIC(NumNarrows,  "Number of 32-bit instrs reduced to 16-bit ones");
STATISTIC(Num2Addrs,   "Number of 32-bit instrs reduced to 2addr 16-bit ones");
STATISTIC(NumLdSts,    "Number of 32-bit load / store reduced to 16-bit ones");
static cl::opt<int> ReduceLimit("t2-reduce-limit",
                                cl::init(-1), cl::Hidden);
static cl::opt<int> ReduceLimit2Addr("t2-reduce-limit2",
                                     cl::init(-1), cl::Hidden);
static cl::opt<int> ReduceLimitLdSt("t2-reduce-limit3",
                                    cl::init(-1), cl::Hidden);
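
// The three hidden limits above exist to bisect reduction bugs. A sketch of
// how one might use them from llc (the flag names come from this file; the
// input file is hypothetical):
//
//   llc -mtriple=thumbv7-none-eabi -t2-reduce-limit=8 test.ll -o test.s
//
// With a non-negative limit, the pass stops performing that flavor of
// reduction once the corresponding statistic reaches the limit, which lets a
// binary search isolate the first bad transformation.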
namespace {
  /// ReduceTable - A static table with information on mapping from wide
  /// opcodes to narrow ones.
  struct ReduceEntry {
    uint16_t WideOpc;      // Wide opcode
    uint16_t NarrowOpc1;   // Narrow opcode to transform to
    uint16_t NarrowOpc2;   // Narrow opcode when it's two-address
    uint8_t  Imm1Limit;    // Limit of immediate field (bits)
    uint8_t  Imm2Limit;    // Limit of immediate field when it's two-address
    unsigned LowRegs1 : 1; // Only possible if low-registers are used
    unsigned LowRegs2 : 1; // Only possible if low-registers are used (2addr)
    unsigned PredCC1  : 2; // 0 - If predicated, cc is on and vice versa.
                           // 1 - No cc field.
                           // 2 - Always set CPSR.
    unsigned PredCC2  : 2;
    unsigned PartFlag : 1; // 16-bit instruction does partial flag update
    unsigned Special  : 1; // Needs to be dealt with specially
    unsigned AvoidMovs: 1; // Avoid movs with shifter operand (for Swift)
  };
  static const ReduceEntry ReduceTable[] = {
    // Wide,        Narrow1,      Narrow2,     imm1,imm2, lo1, lo2, P/C,PF,S,AM
    { ARM::t2ADCrr, 0,            ARM::tADC,     0,   0,    0,   1,  0,0, 0,0,0 },
    { ARM::t2ADDri, ARM::tADDi3,  ARM::tADDi8,   3,   8,    1,   1,  0,0, 0,1,0 },
    { ARM::t2ADDrr, ARM::tADDrr,  ARM::tADDhirr, 0,   0,    1,   0,  0,1, 0,0,0 },
    { ARM::t2ADDSri,ARM::tADDi3,  ARM::tADDi8,   3,   8,    1,   1,  2,2, 0,1,0 },
    { ARM::t2ADDSrr,ARM::tADDrr,  0,             0,   0,    1,   0,  2,0, 0,1,0 },
    { ARM::t2ANDrr, 0,            ARM::tAND,     0,   0,    0,   1,  0,0, 1,0,0 },
    { ARM::t2ASRri, ARM::tASRri,  0,             5,   0,    1,   0,  0,0, 1,0,1 },
    { ARM::t2ASRrr, 0,            ARM::tASRrr,   0,   0,    0,   1,  0,0, 1,0,1 },
    { ARM::t2BICrr, 0,            ARM::tBIC,     0,   0,    0,   1,  0,0, 1,0,0 },
    //FIXME: Disable CMN, as CCodes are backwards from compare expectations
    //{ ARM::t2CMNrr, ARM::tCMN,  0,             0,   0,    1,   0,  2,0, 0,0,0 },
    { ARM::t2CMNzrr, ARM::tCMNz,  0,             0,   0,    1,   0,  2,0, 0,0,0 },
    { ARM::t2CMPri, ARM::tCMPi8,  0,             8,   0,    1,   0,  2,0, 0,0,0 },
    { ARM::t2CMPrr, ARM::tCMPhir, 0,             0,   0,    0,   0,  2,0, 0,1,0 },
    { ARM::t2EORrr, 0,            ARM::tEOR,     0,   0,    0,   1,  0,0, 1,0,0 },
    // FIXME: adr.n immediate offset must be multiple of 4.
    //{ ARM::t2LEApcrelJT,ARM::tLEApcrelJT, 0,   0,   0,    1,   0,  1,0, 0,0,0 },
    { ARM::t2LSLri, ARM::tLSLri,  0,             5,   0,    1,   0,  0,0, 1,0,1 },
    { ARM::t2LSLrr, 0,            ARM::tLSLrr,   0,   0,    0,   1,  0,0, 1,0,1 },
    { ARM::t2LSRri, ARM::tLSRri,  0,             5,   0,    1,   0,  0,0, 1,0,1 },
    { ARM::t2LSRrr, 0,            ARM::tLSRrr,   0,   0,    0,   1,  0,0, 1,0,1 },
    { ARM::t2MOVi,  ARM::tMOVi8,  0,             8,   0,    1,   0,  0,0, 1,0,0 },
    { ARM::t2MOVi16,ARM::tMOVi8,  0,             8,   0,    1,   0,  0,0, 1,1,0 },
    // FIXME: Do we need the 16-bit 'S' variant?
    { ARM::t2MOVr,  ARM::tMOVr,   0,             0,   0,    0,   0,  1,0, 0,0,0 },
    { ARM::t2MUL,   0,            ARM::tMUL,     0,   0,    0,   1,  0,0, 1,0,0 },
    { ARM::t2MVNr,  ARM::tMVN,    0,             0,   0,    1,   0,  0,0, 0,0,0 },
    { ARM::t2ORRrr, 0,            ARM::tORR,     0,   0,    0,   1,  0,0, 1,0,0 },
    { ARM::t2REV,   ARM::tREV,    0,             0,   0,    1,   0,  1,0, 0,0,0 },
    { ARM::t2REV16, ARM::tREV16,  0,             0,   0,    1,   0,  1,0, 0,0,0 },
    { ARM::t2REVSH, ARM::tREVSH,  0,             0,   0,    1,   0,  1,0, 0,0,0 },
    { ARM::t2RORrr, 0,            ARM::tROR,     0,   0,    0,   1,  0,0, 1,0,0 },
    { ARM::t2RSBri, ARM::tRSB,    0,             0,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2RSBSri,ARM::tRSB,    0,             0,   0,    1,   0,  2,0, 0,1,0 },
    { ARM::t2SBCrr, 0,            ARM::tSBC,     0,   0,    0,   1,  0,0, 0,0,0 },
    { ARM::t2SUBri, ARM::tSUBi3,  ARM::tSUBi8,   3,   8,    1,   1,  0,0, 0,0,0 },
    { ARM::t2SUBrr, ARM::tSUBrr,  0,             0,   0,    1,   0,  0,0, 0,0,0 },
    { ARM::t2SUBSri,ARM::tSUBi3,  ARM::tSUBi8,   3,   8,    1,   1,  2,2, 0,0,0 },
    { ARM::t2SUBSrr,ARM::tSUBrr,  0,             0,   0,    1,   0,  2,0, 0,0,0 },
    { ARM::t2SXTB,  ARM::tSXTB,   0,             0,   0,    1,   0,  1,0, 0,1,0 },
    { ARM::t2SXTH,  ARM::tSXTH,   0,             0,   0,    1,   0,  1,0, 0,1,0 },
    { ARM::t2TSTrr, ARM::tTST,    0,             0,   0,    1,   0,  2,0, 0,0,0 },
    { ARM::t2UXTB,  ARM::tUXTB,   0,             0,   0,    1,   0,  1,0, 0,1,0 },
    { ARM::t2UXTH,  ARM::tUXTH,   0,             0,   0,    1,   0,  1,0, 0,1,0 },

    // FIXME: Clean this up after splitting each Thumb load / store opcode
    // into multiple ones.
    { ARM::t2LDRi12,ARM::tLDRi,   ARM::tLDRspi,  5,   8,    1,   0,  0,0, 0,1,0 },
    { ARM::t2LDRs,  ARM::tLDRr,   0,             0,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2LDRBi12,ARM::tLDRBi, 0,             5,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2LDRBs, ARM::tLDRBr,  0,             0,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2LDRHi12,ARM::tLDRHi, 0,             5,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2LDRHs, ARM::tLDRHr,  0,             0,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2LDRSBs,ARM::tLDRSB,  0,             0,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2LDRSHs,ARM::tLDRSH,  0,             0,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2LDR_POST,ARM::tLDMIA_UPD,0,         0,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2STRi12,ARM::tSTRi,   ARM::tSTRspi,  5,   8,    1,   0,  0,0, 0,1,0 },
    { ARM::t2STRs,  ARM::tSTRr,   0,             0,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2STRBi12,ARM::tSTRBi, 0,             5,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2STRBs, ARM::tSTRBr,  0,             0,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2STRHi12,ARM::tSTRHi, 0,             5,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2STRHs, ARM::tSTRHr,  0,             0,   0,    1,   0,  0,0, 0,1,0 },
    { ARM::t2STR_POST,ARM::tSTMIA_UPD,0,         0,   0,    1,   0,  0,0, 0,1,0 },

    { ARM::t2LDMIA, ARM::tLDMIA,  0,             0,   0,    1,   1,  1,1, 0,1,0 },
    { ARM::t2LDMIA_RET,0,         ARM::tPOP_RET, 0,   0,    1,   1,  1,1, 0,1,0 },
    { ARM::t2LDMIA_UPD,ARM::tLDMIA_UPD,ARM::tPOP,0,   0,    1,   1,  1,1, 0,1,0 },
    // ARM::t2STMIA (with no basereg writeback) has no Thumb1 equivalent.
    // tSTMIA_UPD is a change in semantics which can only be used if the base
    // register is killed. This difference is correctly handled elsewhere.
    { ARM::t2STMIA, ARM::tSTMIA_UPD, 0,          0,   0,    1,   1,  1,1, 0,1,0 },
    { ARM::t2STMIA_UPD,ARM::tSTMIA_UPD, 0,       0,   0,    1,   1,  1,1, 0,1,0 },
    { ARM::t2STMDB_UPD, 0,        ARM::tPUSH,    0,   0,    1,   1,  1,1, 0,1,0 }
  };
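
  // Reading one row as an example: the ARM::t2ADDri entry above says a wide
  // "add.w rd, rn, #imm" can become tADDi3 (3-bit immediate) or, in the
  // two-address form where rd == rn, tADDi8 (8-bit immediate); both forms
  // require low registers (lo1/lo2 == 1), and the entry is marked Special,
  // so ReduceSpecial() gets first crack at it (e.g. for the SP-relative
  // form, which reduces to tADDrSPi instead).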
  class Thumb2SizeReduce : public MachineFunctionPass {
  public:
    static char ID;

    Thumb2SizeReduce(std::function<bool(const Function &)> Ftor);

    const Thumb2InstrInfo *TII;
    const ARMSubtarget *STI;

    bool runOnMachineFunction(MachineFunction &MF) override;

    MachineFunctionProperties getRequiredProperties() const override {
      return MachineFunctionProperties().set(
          MachineFunctionProperties::Property::NoVRegs);
    }

    StringRef getPassName() const override {
      return "Thumb2 instruction size reduction pass";
    }

  private:
    /// ReduceOpcodeMap - Maps wide opcode to index of entry in ReduceTable.
    DenseMap<unsigned, unsigned> ReduceOpcodeMap;

    bool canAddPseudoFlagDep(MachineInstr *Use, bool IsSelfLoop);

    bool VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
                         bool is2Addr, ARMCC::CondCodes Pred,
                         bool LiveCPSR, bool &HasCC, bool &CCDead);

    bool ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
                         const ReduceEntry &Entry);

    bool ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                       const ReduceEntry &Entry, bool LiveCPSR, bool IsSelfLoop);

    /// ReduceTo2Addr - Reduce a 32-bit instruction to a 16-bit two-address
    /// instruction.
    bool ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                       const ReduceEntry &Entry, bool LiveCPSR,
                       bool IsSelfLoop);

    /// ReduceToNarrow - Reduce a 32-bit instruction to a 16-bit
    /// non-two-address instruction.
    bool ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                        const ReduceEntry &Entry, bool LiveCPSR,
                        bool IsSelfLoop);

    /// ReduceMI - Attempt to reduce MI, return true on success.
    bool ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI,
                  bool LiveCPSR, bool IsSelfLoop);

    /// ReduceMBB - Reduce width of instructions in the specified basic block.
    bool ReduceMBB(MachineBasicBlock &MBB);

    bool OptimizeSize;
    bool MinimizeSize;

    // Last instruction to define CPSR in the current block.
    MachineInstr *CPSRDef;
    // Was CPSR last defined by a high latency instruction?
    // When CPSRDef is null, this refers to CPSR defs in predecessors.
    bool HighLatencyCPSR;

    struct MBBInfo {
      // The flags leaving this block have high latency.
      bool HighLatencyCPSR;
      // Has this block been visited yet?
      bool Visited;

      MBBInfo() : HighLatencyCPSR(false), Visited(false) {}
    };

    SmallVector<MBBInfo, 8> BlockInfo;

    std::function<bool(const Function &)> PredicateFtor;
  };
  char Thumb2SizeReduce::ID = 0;
} // end anonymous namespace
Thumb2SizeReduce::Thumb2SizeReduce(std::function<bool(const Function &)> Ftor)
    : MachineFunctionPass(ID), PredicateFtor(std::move(Ftor)) {
  OptimizeSize = MinimizeSize = false;
  for (unsigned i = 0, e = array_lengthof(ReduceTable); i != e; ++i) {
    unsigned FromOpc = ReduceTable[i].WideOpc;
    if (!ReduceOpcodeMap.insert(std::make_pair(FromOpc, i)).second)
      llvm_unreachable("Duplicated entries?");
  }
}
static bool HasImplicitCPSRDef(const MCInstrDesc &MCID) {
  for (const MCPhysReg *Regs = MCID.getImplicitDefs(); *Regs; ++Regs)
    if (*Regs == ARM::CPSR)
      return true;
  return false;
}

// Check for a likely high-latency flag def.
static bool isHighLatencyCPSR(MachineInstr *Def) {
  switch(Def->getOpcode()) {
  case ARM::FMSTAT:
  case ARM::tMUL:
    return true;
  }
  return false;
}
/// canAddPseudoFlagDep - For A9 (and other out-of-order) implementations,
/// the 's' 16-bit instructions partially update CPSR. Abort the
/// transformation to avoid adding a false dependency on the last CPSR-setting
/// instruction, which hurts the out-of-order execution engine's ability
/// to do register renaming magic.
/// This function checks if there is a read-after-write dependency between the
/// last instruction that defines the CPSR and the current instruction. If there
/// is, then there is no harm done since the instruction cannot be retired
/// before the CPSR-setting instruction anyway.
/// Note, we are not doing full dependency analysis here for the sake of compile
/// time. We're not looking for cases like:
/// r0 = muls ...
/// r1 = add.w r0, ...
/// ...
///    = mul.w r1
/// In this case it would have been ok to narrow the mul.w to muls since there
/// is an indirect RAW dependency between the muls and the mul.w.
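//
// A minimal illustration of the direct dependency this function does detect
// (register names chosen for the example):
//
//   adds r0, r1, #1   ; CPSRDef, also defines r0
//   add.w r2, r0, r3  ; Use reads r0
//
// Here Use reads a register defined by the flag-setter, so narrowing Use to
// a flag-setting 16-bit add cannot delay retirement any further, and the
// function returns false (no harmful pseudo flag dependency is added).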
bool
Thumb2SizeReduce::canAddPseudoFlagDep(MachineInstr *Use, bool FirstInSelfLoop) {
  // Disable the check for -Oz (aka OptimizeForSizeHarder).
  if (MinimizeSize || !STI->avoidCPSRPartialUpdate())
    return false;

  if (!CPSRDef)
    // If this BB loops back to itself, conservatively avoid narrowing the
    // first instruction that does partial flag update.
    return HighLatencyCPSR || FirstInSelfLoop;

  SmallSet<unsigned, 2> Defs;
  for (const MachineOperand &MO : CPSRDef->operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == ARM::CPSR)
      continue;
    Defs.insert(Reg);
  }

  for (const MachineOperand &MO : Use->operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (Defs.count(Reg))
      return false;
  }

  // If the current CPSR has high latency, try to avoid the false dependency.
  if (HighLatencyCPSR)
    return true;

  // tMOVi8 usually doesn't start long dependency chains, and there are a lot
  // of them, so always shrink them when CPSR doesn't have high latency.
  if (Use->getOpcode() == ARM::t2MOVi ||
      Use->getOpcode() == ARM::t2MOVi16)
    return false;

  // No read-after-write dependency. The narrowing will add false dependency.
  return true;
}
bool
Thumb2SizeReduce::VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
                                  bool is2Addr, ARMCC::CondCodes Pred,
                                  bool LiveCPSR, bool &HasCC, bool &CCDead) {
  if ((is2Addr  && Entry.PredCC2 == 0) ||
      (!is2Addr && Entry.PredCC1 == 0)) {
    if (Pred == ARMCC::AL) {
      // Not predicated, must set CPSR.
      if (!HasCC) {
        // Original instruction was not setting CPSR, but CPSR is not
        // currently live anyway. It's ok to set it. The CPSR def is
        // dead though.
        if (!LiveCPSR) {
          HasCC = true;
          CCDead = true;
          return true;
        }
        return false;
      }
    } else {
      // Predicated, must not set CPSR.
      if (HasCC)
        return false;
    }
  } else if ((is2Addr  && Entry.PredCC2 == 2) ||
             (!is2Addr && Entry.PredCC1 == 2)) {
    /// Old opcode has an optional def of CPSR.
    if (HasCC)
      return true;
    // If old opcode does not implicitly define CPSR, then it's not ok since
    // these new opcodes' CPSR def is not meant to be thrown away. e.g. CMP.
    if (!HasImplicitCPSRDef(MI->getDesc()))
      return false;
    HasCC = true;
  } else {
    // 16-bit instruction does not set CPSR.
    if (HasCC)
      return false;
  }

  return true;
}
static bool VerifyLowRegs(MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();
  bool isPCOk = (Opc == ARM::t2LDMIA_RET || Opc == ARM::t2LDMIA_UPD);
  bool isLROk = (Opc == ARM::t2STMDB_UPD);
  bool isSPOk = isPCOk || isLROk;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isImplicit())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == ARM::CPSR)
      continue;
    if (isPCOk && Reg == ARM::PC)
      continue;
    if (isLROk && Reg == ARM::LR)
      continue;
    if (Reg == ARM::SP) {
      if (isSPOk)
        continue;
      if (i == 1 && (Opc == ARM::t2LDRi12 || Opc == ARM::t2STRi12))
        // Special case for these ldr / str with sp as base register.
        continue;
    }
    if (!isARMLowRegister(Reg))
      return false;
  }
  return true;
}
bool
Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
                                  const ReduceEntry &Entry) {
  if (ReduceLimitLdSt != -1 && ((int)NumLdSts >= ReduceLimitLdSt))
    return false;

  unsigned Scale = 1;
  bool HasImmOffset = false;
  bool HasShift = false;
  bool HasOffReg = true;
  bool isLdStMul = false;
  unsigned Opc = Entry.NarrowOpc1;
  unsigned OpNum = 3; // First 'rest' of operands.
  uint8_t  ImmLimit = Entry.Imm1Limit;

  switch (Entry.WideOpc) {
  default:
    llvm_unreachable("Unexpected Thumb2 load / store opcode!");
  case ARM::t2LDRi12:
  case ARM::t2STRi12:
    if (MI->getOperand(1).getReg() == ARM::SP) {
      Opc = Entry.NarrowOpc2;
      ImmLimit = Entry.Imm2Limit;
    }

    Scale = 4;
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDR_POST:
  case ARM::t2STR_POST: {
    if (!MBB.getParent()->getFunction()->optForMinSize())
      return false;

    if (!MI->hasOneMemOperand() ||
        (*MI->memoperands_begin())->getAlignment() < 4)
      return false;

    // We're creating a completely different type of load/store - LDM from LDR.
    // For this reason we can't reuse the logic at the end of this function; we
    // have to implement the MI building here.
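    // For instance (an illustrative pairing, not taken from a test):
    //   ldr r0, [r1], #4   ; t2LDR_POST
    // becomes
    //   ldm r1!, {r0}      ; tLDMIA_UPD
    // A single-register LDM/STM with writeback advances the base by exactly
    // 4 bytes, which is why the post-increment offset is checked below.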
    bool IsStore = Entry.WideOpc == ARM::t2STR_POST;
    unsigned Rt = MI->getOperand(IsStore ? 1 : 0).getReg();
    unsigned Rn = MI->getOperand(IsStore ? 0 : 1).getReg();
    unsigned Offset = MI->getOperand(3).getImm();
    unsigned PredImm = MI->getOperand(4).getImm();
    unsigned PredReg = MI->getOperand(5).getReg();
    assert(isARMLowRegister(Rt));
    assert(isARMLowRegister(Rn));

    if (Offset != 4)
      return false;

    // Add the 16-bit load / store instruction.
    DebugLoc dl = MI->getDebugLoc();
    auto MIB = BuildMI(MBB, MI, dl, TII->get(Entry.NarrowOpc1))
                   .addReg(Rn, RegState::Define)
                   .addReg(Rn)
                   .addImm(PredImm)
                   .addReg(PredReg)
                   .addReg(Rt, IsStore ? 0 : RegState::Define);

    // Transfer memoperands.
    MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

    // Transfer MI flags.
    MIB.setMIFlags(MI->getFlags());

    // Kill the old instruction.
    MI->eraseFromBundle();
    ++NumLdSts;
    return true;
  }
  case ARM::t2LDMIA: {
    unsigned BaseReg = MI->getOperand(0).getReg();
    assert(isARMLowRegister(BaseReg));

    // For the non-writeback version (this one), the base register must be
    // one of the registers being loaded.
    bool isOK = false;
    for (unsigned i = 3; i < MI->getNumOperands(); ++i) {
      if (MI->getOperand(i).getReg() == BaseReg) {
        isOK = true;
        break;
      }
    }
    if (!isOK)
      return false;

    OpNum = 0;
    isLdStMul = true;
    break;
  }
  case ARM::t2STMIA:
    // If the base register is killed, we don't care what its value is after
    // the instruction, so we can use an updating STMIA.
    if (!MI->getOperand(0).isKill())
      return false;

    isLdStMul = true;
    break;
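  // Illustration of the t2STMIA restriction (example registers): a wide
  // "stm r0, {r1, r2}" where r0 is killed can become the 16-bit
  // "stm r0!, {r1, r2}"; the writeback clobbers r0, which is harmless
  // precisely because r0 has no later uses.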
  case ARM::t2LDMIA_RET: {
    unsigned BaseReg = MI->getOperand(1).getReg();
    if (BaseReg != ARM::SP)
      return false;
    Opc = Entry.NarrowOpc2; // tPOP_RET
    OpNum = 2;
    isLdStMul = true;
    break;
  }
  case ARM::t2LDMIA_UPD:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD: {
    OpNum = 0;

    unsigned BaseReg = MI->getOperand(1).getReg();
    if (BaseReg == ARM::SP &&
        (Entry.WideOpc == ARM::t2LDMIA_UPD ||
         Entry.WideOpc == ARM::t2STMDB_UPD)) {
      Opc = Entry.NarrowOpc2; // tPOP or tPUSH
      OpNum = 2;
    } else if (!isARMLowRegister(BaseReg) ||
               (Entry.WideOpc != ARM::t2LDMIA_UPD &&
                Entry.WideOpc != ARM::t2STMIA_UPD)) {
      return false;
    }

    isLdStMul = true;
    break;
  }
  }
  unsigned OffsetReg = 0;
  bool OffsetKill = false;
  bool OffsetInternal = false;
  if (HasShift) {
    OffsetReg  = MI->getOperand(2).getReg();
    OffsetKill = MI->getOperand(2).isKill();
    OffsetInternal = MI->getOperand(2).isInternalRead();

    if (MI->getOperand(3).getImm())
      // Thumb1 addressing mode doesn't support shift.
      return false;
  }

  unsigned OffsetImm = 0;
  if (HasImmOffset) {
    OffsetImm = MI->getOperand(2).getImm();
    unsigned MaxOffset = ((1 << ImmLimit) - 1) * Scale;

    // Make sure the immediate field fits.
    if ((OffsetImm & (Scale - 1)) || OffsetImm > MaxOffset)
      return false;
  }
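  // Worked example of the check above (values from the ReduceTable entry for
  // t2LDRi12 -> tLDRi): ImmLimit == 5 and Scale == 4, so
  // MaxOffset == ((1 << 5) - 1) * 4 == 124, and any word-aligned offset in
  // [0, 124] narrows; an offset of 126 is misaligned and 128 is out of range.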
  // Add the 16-bit load / store instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, TII->get(Opc));

  // tSTMIA_UPD takes a defining register operand. We've already checked that
  // the register is killed, so mark it as dead here.
  if (Entry.WideOpc == ARM::t2STMIA)
    MIB.addReg(MI->getOperand(0).getReg(), RegState::Define | RegState::Dead);

  if (!isLdStMul) {
    MIB.addOperand(MI->getOperand(0));
    MIB.addOperand(MI->getOperand(1));

    if (HasImmOffset)
      MIB.addImm(OffsetImm / Scale);

    assert((!HasShift || OffsetReg) && "Invalid so_reg load / store address!");

    if (HasOffReg)
      MIB.addReg(OffsetReg, getKillRegState(OffsetKill) |
                            getInternalReadRegState(OffsetInternal));
  }

  // Transfer the rest of operands.
  for (unsigned e = MI->getNumOperands(); OpNum != e; ++OpNum)
    MIB.addOperand(MI->getOperand(OpNum));

  // Transfer memoperands.
  MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());
  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++NumLdSts;
  return true;
}
bool
Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                                const ReduceEntry &Entry,
                                bool LiveCPSR, bool IsSelfLoop) {
  unsigned Opc = MI->getOpcode();
  if (Opc == ARM::t2ADDri) {
    // If the source register is SP, try to reduce to tADDrSPi, otherwise
    // it's a normal reduce.
    if (MI->getOperand(1).getReg() != ARM::SP) {
      if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
        return true;
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    }
    // Try to reduce to tADDrSPi.
    unsigned Imm = MI->getOperand(2).getImm();
    // The immediate must be in range, the destination register must be a low
    // reg, the predicate must be "always" and the condition flags must not
    // be being set.
    if (Imm & 3 || Imm > 1020)
      return false;
    if (!isARMLowRegister(MI->getOperand(0).getReg()))
      return false;
    if (MI->getOperand(3).getImm() != ARMCC::AL)
      return false;
    const MCInstrDesc &MCID = MI->getDesc();
    if (MCID.hasOptionalDef() &&
        MI->getOperand(MCID.getNumOperands()-1).getReg() == ARM::CPSR)
      return false;
    MachineInstrBuilder MIB = BuildMI(MBB, MI, MI->getDebugLoc(),
                                      TII->get(ARM::tADDrSPi))
      .addOperand(MI->getOperand(0))
      .addOperand(MI->getOperand(1))
      .addImm(Imm / 4); // The tADDrSPi has an implied scale by four.
    AddDefaultPred(MIB);
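    // Illustrative effect (example register/offsets): "add.w r0, sp, #40"
    // narrows to tADDrSPi with immediate operand 10, still printed as
    // "add r0, sp, #40"; an offset like #1022 (not a multiple of 4) or
    // #1024 (> 1020) is rejected by the checks above.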
    // Transfer MI flags.
    MIB.setMIFlags(MI->getFlags());

    DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

    MBB.erase_instr(MI);
    ++NumNarrows;
    return true;
  }
  if (Entry.LowRegs1 && !VerifyLowRegs(MI))
    return false;

  if (MI->mayLoadOrStore())
    return ReduceLoadStore(MBB, MI, Entry);
  switch (Opc) {
  default: break;
  case ARM::t2ADDSri:
  case ARM::t2ADDSrr: {
    unsigned PredReg = 0;
    if (getInstrPredicate(*MI, PredReg) == ARMCC::AL) {
      switch (Opc) {
      default: break;
      case ARM::t2ADDSri: {
        if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
          return true;
        // fallthrough
      }
      case ARM::t2ADDSrr:
        return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
      }
    }
    break;
  }
  case ARM::t2RSBri:
  case ARM::t2RSBSri:
  case ARM::t2SXTB:
  case ARM::t2SXTH:
  case ARM::t2UXTB:
  case ARM::t2UXTH:
    if (MI->getOperand(2).getImm() == 0)
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    break;
  case ARM::t2MOVi16:
    // Can convert only 'pure' immediate operands, not immediates obtained as
    // globals' addresses.
    if (MI->getOperand(1).isImm())
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    break;
  case ARM::t2CMPrr: {
    // Try to reduce to the lo-reg only version first. Why there are two
    // versions of the instruction is a mystery.
    // It would be nice to just have two entries in the master table that
    // are prioritized, but the table assumes a unique entry for each
    // source insn opcode. So for now, we hack a local entry record to use.
    static const ReduceEntry NarrowEntry =
      { ARM::t2CMPrr,ARM::tCMPr, 0, 0, 0, 1, 1,2, 0, 0,1,0 };
    if (ReduceToNarrow(MBB, MI, NarrowEntry, LiveCPSR, IsSelfLoop))
      return true;
    return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
  }
  }
  return false;
}
bool
Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                                const ReduceEntry &Entry,
                                bool LiveCPSR, bool IsSelfLoop) {
  if (ReduceLimit2Addr != -1 && ((int)Num2Addrs >= ReduceLimit2Addr))
    return false;

  if (!OptimizeSize && Entry.AvoidMovs && STI->avoidMOVsShifterOperand())
    // Don't issue movs with shifter operand for some CPUs unless we
    // are optimizing for size.
    return false;
  unsigned Reg0 = MI->getOperand(0).getReg();
  unsigned Reg1 = MI->getOperand(1).getReg();
  // t2MUL is "special". The tied source operand is second, not first.
  if (MI->getOpcode() == ARM::t2MUL) {
    unsigned Reg2 = MI->getOperand(2).getReg();
    // Early exit if the regs aren't all low regs.
    if (!isARMLowRegister(Reg0) || !isARMLowRegister(Reg1)
        || !isARMLowRegister(Reg2))
      return false;
    if (Reg0 != Reg2) {
      // If the other operand also isn't the same as the destination, we
      // can't reduce.
      if (Reg1 != Reg0)
        return false;
      // Try to commute the operands to make it a 2-address instruction.
      MachineInstr *CommutedMI = TII->commuteInstruction(*MI);
      if (!CommutedMI)
        return false;
    }
  } else if (Reg0 != Reg1) {
    // Try to commute the operands to make it a 2-address instruction.
    unsigned CommOpIdx1 = 1;
    unsigned CommOpIdx2 = TargetInstrInfo::CommuteAnyOperandIndex;
    if (!TII->findCommutedOpIndices(*MI, CommOpIdx1, CommOpIdx2) ||
        MI->getOperand(CommOpIdx2).getReg() != Reg0)
      return false;
    MachineInstr *CommutedMI =
        TII->commuteInstruction(*MI, false, CommOpIdx1, CommOpIdx2);
    if (!CommutedMI)
      return false;
  }
  if (Entry.LowRegs2 && !isARMLowRegister(Reg0))
    return false;
  if (Entry.Imm2Limit) {
    unsigned Imm = MI->getOperand(2).getImm();
    unsigned Limit = (1 << Entry.Imm2Limit) - 1;
    if (Imm > Limit)
      return false;
  } else {
    unsigned Reg2 = MI->getOperand(2).getReg();
    if (Entry.LowRegs2 && !isARMLowRegister(Reg2))
      return false;
  }
755 const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc2);
756 unsigned PredReg = 0;
757 ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
758 bool SkipPred = false;
759 if (Pred != ARMCC::AL) {
760 if (!NewMCID.isPredicable())
761 // Can't transfer predicate, fail.
764 SkipPred = !NewMCID.isPredicable();
  bool HasCC = false;
  bool CCDead = false;
  const MCInstrDesc &MCID = MI->getDesc();
  if (MCID.hasOptionalDef()) {
    unsigned NumOps = MCID.getNumOperands();
    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
      CCDead = true;
  }
  if (!VerifyPredAndCC(MI, Entry, true, Pred, LiveCPSR, HasCC, CCDead))
    return false;
  // Avoid adding a false dependency on partial flag update by some 16-bit
  // instructions which have the 's' bit set.
  if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
      canAddPseudoFlagDep(MI, IsSelfLoop))
    return false;
  // Add the 16-bit instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);
  MIB.addOperand(MI->getOperand(0));
  if (NewMCID.hasOptionalDef()) {
    if (HasCC)
      AddDefaultT1CC(MIB, CCDead);
    else
      AddNoT1CC(MIB);
  }
  // Transfer the rest of operands.
  unsigned NumOps = MCID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
      continue;
    if (SkipPred && MCID.OpInfo[i].isPredicate())
      continue;
    MIB.addOperand(MI->getOperand(i));
  }
  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++Num2Addrs;
  return true;
}
bool
Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                                 const ReduceEntry &Entry,
                                 bool LiveCPSR, bool IsSelfLoop) {
  if (ReduceLimit != -1 && ((int)NumNarrows >= ReduceLimit))
    return false;

  if (!OptimizeSize && Entry.AvoidMovs && STI->avoidMOVsShifterOperand())
    // Don't issue movs with shifter operand for some CPUs unless we
    // are optimizing for size.
    return false;
  unsigned Limit = ~0U;
  if (Entry.Imm1Limit)
    Limit = (1 << Entry.Imm1Limit) - 1;

  const MCInstrDesc &MCID = MI->getDesc();
  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate())
      continue;
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg()) {
      unsigned Reg = MO.getReg();
      if (!Reg || Reg == ARM::CPSR)
        continue;
      if (Entry.LowRegs1 && !isARMLowRegister(Reg))
        return false;
    } else if (MO.isImm() &&
               !MCID.OpInfo[i].isPredicate()) {
      if (((unsigned)MO.getImm()) > Limit)
        return false;
    }
  }
  // Check if it's possible / necessary to transfer the predicate.
  const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc1);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
  bool SkipPred = false;
  if (Pred != ARMCC::AL) {
    if (!NewMCID.isPredicable())
      // Can't transfer predicate, fail.
      return false;
  } else {
    SkipPred = !NewMCID.isPredicable();
  }
  bool HasCC = false;
  bool CCDead = false;
  if (MCID.hasOptionalDef()) {
    unsigned NumOps = MCID.getNumOperands();
    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
      CCDead = true;
  }
  if (!VerifyPredAndCC(MI, Entry, false, Pred, LiveCPSR, HasCC, CCDead))
    return false;
  // Avoid adding a false dependency on partial flag update by some 16-bit
  // instructions which have the 's' bit set.
  if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
      canAddPseudoFlagDep(MI, IsSelfLoop))
    return false;
  // Add the 16-bit instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);
  MIB.addOperand(MI->getOperand(0));
  if (NewMCID.hasOptionalDef()) {
    if (HasCC)
      AddDefaultT1CC(MIB, CCDead);
    else
      AddNoT1CC(MIB);
  }
  // Transfer the rest of operands.
  unsigned NumOps = MCID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
      continue;
    if ((MCID.getOpcode() == ARM::t2RSBSri ||
         MCID.getOpcode() == ARM::t2RSBri ||
         MCID.getOpcode() == ARM::t2SXTB ||
         MCID.getOpcode() == ARM::t2SXTH ||
         MCID.getOpcode() == ARM::t2UXTB ||
         MCID.getOpcode() == ARM::t2UXTH) && i == 2)
      // Skip the zero immediate operand, it's now implicit.
      continue;
    bool isPred = (i < NumOps && MCID.OpInfo[i].isPredicate());
    if (SkipPred && isPred)
      continue;
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isImplicit() && MO.getReg() == ARM::CPSR)
      // Skip implicit def of CPSR. Either it's modeled as an optional
      // def now or it's already an implicit def on the new instruction.
      continue;
    MIB.addOperand(MO);
  }
  if (!MCID.isPredicable() && NewMCID.isPredicable())
    AddDefaultPred(MIB);
  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++NumNarrows;
  return true;
}
static bool UpdateCPSRDef(MachineInstr &MI, bool LiveCPSR, bool &DefCPSR) {
  bool HasDef = false;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;

    DefCPSR = true;
    if (!MO.isDead())
      HasDef = true;
  }

  return HasDef || LiveCPSR;
}
static bool UpdateCPSRUse(MachineInstr &MI, bool LiveCPSR) {
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isDef())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;
    assert(LiveCPSR && "CPSR liveness tracking is wrong!");
    if (MO.isKill()) {
      LiveCPSR = false;
      break;
    }
  }

  return LiveCPSR;
}
bool Thumb2SizeReduce::ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI,
                                bool LiveCPSR, bool IsSelfLoop) {
  unsigned Opcode = MI->getOpcode();
  DenseMap<unsigned, unsigned>::iterator OPI = ReduceOpcodeMap.find(Opcode);
  if (OPI == ReduceOpcodeMap.end())
    return false;
  const ReduceEntry &Entry = ReduceTable[OPI->second];

  // Don't attempt normal reductions on "special" cases for now.
  if (Entry.Special)
    return ReduceSpecial(MBB, MI, Entry, LiveCPSR, IsSelfLoop);

  // Try to transform to a 16-bit two-address instruction.
  if (Entry.NarrowOpc2 &&
      ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
    return true;

  // Try to transform to a 16-bit non-two-address instruction.
  if (Entry.NarrowOpc1 &&
      ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
    return true;

  return false;
}
bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  // Yes, CPSR could be livein.
  bool LiveCPSR = MBB.isLiveIn(ARM::CPSR);
  MachineInstr *BundleMI = nullptr;

  CPSRDef = nullptr;
  HighLatencyCPSR = false;

  // Check predecessors for the latest CPSRDef.
  for (auto *Pred : MBB.predecessors()) {
    const MBBInfo &PInfo = BlockInfo[Pred->getNumber()];
    if (!PInfo.Visited) {
      // Since blocks are visited in RPO, this must be a back-edge.
      continue;
    }
    if (PInfo.HighLatencyCPSR) {
      HighLatencyCPSR = true;
      break;
    }
  }
  // If this BB loops back to itself, conservatively avoid narrowing the
  // first instruction that does partial flag update.
  bool IsSelfLoop = MBB.isSuccessor(&MBB);
  MachineBasicBlock::instr_iterator MII = MBB.instr_begin(),E = MBB.instr_end();
  MachineBasicBlock::instr_iterator NextMII;
  for (; MII != E; MII = NextMII) {
    NextMII = std::next(MII);

    MachineInstr *MI = &*MII;
    if (MI->isBundle()) {
      BundleMI = MI;
      continue;
    }
    if (MI->isDebugValue())
      continue;

    LiveCPSR = UpdateCPSRUse(*MI, LiveCPSR);

    // Does NextMII belong to the same bundle as MI?
    bool NextInSameBundle = NextMII != E && NextMII->isBundledWithPred();

    if (ReduceMI(MBB, MI, LiveCPSR, IsSelfLoop)) {
      Modified = true;
      MachineBasicBlock::instr_iterator I = std::prev(NextMII);
      MI = &*I;
      // Removing and reinserting the first instruction in a bundle will break
      // up the bundle. Fix the bundling if it was broken.
      if (NextInSameBundle && !NextMII->isBundledWithPred())
        NextMII->bundleWithPred();
    }
    if (BundleMI && !NextInSameBundle && MI->isInsideBundle()) {
      // FIXME: Since post-ra scheduler operates on bundles, the CPSR kill
      // marker is only on the BUNDLE instruction. Process the BUNDLE
      // instruction as we finish with the bundled instruction to work around
      // the inconsistency.
      if (BundleMI->killsRegister(ARM::CPSR))
        LiveCPSR = false;
      MachineOperand *MO = BundleMI->findRegisterDefOperand(ARM::CPSR);
      if (MO && !MO->isDead())
        LiveCPSR = true;
      MO = BundleMI->findRegisterUseOperand(ARM::CPSR);
      if (MO && !MO->isKill())
        LiveCPSR = true;
    }
    bool DefCPSR = false;
    LiveCPSR = UpdateCPSRDef(*MI, LiveCPSR, DefCPSR);
    if (MI->isCall()) {
      // Calls don't really set CPSR.
      CPSRDef = nullptr;
      HighLatencyCPSR = false;
      IsSelfLoop = false;
    } else if (DefCPSR) {
      // This is the last CPSR defining instruction.
      CPSRDef = MI;
      HighLatencyCPSR = isHighLatencyCPSR(CPSRDef);
      IsSelfLoop = false;
    }
  }

  MBBInfo &Info = BlockInfo[MBB.getNumber()];
  Info.HighLatencyCPSR = HighLatencyCPSR;
  Info.Visited = true;
  return Modified;
}
bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
  if (PredicateFtor && !PredicateFtor(*MF.getFunction()))
    return false;

  STI = &static_cast<const ARMSubtarget &>(MF.getSubtarget());
  if (STI->isThumb1Only() || STI->prefers32BitThumb())
    return false;

  TII = static_cast<const Thumb2InstrInfo *>(STI->getInstrInfo());

  // Optimizing / minimizing size? Minimizing size implies optimizing for size.
  OptimizeSize = MF.getFunction()->optForSize();
  MinimizeSize = MF.getFunction()->optForMinSize();

  BlockInfo.clear();
  BlockInfo.resize(MF.getNumBlockIDs());

  // Visit blocks in reverse post-order so LastCPSRDef is known for all
  // predecessors.
  ReversePostOrderTraversal<MachineFunction*> RPOT(&MF);
  bool Modified = false;
  for (ReversePostOrderTraversal<MachineFunction*>::rpo_iterator
       I = RPOT.begin(), E = RPOT.end(); I != E; ++I)
    Modified |= ReduceMBB(**I);
  return Modified;
}
/// createThumb2SizeReductionPass - Returns an instance of the Thumb2 size
/// reduction pass.
FunctionPass *llvm::createThumb2SizeReductionPass(
    std::function<bool(const Function &)> Ftor) {
  return new Thumb2SizeReduce(std::move(Ftor));
}
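
// Illustrative use of the factory above (a sketch of a target pass pipeline;
// the exact hook, class name, and predicate vary by LLVM version and are
// assumptions here, not this file's API):
//
//   void MyARMPassConfig::addPreEmitPass() {
//     // Run size reduction late, only on functions the predicate accepts.
//     addPass(createThumb2SizeReductionPass(
//         [](const Function &F) { return F.optForMinSize(); }));
//   }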