//===-- HexagonSubtarget.cpp - Hexagon Subtarget Information --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Hexagon specific subclass of TargetSubtarget.
//
//===----------------------------------------------------------------------===//
14 #include "HexagonSubtarget.h"
16 #include "HexagonRegisterInfo.h"
17 #include "llvm/CodeGen/ScheduleDAG.h"
18 #include "llvm/CodeGen/ScheduleDAGInstrs.h"
19 #include "llvm/Support/CommandLine.h"
20 #include "llvm/Support/ErrorHandling.h"
25 #define DEBUG_TYPE "hexagon-subtarget"
27 #define GET_SUBTARGETINFO_CTOR
28 #define GET_SUBTARGETINFO_TARGET_DESC
29 #include "HexagonGenSubtargetInfo.inc"
31 static cl::opt<bool> EnableMemOps("enable-hexagon-memops",
32 cl::Hidden, cl::ZeroOrMore, cl::ValueDisallowed, cl::init(true),
33 cl::desc("Generate V4 MEMOP in code generation for Hexagon target"));
35 static cl::opt<bool> DisableMemOps("disable-hexagon-memops",
36 cl::Hidden, cl::ZeroOrMore, cl::ValueDisallowed, cl::init(false),
37 cl::desc("Do not generate V4 MEMOP in code generation for Hexagon target"));
39 static cl::opt<bool> EnableIEEERndNear("enable-hexagon-ieee-rnd-near",
40 cl::Hidden, cl::ZeroOrMore, cl::init(false),
41 cl::desc("Generate non-chopped conversion from fp to int."));
43 static cl::opt<bool> EnableBSBSched("enable-bsb-sched",
44 cl::Hidden, cl::ZeroOrMore, cl::init(true));
46 static cl::opt<bool> EnableHexagonHVXDouble("enable-hexagon-hvx-double",
47 cl::Hidden, cl::ZeroOrMore, cl::init(false),
48 cl::desc("Enable Hexagon Double Vector eXtensions"));
50 static cl::opt<bool> EnableHexagonHVX("enable-hexagon-hvx",
51 cl::Hidden, cl::ZeroOrMore, cl::init(false),
52 cl::desc("Enable Hexagon Vector eXtensions"));
54 static cl::opt<bool> EnableTCLatencySched("enable-tc-latency-sched",
55 cl::Hidden, cl::ZeroOrMore, cl::init(false));
57 static cl::opt<bool> EnableDotCurSched("enable-cur-sched",
58 cl::Hidden, cl::ZeroOrMore, cl::init(true),
59 cl::desc("Enable the scheduler to generate .cur"));
61 static cl::opt<bool> EnableVecFrwdSched("enable-evec-frwd-sched",
62 cl::Hidden, cl::ZeroOrMore, cl::init(true));
64 static cl::opt<bool> DisableHexagonMISched("disable-hexagon-misched",
65 cl::Hidden, cl::ZeroOrMore, cl::init(false),
66 cl::desc("Disable Hexagon MI Scheduling"));
68 static cl::opt<bool> EnableSubregLiveness("hexagon-subreg-liveness",
69 cl::Hidden, cl::ZeroOrMore, cl::init(true),
70 cl::desc("Enable subregister liveness tracking for Hexagon"));
72 static cl::opt<bool> OverrideLongCalls("hexagon-long-calls",
73 cl::Hidden, cl::ZeroOrMore, cl::init(false),
74 cl::desc("If present, forces/disables the use of long calls"));
76 void HexagonSubtarget::initializeEnvironment() {
78 ModeIEEERndNear = false;
79 UseBSBScheduling = false;
83 HexagonSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) {
84 CPUString = Hexagon_MC::selectHexagonCPU(getTargetTriple(), CPU);
86 static std::map<StringRef, HexagonArchEnum> CpuTable {
89 { "hexagonv55", V55 },
90 { "hexagonv60", V60 },
91 { "hexagonv62", V62 },
94 auto foundIt = CpuTable.find(CPUString);
95 if (foundIt != CpuTable.end())
96 HexagonArchVersion = foundIt->second;
98 llvm_unreachable("Unrecognized Hexagon processor version");
101 UseHVXDblOps = false;
102 UseLongCalls = false;
103 ParseSubtargetFeatures(CPUString, FS);
105 if (EnableHexagonHVX.getPosition())
106 UseHVXOps = EnableHexagonHVX;
107 if (EnableHexagonHVXDouble.getPosition())
108 UseHVXDblOps = EnableHexagonHVXDouble;
109 if (OverrideLongCalls.getPosition())
110 UseLongCalls = OverrideLongCalls;
115 HexagonSubtarget::HexagonSubtarget(const Triple &TT, StringRef CPU,
116 StringRef FS, const TargetMachine &TM)
117 : HexagonGenSubtargetInfo(TT, CPU, FS), CPUString(CPU),
118 InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this),
121 initializeEnvironment();
123 // Initialize scheduling itinerary for the specified CPU.
124 InstrItins = getInstrItineraryForCPU(CPUString);
126 // UseMemOps on by default unless disabled explicitly
129 else if (EnableMemOps)
134 if (EnableIEEERndNear)
135 ModeIEEERndNear = true;
137 ModeIEEERndNear = false;
139 UseBSBScheduling = hasV60TOps() && EnableBSBSched;
143 void HexagonSubtarget::HexagonDAGMutation::apply(ScheduleDAGInstrs *DAG) {
144 for (auto &SU : DAG->SUnits) {
147 SmallVector<SDep, 4> Erase;
148 for (auto &D : SU.Preds)
149 if (D.getKind() == SDep::Output && D.getReg() == Hexagon::USR_OVF)
151 for (auto &E : Erase)
155 for (auto &SU : DAG->SUnits) {
156 // Update the latency of chain edges between v60 vector load or store
157 // instructions to be 1. These instructions cannot be scheduled in the
159 MachineInstr &MI1 = *SU.getInstr();
160 auto *QII = static_cast<const HexagonInstrInfo*>(DAG->TII);
161 bool IsStoreMI1 = MI1.mayStore();
162 bool IsLoadMI1 = MI1.mayLoad();
163 if (!QII->isV60VectorInstruction(MI1) || !(IsStoreMI1 || IsLoadMI1))
165 for (auto &SI : SU.Succs) {
166 if (SI.getKind() != SDep::Order || SI.getLatency() != 0)
168 MachineInstr &MI2 = *SI.getSUnit()->getInstr();
169 if (!QII->isV60VectorInstruction(MI2))
171 if ((IsStoreMI1 && MI2.mayStore()) || (IsLoadMI1 && MI2.mayLoad())) {
174 // Change the dependence in the opposite direction too.
175 for (auto &PI : SI.getSUnit()->Preds) {
176 if (PI.getSUnit() != &SU || PI.getKind() != SDep::Order)
179 SI.getSUnit()->setDepthDirty();
187 void HexagonSubtarget::getPostRAMutations(
188 std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
189 Mutations.push_back(make_unique<HexagonSubtarget::HexagonDAGMutation>());
192 void HexagonSubtarget::getSMSMutations(
193 std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
194 Mutations.push_back(make_unique<HexagonSubtarget::HexagonDAGMutation>());
198 // Pin the vtable to this file.
199 void HexagonSubtarget::anchor() {}
201 bool HexagonSubtarget::enableMachineScheduler() const {
202 if (DisableHexagonMISched.getNumOccurrences())
203 return !DisableHexagonMISched;
207 bool HexagonSubtarget::enableSubRegLiveness() const {
208 return EnableSubregLiveness;
211 // This helper function is responsible for increasing the latency only.
212 void HexagonSubtarget::updateLatency(MachineInstr &SrcInst,
213 MachineInstr &DstInst, SDep &Dep) const {
217 auto &QII = static_cast<const HexagonInstrInfo&>(*getInstrInfo());
219 if (EnableVecFrwdSched && QII.addLatencyToSchedule(SrcInst, DstInst)) {
220 // Vec frwd scheduling.
221 Dep.setLatency(Dep.getLatency() + 1);
222 } else if (useBSBScheduling() &&
223 QII.isLateInstrFeedsEarlyInstr(SrcInst, DstInst)) {
225 Dep.setLatency(Dep.getLatency() + 1);
226 } else if (EnableTCLatencySched) {
227 // TClass latency scheduling.
228 // Check if SrcInst produces in 2C an operand of DstInst taken in stage 2B.
229 if (QII.isTC1(SrcInst) || QII.isTC2(SrcInst))
230 if (!QII.isTC1(DstInst) && !QII.isTC2(DstInst))
231 Dep.setLatency(Dep.getLatency() + 1);
235 /// If the SUnit has a zero latency edge, return the other SUnit.
236 static SUnit *getZeroLatency(SUnit *N, SmallVector<SDep, 4> &Deps) {
238 if (I.isAssignedRegDep() && I.getLatency() == 0 &&
239 !I.getSUnit()->getInstr()->isPseudo())
244 /// Change the latency between the two SUnits.
245 void HexagonSubtarget::changeLatency(SUnit *Src, SmallVector<SDep, 4> &Deps,
246 SUnit *Dst, unsigned Lat) const {
247 MachineInstr &SrcI = *Src->getInstr();
248 for (auto &I : Deps) {
249 if (I.getSUnit() != Dst)
252 SUnit *UpdateDst = I.getSUnit();
253 updateLatency(SrcI, *UpdateDst->getInstr(), I);
254 // Update the latency of opposite edge too.
255 for (auto &PI : UpdateDst->Preds) {
256 if (PI.getSUnit() != Src || !PI.isAssignedRegDep())
259 updateLatency(SrcI, *UpdateDst->getInstr(), PI);
264 // Return true if these are the best two instructions to schedule
265 // together with a zero latency. Only one dependence should have a zero
266 // latency. If there are multiple choices, choose the best, and change
267 // ther others, if needed.
268 bool HexagonSubtarget::isBestZeroLatency(SUnit *Src, SUnit *Dst,
269 const HexagonInstrInfo *TII) const {
270 MachineInstr &SrcInst = *Src->getInstr();
271 MachineInstr &DstInst = *Dst->getInstr();
273 // Ignore Boundary SU nodes as these have null instructions.
274 if (Dst->isBoundaryNode())
277 if (SrcInst.isPHI() || DstInst.isPHI())
280 // Check if the Dst instruction is the best candidate first.
281 SUnit *Best = nullptr;
282 SUnit *DstBest = nullptr;
283 SUnit *SrcBest = getZeroLatency(Dst, Dst->Preds);
284 if (SrcBest == nullptr || Src->NodeNum >= SrcBest->NodeNum) {
285 // Check that Src doesn't have a better candidate.
286 DstBest = getZeroLatency(Src, Src->Succs);
287 if (DstBest == nullptr || Dst->NodeNum <= DstBest->NodeNum)
293 // The caller frequents adds the same dependence twice. If so, then
294 // return true for this case too.
295 if (Src == SrcBest && Dst == DstBest)
298 // Reassign the latency for the previous bests, which requires setting
299 // the dependence edge in both directions.
300 if (SrcBest != nullptr)
301 changeLatency(SrcBest, SrcBest->Succs, Dst, 1);
302 if (DstBest != nullptr)
303 changeLatency(Src, Src->Succs, DstBest, 1);
304 // If there is an edge from SrcBest to DstBst, then try to change that
306 if (SrcBest && DstBest)
307 changeLatency(SrcBest, SrcBest->Succs, DstBest, 0);
312 // Update the latency of a Phi when the Phi bridges two instructions that
313 // require a multi-cycle latency.
314 void HexagonSubtarget::changePhiLatency(MachineInstr &SrcInst, SUnit *Dst,
316 if (!SrcInst.isPHI() || Dst->NumPreds == 0 || Dep.getLatency() != 0)
319 for (const SDep &PI : Dst->Preds) {
320 if (PI.getLatency() != 0)
327 /// \brief Perform target specific adjustments to the latency of a schedule
329 void HexagonSubtarget::adjustSchedDependency(SUnit *Src, SUnit *Dst,
331 MachineInstr *SrcInst = Src->getInstr();
332 MachineInstr *DstInst = Dst->getInstr();
333 if (!Src->isInstr() || !Dst->isInstr())
336 const HexagonInstrInfo *QII = static_cast<const HexagonInstrInfo *>(getInstrInfo());
338 // Instructions with .new operands have zero latency.
339 if (QII->canExecuteInBundle(*SrcInst, *DstInst) &&
340 isBestZeroLatency(Src, Dst, QII)) {
348 // Don't adjust the latency of post-increment part of the instruction.
349 if (QII->isPostIncrement(*SrcInst) && Dep.isAssignedRegDep()) {
350 if (SrcInst->mayStore())
352 if (Dep.getReg() != SrcInst->getOperand(0).getReg())
354 } else if (QII->isPostIncrement(*DstInst) && Dep.getKind() == SDep::Anti) {
355 if (DstInst->mayStore())
357 if (Dep.getReg() != DstInst->getOperand(0).getReg())
359 } else if (QII->isPostIncrement(*DstInst) && DstInst->mayStore() &&
360 Dep.isAssignedRegDep()) {
361 MachineOperand &Op = DstInst->getOperand(DstInst->getNumOperands() - 1);
362 if (Op.isReg() && Dep.getReg() != Op.getReg())
366 // Check if we need to change any the latency values when Phis are added.
367 if (useBSBScheduling() && SrcInst->isPHI()) {
368 changePhiLatency(*SrcInst, Dst, Dep);
372 // If it's a REG_SEQUENCE, use its destination instruction to determine
373 // the correct latency.
374 if (DstInst->isRegSequence() && Dst->NumSuccs == 1)
375 DstInst = Dst->Succs[0].getSUnit()->getInstr();
377 // Try to schedule uses near definitions to generate .cur.
378 if (EnableDotCurSched && QII->isToBeScheduledASAP(*SrcInst, *DstInst) &&
379 isBestZeroLatency(Src, Dst, QII)) {
384 updateLatency(*SrcInst, *DstInst, Dep);
387 unsigned HexagonSubtarget::getL1CacheLineSize() const {
391 unsigned HexagonSubtarget::getL1PrefetchDistance() const {