1 //===--------------------- InstrBuilder.cpp ---------------------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// This file implements the InstrBuilder interface.
13 //===----------------------------------------------------------------------===//
15 #include "llvm/MCA/InstrBuilder.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/MC/MCInst.h"
19 #include "llvm/Support/Debug.h"
20 #include "llvm/Support/WithColor.h"
21 #include "llvm/Support/raw_ostream.h"
23 #define DEBUG_TYPE "llvm-mca"
28 InstrBuilder::InstrBuilder(const llvm::MCSubtargetInfo &sti,
29 const llvm::MCInstrInfo &mcii,
30 const llvm::MCRegisterInfo &mri,
31 const llvm::MCInstrAnalysis *mcia)
32 : STI(sti), MCII(mcii), MRI(mri), MCIA(mcia), FirstCallInst(true),
33 FirstReturnInst(true) {
34 const MCSchedModel &SM = STI.getSchedModel();
35 ProcResourceMasks.resize(SM.getNumProcResourceKinds());
36 computeProcResourceMasks(STI.getSchedModel(), ProcResourceMasks);
// Computes the set of processor resources (units, groups and buffers)
// consumed by scheduling class SCDesc, and stores the result into ID.
//
// NOTE(review): this extract appears truncated — several closing braces,
// else-branches and short statements are missing relative to a compilable
// version of this function. The NOTE(review) comments below mark the visible
// gaps; restore the missing lines from the upstream file before building.
static void initializeUsedResources(InstrDesc &ID,
                                    const MCSchedClassDesc &SCDesc,
                                    const MCSubtargetInfo &STI,
                                    ArrayRef<uint64_t> ProcResourceMasks) {
  const MCSchedModel &SM = STI.getSchedModel();

  // Populate resources consumed.
  // Each worklist entry pairs a processor resource mask with the usage (in
  // cycles) of that resource.
  using ResourcePlusCycles = std::pair<uint64_t, ResourceUsage>;
  std::vector<ResourcePlusCycles> Worklist;

  // Track cycles contributed by resources that are in a "Super" relationship.
  // This is required if we want to correctly match the behavior of method
  // SubtargetEmitter::ExpandProcResource() in Tablegen. When computing the set
  // of "consumed" processor resources and resource cycles, the logic in
  // ExpandProcResource() doesn't update the number of resource cycles
  // contributed by a "Super" resource to a group.
  // We need to take this into account when we find that a processor resource is
  // part of a group, and it is also used as the "Super" of other resources.
  // This map stores the number of cycles contributed by sub-resources that are
  // part of a "Super" resource. The key value is the "Super" resource mask ID.
  DenseMap<uint64_t, unsigned> SuperResources;

  unsigned NumProcResources = SM.getNumProcResourceKinds();
  // One bit per processor resource kind that provides a buffer.
  APInt Buffers(NumProcResources, 0);

  bool AllInOrderResources = true;
  bool AnyDispatchHazards = false;
  // Scan every write-proc-res entry of the scheduling class.
  for (unsigned I = 0, E = SCDesc.NumWriteProcResEntries; I < E; ++I) {
    const MCWriteProcResEntry *PRE = STI.getWriteProcResBegin(&SCDesc) + I;
    const MCProcResourceDesc &PR = *SM.getProcResource(PRE->ProcResourceIdx);
    uint64_t Mask = ProcResourceMasks[PRE->ProcResourceIdx];
    // A negative BufferSize identifies an in-order (unbuffered) resource.
    if (PR.BufferSize < 0) {
      AllInOrderResources = false;
    // NOTE(review): an `} else {` is presumably missing here — the next three
    // statements only make sense for buffered resources. Confirm upstream.
      Buffers.setBit(PRE->ProcResourceIdx);
      AnyDispatchHazards |= (PR.BufferSize == 0);
      AllInOrderResources &= (PR.BufferSize <= 1);

    // Record that this write consumes `Mask` for PRE->Cycles cycles.
    CycleSegment RCy(0, PRE->Cycles, false);
    Worklist.emplace_back(ResourcePlusCycles(Mask, ResourceUsage(RCy)));
    // Accumulate cycles against this resource's "Super" resource (see the
    // SuperResources comment above). NOTE(review): presumably guarded by a
    // `if (PR.SuperIdx)` check upstream — confirm.
    uint64_t Super = ProcResourceMasks[PR.SuperIdx];
    SuperResources[Super] += PRE->Cycles;

  // Instructions that only consume in-order resources, at least one of which
  // raises a dispatch hazard (BufferSize == 0), must issue immediately.
  ID.MustIssueImmediately = AllInOrderResources && AnyDispatchHazards;

  // Sort elements by mask popcount, so that we prioritize resource units over
  // resource groups, and smaller groups over larger groups.
  sort(Worklist, [](const ResourcePlusCycles &A, const ResourcePlusCycles &B) {
    unsigned popcntA = countPopulation(A.first);
    unsigned popcntB = countPopulation(B.first);
    if (popcntA < popcntB)
    // NOTE(review): the comparator's `return true;` / `return false;` bodies
    // appear to be missing between these two conditions.
    if (popcntA > popcntB)
    return A.first < B.first;

  // Union of all single-unit resource masks consumed so far.
  uint64_t UsedResourceUnits = 0;

  // Remove cycles contributed by smaller resources.
  for (unsigned I = 0, E = Worklist.size(); I < E; ++I) {
    ResourcePlusCycles &A = Worklist[I];
    if (!A.second.size()) {
      // A zero-cycle usage marks a reserved resource: no units are consumed.
      A.second.NumUnits = 0;
      A.second.setReserved();
      ID.Resources.emplace_back(A);
    // NOTE(review): a `continue;` and a closing `}` are presumably missing
    // here — otherwise the entry would be appended twice.
    ID.Resources.emplace_back(A);
    uint64_t NormalizedMask = A.first;
    if (countPopulation(A.first) == 1) {
      UsedResourceUnits |= A.first;
    // Remove the leading 1 from the resource group mask.
    NormalizedMask ^= PowerOf2Floor(NormalizedMask);

    // Subtract this (smaller) resource's cycles from every larger group that
    // fully contains it.
    for (unsigned J = I + 1; J < E; ++J) {
      ResourcePlusCycles &B = Worklist[J];
      if ((NormalizedMask & B.first) == NormalizedMask) {
        B.second.CS.subtract(A.second.size() - SuperResources[A.first]);
        if (countPopulation(B.first) > 1)
        // NOTE(review): the statement executed for groups appears truncated.

  // A SchedWrite may specify a number of cycles in which a resource group
  // is reserved. For example (on target x86; cpu Haswell):
  //
  //   SchedWriteRes<[HWPort0, HWPort1, HWPort01]> {
  //     let ResourceCycles = [2, 2, 3];
  //   }
  //
  // Resource units HWPort0 and HWPort1 are both used for 2cy.
  // Resource group HWPort01 is the union of HWPort0 and HWPort1.
  // Since this write touches both HWPort0 and HWPort1 for 2cy, HWPort01
  // will not be usable for 2 entire cycles from instruction issue.
  //
  // On top of those 2cy, SchedWriteRes explicitly specifies an extra latency
  // of 3 cycles for HWPort01. This tool assumes that the 3cy latency is an
  // extra delay on top of the 2 cycles latency.
  // During those extra cycles, HWPort01 is not usable by other instructions.
  for (ResourcePlusCycles &RPC : ID.Resources) {
    if (countPopulation(RPC.first) > 1 && !RPC.second.isReserved()) {
      // Remove the leading 1 from the resource group mask.
      uint64_t Mask = RPC.first ^ PowerOf2Floor(RPC.first);
      // If every unit of the group is already consumed elsewhere, the whole
      // group is effectively reserved for the duration of this write.
      if ((Mask & UsedResourceUnits) == Mask)
        RPC.second.setReserved();

  // Identify extra buffers that are consumed through super resources.
  for (const std::pair<uint64_t, unsigned> &SR : SuperResources) {
    for (unsigned I = 1, E = NumProcResources; I < E; ++I) {
      const MCProcResourceDesc &PR = *SM.getProcResource(I);
      if (PR.BufferSize == -1)
      // NOTE(review): a `continue;` is presumably missing after this check.
      uint64_t Mask = ProcResourceMasks[I];
      if (Mask != SR.first && ((Mask & SR.first) == SR.first))
      // NOTE(review): the statement setting the buffer bit appears truncated.

  // Now set the buffers.
  if (unsigned NumBuffers = Buffers.countPopulation()) {
    ID.Buffers.resize(NumBuffers);
    for (unsigned I = 0, E = NumProcResources; I < E && NumBuffers; ++I) {
      // NOTE(review): the guard testing `Buffers[I]` and the decrement of
      // NumBuffers appear to be missing here.
      ID.Buffers[NumBuffers] = ProcResourceMasks[I];

  // Dump the computed resource usage and buffer masks (debug output).
  for (const std::pair<uint64_t, ResourceUsage> &R : ID.Resources)
    dbgs() << "\t\tMask=" << format_hex(R.first, 16) << ", "
           << "cy=" << R.second.size() << '\n';
  for (const uint64_t R : ID.Buffers)
    dbgs() << "\t\tBuffer Mask=" << format_hex(R, 16) << '\n';
190 static void computeMaxLatency(InstrDesc &ID, const MCInstrDesc &MCDesc,
191 const MCSchedClassDesc &SCDesc,
192 const MCSubtargetInfo &STI) {
193 if (MCDesc.isCall()) {
194 // We cannot estimate how long this call will take.
195 // Artificially set an arbitrarily high latency (100cy).
196 ID.MaxLatency = 100U;
200 int Latency = MCSchedModel::computeInstrLatency(STI, SCDesc);
201 // If latency is unknown, then conservatively assume a MaxLatency of 100cy.
202 ID.MaxLatency = Latency < 0 ? 100U : static_cast<unsigned>(Latency);
205 static Error verifyOperands(const MCInstrDesc &MCDesc, const MCInst &MCI) {
206 // Count register definitions, and skip non register operands in the process.
208 unsigned NumExplicitDefs = MCDesc.getNumDefs();
209 for (I = 0, E = MCI.getNumOperands(); NumExplicitDefs && I < E; ++I) {
210 const MCOperand &Op = MCI.getOperand(I);
215 if (NumExplicitDefs) {
216 return make_error<InstructionError<MCInst>>(
217 "Expected more register operand definitions.", MCI);
220 if (MCDesc.hasOptionalDef()) {
221 // Always assume that the optional definition is the last operand.
222 const MCOperand &Op = MCI.getOperand(MCDesc.getNumOperands() - 1);
223 if (I == MCI.getNumOperands() || !Op.isReg()) {
224 std::string Message =
225 "expected a register operand for an optional definition. Instruction "
226 "has not been correctly analyzed.";
227 return make_error<InstructionError<MCInst>>(Message, MCI);
231 return ErrorSuccess();
// Creates one WriteDescriptor in ID.Writes for every register definition of
// MCI: explicit defs first, then implicit defs, then the optional definition
// (if any), and finally register writes from the variadic operand sequence.
//
// NOTE(review): this extract appears truncated — loop index declarations,
// several `Write.OpIndex`/`Write.Latency =` assignments, else-branches,
// LLVM_DEBUG wrappers and closing braces are missing relative to a compilable
// version. The NOTE(review) comments below mark the visible gaps.
void InstrBuilder::populateWrites(InstrDesc &ID, const MCInst &MCI,
                                  unsigned SchedClassID) {
  const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
  const MCSchedModel &SM = STI.getSchedModel();
  const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);

  // Assumptions made by this algorithm:
  //  1. The number of explicit and implicit register definitions in a MCInst
  //     matches the number of explicit and implicit definitions according to
  //     the opcode descriptor (MCInstrDesc).
  //  2. Uses start at index #(MCDesc.getNumDefs()).
  //  3. There can only be a single optional register definition, an it is
  //     always the last operand of the sequence (excluding extra operands
  //     contributed by variadic opcodes).
  //
  // These assumptions work quite well for most out-of-order in-tree targets
  // like x86. This is mainly because the vast majority of instructions is
  // expanded to MCInst using a straightforward lowering logic that preserves
  // the ordering of the operands.
  //
  // About assumption 1.
  // The algorithm allows non-register operands between register operand
  // definitions. This helps to handle some special ARM instructions with
  // implicit operand increment (-mtriple=armv7):
  //
  // vld1.32 {d18, d19}, [r1]!  @ <MCInst #1463 VLD1q32wb_fixed
  //                            @  <MCOperand Reg:59>
  //                            @  <MCOperand Imm:0>     (!!)
  //                            @  <MCOperand Reg:67>
  //                            @  <MCOperand Imm:0>
  //                            @  <MCOperand Imm:14>
  //                            @  <MCOperand Reg:0>>
  //
  // 6 explicit operands.
  //     1 optional definition
  //     2 explicit definitions (!!)
  //
  // The presence of an 'Imm' operand between the two register definitions
  // breaks the assumption that "register definitions are always at the
  // beginning of the operand sequence".
  //
  // To workaround this issue, this algorithm ignores (i.e. skips) any
  // non-register operands between register definitions.  The optional
  // definition is still at index #(NumOperands-1).
  //
  // According to assumption 2. register reads start at #(NumExplicitDefs-1).
  // That means, register R1 from the example is both read and written.
  unsigned NumExplicitDefs = MCDesc.getNumDefs();
  unsigned NumImplicitDefs = MCDesc.getNumImplicitDefs();
  unsigned NumWriteLatencyEntries = SCDesc.NumWriteLatencyEntries;
  unsigned TotalDefs = NumExplicitDefs + NumImplicitDefs;
  if (MCDesc.hasOptionalDef())
  // NOTE(review): the statement bumping TotalDefs for the optional definition
  // appears to be missing here.

  // Variadic operands follow the declared operands.
  unsigned NumVariadicOps = MCI.getNumOperands() - MCDesc.getNumOperands();
  ID.Writes.resize(TotalDefs + NumVariadicOps);
  // Iterate over the operands list, and skip non-register operands.
  // The first NumExplictDefs register operands are expected to be register
  // definitions.
  unsigned CurrentDef = 0;
  // NOTE(review): the declaration of the loop index `i` is not visible here.
  for (; i < MCI.getNumOperands() && CurrentDef < NumExplicitDefs; ++i) {
    const MCOperand &Op = MCI.getOperand(i);
    // NOTE(review): the guard skipping non-register operands and the
    // `Write.OpIndex = i;` assignment appear to be missing below.

    WriteDescriptor &Write = ID.Writes[CurrentDef];
    if (CurrentDef < NumWriteLatencyEntries) {
      const MCWriteLatencyEntry &WLE =
          *STI.getWriteLatencyEntry(&SCDesc, CurrentDef);
      // Conservatively default to MaxLatency.
      // NOTE(review): the `Write.Latency =` left-hand side appears truncated.
          WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
    // NOTE(review): an `} else {` presumably separates the branches here.
      // Assign a default latency for this write.
      Write.Latency = ID.MaxLatency;
      Write.SClassOrWriteResourceID = 0;
    Write.IsOptionalDef = false;
      dbgs() << "\t\t[Def]    OpIdx=" << Write.OpIndex
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';

  assert(CurrentDef == NumExplicitDefs &&
         "Expected more register operand definitions.");
  // Implicit definitions: OpIndex is the bitwise NOT of the implicit-def
  // index; RegisterID carries the physical register.
  for (CurrentDef = 0; CurrentDef < NumImplicitDefs; ++CurrentDef) {
    unsigned Index = NumExplicitDefs + CurrentDef;
    WriteDescriptor &Write = ID.Writes[Index];
    Write.OpIndex = ~CurrentDef;
    Write.RegisterID = MCDesc.getImplicitDefs()[CurrentDef];
    if (Index < NumWriteLatencyEntries) {
      const MCWriteLatencyEntry &WLE =
          *STI.getWriteLatencyEntry(&SCDesc, Index);
      // Conservatively default to MaxLatency.
      // NOTE(review): the `Write.Latency =` left-hand side appears truncated.
          WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
    // NOTE(review): an `} else {` presumably separates the branches here.
      // Assign a default latency for this write.
      Write.Latency = ID.MaxLatency;
      Write.SClassOrWriteResourceID = 0;

    Write.IsOptionalDef = false;
    assert(Write.RegisterID != 0 && "Expected a valid phys register!");
      dbgs() << "\t\t[Def][I] OpIdx=" << ~Write.OpIndex
             << ", PhysReg=" << MRI.getName(Write.RegisterID)
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';

  // The optional definition (when present) is always the last declared
  // operand, and gets a default latency.
  if (MCDesc.hasOptionalDef()) {
    WriteDescriptor &Write = ID.Writes[NumExplicitDefs + NumImplicitDefs];
    Write.OpIndex = MCDesc.getNumOperands() - 1;
    // Assign a default latency for this write.
    Write.Latency = ID.MaxLatency;
    Write.SClassOrWriteResourceID = 0;
    Write.IsOptionalDef = true;
      dbgs() << "\t\t[Def][O] OpIdx=" << Write.OpIndex
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';

  // FIXME: if an instruction opcode is flagged 'mayStore', and it has no
  // "unmodeledSideEffects', then this logic optimistically assumes that any
  // extra register operands in the variadic sequence is not a register
  // definition.
  //
  // Otherwise, we conservatively assume that any register operand from the
  // variadic sequence is both a register read and a register write.
  bool AssumeUsesOnly = MCDesc.mayStore() && !MCDesc.mayLoad() &&
                        !MCDesc.hasUnmodeledSideEffects();
  CurrentDef = NumExplicitDefs + NumImplicitDefs + MCDesc.hasOptionalDef();
  for (unsigned I = 0, OpIndex = MCDesc.getNumOperands();
       I < NumVariadicOps && !AssumeUsesOnly; ++I, ++OpIndex) {
    const MCOperand &Op = MCI.getOperand(OpIndex);
    // NOTE(review): the guard skipping non-register operands appears missing.

    WriteDescriptor &Write = ID.Writes[CurrentDef];
    Write.OpIndex = OpIndex;
    // Assign a default latency for this write.
    Write.Latency = ID.MaxLatency;
    Write.SClassOrWriteResourceID = 0;
    Write.IsOptionalDef = false;
      dbgs() << "\t\t[Def][V] OpIdx=" << Write.OpIndex
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';

  // Drop the unused trailing slots (e.g. skipped variadic operands).
  ID.Writes.resize(CurrentDef);
// Creates one ReadDescriptor in ID.Reads for every register use of MCI:
// explicit uses first, then implicit uses, then register reads from the
// variadic operand sequence.
//
// NOTE(review): this extract appears truncated — a loop-header continuation,
// some assignments (`Read.UseIndex = I;`) and `++CurrentUse;` increments are
// missing relative to a compilable version.
void InstrBuilder::populateReads(InstrDesc &ID, const MCInst &MCI,
                                 unsigned SchedClassID) {
  const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
  unsigned NumExplicitUses = MCDesc.getNumOperands() - MCDesc.getNumDefs();
  unsigned NumImplicitUses = MCDesc.getNumImplicitUses();
  // Remove the optional definition.
  if (MCDesc.hasOptionalDef())
  // NOTE(review): the statement decrementing NumExplicitUses appears missing.

  unsigned NumVariadicOps = MCI.getNumOperands() - MCDesc.getNumOperands();
  unsigned TotalUses = NumExplicitUses + NumImplicitUses + NumVariadicOps;
  ID.Reads.resize(TotalUses);
  unsigned CurrentUse = 0;
  // Explicit uses start right after the explicit definitions.
  for (unsigned I = 0, OpIndex = MCDesc.getNumDefs(); I < NumExplicitUses;
  // NOTE(review): the loop-header continuation (increments and '{') and the
  // guard skipping non-register operands appear to be missing here.
    const MCOperand &Op = MCI.getOperand(OpIndex);

    ReadDescriptor &Read = ID.Reads[CurrentUse];
    Read.OpIndex = OpIndex;
    // NOTE(review): `Read.UseIndex` is presumably assigned here upstream.
    Read.SchedClassID = SchedClassID;

    LLVM_DEBUG(dbgs() << "\t\t[Use]    OpIdx=" << Read.OpIndex
                      << ", UseIndex=" << Read.UseIndex << '\n');

  // For the purpose of ReadAdvance, implicit uses come directly after explicit
  // uses. The "UseIndex" must be updated according to that implicit layout.
  for (unsigned I = 0; I < NumImplicitUses; ++I) {
    ReadDescriptor &Read = ID.Reads[CurrentUse + I];
    // NOTE(review): `Read.OpIndex` is presumably assigned (~I) here upstream.
    Read.UseIndex = NumExplicitUses + I;
    Read.RegisterID = MCDesc.getImplicitUses()[I];
    Read.SchedClassID = SchedClassID;
    LLVM_DEBUG(dbgs() << "\t\t[Use][I] OpIdx=" << ~Read.OpIndex
                      << ", UseIndex=" << Read.UseIndex << ", RegisterID="
                      << MRI.getName(Read.RegisterID) << '\n');

  CurrentUse += NumImplicitUses;

  // FIXME: If an instruction opcode is marked as 'mayLoad', and it has no
  // "unmodeledSideEffects", then this logic optimistically assumes that any
  // extra register operands in the variadic sequence are not register
  // reads; otherwise every variadic register operand is treated as a read.
  bool AssumeDefsOnly = !MCDesc.mayStore() && MCDesc.mayLoad() &&
                        !MCDesc.hasUnmodeledSideEffects();
  for (unsigned I = 0, OpIndex = MCDesc.getNumOperands();
       I < NumVariadicOps && !AssumeDefsOnly; ++I, ++OpIndex) {
    const MCOperand &Op = MCI.getOperand(OpIndex);
    // NOTE(review): the guard skipping non-register operands appears missing.

    ReadDescriptor &Read = ID.Reads[CurrentUse];
    Read.OpIndex = OpIndex;
    Read.UseIndex = NumExplicitUses + NumImplicitUses + I;
    Read.SchedClassID = SchedClassID;

    LLVM_DEBUG(dbgs() << "\t\t[Use][V] OpIdx=" << Read.OpIndex
                      << ", UseIndex=" << Read.UseIndex << '\n');

  // Drop the unused trailing slots.
  ID.Reads.resize(CurrentUse);
471 Error InstrBuilder::verifyInstrDesc(const InstrDesc &ID,
472 const MCInst &MCI) const {
473 if (ID.NumMicroOps != 0)
474 return ErrorSuccess();
476 bool UsesMemory = ID.MayLoad || ID.MayStore;
477 bool UsesBuffers = !ID.Buffers.empty();
478 bool UsesResources = !ID.Resources.empty();
479 if (!UsesMemory && !UsesBuffers && !UsesResources)
480 return ErrorSuccess();
484 Message = "found an inconsistent instruction that decodes "
485 "into zero opcodes and that consumes load/store "
488 Message = "found an inconsistent instruction that decodes "
489 "to zero opcodes and that consumes scheduler "
493 return make_error<InstructionError<MCInst>>(Message, MCI);
// Builds a new InstrDesc for MCI: resolves the scheduling class (including
// variant classes), computes resource usage and latency, populates the
// read/write operand descriptors, verifies the result, and caches the
// descriptor (by opcode, or per-MCInst for variant/variadic instructions).
//
// NOTE(review): this extract appears truncated — e.g. an `if (IsVariant)`
// guard, an `if (!SchedClassID)` check, the trailing `MCI);` argument of one
// make_error call, and several closing braces appear to be missing relative
// to a compilable version.
Expected<const InstrDesc &>
InstrBuilder::createInstrDescImpl(const MCInst &MCI) {
  assert(STI.getSchedModel().hasInstrSchedModel() &&
         "Itineraries are not yet supported!");

  // Obtain the instruction descriptor from the opcode.
  unsigned short Opcode = MCI.getOpcode();
  const MCInstrDesc &MCDesc = MCII.get(Opcode);
  const MCSchedModel &SM = STI.getSchedModel();

  // Then obtain the scheduling class information from the instruction.
  unsigned SchedClassID = MCDesc.getSchedClass();
  bool IsVariant = SM.getSchedClassDesc(SchedClassID)->isVariant();

  // Try to solve variant scheduling classes.
  // NOTE(review): presumably guarded by `if (IsVariant)` upstream.
  unsigned CPUID = SM.getProcessorID();
  while (SchedClassID && SM.getSchedClassDesc(SchedClassID)->isVariant())
    SchedClassID = STI.resolveVariantSchedClass(SchedClassID, &MCI, CPUID);

  // NOTE(review): presumably guarded by `if (!SchedClassID)` upstream.
    return make_error<InstructionError<MCInst>>(
        "unable to resolve scheduling class for write variant.", MCI);

  // Check if this instruction is supported. Otherwise, report an error.
  const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);
  if (SCDesc.NumMicroOps == MCSchedClassDesc::InvalidNumMicroOps) {
    return make_error<InstructionError<MCInst>>(
        "found an unsupported instruction in the input assembly sequence.",
    // NOTE(review): the trailing `MCI);` argument appears to be missing.

  LLVM_DEBUG(dbgs() << "\n\t\tOpcode Name= " << MCII.getName(Opcode) << '\n');
  LLVM_DEBUG(dbgs() << "\t\tSchedClassID=" << SchedClassID << '\n');

  // Create a new empty descriptor.
  std::unique_ptr<InstrDesc> ID = llvm::make_unique<InstrDesc>();
  ID->NumMicroOps = SCDesc.NumMicroOps;

  // Warn (only once) that calls get a fixed, arbitrary latency.
  if (MCDesc.isCall() && FirstCallInst) {
    // We don't correctly model calls.
    WithColor::warning() << "found a call in the input assembly sequence.\n";
    WithColor::note() << "call instructions are not correctly modeled. "
                      << "Assume a latency of 100cy.\n";
    FirstCallInst = false;

  // Warn (only once) that program-counter updates from returns are ignored.
  if (MCDesc.isReturn() && FirstReturnInst) {
    WithColor::warning() << "found a return instruction in the input"
                         << " assembly sequence.\n";
    WithColor::note() << "program counter updates are ignored.\n";
    FirstReturnInst = false;

  ID->MayLoad = MCDesc.mayLoad();
  ID->MayStore = MCDesc.mayStore();
  ID->HasSideEffects = MCDesc.hasUnmodeledSideEffects();
  ID->BeginGroup = SCDesc.BeginGroup;
  ID->EndGroup = SCDesc.EndGroup;

  initializeUsedResources(*ID, SCDesc, STI, ProcResourceMasks);
  computeMaxLatency(*ID, MCDesc, SCDesc, STI);

  // Validate the operand layout before populating reads/writes.
  if (Error Err = verifyOperands(MCDesc, MCI))
    return std::move(Err);

  populateWrites(*ID, MCI, SchedClassID);
  populateReads(*ID, MCI, SchedClassID);

  LLVM_DEBUG(dbgs() << "\t\tMaxLatency=" << ID->MaxLatency << '\n');
  LLVM_DEBUG(dbgs() << "\t\tNumMicroOps=" << ID->NumMicroOps << '\n');

  // Sanity check on the instruction descriptor.
  if (Error Err = verifyInstrDesc(*ID, MCI))
    return std::move(Err);

  // Now add the new descriptor.
  SchedClassID = MCDesc.getSchedClass();
  bool IsVariadic = MCDesc.isVariadic();
  // Non-variadic, non-variant descriptors are reusable: cache by opcode.
  if (!IsVariadic && !IsVariant) {
    Descriptors[MCI.getOpcode()] = std::move(ID);
    return *Descriptors[MCI.getOpcode()];

  // Otherwise the descriptor is instance-specific: cache per MCInst.
  VariantDescriptors[&MCI] = std::move(ID);
  return *VariantDescriptors[&MCI];
586 Expected<const InstrDesc &>
587 InstrBuilder::getOrCreateInstrDesc(const MCInst &MCI) {
588 if (Descriptors.find_as(MCI.getOpcode()) != Descriptors.end())
589 return *Descriptors[MCI.getOpcode()];
591 if (VariantDescriptors.find(&MCI) != VariantDescriptors.end())
592 return *VariantDescriptors[&MCI];
594 return createInstrDescImpl(MCI);
// Creates an mca::Instruction from MCI: resolves its descriptor, queries the
// (optional) MCInstrAnalysis for dependency-breaking information, then builds
// the register read/write states.
//
// NOTE(review): this extract appears truncated — e.g. the declaration of
// `Mask`, an `if (MCIA)` guard, the `IsDepBreaking =` left-hand side, several
// `continue;` statements and closing braces are missing relative to a
// compilable version.
Expected<std::unique_ptr<Instruction>>
InstrBuilder::createInstruction(const MCInst &MCI) {
  Expected<const InstrDesc &> DescOrErr = getOrCreateInstrDesc(MCI);
  // Propagate descriptor-creation failures to the caller.
  // NOTE(review): the `if (!DescOrErr)` guard appears to be missing.
    return DescOrErr.takeError();
  const InstrDesc &D = *DescOrErr;
  std::unique_ptr<Instruction> NewIS = llvm::make_unique<Instruction>(D);

  // Check if this is a dependency breaking instruction.
  bool IsZeroIdiom = false;
  bool IsDepBreaking = false;
  // NOTE(review): `Mask` is used below but its declaration is not visible in
  // this extract — presumably an APInt populated by the MCIA queries; and the
  // queries are presumably guarded by `if (MCIA)`.
  unsigned ProcID = STI.getSchedModel().getProcessorID();
  IsZeroIdiom = MCIA->isZeroIdiom(MCI, Mask, ProcID);
  // NOTE(review): the `IsDepBreaking =` left-hand side appears truncated.
    IsZeroIdiom || MCIA->isDependencyBreaking(MCI, Mask, ProcID);
  if (MCIA->isOptimizableRegisterMove(MCI, ProcID))
    NewIS->setOptimizableMove();

  // Initialize Reads first.
  for (const ReadDescriptor &RD : D.Reads) {
    if (!RD.isImplicitRead()) {
      // Explicit read: the register comes from the operand list.
      const MCOperand &Op = MCI.getOperand(RD.OpIndex);
      // Skip non-register operands.
      // NOTE(review): the skip guard and the `else` branch separator for the
      // implicit-read case appear to be missing here.
      RegID = RD.RegisterID;

    // Skip invalid register operands.
    // NOTE(review): the `if (!RegID) continue;` guard appears to be missing.

    // Okay, this is a register operand. Create a ReadState for it.
    assert(RegID > 0 && "Invalid register ID found!");
    NewIS->getUses().emplace_back(RD, RegID);
    ReadState &RS = NewIS->getUses().back();

    // A mask of all zeroes means: explicit input operands are not
    // independent. (continuation of this comment truncated in this extract)
    if (Mask.isNullValue()) {
      if (!RD.isImplicitRead())
        RS.setIndependentFromDef();
    // NOTE(review): an `} else {` presumably separates the branches here.
    // Check if this register operand is independent according to `Mask`.
    // Note that Mask may not have enough bits to describe all explicit and
    // implicit input operands. If this register operand doesn't have a
    // corresponding bit in Mask, then conservatively assume that it is
    // dependent.
    if (Mask.getBitWidth() > RD.UseIndex) {
      // Okay. This map describe register use `RD.UseIndex`.
      if (Mask[RD.UseIndex])
        RS.setIndependentFromDef();

  // Early exit if there are no writes.
  if (D.Writes.empty())
    return std::move(NewIS);

  // Track register writes that implicitly clear the upper portion of the
  // underlying super-registers using an APInt.
  APInt WriteMask(D.Writes.size(), 0);

  // Now query the MCInstrAnalysis object to obtain information about which
  // register writes implicitly clear the upper portion of a super-register.
  MCIA->clearsSuperRegisters(MRI, MCI, WriteMask);

  // Initialize writes.
  unsigned WriteIndex = 0;
  for (const WriteDescriptor &WD : D.Writes) {
    unsigned RegID = WD.isImplicitWrite() ? WD.RegisterID
                                          : MCI.getOperand(WD.OpIndex).getReg();
    // Check if this is a optional definition that references NoReg.
    if (WD.IsOptionalDef && !RegID) {
    // NOTE(review): the WriteIndex increment and a `continue;` appear to be
    // missing inside this guard.

    assert(RegID && "Expected a valid register ID!");
    NewIS->getDefs().emplace_back(WD, RegID,
                                  /* ClearsSuperRegs */ WriteMask[WriteIndex],
                                  /* WritesZero */ IsZeroIdiom);

  return std::move(NewIS);