//===--------------------- InstrBuilder.cpp ---------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file implements the InstrBuilder interface.
///
//===----------------------------------------------------------------------===//

#include "llvm/MCA/InstrBuilder.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/WithColor.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "llvm-mca"

namespace llvm {
namespace mca {

InstrBuilder::InstrBuilder(const llvm::MCSubtargetInfo &sti,
                           const llvm::MCInstrInfo &mcii,
                           const llvm::MCRegisterInfo &mri,
                           const llvm::MCInstrAnalysis *mcia)
    : STI(sti), MCII(mcii), MRI(mri), MCIA(mcia), FirstCallInst(true),
      FirstReturnInst(true) {
  const MCSchedModel &SM = STI.getSchedModel();
  ProcResourceMasks.resize(SM.getNumProcResourceKinds());
  computeProcResourceMasks(SM, ProcResourceMasks);
}

static void initializeUsedResources(InstrDesc &ID,
                                    const MCSchedClassDesc &SCDesc,
                                    const MCSubtargetInfo &STI,
                                    ArrayRef<uint64_t> ProcResourceMasks) {
  const MCSchedModel &SM = STI.getSchedModel();

  // Populate resources consumed.
  using ResourcePlusCycles = std::pair<uint64_t, ResourceUsage>;
  std::vector<ResourcePlusCycles> Worklist;

  // Track cycles contributed by resources that are in a "Super" relationship.
  // This is required if we want to correctly match the behavior of method
  // SubtargetEmitter::ExpandProcResource() in Tablegen. When computing the set
  // of "consumed" processor resources and resource cycles, the logic in
  // ExpandProcResource() doesn't update the number of resource cycles
  // contributed by a "Super" resource to a group.
  // We need to take this into account when we find that a processor resource
  // is part of a group, and it is also used as the "Super" of other resources.
  // This map stores the number of cycles contributed by sub-resources that are
  // part of a "Super" resource. The key value is the "Super" resource mask ID.
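  //
  // For example (hypothetical resources): if resource P0 declares PSuper as
  // its "Super", and a write consumes P0 for 2 cycles, this map records
  // {mask(PSuper) -> 2}. When PSuper's cycles are later subtracted from a
  // group that contains it, those 2 cycles are excluded from the subtraction.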
  DenseMap<uint64_t, unsigned> SuperResources;

  unsigned NumProcResources = SM.getNumProcResourceKinds();
  APInt Buffers(NumProcResources, 0);

  bool AllInOrderResources = true;
  bool AnyDispatchHazards = false;
  for (unsigned I = 0, E = SCDesc.NumWriteProcResEntries; I < E; ++I) {
    const MCWriteProcResEntry *PRE = STI.getWriteProcResBegin(&SCDesc) + I;
    const MCProcResourceDesc &PR = *SM.getProcResource(PRE->ProcResourceIdx);
    if (!PRE->Cycles) {
#ifndef NDEBUG
      WithColor::warning()
          << "Ignoring invalid write of zero cycles on processor resource "
          << PR.Name << "\n";
      WithColor::note() << "found in scheduling class " << SCDesc.Name
                        << " (write index #" << I << ")\n";
#endif
      continue;
    }

    uint64_t Mask = ProcResourceMasks[PRE->ProcResourceIdx];
    if (PR.BufferSize < 0) {
      AllInOrderResources = false;
    } else {
      Buffers.setBit(PRE->ProcResourceIdx);
      AnyDispatchHazards |= (PR.BufferSize == 0);
      AllInOrderResources &= (PR.BufferSize <= 1);
    }

    CycleSegment RCy(0, PRE->Cycles, false);
    Worklist.emplace_back(ResourcePlusCycles(Mask, ResourceUsage(RCy)));
    if (PR.SuperIdx) {
      uint64_t Super = ProcResourceMasks[PR.SuperIdx];
      SuperResources[Super] += PRE->Cycles;
    }
  }

  ID.MustIssueImmediately = AllInOrderResources && AnyDispatchHazards;

  // Sort elements by mask popcount, so that we prioritize resource units over
  // resource groups, and smaller groups over larger groups.
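  // For example (assuming the usual mask encoding, where each resource unit
  // gets one bit and a group mask is a leading group bit ORed with the bits
  // of its member units): unit masks such as 0x1 and 0x2 (popcount 1) are
  // visited before a group mask such as 0x83 (popcount 3).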
  sort(Worklist, [](const ResourcePlusCycles &A, const ResourcePlusCycles &B) {
    unsigned popcntA = countPopulation(A.first);
    unsigned popcntB = countPopulation(B.first);
    if (popcntA < popcntB)
      return true;
    if (popcntA > popcntB)
      return false;
    return A.first < B.first;
  });

  uint64_t UsedResourceUnits = 0;
  uint64_t UsedResourceGroups = 0;

  // Remove cycles contributed by smaller resources.
  for (unsigned I = 0, E = Worklist.size(); I < E; ++I) {
    ResourcePlusCycles &A = Worklist[I];
    if (!A.second.size()) {
      assert(countPopulation(A.first) > 1 && "Expected a group!");
      UsedResourceGroups |= PowerOf2Floor(A.first);
      continue;
    }

    ID.Resources.emplace_back(A);
    uint64_t NormalizedMask = A.first;
    if (countPopulation(A.first) == 1) {
      UsedResourceUnits |= A.first;
    } else {
      // Remove the leading 1 from the resource group mask.
      NormalizedMask ^= PowerOf2Floor(NormalizedMask);
      UsedResourceGroups |= (A.first ^ NormalizedMask);
    }

    for (unsigned J = I + 1; J < E; ++J) {
      ResourcePlusCycles &B = Worklist[J];
      if ((NormalizedMask & B.first) == NormalizedMask) {
        B.second.CS.subtract(A.second.size() - SuperResources[A.first]);
        if (countPopulation(B.first) > 1)
          B.second.NumUnits++;
      }
    }
  }

  ID.UsedProcResUnits = UsedResourceUnits;
  ID.UsedProcResGroups = UsedResourceGroups;

  // A SchedWrite may specify a number of cycles in which a resource group
  // is reserved. For example (on target x86; cpu Haswell):
  //
  //  SchedWriteRes<[HWPort0, HWPort1, HWPort01]> {
  //    let ResourceCycles = [2, 2, 3];
  //  }
  //
  // This means:
  // Resource units HWPort0 and HWPort1 are both used for 2cy.
  // Resource group HWPort01 is the union of HWPort0 and HWPort1.
  // Since this write touches both HWPort0 and HWPort1 for 2cy, HWPort01
  // will not be usable for 2 entire cycles from instruction issue.
  //
  // On top of those 2cy, SchedWriteRes explicitly specifies an extra latency
  // of 3 cycles for HWPort01. This tool assumes that the 3cy latency is an
  // extra delay on top of the 2cy latency.
  // During those extra cycles, HWPort01 is not usable by other instructions.
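  //
  // In the example above, the loop below marks HWPort01 as reserved: both of
  // its units (HWPort0 and HWPort1) are already consumed as individual units,
  // so the group cannot accept any other instruction while this write runs.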
  for (ResourcePlusCycles &RPC : ID.Resources) {
    if (countPopulation(RPC.first) > 1 && !RPC.second.isReserved()) {
      // Remove the leading 1 from the resource group mask.
      uint64_t Mask = RPC.first ^ PowerOf2Floor(RPC.first);
      if ((Mask & UsedResourceUnits) == Mask)
        RPC.second.setReserved();
    }
  }

  // Identify extra buffers that are consumed through super resources.
  for (const std::pair<uint64_t, unsigned> &SR : SuperResources) {
    for (unsigned I = 1, E = NumProcResources; I < E; ++I) {
      const MCProcResourceDesc &PR = *SM.getProcResource(I);
      if (PR.BufferSize == -1)
        continue;

      uint64_t Mask = ProcResourceMasks[I];
      if (Mask != SR.first && ((Mask & SR.first) == SR.first))
        Buffers.setBit(I);
    }
  }

  // Now set the buffers.
  if (unsigned NumBuffers = Buffers.countPopulation()) {
    ID.Buffers.resize(NumBuffers);
    for (unsigned I = 0, E = NumProcResources; I < E && NumBuffers; ++I) {
      if (Buffers[I]) {
        --NumBuffers;
        ID.Buffers[NumBuffers] = ProcResourceMasks[I];
      }
    }
  }

  LLVM_DEBUG({
    for (const std::pair<uint64_t, ResourceUsage> &R : ID.Resources)
      dbgs() << "\t\tResource Mask=" << format_hex(R.first, 16) << ", "
             << "Reserved=" << R.second.isReserved() << ", "
             << "#Units=" << R.second.NumUnits << ", "
             << "cy=" << R.second.size() << '\n';
    for (const uint64_t R : ID.Buffers)
      dbgs() << "\t\tBuffer Mask=" << format_hex(R, 16) << '\n';
    dbgs() << "\t\t Used Units=" << format_hex(ID.UsedProcResUnits, 16) << '\n';
    dbgs() << "\t\tUsed Groups=" << format_hex(ID.UsedProcResGroups, 16)
           << '\n';
  });
}

static void computeMaxLatency(InstrDesc &ID, const MCInstrDesc &MCDesc,
                              const MCSchedClassDesc &SCDesc,
                              const MCSubtargetInfo &STI) {
  if (MCDesc.isCall()) {
    // We cannot estimate how long this call will take.
    // Artificially set an arbitrarily high latency (100cy).
    ID.MaxLatency = 100U;
    return;
  }

  int Latency = MCSchedModel::computeInstrLatency(STI, SCDesc);
  // If latency is unknown, then conservatively assume a MaxLatency of 100cy.
  ID.MaxLatency = Latency < 0 ? 100U : static_cast<unsigned>(Latency);
}

static Error verifyOperands(const MCInstrDesc &MCDesc, const MCInst &MCI) {
  // Count register definitions, and skip non-register operands in the
  // process.
  unsigned I, E;
  unsigned NumExplicitDefs = MCDesc.getNumDefs();
  for (I = 0, E = MCI.getNumOperands(); NumExplicitDefs && I < E; ++I) {
    const MCOperand &Op = MCI.getOperand(I);
    if (Op.isReg())
      --NumExplicitDefs;
  }

  if (NumExplicitDefs) {
    return make_error<InstructionError<MCInst>>(
        "expected more register operand definitions.", MCI);
  }

  if (MCDesc.hasOptionalDef()) {
    // Always assume that the optional definition is the last operand.
    const MCOperand &Op = MCI.getOperand(MCDesc.getNumOperands() - 1);
    if (I == MCI.getNumOperands() || !Op.isReg()) {
      std::string Message =
          "expected a register operand for an optional definition. Instruction "
          "has not been correctly analyzed.";
      return make_error<InstructionError<MCInst>>(Message, MCI);
    }
  }

  return ErrorSuccess();
}

void InstrBuilder::populateWrites(InstrDesc &ID, const MCInst &MCI,
                                  unsigned SchedClassID) {
  const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
  const MCSchedModel &SM = STI.getSchedModel();
  const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);

  // Assumptions made by this algorithm:
  //  1. The number of explicit and implicit register definitions in a MCInst
  //     matches the number of explicit and implicit definitions according to
  //     the opcode descriptor (MCInstrDesc).
  //  2. Uses start at index #(MCDesc.getNumDefs()).
  //  3. There can only be a single optional register definition, and it is
  //     always the last operand of the sequence (excluding extra operands
  //     contributed by variadic opcodes).
  //
  // These assumptions work quite well for most out-of-order in-tree targets
  // like x86. This is mainly because the vast majority of instructions are
  // expanded to MCInst using a straightforward lowering logic that preserves
  // the ordering of the operands.
  //
  // About assumption 1.
  // The algorithm allows non-register operands between register operand
  // definitions. This helps to handle some special ARM instructions with
  // implicit operand increment (-mtriple=armv7):
  //
  // vld1.32 {d18, d19}, [r1]!  @ <MCInst #1463 VLD1q32wb_fixed
  //                            @  <MCOperand Reg:59>
  //                            @  <MCOperand Imm:0>     (!!)
  //                            @  <MCOperand Reg:67>
  //                            @  <MCOperand Imm:0>
  //                            @  <MCOperand Imm:14>
  //                            @  <MCOperand Reg:0>>
  //
  // MCDesc reports:
  //  6 explicit operands.
  //  1 optional definition
  //  2 explicit definitions (!!)
  //
  // The presence of an 'Imm' operand between the two register definitions
  // breaks the assumption that "register definitions are always at the
  // beginning of the operand sequence".
  //
  // To work around this issue, this algorithm ignores (i.e. skips) any
  // non-register operands between register definitions. The optional
  // definition is still at index #(NumOperands-1).
  //
  // According to assumption 2, register reads start at #(NumExplicitDefs-1).
  // That means register R1 from the example is both read and written.
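  //
  // As a simpler illustration (hypothetical x86 case): in an MCInst for
  // "addl %esi, %edi", operand #0 is the EDI definition, and the uses
  // (EDI and ESI) start at index #1, which is #(MCDesc.getNumDefs()).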
  unsigned NumExplicitDefs = MCDesc.getNumDefs();
  unsigned NumImplicitDefs = MCDesc.getNumImplicitDefs();
  unsigned NumWriteLatencyEntries = SCDesc.NumWriteLatencyEntries;
  unsigned TotalDefs = NumExplicitDefs + NumImplicitDefs;
  if (MCDesc.hasOptionalDef())
    TotalDefs++;

  unsigned NumVariadicOps = MCI.getNumOperands() - MCDesc.getNumOperands();
  ID.Writes.resize(TotalDefs + NumVariadicOps);
  // Iterate over the operands list, and skip non-register operands.
  // The first NumExplicitDefs register operands are expected to be register
  // definitions.
  unsigned CurrentDef = 0;
  unsigned i = 0;
  for (; i < MCI.getNumOperands() && CurrentDef < NumExplicitDefs; ++i) {
    const MCOperand &Op = MCI.getOperand(i);
    if (!Op.isReg())
      continue;

    WriteDescriptor &Write = ID.Writes[CurrentDef];
    Write.OpIndex = i;
    if (CurrentDef < NumWriteLatencyEntries) {
      const MCWriteLatencyEntry &WLE =
          *STI.getWriteLatencyEntry(&SCDesc, CurrentDef);
      // Conservatively default to MaxLatency.
      Write.Latency =
          WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
    } else {
      // Assign a default latency for this write.
      Write.Latency = ID.MaxLatency;
      Write.SClassOrWriteResourceID = 0;
    }
    Write.IsOptionalDef = false;
    LLVM_DEBUG({
      dbgs() << "\t\t[Def]    OpIdx=" << Write.OpIndex
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
    ++CurrentDef;
  }

  assert(CurrentDef == NumExplicitDefs &&
         "Expected more register operand definitions.");
  for (CurrentDef = 0; CurrentDef < NumImplicitDefs; ++CurrentDef) {
    unsigned Index = NumExplicitDefs + CurrentDef;
    WriteDescriptor &Write = ID.Writes[Index];
    Write.OpIndex = ~CurrentDef;
    Write.RegisterID = MCDesc.getImplicitDefs()[CurrentDef];
    if (Index < NumWriteLatencyEntries) {
      const MCWriteLatencyEntry &WLE =
          *STI.getWriteLatencyEntry(&SCDesc, Index);
      // Conservatively default to MaxLatency.
      Write.Latency =
          WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
      Write.SClassOrWriteResourceID = WLE.WriteResourceID;
    } else {
      // Assign a default latency for this write.
      Write.Latency = ID.MaxLatency;
      Write.SClassOrWriteResourceID = 0;
    }

    Write.IsOptionalDef = false;
    assert(Write.RegisterID != 0 && "Expected a valid phys register!");
    LLVM_DEBUG({
      dbgs() << "\t\t[Def][I] OpIdx=" << ~Write.OpIndex
             << ", PhysReg=" << MRI.getName(Write.RegisterID)
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
  }

  if (MCDesc.hasOptionalDef()) {
    WriteDescriptor &Write = ID.Writes[NumExplicitDefs + NumImplicitDefs];
    Write.OpIndex = MCDesc.getNumOperands() - 1;
    // Assign a default latency for this write.
    Write.Latency = ID.MaxLatency;
    Write.SClassOrWriteResourceID = 0;
    Write.IsOptionalDef = true;
    LLVM_DEBUG({
      dbgs() << "\t\t[Def][O] OpIdx=" << Write.OpIndex
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
  }

  if (!NumVariadicOps)
    return;

  // FIXME: If an instruction opcode is flagged 'mayStore', and it has no
  // 'unmodeledSideEffects', then this logic optimistically assumes that any
  // extra register operands in the variadic sequence are not register
  // definitions.
  //
  // Otherwise, we conservatively assume that any register operand from the
  // variadic sequence is both a register read and a register write.
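  //
  // For example (ARM, illustrative): a store-multiple such as
  // "stm r0, {r1, r2, r3}" only reads the registers in its variadic list;
  // none of them is written.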
  bool AssumeUsesOnly = MCDesc.mayStore() && !MCDesc.mayLoad() &&
                        !MCDesc.hasUnmodeledSideEffects();
  CurrentDef = NumExplicitDefs + NumImplicitDefs + MCDesc.hasOptionalDef();
  for (unsigned I = 0, OpIndex = MCDesc.getNumOperands();
       I < NumVariadicOps && !AssumeUsesOnly; ++I, ++OpIndex) {
    const MCOperand &Op = MCI.getOperand(OpIndex);
    if (!Op.isReg())
      continue;

    WriteDescriptor &Write = ID.Writes[CurrentDef];
    Write.OpIndex = OpIndex;
    // Assign a default latency for this write.
    Write.Latency = ID.MaxLatency;
    Write.SClassOrWriteResourceID = 0;
    Write.IsOptionalDef = false;
    ++CurrentDef;
    LLVM_DEBUG({
      dbgs() << "\t\t[Def][V] OpIdx=" << Write.OpIndex
             << ", Latency=" << Write.Latency
             << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
    });
  }

  ID.Writes.resize(CurrentDef);
}

void InstrBuilder::populateReads(InstrDesc &ID, const MCInst &MCI,
                                 unsigned SchedClassID) {
  const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
  unsigned NumExplicitUses = MCDesc.getNumOperands() - MCDesc.getNumDefs();
  unsigned NumImplicitUses = MCDesc.getNumImplicitUses();
  // Remove the optional definition.
  if (MCDesc.hasOptionalDef())
    --NumExplicitUses;
  unsigned NumVariadicOps = MCI.getNumOperands() - MCDesc.getNumOperands();
  unsigned TotalUses = NumExplicitUses + NumImplicitUses + NumVariadicOps;
  ID.Reads.resize(TotalUses);
  unsigned CurrentUse = 0;
  for (unsigned I = 0, OpIndex = MCDesc.getNumDefs(); I < NumExplicitUses;
       ++I, ++OpIndex) {
    const MCOperand &Op = MCI.getOperand(OpIndex);
    if (!Op.isReg())
      continue;

    ReadDescriptor &Read = ID.Reads[CurrentUse];
    Read.OpIndex = OpIndex;
    Read.UseIndex = I;
    Read.SchedClassID = SchedClassID;
    ++CurrentUse;
    LLVM_DEBUG(dbgs() << "\t\t[Use]    OpIdx=" << Read.OpIndex
                      << ", UseIndex=" << Read.UseIndex << '\n');
  }

  // For the purpose of ReadAdvance, implicit uses come directly after explicit
  // uses. The "UseIndex" must be updated according to that implicit layout.
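  // For example (hypothetical layout): with two explicit uses and one
  // implicit use, the explicit uses get UseIndex 0 and 1, and the implicit
  // use gets UseIndex 2.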
  for (unsigned I = 0; I < NumImplicitUses; ++I) {
    ReadDescriptor &Read = ID.Reads[CurrentUse + I];
    Read.OpIndex = ~I;
    Read.UseIndex = NumExplicitUses + I;
    Read.RegisterID = MCDesc.getImplicitUses()[I];
    Read.SchedClassID = SchedClassID;
    LLVM_DEBUG(dbgs() << "\t\t[Use][I] OpIdx=" << ~Read.OpIndex
                      << ", UseIndex=" << Read.UseIndex << ", RegisterID="
                      << MRI.getName(Read.RegisterID) << '\n');
  }

  CurrentUse += NumImplicitUses;

  // FIXME: If an instruction opcode is marked as 'mayLoad', and it has no
  // 'unmodeledSideEffects', then this logic optimistically assumes that any
  // extra register operands in the variadic sequence are not register
  // reads.
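  //
  // For example (ARM, illustrative): a load-multiple such as
  // "ldm r0, {r1, r2, r3}" only writes the registers in its variadic list;
  // none of them is read.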
  bool AssumeDefsOnly = !MCDesc.mayStore() && MCDesc.mayLoad() &&
                        !MCDesc.hasUnmodeledSideEffects();
  for (unsigned I = 0, OpIndex = MCDesc.getNumOperands();
       I < NumVariadicOps && !AssumeDefsOnly; ++I, ++OpIndex) {
    const MCOperand &Op = MCI.getOperand(OpIndex);
    if (!Op.isReg())
      continue;

    ReadDescriptor &Read = ID.Reads[CurrentUse];
    Read.OpIndex = OpIndex;
    Read.UseIndex = NumExplicitUses + NumImplicitUses + I;
    Read.SchedClassID = SchedClassID;
    ++CurrentUse;
    LLVM_DEBUG(dbgs() << "\t\t[Use][V] OpIdx=" << Read.OpIndex
                      << ", UseIndex=" << Read.UseIndex << '\n');
  }

  ID.Reads.resize(CurrentUse);
}

Error InstrBuilder::verifyInstrDesc(const InstrDesc &ID,
                                    const MCInst &MCI) const {
  if (ID.NumMicroOps != 0)
    return ErrorSuccess();

  bool UsesMemory = ID.MayLoad || ID.MayStore;
  bool UsesBuffers = !ID.Buffers.empty();
  bool UsesResources = !ID.Resources.empty();
  if (!UsesMemory && !UsesBuffers && !UsesResources)
    return ErrorSuccess();

  StringRef Message;
  if (UsesMemory) {
    Message = "found an inconsistent instruction that decodes "
              "into zero opcodes and that consumes load/store "
              "LSUnit resources.";
  } else {
    Message = "found an inconsistent instruction that decodes "
              "to zero opcodes and that consumes scheduler "
              "resources.";
  }

  return make_error<InstructionError<MCInst>>(Message, MCI);
}

Expected<const InstrDesc &>
InstrBuilder::createInstrDescImpl(const MCInst &MCI) {
  assert(STI.getSchedModel().hasInstrSchedModel() &&
         "Itineraries are not yet supported!");

  // Obtain the instruction descriptor from the opcode.
  unsigned short Opcode = MCI.getOpcode();
  const MCInstrDesc &MCDesc = MCII.get(Opcode);
  const MCSchedModel &SM = STI.getSchedModel();

  // Then obtain the scheduling class information from the instruction.
  unsigned SchedClassID = MCDesc.getSchedClass();
  bool IsVariant = SM.getSchedClassDesc(SchedClassID)->isVariant();

  // Try to solve variant scheduling classes.
  if (IsVariant) {
    unsigned CPUID = SM.getProcessorID();
    while (SchedClassID && SM.getSchedClassDesc(SchedClassID)->isVariant())
      SchedClassID = STI.resolveVariantSchedClass(SchedClassID, &MCI, CPUID);

    if (!SchedClassID) {
      return make_error<InstructionError<MCInst>>(
          "unable to resolve scheduling class for write variant.", MCI);
    }
  }

  // Check if this instruction is supported. Otherwise, report an error.
  const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);
  if (SCDesc.NumMicroOps == MCSchedClassDesc::InvalidNumMicroOps) {
    return make_error<InstructionError<MCInst>>(
        "found an unsupported instruction in the input assembly sequence.",
        MCI);
  }

  LLVM_DEBUG(dbgs() << "\n\t\tOpcode Name= " << MCII.getName(Opcode) << '\n');
  LLVM_DEBUG(dbgs() << "\t\tSchedClassID=" << SchedClassID << '\n');

  // Create a new empty descriptor.
  std::unique_ptr<InstrDesc> ID = llvm::make_unique<InstrDesc>();
  ID->NumMicroOps = SCDesc.NumMicroOps;
  ID->SchedClassID = SchedClassID;

  if (MCDesc.isCall() && FirstCallInst) {
    // We don't correctly model calls.
    WithColor::warning() << "found a call in the input assembly sequence.\n";
    WithColor::note() << "call instructions are not correctly modeled. "
                      << "Assume a latency of 100cy.\n";
    FirstCallInst = false;
  }

  if (MCDesc.isReturn() && FirstReturnInst) {
    WithColor::warning() << "found a return instruction in the input"
                         << " assembly sequence.\n";
    WithColor::note() << "program counter updates are ignored.\n";
    FirstReturnInst = false;
  }

  ID->MayLoad = MCDesc.mayLoad();
  ID->MayStore = MCDesc.mayStore();
  ID->HasSideEffects = MCDesc.hasUnmodeledSideEffects();
  ID->BeginGroup = SCDesc.BeginGroup;
  ID->EndGroup = SCDesc.EndGroup;

  initializeUsedResources(*ID, SCDesc, STI, ProcResourceMasks);
  computeMaxLatency(*ID, MCDesc, SCDesc, STI);

  if (Error Err = verifyOperands(MCDesc, MCI))
    return std::move(Err);

  populateWrites(*ID, MCI, SchedClassID);
  populateReads(*ID, MCI, SchedClassID);

  LLVM_DEBUG(dbgs() << "\t\tMaxLatency=" << ID->MaxLatency << '\n');
  LLVM_DEBUG(dbgs() << "\t\tNumMicroOps=" << ID->NumMicroOps << '\n');

  // Sanity check on the instruction descriptor.
  if (Error Err = verifyInstrDesc(*ID, MCI))
    return std::move(Err);

  // Now add the new descriptor.
  bool IsVariadic = MCDesc.isVariadic();
  if (!IsVariadic && !IsVariant) {
    Descriptors[MCI.getOpcode()] = std::move(ID);
    return *Descriptors[MCI.getOpcode()];
  }

  VariantDescriptors[&MCI] = std::move(ID);
  return *VariantDescriptors[&MCI];
}

Expected<const InstrDesc &>
InstrBuilder::getOrCreateInstrDesc(const MCInst &MCI) {
  if (Descriptors.find_as(MCI.getOpcode()) != Descriptors.end())
    return *Descriptors[MCI.getOpcode()];

  if (VariantDescriptors.find(&MCI) != VariantDescriptors.end())
    return *VariantDescriptors[&MCI];

  return createInstrDescImpl(MCI);
}

Expected<std::unique_ptr<Instruction>>
InstrBuilder::createInstruction(const MCInst &MCI) {
  Expected<const InstrDesc &> DescOrErr = getOrCreateInstrDesc(MCI);
  if (!DescOrErr)
    return DescOrErr.takeError();
  const InstrDesc &D = *DescOrErr;
  std::unique_ptr<Instruction> NewIS = llvm::make_unique<Instruction>(D);

  // Check if this is a dependency breaking instruction.
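  // On X86, for example, `xorps %xmm0, %xmm0` is a zero idiom: the result is
  // always zero regardless of the inputs, so its register reads can be
  // treated as independent from any previous writes.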
  APInt Mask;

  bool IsZeroIdiom = false;
  bool IsDepBreaking = false;
  if (MCIA) {
    unsigned ProcID = STI.getSchedModel().getProcessorID();
    IsZeroIdiom = MCIA->isZeroIdiom(MCI, Mask, ProcID);
    IsDepBreaking =
        IsZeroIdiom || MCIA->isDependencyBreaking(MCI, Mask, ProcID);
    if (MCIA->isOptimizableRegisterMove(MCI, ProcID))
      NewIS->setOptimizableMove();
  }

  // Initialize Reads first.
  for (const ReadDescriptor &RD : D.Reads) {
    int RegID = -1;
    if (!RD.isImplicitRead()) {
      // Explicit read.
      const MCOperand &Op = MCI.getOperand(RD.OpIndex);
      // Skip non-register operands.
      if (!Op.isReg())
        continue;
      RegID = Op.getReg();
    } else {
      // Implicit read.
      RegID = RD.RegisterID;
    }

    // Skip invalid register operands.
    if (!RegID)
      continue;

    // Okay, this is a register operand. Create a ReadState for it.
    assert(RegID > 0 && "Invalid register ID found!");
    NewIS->getUses().emplace_back(RD, RegID);
    ReadState &RS = NewIS->getUses().back();

    if (IsDepBreaking) {
      // A mask of all zeroes means that all explicit input operands are
      // independent from their definitions; implicit reads are conservatively
      // left dependent.
      if (Mask.isNullValue()) {
        if (!RD.isImplicitRead())
          RS.setIndependentFromDef();
      } else {
        // Check if this register operand is independent according to `Mask`.
        // Note that Mask may not have enough bits to describe all explicit
        // and implicit input operands. If this register operand doesn't have
        // a corresponding bit in Mask, then conservatively assume that it is
        // dependent.
        if (Mask.getBitWidth() > RD.UseIndex) {
          // Okay. This Mask describes register use `RD.UseIndex`.
          if (Mask[RD.UseIndex])
            RS.setIndependentFromDef();
        }
      }
    }
  }

  // Early exit if there are no writes.
  if (D.Writes.empty())
    return std::move(NewIS);

  // Track register writes that implicitly clear the upper portion of the
  // underlying super-registers using an APInt.
  APInt WriteMask(D.Writes.size(), 0);

  // Now query the MCInstrAnalysis object to obtain information about which
  // register writes implicitly clear the upper portion of a super-register.
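  // For example, on X86-64 a write to EAX implicitly clears the upper half of
  // RAX; the bit in WriteMask that corresponds to such a write is set.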
  if (MCIA)
    MCIA->clearsSuperRegisters(MRI, MCI, WriteMask);

  // Initialize writes.
  unsigned WriteIndex = 0;
  for (const WriteDescriptor &WD : D.Writes) {
    unsigned RegID = WD.isImplicitWrite() ? WD.RegisterID
                                          : MCI.getOperand(WD.OpIndex).getReg();
    // Check if this is an optional definition that references NoReg.
    if (WD.IsOptionalDef && !RegID) {
      ++WriteIndex;
      continue;
    }

    assert(RegID && "Expected a valid register ID!");
    NewIS->getDefs().emplace_back(WD, RegID,
                                  /* ClearsSuperRegs */ WriteMask[WriteIndex],
                                  /* WritesZero */ IsZeroIdiom);
    ++WriteIndex;
  }

  return std::move(NewIS);
}

} // namespace mca
} // namespace llvm