//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/SwiftErrorValueTracking.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace PatternMatch;
using namespace SwitchCG;

#define DEBUG_TYPE "isel"

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<unsigned, true>
    LimitFPPrecision("limit-float-precision",
                     cl::desc("Generate low-precision inline sequences "
                              "for some float libcalls"),
                     cl::location(LimitFloatPrecision), cl::Hidden,
                     cl::init(0));

static cl::opt<unsigned> SwitchPeelThreshold(
    "switch-peel-threshold", cl::Hidden, cl::init(66),
    cl::desc("Set the case probability threshold for peeling the case from a "
             "switch statement. A value greater than 100 will void this "
             "optimization"));

// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
//   %buffer = alloca [4096 x i8]
//   %data = load [4096 x i8]* %argPtr
//   store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;

// Return the calling convention if the Value passed requires ABI mangling as it
// is a parameter to a function or a return value from a function which is not
// an intrinsic.
static Optional<CallingConv::ID> getABIRegCopyCC(const Value *V) {
  if (auto *R = dyn_cast<ReturnInst>(V))
    return R->getParent()->getParent()->getCallingConv();

  if (auto *CI = dyn_cast<CallInst>(V)) {
    const bool IsInlineAsm = CI->isInlineAsm();
    const bool IsIndirectFunctionCall =
        !IsInlineAsm && !CI->getCalledFunction();

    // It is possible that the call instruction is an inline asm statement or an
    // indirect function call in which case the return value of
    // getCalledFunction() would be nullptr.
    const bool IsIntrinsicCall =
        !IsInlineAsm && !IsIndirectFunctionCall &&
        CI->getCalledFunction()->getIntrinsicID() != Intrinsic::not_intrinsic;

    if (!IsInlineAsm && !IsIntrinsicCall)
      return CI->getCallingConv();
  }

  return None;
}

static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      Optional<CallingConv::ID> CC);

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent.  If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL,
                                const SDValue *Parts, unsigned NumParts,
                                MVT PartVT, EVT ValueVT, const Value *V,
                                Optional<CallingConv::ID> CC = None,
                                Optional<ISD::NodeType> AssertOp = None) {
  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
                                  CC);

  assert(NumParts > 0 && "No parts to assemble!");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
      unsigned RoundParts =
          (NumParts & (NumParts - 1)) ? 1 << Log2_32(NumParts) : NumParts;
      unsigned RoundBits = PartBits * RoundParts;
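      // For example, NumParts == 6 rounds down to RoundParts == 4: the four
      // "round" parts are assembled here, and the two remaining "odd" parts
      // are assembled and OR'd in below.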
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2,
                              PartVT, HalfVT, V);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
                              RoundParts / 2, PartVT, HalfVT, V);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
                              OddVT, V, CC);

        // Combine the round and odd parts.
        Lo = Val;
        if (DAG.getDataLayout().isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi =
            DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                        DAG.getConstant(Lo.getValueSizeInBits(), DL,
                                        TLI.getPointerTy(DAG.getDataLayout())));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
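        // In effect: Val = (anyext(Hi) << RoundBits) | zext(Lo), placing the
        // odd high part above the power-of-2 low part.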
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V, CC);
    }
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  // PartEVT is the type of the register class that holds the value.
  // ValueVT is the type of the inline asm operation.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
      ValueVT.bitsLT(PartEVT)) {
    // For an FP value in an integer part, we need to truncate to the right
    // width first.
    PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
    Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
  }

  // Handle types that have the same size.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle types with different sizes.
  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp.hasValue())
        Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUND's are always exact here.
    if (ValueVT.bitsLT(Val.getValueType()))
      return DAG.getNode(
          ISD::FP_ROUND, DL, ValueVT, Val,
          DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout())));
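    // The trailing TargetConstant 1 is FP_ROUND's "exact" operand: it asserts
    // that no bits are discarded, since the value originally had the narrower
    // type.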

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  // Handle MMX to a narrower integer type by bitcasting MMX to integer and
  // then truncating.
  if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
      ValueVT.bitsLT(PartEVT)) {
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
    return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  report_fatal_error("Unknown mismatch in getCopyFromParts!");
}

static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (!I)
    return Ctx.emitError(ErrMsg);

  const char *AsmError = ", possible invalid constraint for vector type";
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (isa<InlineAsm>(CI->getCalledValue()))
      return Ctx.emitError(I, ErrMsg + AsmError);

  return Ctx.emitError(I, ErrMsg);
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent.  If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      Optional<CallingConv::ID> CallConv) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const bool IsABIRegCopy = CallConv.hasValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs;

    if (IsABIRegCopy) {
      NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
          *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
          NumIntermediates, RegisterVT);
    } else {
      NumRegs =
          TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                     NumIntermediates, RegisterVT);
    }

    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT.getSizeInBits() ==
           Parts[0].getSimpleValueType().getSizeInBits() &&
           "Part type sizes don't match!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
                                  PartVT, IntermediateVT, V);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
                                  PartVT, IntermediateVT, V);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    EVT BuiltVectorTy =
        EVT::getVectorVT(*DAG.getContext(), IntermediateVT.getScalarType(),
                         (IntermediateVT.isVector()
                              ? IntermediateVT.getVectorNumElements() * NumParts
                              : NumIntermediates));
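    // For example, four <2 x i32> intermediates concatenate into <8 x i32>;
    // four scalar i32 intermediates are instead collected with BUILD_VECTOR.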
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, BuiltVectorTy, Ops);
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // If the element type of the source/dest vectors are the same, but the
    // parts vector has more elements than the value vector, then we have a
    // vector widening case (e.g. <2 x float> -> <4 x float>).  Extract the
    // elements we want.
    if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
      assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
             "Cannot narrow, it would be a lossy transformation");
      return DAG.getNode(
          ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
    }

    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    assert(PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() &&
           "Cannot handle this kind of promotion");
    // Promoted vector extract
    return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  if (ValueVT.getVectorNumElements() != 1) {
    // Certain ABIs require that vectors are passed as integers. For vectors
    // that are the same size, this is an obvious bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    } else if (ValueVT.getSizeInBits() < PartEVT.getSizeInBits()) {
      // Bitcast Val back to the original type and extract the corresponding
      // vector we want.
      unsigned Elts = PartEVT.getSizeInBits() / ValueVT.getScalarSizeInBits();
      EVT WiderVecType = EVT::getVectorVT(*DAG.getContext(),
                                          ValueVT.getVectorElementType(), Elts);
      Val = DAG.getBitcast(WiderVecType, Val);
      return DAG.getNode(
          ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
    }

    diagnosePossiblyInvalidConstraint(
        *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
    return DAG.getUNDEF(ValueVT);
  }

  // Handle cases such as i8 -> <1 x i1>
  EVT ValueSVT = ValueVT.getVectorElementType();
  if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT)
    Val = ValueVT.isFloatingPoint() ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
                                    : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);

  return DAG.getBuildVector(ValueVT, DL, Val);
}

static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 Optional<CallingConv::ID> CallConv);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts.  If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
                           SDValue *Parts, unsigned NumParts, MVT PartVT,
                           const Value *V,
                           Optional<CallingConv::ID> CallConv = None,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
                                CallConv);

  unsigned PartBits = PartVT.getSizeInBits();
  unsigned OrigNumParts = NumParts;
  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
         "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      if (ValueVT.isFloatingPoint()) {
        // FP values need to be bitcast, then extended if they are being put
        // into a larger container.
        ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      }
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT) {
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2.  Split off and copy the tail.
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = 1 << Log2_32(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
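    // For example, NumParts == 3 gives RoundParts == 2: the value's top third
    // becomes the single odd part (shifted down and copied first), and the
    // low power-of-2 portion is truncated and bisected below.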
    SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
      DAG.getShiftAmountConstant(RoundBits, ValueVT, DL, /*LegalTypes*/false));

    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
                   CallConv);

    if (DAG.getDataLayout().isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2.  Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

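  // Each pass of the loop below halves the pieces in place; e.g. for four i32
  // parts from an i128 value: [i128] -> [i64, i64] -> [i32, i32, i32, i32].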
  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0, DL));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (DAG.getDataLayout().isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}

static SDValue widenVectorToPartType(SelectionDAG &DAG,
                                     SDValue Val, const SDLoc &DL, EVT PartVT) {
  if (!PartVT.isVector())
    return SDValue();

  EVT ValueVT = Val.getValueType();
  unsigned PartNumElts = PartVT.getVectorNumElements();
  unsigned ValueNumElts = ValueVT.getVectorNumElements();
  if (PartNumElts > ValueNumElts &&
      PartVT.getVectorElementType() == ValueVT.getVectorElementType()) {
    EVT ElementVT = PartVT.getVectorElementType();
    // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
    // undef elements.
    SmallVector<SDValue, 16> Ops;
    DAG.ExtractVectorElements(Val, Ops);
    SDValue EltUndef = DAG.getUNDEF(ElementVT);
    for (unsigned i = ValueNumElts, e = PartNumElts; i != e; ++i)
      Ops.push_back(EltUndef);

    // FIXME: Use CONCAT for 2x -> 4x.
    return DAG.getBuildVector(PartVT, DL, Ops);
  }

  return SDValue();
}

/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 Optional<CallingConv::ID> CallConv) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const bool IsABIRegCopy = CallConv.hasValue();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
      Val = Widened;
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                   ValueVT.getVectorElementType()) &&
               PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {

      // Promoted vector extract
      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    } else {
      if (ValueVT.getVectorNumElements() == 1) {
        Val = DAG.getNode(
            ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
            DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
      } else {
        assert(PartVT.getSizeInBits() > ValueVT.getSizeInBits() &&
               "lossy conversion of vector to scalar type");
        EVT IntermediateType =
            EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getBitcast(IntermediateType, Val);
        Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
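        // For example, a <2 x i16> value headed for an i64 part is bitcast to
        // i32 here and then any-extended to i64.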
      }
    }

    assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs;
  if (IsABIRegCopy) {
    NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
        *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
        NumIntermediates, RegisterVT);
  } else {
    NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
  }

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  unsigned IntermediateNumElts = IntermediateVT.isVector() ?
    IntermediateVT.getVectorNumElements() : 1;

  // Convert the vector to the appropriate type if necessary.
  unsigned DestVectorNoElts = NumIntermediates * IntermediateNumElts;

  EVT BuiltVectorTy = EVT::getVectorVT(
      *DAG.getContext(), IntermediateVT.getScalarType(), DestVectorNoElts);
  MVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
  if (ValueVT != BuiltVectorTy) {
    if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy))
      Val = Widened;

    Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
  }

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector()) {
      Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
                           DAG.getConstant(i * IntermediateNumElts, DL, IdxVT));
    } else {
      Ops[i] = DAG.getNode(
          ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
          DAG.getConstant(i, DL, IdxVT));
    }
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split the value into
    // legal parts.
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
                     CallConv);
  }
}

RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
                           EVT valuevt, Optional<CallingConv::ID> CC)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
      RegCount(1, regs.size()), CallConv(CC) {}

RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL, unsigned Reg, Type *Ty,
                           Optional<CallingConv::ID> CC) {
  ComputeValueVTs(TLI, DL, Ty, ValueVTs);

  CallConv = CC;

  for (EVT ValueVT : ValueVTs) {
    unsigned NumRegs =
        isABIMangled()
            ? TLI.getNumRegistersForCallingConv(Context, CC.getValue(), ValueVT)
            : TLI.getNumRegisters(Context, ValueVT);
    MVT RegisterVT =
        isABIMangled()
            ? TLI.getRegisterTypeForCallingConv(Context, CC.getValue(), ValueVT)
            : TLI.getRegisterType(Context, ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    RegCount.push_back(NumRegs);
    Reg += NumRegs;
  }
}

SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      const SDLoc &dl, SDValue &Chain,
                                      SDValue *Flag, const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = RegCount[Value];
    MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
                                          *DAG.getContext(),
                                          CallConv.getValue(), RegVTs[Value])
                                    : RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Flag) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
        *Flag = P.getValue(2);
      }

      Chain = P.getValue(1);

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!Register::isVirtualRegister(Regs[Part + i]) ||
          !RegisterVT.isInteger())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
          FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getScalarSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();

      if (NumZeroBits == RegSize) {
        // The current value is a zero.
        // Explicitly express that as it would be easier for
        // optimizations to kick in.
        Parts[i] = DAG.getConstant(0, dl, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent.  For
      // now, just use the tightest assertzext/assertsext possible.
      bool isSExt;
      EVT FromVT(MVT::Other);
      if (NumZeroBits) {
        FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
        isSExt = false;
      } else if (NumSignBits > 1) {
        FromVT =
            EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
        isSExt = true;
      } else {
        continue;
      }
      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
                                     RegisterVT, ValueVT, V, CallConv);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}

void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                                 const SDLoc &dl, SDValue &Chain, SDValue *Flag,
                                 const Value *V,
                                 ISD::NodeType PreferredExtendType) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = PreferredExtendType;

  // Get the list of the values' legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumParts = RegCount[Value];

    MVT RegisterVT = isABIMangled() ? TLI.getRegisterTypeForCallingConv(
                                          *DAG.getContext(),
                                          CallConv.getValue(), RegVTs[Value])
                                    : RegVTs[Value];

    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
                   NumParts, RegisterVT, V, CallConv, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Flag) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
      *Flag = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Flag)
    // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
    // flagged to it. That is, the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    // c1, f1 = CopyToReg
    // c2, f2 = CopyToReg
    // c3     = TokenFactor c1, c2
    // ...
    //        = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
                                        unsigned MatchingIdx, const SDLoc &dl,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
  if (HasMatching)
    Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
  else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word.  That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
  Ops.push_back(Res);

  if (Code == InlineAsm::Kind_Clobber) {
    // Clobbers should always have a 1:1 mapping with registers, and may
    // reference registers that have illegal (e.g. vector) types. Hence, we
    // shouldn't try to apply any sort of splitting logic to them.
    assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
           "No 1:1 mapping from clobbers to regs?");
    unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
    (void)SP;
    for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
      Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
      assert(
          (Regs[I] != SP ||
           DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
          "If we clobbered the stack pointer, MFI should know about it.");
    }

    return;
  }

  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
    MVT RegisterVT = RegVTs[Value];
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
    }
  }
}

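// Returns one (register, size-in-bits) pair per register the value occupies;
// e.g. a value spanning two 64-bit registers and one 32-bit register would
// yield {Reg0, 64}, {Reg1, 64}, {Reg2, 32} (the exact breakdown is
// target-dependent).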
SmallVector<std::pair<unsigned, unsigned>, 4>
RegsForValue::getRegsAndSizes() const {
  SmallVector<std::pair<unsigned, unsigned>, 4> OutVec;
  unsigned I = 0;
  for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
    unsigned RegCount = std::get<0>(CountAndVT);
    MVT RegisterVT = std::get<1>(CountAndVT);
    unsigned RegisterSize = RegisterVT.getSizeInBits();
    for (unsigned E = I + RegCount; I != E; ++I)
      OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
  }
  return OutVec;
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
                               const TargetLibraryInfo *li) {
  AA = aa;
  GFI = gfi;
  LibInfo = li;
  DL = &DAG.getDataLayout();
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
  SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
}

void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
  StatepointLowering.clear();
}

void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

// Update DAG root to include dependencies on Pending chains.
SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
  SDValue Root = DAG.getRoot();

  if (Pending.empty())
    return Root;

  // Add current root to PendingChains, unless we already indirectly
  // depend on it.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = Pending.size();
    for (; i != e; ++i) {
      assert(Pending[i].getNode()->getNumOperands() > 1);
      if (Pending[i].getNode()->getOperand(0) == Root)
        break; // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      Pending.push_back(Root);
  }

  if (Pending.size() == 1)
    Root = Pending[0];
  else
    Root = DAG.getTokenFactor(getCurSDLoc(), Pending);

  DAG.setRoot(Root);
  Pending.clear();
  return Root;
}

SDValue SelectionDAGBuilder::getMemoryRoot() {
  return updateRoot(PendingLoads);
}

SDValue SelectionDAGBuilder::getRoot() {
  // Chain up all pending constrained intrinsics together with all
  // pending loads, by simply appending them to PendingLoads and
  // then calling getMemoryRoot().
  PendingLoads.reserve(PendingLoads.size() +
                       PendingConstrainedFP.size() +
                       PendingConstrainedFPStrict.size());
  PendingLoads.append(PendingConstrainedFP.begin(),
                      PendingConstrainedFP.end());
  PendingLoads.append(PendingConstrainedFPStrict.begin(),
                      PendingConstrainedFPStrict.end());
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  return getMemoryRoot();
}

SDValue SelectionDAGBuilder::getControlRoot() {
  // We need to emit pending fpexcept.strict constrained intrinsics,
  // so append them to the PendingExports list.
  PendingExports.append(PendingConstrainedFPStrict.begin(),
                        PendingConstrainedFPStrict.end());
  PendingConstrainedFPStrict.clear();
  return updateRoot(PendingExports);
}

void SelectionDAGBuilder::visit(const Instruction &I) {
  // Set up outgoing PHI node register values before emitting the terminator.
  if (I.isTerminator()) {
    HandlePHINodesInSuccessorBlocks(I.getParent());
  }

  // Increase the SDNodeOrder if dealing with a non-debug instruction.
  if (!isa<DbgInfoIntrinsic>(I))
    ++SDNodeOrder;

  CurInst = &I;

  visit(I.getOpcode(), I);

  if (auto *FPMO = dyn_cast<FPMathOperator>(&I)) {
    // Propagate the fast-math-flags of this IR instruction to the DAG node that
    // maps to this instruction.
    // TODO: We could handle all flags (nsw, etc) here.
    // TODO: If an IR instruction maps to >1 node, only the final node will have
    //       flags set.
    if (SDNode *Node = getNodeForIRValue(&I)) {
      SDNodeFlags IncomingFlags;
      IncomingFlags.copyFMF(*FPMO);
      if (!Node->getFlags().isDefined())
        Node->setFlags(IncomingFlags);
      else
        Node->intersectFlagsWith(IncomingFlags);
    }
  }
  // Constrained FP intrinsics with fpexcept.ignore should also get
  // the NoFPExcept flag.
  if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(&I))
    if (FPI->getExceptionBehavior() == fp::ExceptionBehavior::ebIgnore)
      if (SDNode *Node = getNodeForIRValue(&I)) {
        SDNodeFlags Flags = Node->getFlags();
        Flags.setNoFPExcept(true);
        Node->setFlags(Flags);
      }

  if (!I.isTerminator() && !HasTailCall &&
      !isStatepoint(&I)) // statepoints handle their exports internally
    CopyToExportRegsIfNeeded(&I);

  CurInst = nullptr;
}

void SelectionDAGBuilder::visitPHI(const PHINode &) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
}

void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: llvm_unreachable("Unknown instruction type encountered!");
    // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
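  // For instance, Instruction.def expands the macro above into cases such as
  //   case Instruction::Add: visitAdd((const BinaryOperator &)I); break;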
#include "llvm/IR/Instruction.def"
  }
}

void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
                                                const DIExpression *Expr) {
  auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
    const DbgValueInst *DI = DDI.getDI();
    DIVariable *DanglingVariable = DI->getVariable();
    DIExpression *DanglingExpr = DI->getExpression();
    if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
      LLVM_DEBUG(dbgs() << "Dropping dangling debug info for " << *DI << "\n");
      return true;
    }
    return false;
  };

  for (auto &DDIMI : DanglingDebugInfoMap) {
    DanglingDebugInfoVector &DDIV = DDIMI.second;

    // If debug info is to be dropped, run it through final checks to see
    // whether it can be salvaged.
    for (auto &DDI : DDIV)
      if (isMatchingDbgValue(DDI))
        salvageUnresolvedDbgValue(DDI);

    DDIV.erase(remove_if(DDIV, isMatchingDbgValue), DDIV.end());
  }
}

// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
// generate the debug data structures now that we've seen its definition.
void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
                                                   SDValue Val) {
  auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
  if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
    return;

  DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
  for (auto &DDI : DDIV) {
    const DbgValueInst *DI = DDI.getDI();
    assert(DI && "Ill-formed DanglingDebugInfo");
    DebugLoc dl = DDI.getdl();
    unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
    unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
    DILocalVariable *Variable = DI->getVariable();
    DIExpression *Expr = DI->getExpression();
    assert(Variable->isValidLocationForIntrinsic(dl) &&
           "Expected inlined-at fields to agree");
    SDDbgValue *SDV;
    if (Val.getNode()) {
      // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
      // FuncArgumentDbgValue (it would be hoisted to the function entry, and if
      // we couldn't resolve it directly when examining the DbgValue intrinsic
      // in the first place we should not be more successful here). Unless we
      // have some test case that prove this to be correct we should avoid
      // calling EmitFuncArgumentDbgValue here.
      if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, false, Val)) {
        LLVM_DEBUG(dbgs() << "Resolve dangling debug info [order="
                          << DbgSDNodeOrder << "] for:\n  " << *DI << "\n");
        LLVM_DEBUG(dbgs() << "  By mapping to:\n    "; Val.dump());
        // Increase the SDNodeOrder for the DbgValue here to make sure it is
        // inserted after the definition of Val when emitting the instructions
        // after ISel. An alternative could be to teach
        // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
        LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
                   << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
                   << ValSDNodeOrder << "\n");
        SDV = getDbgValue(Val, Variable, Expr, dl,
                          std::max(DbgSDNodeOrder, ValSDNodeOrder));
        DAG.AddDbgValue(SDV, Val.getNode(), false);
      } else
        LLVM_DEBUG(dbgs() << "Resolved dangling debug info for " << *DI
                          << " in EmitFuncArgumentDbgValue\n");
    } else {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      auto Undef =
          UndefValue::get(DDI.getDI()->getVariableLocation()->getType());
      auto SDV =
          DAG.getConstantDbgValue(Variable, Expr, Undef, dl, DbgSDNodeOrder);
      DAG.AddDbgValue(SDV, nullptr, false);
    }
  }
  DDIV.clear();
}

void SelectionDAGBuilder::salvageUnresolvedDbgValue(DanglingDebugInfo &DDI) {
  Value *V = DDI.getDI()->getValue();
  DILocalVariable *Var = DDI.getDI()->getVariable();
  DIExpression *Expr = DDI.getDI()->getExpression();
  DebugLoc DL = DDI.getdl();
  DebugLoc InstDL = DDI.getDI()->getDebugLoc();
  unsigned SDOrder = DDI.getSDNodeOrder();

  // Currently we consider only dbg.value intrinsics -- we tell the salvager
  // that DW_OP_stack_value is desired.
  assert(isa<DbgValueInst>(DDI.getDI()));
  bool StackValue = true;

  // Can this Value be encoded without any further work?
  if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder))
    return;

  // Attempt to salvage back through as many instructions as possible. Bail if
  // a non-instruction is seen, such as a constant expression or global
  // variable. FIXME: Further work could recover those too.
  while (isa<Instruction>(V)) {
    Instruction &VAsInst = *cast<Instruction>(V);
    DIExpression *NewExpr = salvageDebugInfoImpl(VAsInst, Expr, StackValue);

    // If we cannot salvage any further, and haven't yet found a suitable debug
    // expression, bail out.
    if (!NewExpr)
      break;

    // New value and expr now represent this debuginfo.
    V = VAsInst.getOperand(0);
    Expr = NewExpr;

    // Some kind of simplification occurred: check whether the operand of the
    // salvaged debug expression can be encoded in this DAG.
    if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder)) {
      LLVM_DEBUG(dbgs() << "Salvaged debug location info for:\n  "
                        << DDI.getDI() << "\nBy stripping back to:\n  " << V);
      return;
    }
  }

  // This was the final opportunity to salvage this debug information, and it
  // couldn't be done. Place an undef DBG_VALUE at this location to terminate
  // any earlier variable location.
  auto Undef = UndefValue::get(DDI.getDI()->getVariableLocation()->getType());
  auto SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder);
  DAG.AddDbgValue(SDV, nullptr, false);

  LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n  " << DDI.getDI()
                    << "\n");
  LLVM_DEBUG(dbgs() << "  Last seen at:\n    " << *DDI.getDI()->getOperand(0)
                    << "\n");
}

bool SelectionDAGBuilder::handleDebugValue(const Value *V, DILocalVariable *Var,
                                           DIExpression *Expr, DebugLoc dl,
                                           DebugLoc InstDL, unsigned Order) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDDbgValue *SDV;
  if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
      isa<ConstantPointerNull>(V)) {
    SDV = DAG.getConstantDbgValue(Var, Expr, V, dl, SDNodeOrder);
    DAG.AddDbgValue(SDV, nullptr, false);
    return true;
  }

  // If the Value is a frame index, we can create a FrameIndex debug value
  // without relying on the DAG at all.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    auto SI = FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      auto SDV =
          DAG.getFrameIndexDbgValue(Var, Expr, SI->second,
                                    /*IsIndirect*/ false, dl, SDNodeOrder);
      // Do not attach the SDNodeDbgValue to an SDNode: this variable location
      // is still available even if the SDNode gets optimized out.
      DAG.AddDbgValue(SDV, nullptr, false);
      return true;
    }
  }

  // Do not use getValue() in here; we don't want to generate code at
  // this point if it hasn't been done yet.
  SDValue N = NodeMap[V];
  if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
    N = UnusedArgNodeMap[V];
  if (N.getNode()) {
    if (EmitFuncArgumentDbgValue(V, Var, Expr, dl, false, N))
      return true;
    SDV = getDbgValue(N, Var, Expr, dl, SDNodeOrder);
    DAG.AddDbgValue(SDV, N.getNode(), false);
    return true;
  }

  // Special rules apply for the first dbg.values of parameter variables in a
  // function. Identify them by the fact they reference Argument Values, that
  // they're parameters, and they are parameters of the current function. We
  // need to let them dangle until they get an SDNode.
  bool IsParamOfFunc = isa<Argument>(V) && Var->isParameter() &&
                       !InstDL.getInlinedAt();
  if (!IsParamOfFunc) {
    // The value is not used in this block yet (or it would have an SDNode).
    // We still want the value to appear for the user if possible -- if it has
    // an associated VReg, we can refer to that instead.
    auto VMI = FuncInfo.ValueMap.find(V);
    if (VMI != FuncInfo.ValueMap.end()) {
      unsigned Reg = VMI->second;
      // If this is a PHI node, it may be split up into several MI PHI nodes
      // (in FunctionLoweringInfo::set).
      RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
                       V->getType(), None);
      if (RFV.occupiesMultipleRegs()) {
        unsigned Offset = 0;
        unsigned BitsToDescribe = 0;
        if (auto VarSize = Var->getSizeInBits())
          BitsToDescribe = *VarSize;
        if (auto Fragment = Expr->getFragmentInfo())
          BitsToDescribe = Fragment->SizeInBits;
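        // Emit one fragment per register; e.g. a 128-bit variable living in
        // two 64-bit registers is described by two DBG_VALUEs covering bits
        // [0, 64) and [64, 128).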
        for (auto RegAndSize : RFV.getRegsAndSizes()) {
          unsigned RegisterSize = RegAndSize.second;
          // Bail out if all bits are described already.
          if (Offset >= BitsToDescribe)
            break;
          unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
              ? BitsToDescribe - Offset
              : RegisterSize;
          auto FragmentExpr = DIExpression::createFragmentExpression(
              Expr, Offset, FragmentSize);
          if (!FragmentExpr)
            continue;
          SDV = DAG.getVRegDbgValue(Var, *FragmentExpr, RegAndSize.first,
                                    false, dl, SDNodeOrder);
          DAG.AddDbgValue(SDV, nullptr, false);
          Offset += RegisterSize;
        }
      } else {
        SDV = DAG.getVRegDbgValue(Var, Expr, Reg, false, dl, SDNodeOrder);
        DAG.AddDbgValue(SDV, nullptr, false);
      }
      return true;
    }
  }

  return false;
}

void SelectionDAGBuilder::resolveOrClearDbgInfo() {
  // Try to fixup any remaining dangling debug info -- and drop it if we can't.
  for (auto &Pair : DanglingDebugInfoMap)
    for (auto &DDI : Pair.second)
      salvageUnresolvedDbgValue(DDI);
  clearDanglingDebugInfo();
}

/// getCopyFromRegs - If there was a virtual register allocated for the value V
/// emit CopyFromReg of the specified type Ty. Return empty SDValue() otherwise.
SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
  DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
  SDValue Result;

  if (It != FuncInfo.ValueMap.end()) {
    unsigned InReg = It->second;

    RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
                     DAG.getDataLayout(), InReg, Ty,
                     None); // This is not an ABI copy.
    SDValue Chain = DAG.getEntryNode();
    Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
                                 V);
    resolveDanglingDebugInfo(V, Result);
  }

  return Result;
}

/// getValue - Return an SDValue for the given Value.
SDValue SelectionDAGBuilder::getValue(const Value *V) {
  // If we already have an SDValue for this value, use it. It's important
  // to do this first, so that we don't create a CopyFromReg if we already
  // have a regular SDValue.
  SDValue &N = NodeMap[V];
  if (N.getNode()) return N;

  // If there's a virtual register allocated and initialized for this
  // value, use it.
  if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
    return copyFromReg;

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

// Return true if an SDValue exists for the given Value.
bool SelectionDAGBuilder::findValue(const Value *V) const {
  return (NodeMap.find(V) != NodeMap.end()) ||
         (FuncInfo.ValueMap.find(V) != FuncInfo.ValueMap.end());
}

/// getNonRegisterValue - Return an SDValue for the given Value, but
/// don't look in FuncInfo.ValueMap for a virtual register.
SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
  // If we already have an SDValue for this value, use it.
  SDValue &N = NodeMap[V];
  if (N.getNode()) {
    if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(N)) {
      // Remove the debug location from the node as the node is about to be used
      // in a location which may differ from the original debug location.  This
      // is relevant to Constant and ConstantFP nodes because they can appear
      // as constant expressions inside PHI nodes.
      N->setDebugLoc(DebugLoc());
    }
    return N;
  }

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

/// getValueImpl - Helper function for getValue and getNonRegisterValue.
/// Create an SDValue for the given value.
SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (const Constant *C = dyn_cast<Constant>(V)) {
    EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);

    if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
      return DAG.getConstant(*CI, getCurSDLoc(), VT);

    if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
      return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);

    if (isa<ConstantPointerNull>(C)) {
      unsigned AS = V->getType()->getPointerAddressSpace();
      return DAG.getConstant(0, getCurSDLoc(),
                             TLI.getPointerTy(DAG.getDataLayout(), AS));
    }

    if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
      return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);

    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
      return DAG.getUNDEF(VT);

    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      visit(CE->getOpcode(), *CE);
      SDValue N1 = NodeMap[V];
      assert(N1.getNode() && "visit didn't populate the NodeMap!");
      return N1;
    }

    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
      SmallVector<SDValue, 4> Constants;
      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
           OI != OE; ++OI) {
        SDNode *Val = getValue(*OI).getNode();
        // If the operand is an empty aggregate, there are no values.
        if (!Val) continue;
        // Add each leaf value from the operand to the Constants list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Constants.push_back(SDValue(Val, i));
      }

      return DAG.getMergeValues(Constants, getCurSDLoc());
    }

    if (const ConstantDataSequential *CDS =
            dyn_cast<ConstantDataSequential>(C)) {
      SmallVector<SDValue, 4> Ops;
      for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
        SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
        // Add each leaf value from the operand to the Constants list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Ops.push_back(SDValue(Val, i));
      }

      if (isa<ArrayType>(CDS->getType()))
        return DAG.getMergeValues(Ops, getCurSDLoc());
      return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
    }

    if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
             "Unknown struct or array constant!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
      unsigned NumElts = ValueVTs.size();
      if (NumElts == 0)
        return SDValue(); // empty struct
      SmallVector<SDValue, 4> Constants(NumElts);
      for (unsigned i = 0; i != NumElts; ++i) {
        EVT EltVT = ValueVTs[i];
        if (isa<UndefValue>(C))
          Constants[i] = DAG.getUNDEF(EltVT);
        else if (EltVT.isFloatingPoint())
          Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
        else
          Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
      }

      return DAG.getMergeValues(Constants, getCurSDLoc());
    }

    if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
      return DAG.getBlockAddress(BA, VT);

    VectorType *VecTy = cast<VectorType>(V->getType());
    unsigned NumElements = VecTy->getNumElements();

    // Now that we know the number and type of the elements, get that number of
    // elements into the Ops array based on what kind of constant it is.
    SmallVector<SDValue, 16> Ops;
    if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
      for (unsigned i = 0; i != NumElements; ++i)
        Ops.push_back(getValue(CV->getOperand(i)));
    } else {
      assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
      EVT EltVT =
          TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());

      SDValue Op;
      if (EltVT.isFloatingPoint())
        Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
      else
        Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
      Ops.assign(NumElements, Op);
    }

    // Create a BUILD_VECTOR node.
    return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
  }

  // If this is a static alloca, generate it as the frameindex instead of
  // computation.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second,
                               TLI.getFrameIndexTy(DAG.getDataLayout()));
  }

  // If this is an instruction which fast-isel has deferred, select it now.
  if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
    unsigned InReg = FuncInfo.InitializeRegForValue(Inst);

    RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
                     Inst->getType(), getABIRegCopyCC(V));
    SDValue Chain = DAG.getEntryNode();
    return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
  }

  llvm_unreachable("Can't get register for value!");
}

void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
  bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
  bool IsSEH = isAsynchronousEHPersonality(Pers);
  bool IsWasmCXX = Pers == EHPersonality::Wasm_CXX;
  MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
  if (!IsSEH)
    CatchPadMBB->setIsEHScopeEntry();
  // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
  if (IsMSVCCXX || IsCoreCLR)
    CatchPadMBB->setIsEHFuncletEntry();
  // Wasm does not need catchpads anymore
  if (!IsWasmCXX)
    DAG.setRoot(DAG.getNode(ISD::CATCHPAD, getCurSDLoc(), MVT::Other,
                            getControlRoot()));
}
void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
  // Update machine-CFG edge.
  MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
  FuncInfo.MBB->addSuccessor(TargetMBB);

  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsSEH = isAsynchronousEHPersonality(Pers);
  if (IsSEH) {
    // If this is not a fall-through branch or optimizations are switched off,
    // emit the branch.
    if (TargetMBB != NextBlock(FuncInfo.MBB) ||
        TM.getOptLevel() == CodeGenOpt::None)
      DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
                              getControlRoot(), DAG.getBasicBlock(TargetMBB)));
    return;
  }

  // Figure out the funclet membership for the catchret's successor.
  // This will be used by the FuncletLayout pass to determine how to order the
  // BBs.
  // A 'catchret' returns to the outer scope's color.
  Value *ParentPad = I.getCatchSwitchParentPad();
  const BasicBlock *SuccessorColor;
  if (isa<ConstantTokenNone>(ParentPad))
    SuccessorColor = &FuncInfo.Fn->getEntryBlock();
  else
    SuccessorColor = cast<Instruction>(ParentPad)->getParent();
  assert(SuccessorColor && "No parent funclet for catchret!");
  MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
  assert(SuccessorColorMBB && "No MBB for SuccessorColor!");

  // Create the terminator node.
  SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
                            getControlRoot(), DAG.getBasicBlock(TargetMBB),
                            DAG.getBasicBlock(SuccessorColorMBB));
  DAG.setRoot(Ret);
}
void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
  // Don't emit any special code for the cleanuppad instruction. It just marks
  // the start of an EH scope/funclet.
  FuncInfo.MBB->setIsEHScopeEntry();
  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  if (Pers != EHPersonality::Wasm_CXX) {
    FuncInfo.MBB->setIsEHFuncletEntry();
    FuncInfo.MBB->setIsCleanupFuncletEntry();
  }
}
// For wasm, there's always a single catch pad attached to a catchswitch, and
// the control flow always stops at the single catch pad, as it does for a
// cleanup pad. In case the exception caught is not of the types the catch pad
// catches, it will be rethrown by a rethrow.
static void findWasmUnwindDestinations(
    FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
    BranchProbability Prob,
    SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
        &UnwindDests) {
  while (EHPadBB) {
    const Instruction *Pad = EHPadBB->getFirstNonPHI();
    if (isa<CleanupPadInst>(Pad)) {
      // Stop on cleanup pads.
      UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
      UnwindDests.back().first->setIsEHScopeEntry();
      break;
    } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
      // Add the catchpad handlers to the possible destinations. We don't
      // continue to the unwind destination of the catchswitch for wasm.
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
        UnwindDests.back().first->setIsEHScopeEntry();
      }
      break;
    } else {
      continue;
    }
  }
}
/// When an invoke or a cleanupret unwinds to the next EH pad, there are
/// many places it could ultimately go. In the IR, we have a single unwind
/// destination, but in the machine CFG, we enumerate all the possible blocks.
/// This function skips over imaginary basic blocks that hold catchswitch
/// instructions, and finds all the "real" machine
/// basic block destinations. As those destinations may not be successors of
/// EHPadBB, here we also calculate the edge probability to those destinations.
/// The passed-in Prob is the edge probability to EHPadBB.
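/// For example (illustrative scenario): an invoke that unwinds to a
/// catchswitch with two catchpad handlers produces two machine successors
/// here, and if the catchswitch itself unwinds further, Prob is scaled by the
/// corresponding edge probability before the walk continues.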
static void findUnwindDestinations(
    FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
    BranchProbability Prob,
    SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
        &UnwindDests) {
  EHPersonality Personality =
    classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
  bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
  bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
  bool IsSEH = isAsynchronousEHPersonality(Personality);

  if (IsWasmCXX) {
    findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests);
    assert(UnwindDests.size() <= 1 &&
           "There should be at most one unwind destination for wasm");
    return;
  }

  while (EHPadBB) {
    const Instruction *Pad = EHPadBB->getFirstNonPHI();
    BasicBlock *NewEHPadBB = nullptr;
    if (isa<LandingPadInst>(Pad)) {
      // Stop on landingpads. They are not funclets.
      UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
      break;
    } else if (isa<CleanupPadInst>(Pad)) {
      // Stop on cleanup pads. Cleanups are always funclet entries for all known
      // personalities.
      UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
      UnwindDests.back().first->setIsEHScopeEntry();
      UnwindDests.back().first->setIsEHFuncletEntry();
      break;
    } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
      // Add the catchpad handlers to the possible destinations.
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
        // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
        if (IsMSVCCXX || IsCoreCLR)
          UnwindDests.back().first->setIsEHFuncletEntry();
        if (!IsSEH)
          UnwindDests.back().first->setIsEHScopeEntry();
      }
      NewEHPadBB = CatchSwitch->getUnwindDest();
    } else {
      continue;
    }

    BranchProbabilityInfo *BPI = FuncInfo.BPI;
    if (BPI && NewEHPadBB)
      Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
    EHPadBB = NewEHPadBB;
  }
}
void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
  // Update successor info.
  SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
  auto UnwindDest = I.getUnwindDest();
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  BranchProbability UnwindDestProb =
      (BPI && UnwindDest)
          ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
          : BranchProbability::getZero();
  findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
  }
  FuncInfo.MBB->normalizeSuccProbs();

  // Create the terminator node.
  SDValue Ret =
      DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
  DAG.setRoot(Ret);
}

void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
  report_fatal_error("visitCatchSwitch not yet implemented!");
}
void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  auto &DL = DAG.getDataLayout();
  SDValue Chain = getControlRoot();
  SmallVector<ISD::OutputArg, 8> Outs;
  SmallVector<SDValue, 8> OutVals;

  // Calls to @llvm.experimental.deoptimize don't generate a return value, so
  // lower
  //
  //   %val = call <ty> @llvm.experimental.deoptimize()
  //   ret <ty> %val
  //
  // differently.
  if (I.getParent()->getTerminatingDeoptimizeCall()) {
    LowerDeoptimizingReturn();
    return;
  }

  if (!FuncInfo.CanLowerReturn) {
    unsigned DemoteReg = FuncInfo.DemoteRegister;
    const Function *F = I.getParent()->getParent();

    // Emit a store of the return value through the virtual register.
    // Leave Outs empty so that LowerReturn won't try to load return
    // registers the usual way.
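    // (Illustrative note: this is the sret-demotion path, taken e.g. when a
    // function returns an aggregate too large for the target's return
    // registers; the value is stored through the hidden pointer held in
    // DemoteRegister instead.)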
    SmallVector<EVT, 1> PtrValueVTs;
    ComputeValueVTs(TLI, DL,
                    F->getReturnType()->getPointerTo(
                        DAG.getDataLayout().getAllocaAddrSpace()),
                    PtrValueVTs);

    SDValue RetPtr = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
                                        DemoteReg, PtrValueVTs[0]);
    SDValue RetOp = getValue(I.getOperand(0));

    SmallVector<EVT, 4> ValueVTs, MemVTs;
    SmallVector<uint64_t, 4> Offsets;
    ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
                    &Offsets);
    unsigned NumValues = ValueVTs.size();

    SmallVector<SDValue, 4> Chains(NumValues);
    for (unsigned i = 0; i != NumValues; ++i) {
      // An aggregate return value cannot wrap around the address space, so
      // offsets to its parts don't wrap either.
      SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr, Offsets[i]);

      SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
      if (MemVTs[i] != ValueVTs[i])
        Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]);
      Chains[i] = DAG.getStore(Chain, getCurSDLoc(), Val,
          // FIXME: better loc info would be nice.
          Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()));
    }

    Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
                        MVT::Other, Chains);
  } else if (I.getNumOperands() != 0) {
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
    unsigned NumValues = ValueVTs.size();
    if (NumValues) {
      SDValue RetOp = getValue(I.getOperand(0));

      const Function *F = I.getParent()->getParent();

      bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
          I.getOperand(0)->getType(), F->getCallingConv(),
          /*IsVarArg*/ false);

      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
      if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                          Attribute::SExt))
        ExtendKind = ISD::SIGN_EXTEND;
      else if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                               Attribute::ZExt))
        ExtendKind = ISD::ZERO_EXTEND;

      LLVMContext &Context = F->getContext();
      bool RetInReg = F->getAttributes().hasAttribute(
          AttributeList::ReturnIndex, Attribute::InReg);

      for (unsigned j = 0; j != NumValues; ++j) {
        EVT VT = ValueVTs[j];

        if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
          VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);

        CallingConv::ID CC = F->getCallingConv();

        unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
        MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
        SmallVector<SDValue, 4> Parts(NumParts);
        getCopyToParts(DAG, getCurSDLoc(),
                       SDValue(RetOp.getNode(), RetOp.getResNo() + j),
                       &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);

        // 'inreg' on function refers to return value
        ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
        if (RetInReg)
          Flags.setInReg();

        if (I.getOperand(0)->getType()->isPointerTy()) {
          Flags.setPointer();
          Flags.setPointerAddrSpace(
              cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
        }

        if (NeedsRegBlock) {
          Flags.setInConsecutiveRegs();
          if (j == NumValues - 1)
            Flags.setInConsecutiveRegsLast();
        }

        // Propagate extension type if any
        if (ExtendKind == ISD::SIGN_EXTEND)
          Flags.setSExt();
        else if (ExtendKind == ISD::ZERO_EXTEND)
          Flags.setZExt();

        for (unsigned i = 0; i < NumParts; ++i) {
          Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
                                        VT, /*isfixed=*/true, 0, 0));
          OutVals.push_back(Parts[i]);
        }
      }
    }
  }

  // Push in swifterror virtual register as the last element of Outs. This makes
  // sure swifterror virtual register will be returned in the swifterror
  // physical register.
  const Function *F = I.getParent()->getParent();
  if (TLI.supportSwiftError() &&
      F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
    assert(SwiftError.getFunctionArg() && "Need a swift error argument");
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    Flags.setSwiftError();
    Outs.push_back(ISD::OutputArg(Flags, EVT(TLI.getPointerTy(DL)) /*vt*/,
                                  EVT(TLI.getPointerTy(DL)) /*argvt*/,
                                  true /*isfixed*/, 1 /*origidx*/,
                                  0 /*partOffs*/));
    // Create SDNode for the swifterror virtual register.
    OutVals.push_back(
        DAG.getRegister(SwiftError.getOrCreateVRegUseAt(
                            &I, FuncInfo.MBB, SwiftError.getFunctionArg()),
                        EVT(TLI.getPointerTy(DL))));
  }

  bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
  CallingConv::ID CallConv =
      DAG.getMachineFunction().getFunction().getCallingConv();
  Chain = DAG.getTargetLoweringInfo().LowerReturn(
      Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);

  // Verify that the target's LowerReturn behaved as expected.
  assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
         "LowerReturn didn't return a valid chain!");

  // Update the DAG with the new chain value resulting from return lowering.
  DAG.setRoot(Chain);
}
/// CopyToExportRegsIfNeeded - If the given value has virtual registers
/// created for it, emit nodes to copy the value into the virtual
/// registers.
void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
  // Skip empty types
  if (V->getType()->isEmptyTy())
    return;

  DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V);
  if (VMI != FuncInfo.ValueMap.end()) {
    assert(!V->use_empty() && "Unused value assigned virtual registers!");
    CopyValueToVirtualRegister(V, VMI->second);
  }
}

/// ExportFromCurrentBlock - If this condition isn't known to be exported from
/// the current basic block, add it to ValueMap now so that we'll get a
/// CopyTo/FromReg.
void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
  // No need to export constants.
  if (!isa<Instruction>(V) && !isa<Argument>(V)) return;

  // Already exported?
  if (FuncInfo.isExportedInst(V)) return;

  unsigned Reg = FuncInfo.InitializeRegForValue(V);
  CopyValueToVirtualRegister(V, Reg);
}

bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
                                                       const BasicBlock *FromBB) {
  // The operands of the setcc have to be in this block. We don't know
  // how to export them from some other block.
  if (const Instruction *VI = dyn_cast<Instruction>(V)) {
    // Can export from current BB.
    if (VI->getParent() == FromBB)
      return true;

    // Is already exported, noop.
    return FuncInfo.isExportedInst(V);
  }

  // If this is an argument, we can export it if the BB is the entry block or
  // if it is already exported.
  if (isa<Argument>(V)) {
    if (FromBB == &FromBB->getParent()->getEntryBlock())
      return true;

    // Otherwise, can only export this if it is already exported.
    return FuncInfo.isExportedInst(V);
  }

  // Otherwise, constants can always be exported.
  return true;
}
/// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
BranchProbability
SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
                                        const MachineBasicBlock *Dst) const {
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  if (!BPI) {
    // If BPI is not available, set the default probability as 1 / N, where N is
    // the number of successors.
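    // (For example, a block with four IR successors reports 1/4 per edge.)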
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    return BranchProbability(1, SuccSize);
  }
  return BPI->getEdgeProbability(SrcBB, DstBB);
}

void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
                                               MachineBasicBlock *Dst,
                                               BranchProbability Prob) {
  if (!FuncInfo.BPI)
    Src->addSuccessorWithoutProb(Dst);
  else {
    if (Prob.isUnknown())
      Prob = getEdgeProbability(Src, Dst);
    Src->addSuccessor(Dst, Prob);
  }
}

static bool InBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}
/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
/// This function emits a branch and is used at the leaves of an OR or an
/// AND operator tree.
void
SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
                                                  MachineBasicBlock *TBB,
                                                  MachineBasicBlock *FBB,
                                                  MachineBasicBlock *CurBB,
                                                  MachineBasicBlock *SwitchBB,
                                                  BranchProbability TProb,
                                                  BranchProbability FProb,
                                                  bool InvertCond) {
  const BasicBlock *BB = CurBB->getBasicBlock();

  // If the leaf of the tree is a comparison, merge the condition into
  // the caseblock.
  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    // The operands of the cmp have to be in this block. We don't know
    // how to export them from some other block. If this is the first block
    // of the sequence, no exporting is needed.
    if (CurBB == SwitchBB ||
        (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
         isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
      ISD::CondCode Condition;
      if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
        ICmpInst::Predicate Pred =
            InvertCond ? IC->getInversePredicate() : IC->getPredicate();
        Condition = getICmpCondCode(Pred);
      } else {
        const FCmpInst *FC = cast<FCmpInst>(Cond);
        FCmpInst::Predicate Pred =
            InvertCond ? FC->getInversePredicate() : FC->getPredicate();
        Condition = getFCmpCondCode(Pred);
        if (TM.Options.NoNaNsFPMath)
          Condition = getFCmpCodeWithoutNaN(Condition);
      }

      CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
                   TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
      SL->SwitchCases.push_back(CB);
      return;
    }
  }

  // Create a CaseBlock record representing this branch.
  ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
  CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
               nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
  SL->SwitchCases.push_back(CB);
}
void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
                                               MachineBasicBlock *TBB,
                                               MachineBasicBlock *FBB,
                                               MachineBasicBlock *CurBB,
                                               MachineBasicBlock *SwitchBB,
                                               Instruction::BinaryOps Opc,
                                               BranchProbability TProb,
                                               BranchProbability FProb,
                                               bool InvertCond) {
  // Skip over nodes that are not part of the tree, and remember to invert the
  // op and operands at the leaves.
  Value *NotCond;
  if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
      InBlock(NotCond, CurBB->getBasicBlock())) {
    FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
                         !InvertCond);
    return;
  }

  const Instruction *BOp = dyn_cast<Instruction>(Cond);
  // Compute the effective opcode for Cond, taking into account whether it needs
  // to be inverted, e.g.
  //   and (not (or A, B)), C
  // gets lowered as
  //   and (and (not A, not B), C)
  unsigned BOpc = 0;
  if (BOp) {
    BOpc = BOp->getOpcode();
    if (InvertCond) {
      if (BOpc == Instruction::And)
        BOpc = Instruction::Or;
      else if (BOpc == Instruction::Or)
        BOpc = Instruction::And;
    }
  }

  // If this node is not part of the or/and tree, emit it as a branch.
  if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
      BOpc != unsigned(Opc) || !BOp->hasOneUse() ||
      BOp->getParent() != CurBB->getBasicBlock() ||
      !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
      !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
    EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
                                 TProb, FProb, InvertCond);
    return;
  }

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI(CurBB);
  MachineFunction &MF = DAG.getMachineFunction();
  MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    // BB1:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
    //     = TrueProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
    // A/(1+B) and 2B/(1+B). This choice assumes that
    //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
    // Another choice is to assume TrueProb for BB1 equals to TrueProb for
    // TmpBB, but the math is more complicated.
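    //
    // As a worked example of the first choice (illustrative numbers): with
    // A = 3/8 and B = 5/8, BB1 gets probabilities {3/16, 13/16}, and TmpBB's
    // pair {3/16, 5/8} normalizes to {3/13, 10/13}, matching A/(1+B) and
    // 2B/(1+B).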
    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
                         NewTrueProb, NewFalseProb, InvertCond);

    // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
    SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
                         Probs[0], Probs[1], InvertCond);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    // BB1:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    //  This requires creation of TmpBB after CurBB.

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
    //     = FalseProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
    // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
    // TrueProb for BB1 * FalseProb for TmpBB.
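    //
    // As a worked example (illustrative numbers): with A = 3/8 and B = 5/8,
    // BB1 gets probabilities {11/16, 5/16}, and TmpBB's pair {3/8, 5/16}
    // normalizes to {6/11, 5/11}, matching 2A/(1+A) and B/(1+A).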
    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;
    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
                         NewTrueProb, NewFalseProb, InvertCond);

    // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
    SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
                         Probs[0], Probs[1], InvertCond);
  }
}
/// If the set of cases should be emitted as a series of branches, return true.
/// If we should emit this as a bunch of and/or'd together conditions, return
/// false.
bool
SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
  if (Cases.size() != 2) return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  // Handle: (X != null) | (Y != null) --> (X|Y) != 0
  // Handle: (X == null) & (Y == null) --> (X|Y) == 0
  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].CC == Cases[1].CC &&
      isa<Constant>(Cases[0].CmpRHS) &&
      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
    if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
      return false;
    if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
      return false;
  }

  return true;
}
void SelectionDAGBuilder::visitBr(const BranchInst &I) {
  MachineBasicBlock *BrMBB = FuncInfo.MBB;

  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];

  if (I.isUnconditional()) {
    // Update machine-CFG edges.
    BrMBB->addSuccessor(Succ0MBB);

    // If this is not a fall-through branch or optimizations are switched off,
    // emit the branch.
    if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None)
      DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
                              MVT::Other, getControlRoot(),
                              DAG.getBasicBlock(Succ0MBB)));

    return;
  }

  // If this condition is one of the special cases we handle, do special stuff
  // now.
  const Value *CondVal = I.getCondition();
  MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // As long as jumps are not expensive, this should improve performance.
  // For example, instead of something like:
  //     cmp A, B
  //     C = seteq
  //     cmp D, E
  //     F = setle
  //     or C, F
  //     jnz foo
  // Emit:
  //     cmp A, B
  //     je foo
  //     cmp D, E
  //     jle foo
  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) {
    Instruction::BinaryOps Opcode = BOp->getOpcode();
    if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp->hasOneUse() &&
        !I.hasMetadata(LLVMContext::MD_unpredictable) &&
        (Opcode == Instruction::And || Opcode == Instruction::Or)) {
      FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB,
                           Opcode,
                           getEdgeProbability(BrMBB, Succ0MBB),
                           getEdgeProbability(BrMBB, Succ1MBB),
                           /*InvertCond=*/false);
      // If the compares in later blocks need to use values not currently
      // exported from this block, export them now. This block should always
      // be the first entry.
      assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (ShouldEmitAsBranches(SL->SwitchCases)) {
        for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
          ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS);
          ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS);
        }

        // Emit the branch for this block.
        visitSwitchCase(SL->SwitchCases[0], BrMBB);
        SL->SwitchCases.erase(SL->SwitchCases.begin());
        return;
      }

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      // SwitchCases.
      for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
        FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB);

      SL->SwitchCases.clear();
    }
  }

  // Create a CaseBlock record representing this branch.
  CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
               nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc());

  // Use visitSwitchCase to actually insert the fast branch sequence for this
  // cond branch.
  visitSwitchCase(CB, BrMBB);
}
/// visitSwitchCase - Emits the necessary code to represent a single node in
/// the binary search tree resulting from lowering a switch instruction.
void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
                                          MachineBasicBlock *SwitchBB) {
  SDValue Cond;
  SDValue CondLHS = getValue(CB.CmpLHS);
  SDLoc dl = getCurSDLoc();

  if (CB.CC == ISD::SETTRUE) {
    // Branch or fall through to TrueBB.
    addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
    SwitchBB->normalizeSuccProbs();
    if (CB.TrueBB != NextBlock(SwitchBB)) {
      DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(),
                              DAG.getBasicBlock(CB.TrueBB)));
    }
    return;
  }

  auto &TLI = DAG.getTargetLoweringInfo();
  EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());

  // Build the setcc now.
  if (!CB.CmpMHS) {
    // Fold "(X == true)" to X and "(X == false)" to !X to
    // handle common cases produced by branch lowering.
    if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
        CB.CC == ISD::SETEQ)
      Cond = CondLHS;
    else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
             CB.CC == ISD::SETEQ) {
      SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
      Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
    } else {
      SDValue CondRHS = getValue(CB.CmpRHS);

      // If a pointer's DAG type is larger than its memory type then the DAG
      // values are zero-extended. This breaks signed comparisons so truncate
      // back to the underlying type before doing the compare.
      if (CondLHS.getValueType() != MemVT) {
        CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);
        CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);
      }
      Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC);
    }
  } else {
    assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");

    const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    SDValue CmpOp = getValue(CB.CmpMHS);
    EVT VT = CmpOp.getValueType();

    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
                          ISD::SETLE);
    } else {
      SDValue SUB = DAG.getNode(ISD::SUB, dl,
                                VT, CmpOp, DAG.getConstant(Low, dl, VT));
      Cond = DAG.getSetCC(dl, MVT::i1, SUB,
                          DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
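      // (For example, illustratively, the case range [10, 13] becomes
      // (X - 10) setule 3, turning the two-sided range check into a single
      // unsigned comparison.)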
    }
  }

  // Update successor info
  addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
  // TrueBB and FalseBB are always different unless the incoming IR is
  // degenerate. This only happens when running llc on weird IR.
  if (CB.TrueBB != CB.FalseBB)
    addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
  SwitchBB->normalizeSuccProbs();

  // If the lhs block is the next block, invert the condition so that we can
  // fall through to the lhs instead of the rhs block.
  if (CB.TrueBB == NextBlock(SwitchBB)) {
    std::swap(CB.TrueBB, CB.FalseBB);
    SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
    Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
  }

  SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
                               MVT::Other, getControlRoot(), Cond,
                               DAG.getBasicBlock(CB.TrueBB));

  // Insert the false branch. Do this even if it's a fall through branch,
  // this makes it easier to do DAG optimizations which require inverting
  // the branch condition.
  BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
                       DAG.getBasicBlock(CB.FalseBB));

  DAG.setRoot(BrCond);
}
/// visitJumpTable - Emit JumpTable node in the current MBB
void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {
  // Emit the code for the jump table
  assert(JT.Reg != -1U && "Should lower JT Header first!");
  EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
                                     JT.Reg, PTy);
  SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
  SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
                                    MVT::Other, Index.getValue(1),
                                    Table, Index);
  DAG.setRoot(BrJumpTable);
}

/// visitJumpTableHeader - This function emits necessary code to produce index
/// in the JumpTable from switch case.
void SelectionDAGBuilder::visitJumpTableHeader(SwitchCG::JumpTable &JT,
                                               JumpTableHeader &JTH,
                                               MachineBasicBlock *SwitchBB) {
  SDLoc dl = getCurSDLoc();

  // Subtract the lowest switch case value from the value being switched on.
  SDValue SwitchOp = getValue(JTH.SValue);
  EVT VT = SwitchOp.getValueType();
  SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
                            DAG.getConstant(JTH.First, dl, VT));

  // The SDNode we just created, which holds the value being switched on minus
  // the smallest case value, needs to be copied to a virtual register so it
  // can be used as an index into the jump table in a subsequent basic block.
  // This value may be smaller or larger than the target's pointer type, and
  // therefore require extension or truncating.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));

  unsigned JumpTableReg =
      FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
                                    JumpTableReg, SwitchOp);
  JT.Reg = JumpTableReg;

  if (!JTH.OmitRangeCheck) {
    // Emit the range check for the jump table, and branch to the default block
    // for the switch statement if the value being switched on exceeds the
    // largest case in the switch.
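    // (For example, illustratively, for a dense switch over cases 3..9 the
    // guard below is 'Sub ugt 6' on Sub = SValue - 3, sending out-of-range
    // values to the default destination.)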
    SDValue CMP = DAG.getSetCC(
        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                   Sub.getValueType()),
        Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);

    SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
                                 MVT::Other, CopyTo, CMP,
                                 DAG.getBasicBlock(JT.Default));

    // Avoid emitting unnecessary branches to the next block.
    if (JT.MBB != NextBlock(SwitchBB))
      BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
                           DAG.getBasicBlock(JT.MBB));

    DAG.setRoot(BrCond);
  } else {
    // Avoid emitting unnecessary branches to the next block.
    if (JT.MBB != NextBlock(SwitchBB))
      DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
                              DAG.getBasicBlock(JT.MBB)));
    else
      DAG.setRoot(CopyTo);
  }
}
/// Create a LOAD_STACK_GUARD node, and let it carry the target specific global
/// variable if there exists one.
static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue &Chain) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
  EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
  MachineFunction &MF = DAG.getMachineFunction();
  Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
  MachineSDNode *Node =
      DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
  if (Global) {
    MachinePointerInfo MPInfo(Global);
    auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
                 MachineMemOperand::MODereferenceable;
    MachineMemOperand *MemRef = MF.getMachineMemOperand(
        MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlignment(PtrTy));
    DAG.setNodeMemRefs(Node, {MemRef});
  }
  if (PtrTy != PtrMemTy)
    return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy);
  return SDValue(Node, 0);
}
/// Codegen a new tail for a stack protector check ParentMBB which has had its
/// tail spliced into a stack protector check success bb.
///
/// For a high level explanation of how this fits into the stack protector
/// generation see the comment on the declaration of class
/// StackProtectorDescriptor.
void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
                                                  MachineBasicBlock *ParentBB) {

  // First create the loads to the guard/stack slot for the comparison.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
  EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());

  MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
  int FI = MFI.getStackProtectorIndex();

  SDValue Guard;
  SDLoc dl = getCurSDLoc();
  SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
  const Module &M = *ParentBB->getParent()->getFunction().getParent();
  unsigned Align = DL->getPrefTypeAlignment(Type::getInt8PtrTy(M.getContext()));

  // Generate code to load the content of the guard slot.
  SDValue GuardVal = DAG.getLoad(
      PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
      MachineMemOperand::MOVolatile);

  if (TLI.useStackGuardXorFP())
    GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);

  // Retrieve guard check function, nullptr if instrumentation is inlined.
  if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
    // The target provides a guard check function to validate the guard value.
    // Generate a call to that function with the content of the guard slot as
    // argument.
    FunctionType *FnTy = GuardCheckFn->getFunctionType();
    assert(FnTy->getNumParams() == 1 && "Invalid function signature");

    TargetLowering::ArgListTy Args;
    TargetLowering::ArgListEntry Entry;
    Entry.Node = GuardVal;
    Entry.Ty = FnTy->getParamType(0);
    if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
      Entry.IsInReg = true;
    Args.push_back(Entry);

    TargetLowering::CallLoweringInfo CLI(DAG);
    CLI.setDebugLoc(getCurSDLoc())
        .setChain(DAG.getEntryNode())
        .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
                   getValue(GuardCheckFn), std::move(Args));

    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
    DAG.setRoot(Result.second);
    return;
  }

  // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
  // Otherwise, emit a volatile load to retrieve the stack guard value.
  SDValue Chain = DAG.getEntryNode();
  if (TLI.useLoadStackGuardNode()) {
    Guard = getLoadStackGuard(DAG, dl, Chain);
  } else {
    const Value *IRGuard = TLI.getSDagStackGuard(M);
    SDValue GuardPtr = getValue(IRGuard);

    Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
                        MachinePointerInfo(IRGuard, 0), Align,
                        MachineMemOperand::MOVolatile);
  }

  // Perform the comparison via a subtract/getsetcc.
  EVT VT = Guard.getValueType();
  SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Guard, GuardVal);

  SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
                                                        *DAG.getContext(),
                                                        Sub.getValueType()),
                             Sub, DAG.getConstant(0, dl, VT), ISD::SETNE);

  // If the sub is not 0, then we know the guard/stackslot do not equal, so
  // branch to failure MBB.
  SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
                               MVT::Other, GuardVal.getOperand(0),
                               Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
  // Otherwise branch to success MBB.
  SDValue Br = DAG.getNode(ISD::BR, dl,
                           MVT::Other, BrCond,
                           DAG.getBasicBlock(SPD.getSuccessMBB()));

  DAG.setRoot(Br);
}
/// Codegen the failure basic block for a stack protector check.
///
/// A failure stack protector machine basic block consists simply of a call to
/// __stack_chk_fail().
///
/// For a high level explanation of how this fits into the stack protector
/// generation see the comment on the declaration of class
/// StackProtectorDescriptor.
void
SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  TargetLowering::MakeLibCallOptions CallOptions;
  CallOptions.setDiscardResult(true);
  SDValue Chain =
      TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
                      None, CallOptions, getCurSDLoc()).second;
  // On PS4, the "return address" must still be within the calling function,
  // even if it's at the very end, so emit an explicit TRAP here.
  // Passing 'true' for doesNotReturn above won't generate the trap for us.
  if (TM.getTargetTriple().isPS4CPU())
    Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);

  DAG.setRoot(Chain);
}
/// visitBitTestHeader - This function emits necessary code to produce value
/// suitable for "bit tests"
void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
                                             MachineBasicBlock *SwitchBB) {
  SDLoc dl = getCurSDLoc();

  // Subtract the minimum value.
  SDValue SwitchOp = getValue(B.SValue);
  EVT VT = SwitchOp.getValueType();
  SDValue RangeSub =
      DAG.getNode(ISD::SUB, dl, VT, SwitchOp, DAG.getConstant(B.First, dl, VT));

  // Determine the type of the test operands.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  bool UsePtrType = false;
  if (!TLI.isTypeLegal(VT)) {
    UsePtrType = true;
  } else {
    for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
      if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
        // Switch table case ranges are encoded into a series of masks.
        // Just use pointer type, it's guaranteed to fit.
        UsePtrType = true;
        break;
      }
  }
  SDValue Sub = RangeSub;
  if (UsePtrType) {
    VT = TLI.getPointerTy(DAG.getDataLayout());
    Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
  }

  B.RegVT = VT.getSimpleVT();
  B.Reg = FuncInfo.CreateReg(B.RegVT);
  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);

  MachineBasicBlock* MBB = B.Cases[0].ThisBB;

  if (!B.OmitRangeCheck)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);
  SwitchBB->normalizeSuccProbs();

  SDValue Root = CopyTo;
  if (!B.OmitRangeCheck) {
    // Conditional branch to the default block.
    SDValue RangeCmp = DAG.getSetCC(dl,
        TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                               RangeSub.getValueType()),
        RangeSub, DAG.getConstant(B.Range, dl, RangeSub.getValueType()),
        ISD::SETUGT);

    Root = DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
                       DAG.getBasicBlock(B.Default));
  }

  // Avoid emitting unnecessary branches to the next block.
  if (MBB != NextBlock(SwitchBB))
    Root = DAG.getNode(ISD::BR, dl, MVT::Other, Root, DAG.getBasicBlock(MBB));

  DAG.setRoot(Root);
}
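// (Illustrative note: for a cluster handling cases {0, 3, 5} with First = 0,
// a single BitTestCase carries Mask = 0b101001, and the general test below
// reduces membership to ((1 << (SValue - First)) & Mask) != 0.)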
/// visitBitTestCase - this function produces one "bit test"
void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
                                           MachineBasicBlock* NextMBB,
                                           BranchProbability BranchProbToNext,
                                           unsigned Reg,
                                           BitTestCase &B,
                                           MachineBasicBlock *SwitchBB) {
  SDLoc dl = getCurSDLoc();
  MVT VT = BB.RegVT;
  SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
  SDValue Cmp;
  unsigned PopCount = countPopulation(B.Mask);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (PopCount == 1) {
    // Testing for a single bit; just compare the shift count with what it
    // would need to be to shift a 1 bit in that position.
    Cmp = DAG.getSetCC(
        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
        ShiftOp, DAG.getConstant(countTrailingZeros(B.Mask), dl, VT),
        ISD::SETEQ);
  } else if (PopCount == BB.Range) {
    // There is only one zero bit in the range, test for it directly.
    Cmp = DAG.getSetCC(
        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
        ShiftOp, DAG.getConstant(countTrailingOnes(B.Mask), dl, VT),
        ISD::SETNE);
  } else {
    // Make desired shift
    SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
                                    DAG.getConstant(1, dl, VT), ShiftOp);

    // Emit bit tests and jumps
    SDValue AndOp = DAG.getNode(ISD::AND, dl,
                                VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
    Cmp = DAG.getSetCC(
        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
        AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
  }

  // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
  addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
  // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
  // one as they are relative probabilities (and thus work more like weights),
  // and hence we need to normalize them to let the sum of them become one.
  SwitchBB->normalizeSuccProbs();

  SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
                              MVT::Other, getControlRoot(),
                              Cmp, DAG.getBasicBlock(B.TargetBB));

  // Avoid emitting unnecessary branches to the next block.
  if (NextMBB != NextBlock(SwitchBB))
    BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
                        DAG.getBasicBlock(NextMBB));

  DAG.setRoot(BrAnd);
}
void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
  MachineBasicBlock *InvokeMBB = FuncInfo.MBB;

  // Retrieve successors. Look through artificial IR level blocks like
  // catchswitch for successors.
  MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
  // have to do anything here to lower funclet bundles.
  assert(!I.hasOperandBundlesOtherThan({LLVMContext::OB_deopt,
                                        LLVMContext::OB_funclet,
                                        LLVMContext::OB_cfguardtarget}) &&
         "Cannot lower invokes with arbitrary operand bundles yet!");

  const Value *Callee(I.getCalledValue());
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    visitInlineAsm(&I);
  else if (Fn && Fn->isIntrinsic()) {
    switch (Fn->getIntrinsicID()) {
    default:
      llvm_unreachable("Cannot invoke this intrinsic");
    case Intrinsic::donothing:
      // Ignore invokes to @llvm.donothing: jump directly to the next BB.
      break;
    case Intrinsic::experimental_patchpoint_void:
    case Intrinsic::experimental_patchpoint_i64:
      visitPatchpoint(&I, EHPadBB);
      break;
    case Intrinsic::experimental_gc_statepoint:
      LowerStatepoint(ImmutableStatepoint(&I), EHPadBB);
      break;
    case Intrinsic::wasm_rethrow_in_catch: {
      // This is usually done in visitTargetIntrinsic, but this intrinsic is
      // special because it can be invoked, so we manually lower it to a DAG
      // node here.
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(getRoot()); // inchain
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      Ops.push_back(
          DAG.getTargetConstant(Intrinsic::wasm_rethrow_in_catch, getCurSDLoc(),
                                TLI.getPointerTy(DAG.getDataLayout())));
      SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
      DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
      break;
    }
    }
  } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
    // Currently we do not lower any intrinsic calls with deopt operand bundles.
    // Eventually we will support lowering the @llvm.experimental.deoptimize
    // intrinsic, and right now there are no plans to support other intrinsics
    // with deopt state.
    LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
  } else {
    LowerCallTo(&I, getValue(Callee), false, EHPadBB);
  }

  // If the value of the invoke is used outside of its defining block, make it
  // available as a virtual register.
  // We already took care of the exported value for the statepoint instruction
  // during the call to LowerStatepoint.
  if (!isStatepoint(I)) {
    CopyToExportRegsIfNeeded(&I);
  }

  SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  BranchProbability EHPadBBProb =
      BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
          : BranchProbability::getZero();
  findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);

  // Update successor info.
  addSuccessorWithProb(InvokeMBB, Return);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
  }
  InvokeMBB->normalizeSuccProbs();

  // Drop into normal successor.
  DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
                          DAG.getBasicBlock(Return)));
}
void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
  MachineBasicBlock *CallBrMBB = FuncInfo.MBB;

  // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
  // have to do anything here to lower funclet bundles.
  assert(!I.hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
         "Cannot lower callbrs with arbitrary operand bundles yet!");

  assert(isa<InlineAsm>(I.getCalledValue()) &&
         "Only know how to handle inlineasm callbr");
  visitInlineAsm(&I);

  // Retrieve successors.
  MachineBasicBlock *Return = FuncInfo.MBBMap[I.getDefaultDest()];

  // Update successor info.
  addSuccessorWithProb(CallBrMBB, Return);
  for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
    MachineBasicBlock *Target = FuncInfo.MBBMap[I.getIndirectDest(i)];
    addSuccessorWithProb(CallBrMBB, Target);
  }
  CallBrMBB->normalizeSuccProbs();

  // Drop into default successor.
  DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
                          MVT::Other, getControlRoot(),
                          DAG.getBasicBlock(Return)));
}
void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
}
void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
  assert(FuncInfo.MBB->isEHPad() &&
         "Call to landingpad not in landing pad!");

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother to create these DAG nodes.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return;

  // If landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of exception
  // pointer or selector value from token type landingpads is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return;

  SmallVector<EVT, 2> ValueVTs;
  SDLoc dl = getCurSDLoc();
  ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
  assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");

  // Get the two live-in registers as SDValues. The physregs have already been
  // copied into virtual registers.
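  // (A landingpad of the common { i8*, i32 } form yields the exception
  // pointer and the selector value, matching the two-valued assert above.)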
  SDValue Ops[2];
  if (FuncInfo.ExceptionPointerVirtReg) {
    Ops[0] = DAG.getZExtOrTrunc(
        DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                           FuncInfo.ExceptionPointerVirtReg,
                           TLI.getPointerTy(DAG.getDataLayout())),
        dl, ValueVTs[0]);
  } else {
    Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
  }
  Ops[1] = DAG.getZExtOrTrunc(
      DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                         FuncInfo.ExceptionSelectorVirtReg,
                         TLI.getPointerTy(DAG.getDataLayout())),
      dl, ValueVTs[1]);

  // Merge into one.
  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
                            DAG.getVTList(ValueVTs), Ops);
  setValue(&LP, Res);
}
void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
                                           MachineBasicBlock *Last) {
  // Update JTCases.
  for (unsigned i = 0, e = SL->JTCases.size(); i != e; ++i)
    if (SL->JTCases[i].first.HeaderBB == First)
      SL->JTCases[i].first.HeaderBB = Last;

  // Update BitTestCases.
  for (unsigned i = 0, e = SL->BitTestCases.size(); i != e; ++i)
    if (SL->BitTestCases[i].Parent == First)
      SL->BitTestCases[i].Parent = Last;
}

void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
  MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;

  // Update machine-CFG edges with unique successors.
  SmallSet<BasicBlock*, 32> Done;
  for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
    BasicBlock *BB = I.getSuccessor(i);
    bool Inserted = Done.insert(BB).second;
    if (!Inserted)
      continue;

    MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
    addSuccessorWithProb(IndirectBrMBB, Succ);
  }
  IndirectBrMBB->normalizeSuccProbs();

  DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
                          MVT::Other, getControlRoot(),
                          getValue(I.getAddress())));
}
void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
  if (!DAG.getTarget().Options.TrapUnreachable)
    return;

  // We may be able to ignore unreachable behind a noreturn call.
  if (DAG.getTarget().Options.NoTrapAfterNoreturn) {
    const BasicBlock &BB = *I.getParent();
    if (&I != &BB.front()) {
      BasicBlock::const_iterator PredI =
        std::prev(BasicBlock::const_iterator(&I));
      if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
        if (Call->doesNotReturn())
          return;
      }
    }
  }

  DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
}
void SelectionDAGBuilder::visitFSub(const User &I) {
  // -0.0 - X --> fneg
  Type *Ty = I.getType();
  if (isa<Constant>(I.getOperand(0)) &&
      I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
    SDValue Op2 = getValue(I.getOperand(1));
    setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(),
                             Op2.getValueType(), Op2));
    return;
  }

  visitBinary(I, ISD::FSUB);
}
/// Checks if the given instruction performs a vector reduction, in which case
/// we have the freedom to alter the elements in the result as long as the
/// reduction of them stays unchanged.
static bool isVectorReductionOp(const User *I) {
  const Instruction *Inst = dyn_cast<Instruction>(I);
  if (!Inst || !Inst->getType()->isVectorTy())
    return false;

  auto OpCode = Inst->getOpcode();
  switch (OpCode) {
  case Instruction::Add:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    break;
  case Instruction::FAdd:
  case Instruction::FMul:
    if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
      if (FPOp->getFastMathFlags().isFast())
        break;
    LLVM_FALLTHROUGH;
  default:
    return false;
  }

  unsigned ElemNum = Inst->getType()->getVectorNumElements();
  // Ensure the reduction size is a power of 2.
  if (!isPowerOf2_32(ElemNum))
    return false;

  unsigned ElemNumToReduce = ElemNum;

  // Do DFS search on the def-use chain from the given instruction. We only
  // allow four kinds of operations during the search until we reach the
  // instruction that extracts the first element from the vector:
  //
  //   1. The reduction operation of the same opcode as the given instruction.
  //
  //   2. PHI node.
  //
  //   3. ShuffleVector instruction together with a reduction operation that
  //      does a partial reduction.
  //
  //   4. ExtractElement that extracts the first element from the vector, and we
  //      stop searching the def-use chain here.
  //
  // 3 & 4 above perform a reduction on all elements of the vector. We push defs
  // from 1-3 to the stack to continue the DFS. The given instruction is not
  // a reduction operation if we meet any other instructions other than those
  // listed above.
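  //
  // For example, a 4-element integer add reduction feeding this pattern
  // typically looks like (illustrative IR):
  //
  //   %s1 = shufflevector <4 x i32> %v, <4 x i32> undef,
  //                       <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  //   %r1 = add <4 x i32> %v, %s1
  //   %s2 = shufflevector <4 x i32> %r1, <4 x i32> undef,
  //                       <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  //   %r2 = add <4 x i32> %r1, %s2
  //   %res = extractelement <4 x i32> %r2, i32 0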
  SmallVector<const User *, 16> UsersToVisit{Inst};
  SmallPtrSet<const User *, 16> Visited;
  bool ReduxExtracted = false;

  while (!UsersToVisit.empty()) {
    auto User = UsersToVisit.back();
    UsersToVisit.pop_back();
    if (!Visited.insert(User).second)
      continue;

    for (const auto *U : User->users()) {
      auto Inst = dyn_cast<Instruction>(U);
      if (!Inst)
        return false;

      if (Inst->getOpcode() == OpCode || isa<PHINode>(U)) {
        if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst))
          if (!isa<PHINode>(FPOp) && !FPOp->getFastMathFlags().isFast())
            return false;
        UsersToVisit.push_back(U);
      } else if (const ShuffleVectorInst *ShufInst =
                     dyn_cast<ShuffleVectorInst>(U)) {
        // Detect the following pattern: A ShuffleVector instruction together
        // with a reduction that does a partial reduction on the first and
        // second ElemNumToReduce / 2 elements, and stores the result in
        // ElemNumToReduce / 2 elements in another vector.

        unsigned ResultElements = ShufInst->getType()->getVectorNumElements();
        if (ResultElements < ElemNum)
          return false;

        if (ElemNumToReduce == 1)
          return false;
        if (!isa<UndefValue>(U->getOperand(1)))
          return false;
        for (unsigned i = 0; i < ElemNumToReduce / 2; ++i)
          if (ShufInst->getMaskValue(i) != int(i + ElemNumToReduce / 2))
            return false;
        for (unsigned i = ElemNumToReduce / 2; i < ElemNum; ++i)
          if (ShufInst->getMaskValue(i) != -1)
            return false;

        // There is only one user of this ShuffleVector instruction, which
        // must be a reduction operation.
        if (!U->hasOneUse())
          return false;

        auto U2 = dyn_cast<Instruction>(*U->user_begin());
        if (!U2 || U2->getOpcode() != OpCode)
          return false;

        // Check operands of the reduction operation.
        if ((U2->getOperand(0) == U->getOperand(0) && U2->getOperand(1) == U) ||
            (U2->getOperand(1) == U->getOperand(0) && U2->getOperand(0) == U)) {
          UsersToVisit.push_back(U2);
          ElemNumToReduce /= 2;
        } else
          return false;
      } else if (isa<ExtractElementInst>(U)) {
        // At this moment we should have reduced all elements in the vector.
        if (ElemNumToReduce != 1)
          return false;

        const ConstantInt *Val = dyn_cast<ConstantInt>(U->getOperand(1));
        if (!Val || !Val->isZero())
          return false;

        ReduxExtracted = true;
      } else
        return false;
    }
  }
  return ReduxExtracted;
}
void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
  SDNodeFlags Flags;

  SDValue Op = getValue(I.getOperand(0));
  SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(),
                                    Op, Flags);
  setValue(&I, UnNodeValue);
}

void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
  SDNodeFlags Flags;
  if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
    Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
    Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
  }
  if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I)) {
    Flags.setExact(ExactOp->isExact());
  }
  if (isVectorReductionOp(&I)) {
    Flags.setVectorReduction(true);
    LLVM_DEBUG(dbgs() << "Detected a reduction operation:" << I << "\n");

    // If no flags are set we will propagate the incoming flags, if any flags
    // are set, we will intersect them with the incoming flag and so we need to
    // copy the FMF flags here.
    if (auto *FPOp = dyn_cast<FPMathOperator>(&I)) {
      Flags.copyFMF(*FPOp);
    }
  }

  SDValue Op1 = getValue(I.getOperand(0));
  SDValue Op2 = getValue(I.getOperand(1));
  SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
                                     Op1, Op2, Flags);
  setValue(&I, BinNodeValue);
}
3170 void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
3171 SDValue Op1 = getValue(I.getOperand(0));
3172 SDValue Op2 = getValue(I.getOperand(1));
3174 EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
3175 Op1.getValueType(), DAG.getDataLayout());
3177 // Coerce the shift amount to the right type if we can.
3178 if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
3179 unsigned ShiftSize = ShiftTy.getSizeInBits();
3180 unsigned Op2Size = Op2.getValueSizeInBits();
3181 SDLoc DL = getCurSDLoc();
3183 // If the operand is smaller than the shift count type, promote it.
3184 if (ShiftSize > Op2Size)
3185 Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2);
3187 // If the operand is larger than the shift count type but the shift
3188 // count type has enough bits to represent any shift value, truncate
3189 // it now. This is a common case and it exposes the truncate to
3190 // optimization early.
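    // For example (an illustrative case, not tied to any particular
    // target): with an i8 shift-amount type, an i64 amount for an i32 shift
    // can be truncated to i8, since only amounts in [0, 31] are well
    // defined anyway.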
3191 else if (ShiftSize >= Log2_32_Ceil(Op2.getValueSizeInBits()))
3192 Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2);
3193 // Otherwise we'll need to temporarily settle for some other convenient
3194 // type. Type legalization will make adjustments once the shiftee is split.
3196 Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32);
3203 if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
3205 if (const OverflowingBinaryOperator *OFBinOp =
3206 dyn_cast<const OverflowingBinaryOperator>(&I)) {
3207 nuw = OFBinOp->hasNoUnsignedWrap();
3208 nsw = OFBinOp->hasNoSignedWrap();
3210 if (const PossiblyExactOperator *ExactOp =
3211 dyn_cast<const PossiblyExactOperator>(&I))
3212 exact = ExactOp->isExact();
3215 Flags.setExact(exact);
3216 Flags.setNoSignedWrap(nsw);
3217 Flags.setNoUnsignedWrap(nuw);
3218 SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
3223 void SelectionDAGBuilder::visitSDiv(const User &I) {
3224 SDValue Op1 = getValue(I.getOperand(0));
3225 SDValue Op2 = getValue(I.getOperand(1));
3228 Flags.setExact(isa<PossiblyExactOperator>(&I) &&
3229 cast<PossiblyExactOperator>(&I)->isExact());
3230 setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
3234 void SelectionDAGBuilder::visitICmp(const User &I) {
3235 ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
3236 if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
3237 predicate = IC->getPredicate();
3238 else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
3239 predicate = ICmpInst::Predicate(IC->getPredicate());
3240 SDValue Op1 = getValue(I.getOperand(0));
3241 SDValue Op2 = getValue(I.getOperand(1));
3242 ISD::CondCode Opcode = getICmpCondCode(predicate);
3244 auto &TLI = DAG.getTargetLoweringInfo();
3246 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3248 // If a pointer's DAG type is larger than its memory type then the DAG values
3249 // are zero-extended. This breaks signed comparisons so truncate back to the
3250 // underlying type before doing the compare.
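  // For example (illustrative), a target may hold pointers in 64-bit
  // registers while their memory type is i32; a signed compare on the
  // zero-extended i64 values would misorder pointers with the top memory
  // bit set, so the compare is done at i32 instead.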
3251 if (Op1.getValueType() != MemVT) {
3252 Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT);
3253 Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT);
3256 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3258 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
3261 void SelectionDAGBuilder::visitFCmp(const User &I) {
3262 FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
3263 if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
3264 predicate = FC->getPredicate();
3265 else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
3266 predicate = FCmpInst::Predicate(FC->getPredicate());
3267 SDValue Op1 = getValue(I.getOperand(0));
3268 SDValue Op2 = getValue(I.getOperand(1));
3270 ISD::CondCode Condition = getFCmpCondCode(predicate);
3271 auto *FPMO = dyn_cast<FPMathOperator>(&I);
3272 if ((FPMO && FPMO->hasNoNaNs()) || TM.Options.NoNaNsFPMath)
3273 Condition = getFCmpCodeWithoutNaN(Condition);
3275 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3277 setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
// Return true if every user of the given select condition is itself a select.
// Only then is it profitable to fold the compare feeding the condition into a
// min/max node, since otherwise the compare must be kept alive for the
// remaining users.
3282 static bool hasOnlySelectUsers(const Value *Cond) {
3283 return llvm::all_of(Cond->users(), [](const Value *V) {
3284 return isa<SelectInst>(V);
3288 void SelectionDAGBuilder::visitSelect(const User &I) {
3289 SmallVector<EVT, 4> ValueVTs;
3290 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
3292 unsigned NumValues = ValueVTs.size();
3293 if (NumValues == 0) return;
3295 SmallVector<SDValue, 4> Values(NumValues);
3296 SDValue Cond = getValue(I.getOperand(0));
3297 SDValue LHSVal = getValue(I.getOperand(1));
3298 SDValue RHSVal = getValue(I.getOperand(2));
3299 auto BaseOps = {Cond};
3300 ISD::NodeType OpCode = Cond.getValueType().isVector() ?
3301 ISD::VSELECT : ISD::SELECT;
3303 bool IsUnaryAbs = false;
3305 // Min/max matching is only viable if all output VTs are the same.
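  // For example (illustrative IR), the pattern
  //   %c = icmp slt i32 %x, %y
  //   %r = select i1 %c, i32 %x, i32 %y
  // matches SPF_SMIN below and can be emitted as a single ISD::SMIN node.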
3306 if (is_splat(ValueVTs)) {
3307 EVT VT = ValueVTs[0];
3308 LLVMContext &Ctx = *DAG.getContext();
3309 auto &TLI = DAG.getTargetLoweringInfo();
3311 // We care about the legality of the operation after it has been type
3313 while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal)
3314 VT = TLI.getTypeToTransformTo(Ctx, VT);
3316 // If the vselect is legal, assume we want to leave this as a vector setcc +
3317 // vselect. Otherwise, if this is going to be scalarized, we want to see if
3318 // min/max is legal on the scalar type.
3319 bool UseScalarMinMax = VT.isVector() &&
3320 !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
3323 auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
3324 ISD::NodeType Opc = ISD::DELETED_NODE;
3325 switch (SPR.Flavor) {
3326 case SPF_UMAX: Opc = ISD::UMAX; break;
3327 case SPF_UMIN: Opc = ISD::UMIN; break;
3328 case SPF_SMAX: Opc = ISD::SMAX; break;
3329 case SPF_SMIN: Opc = ISD::SMIN; break;
3331 switch (SPR.NaNBehavior) {
3332 case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3333 case SPNB_RETURNS_NAN: Opc = ISD::FMINIMUM; break;
3334 case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
3335 case SPNB_RETURNS_ANY: {
3336 if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT))
3338 else if (TLI.isOperationLegalOrCustom(ISD::FMINIMUM, VT))
3339 Opc = ISD::FMINIMUM;
3340 else if (UseScalarMinMax)
3341 Opc = TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType()) ?
3342 ISD::FMINNUM : ISD::FMINIMUM;
3348 switch (SPR.NaNBehavior) {
3349 case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3350 case SPNB_RETURNS_NAN: Opc = ISD::FMAXIMUM; break;
3351 case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
3352 case SPNB_RETURNS_ANY:
3354 if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT))
3356 else if (TLI.isOperationLegalOrCustom(ISD::FMAXIMUM, VT))
3357 Opc = ISD::FMAXIMUM;
3358 else if (UseScalarMinMax)
3359 Opc = TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType()) ?
3360 ISD::FMAXNUM : ISD::FMAXIMUM;
3369 // TODO: we need to produce sub(0, abs(X)).
3373 if (!IsUnaryAbs && Opc != ISD::DELETED_NODE &&
3374 (TLI.isOperationLegalOrCustom(Opc, VT) ||
3376 TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
3377 // If the underlying comparison instruction is used by any other
3378 // instruction, the consumed instructions won't be destroyed, so it is
3379 // not profitable to convert to a min/max.
3380 hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
3382 LHSVal = getValue(LHS);
3383 RHSVal = getValue(RHS);
3389 LHSVal = getValue(LHS);
3395 for (unsigned i = 0; i != NumValues; ++i) {
3397 DAG.getNode(OpCode, getCurSDLoc(),
3398 LHSVal.getNode()->getValueType(LHSVal.getResNo() + i),
3399 SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3402 for (unsigned i = 0; i != NumValues; ++i) {
3403 SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
3404 Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3405 Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
3406 Values[i] = DAG.getNode(
3407 OpCode, getCurSDLoc(),
3408 LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops);
3412 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3413 DAG.getVTList(ValueVTs), Values));
3416 void SelectionDAGBuilder::visitTrunc(const User &I) {
3417 // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3418 SDValue N = getValue(I.getOperand(0));
3419 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3421 setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
3424 void SelectionDAGBuilder::visitZExt(const User &I) {
  // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
  // ZExt also can't be a cast to bool for the same reason, so there is
  // nothing much to do here.
3427 SDValue N = getValue(I.getOperand(0));
3428 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3430 setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N));
3433 void SelectionDAGBuilder::visitSExt(const User &I) {
  // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
  // SExt also can't be a cast to bool for the same reason, so there is
  // nothing much to do here.
3436 SDValue N = getValue(I.getOperand(0));
3437 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3439 setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3442 void SelectionDAGBuilder::visitFPTrunc(const User &I) {
3443 // FPTrunc is never a no-op cast, no need to check
3444 SDValue N = getValue(I.getOperand(0));
3445 SDLoc dl = getCurSDLoc();
3446 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3447 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3448 setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
3449 DAG.getTargetConstant(
3450 0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
3453 void SelectionDAGBuilder::visitFPExt(const User &I) {
3454 // FPExt is never a no-op cast, no need to check
3455 SDValue N = getValue(I.getOperand(0));
3456 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3458 setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3461 void SelectionDAGBuilder::visitFPToUI(const User &I) {
3462 // FPToUI is never a no-op cast, no need to check
3463 SDValue N = getValue(I.getOperand(0));
3464 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3466 setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3469 void SelectionDAGBuilder::visitFPToSI(const User &I) {
3470 // FPToSI is never a no-op cast, no need to check
3471 SDValue N = getValue(I.getOperand(0));
3472 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3474 setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3477 void SelectionDAGBuilder::visitUIToFP(const User &I) {
3478 // UIToFP is never a no-op cast, no need to check
3479 SDValue N = getValue(I.getOperand(0));
3480 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3482 setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
3485 void SelectionDAGBuilder::visitSIToFP(const User &I) {
3486 // SIToFP is never a no-op cast, no need to check
3487 SDValue N = getValue(I.getOperand(0));
3488 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3490 setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3493 void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3494 // What to do depends on the size of the integer and the size of the pointer.
3495 // We can either truncate, zero extend, or no-op, accordingly.
3496 SDValue N = getValue(I.getOperand(0));
3497 auto &TLI = DAG.getTargetLoweringInfo();
3498 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3501 TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3502 N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3503 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT);
3507 void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3508 // What to do depends on the size of the integer and the size of the pointer.
3509 // We can either truncate, zero extend, or no-op, accordingly.
3510 SDValue N = getValue(I.getOperand(0));
3511 auto &TLI = DAG.getTargetLoweringInfo();
3512 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3513 EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
3514 N = DAG.getZExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3515 N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), DestVT);
3519 void SelectionDAGBuilder::visitBitCast(const User &I) {
3520 SDValue N = getValue(I.getOperand(0));
3521 SDLoc dl = getCurSDLoc();
3522 EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3525 // BitCast assures us that source and destination are the same size so this is
3526 // either a BITCAST or a no-op.
3527 if (DestVT != N.getValueType())
3528 setValue(&I, DAG.getNode(ISD::BITCAST, dl,
3529 DestVT, N)); // convert types.
3530 // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
3531 // might fold any kind of constant expression to an integer constant and that
3532 // is not what we are looking for. Only recognize a bitcast of a genuine
3533 // constant integer as an opaque constant.
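  // For example (illustrative), "%f = bitcast i32 1078530011 to float" has a
  // genuine ConstantInt source and is emitted as an opaque constant below,
  // while a constant expression that merely folds to an integer is not.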
3534 else if(ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
3535 setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
3538 setValue(&I, N); // noop cast.
3541 void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3542 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3543 const Value *SV = I.getOperand(0);
3544 SDValue N = getValue(SV);
3545 EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3547 unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3548 unsigned DestAS = I.getType()->getPointerAddressSpace();
3550 if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS))
3551 N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3556 void SelectionDAGBuilder::visitInsertElement(const User &I) {
3557 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3558 SDValue InVec = getValue(I.getOperand(0));
3559 SDValue InVal = getValue(I.getOperand(1));
3560 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
3561 TLI.getVectorIdxTy(DAG.getDataLayout()));
3562 setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
3563 TLI.getValueType(DAG.getDataLayout(), I.getType()),
3564 InVec, InVal, InIdx));
3567 void SelectionDAGBuilder::visitExtractElement(const User &I) {
3568 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3569 SDValue InVec = getValue(I.getOperand(0));
3570 SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
3571 TLI.getVectorIdxTy(DAG.getDataLayout()));
3572 setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3573 TLI.getValueType(DAG.getDataLayout(), I.getType()),
3577 void SelectionDAGBuilder::visitShuffleVector(const User &I) {
3578 SDValue Src1 = getValue(I.getOperand(0));
3579 SDValue Src2 = getValue(I.getOperand(1));
3580 Constant *MaskV = cast<Constant>(I.getOperand(2));
3581 SDLoc DL = getCurSDLoc();
3582 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3583 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3584 EVT SrcVT = Src1.getValueType();
3585 unsigned SrcNumElts = SrcVT.getVectorNumElements();
3587 if (MaskV->isNullValue() && VT.isScalableVector()) {
3588 // Canonical splat form of first element of first input vector.
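    // For example (illustrative IR):
    //   %splat = shufflevector <vscale x 4 x i32> %src,
    //                          <vscale x 4 x i32> undef,
    //                          <vscale x 4 x i32> zeroinitializer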
3589 SDValue FirstElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
3590 SrcVT.getScalarType(), Src1,
3591 DAG.getConstant(0, DL,
3592 TLI.getVectorIdxTy(DAG.getDataLayout())));
3593 setValue(&I, DAG.getNode(ISD::SPLAT_VECTOR, DL, VT, FirstElt));
3597 // For now, we only handle splats for scalable vectors.
3598 // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation
3599 // for targets that support a SPLAT_VECTOR for non-scalable vector types.
3600 assert(!VT.isScalableVector() && "Unsupported scalable vector shuffle");
3602 SmallVector<int, 8> Mask;
3603 ShuffleVectorInst::getShuffleMask(MaskV, Mask);
3604 unsigned MaskNumElts = Mask.size();
3606 if (SrcNumElts == MaskNumElts) {
3607 setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
3611 // Normalize the shuffle vector since mask and vector length don't match.
3612 if (SrcNumElts < MaskNumElts) {
    // Mask is longer than the source vectors. We can concatenate the source
    // vectors to make the mask and vector lengths match.
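    // For example (illustrative), with two <4 x i32> sources, the mask
    // <0, 1, 2, 3, 4, 5, 6, 7> is exactly CONCAT_VECTORS(Src1, Src2).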
3616 if (MaskNumElts % SrcNumElts == 0) {
3617 // Mask length is a multiple of the source vector length.
3618 // Check if the shuffle is some kind of concatenation of the input
3620 unsigned NumConcat = MaskNumElts / SrcNumElts;
3621 bool IsConcat = true;
3622 SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
3623 for (unsigned i = 0; i != MaskNumElts; ++i) {
3627 // Ensure the indices in each SrcVT sized piece are sequential and that
3628 // the same source is used for the whole piece.
3629 if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
3630 (ConcatSrcs[i / SrcNumElts] >= 0 &&
3631 ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
3635 // Remember which source this index came from.
3636 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
3639 // The shuffle is concatenating multiple vectors together. Just emit
3640 // a CONCAT_VECTORS operation.
3642 SmallVector<SDValue, 8> ConcatOps;
3643 for (auto Src : ConcatSrcs) {
3645 ConcatOps.push_back(DAG.getUNDEF(SrcVT));
3647 ConcatOps.push_back(Src1);
3649 ConcatOps.push_back(Src2);
3651 setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
3656 unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
3657 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
3658 EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
3661 // Pad both vectors with undefs to make them the same length as the mask.
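    // For example (illustrative), shuffling two <2 x i32> sources with a
    // 3-element mask pads each source to <4 x i32> with undefs, shuffles at
    // the padded width, and then extracts the leading three elements below.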
3662 SDValue UndefVal = DAG.getUNDEF(SrcVT);
3664 SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
3665 SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
3669 Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
3670 Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
3672 // Readjust mask for new input vector length.
3673 SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
3674 for (unsigned i = 0; i != MaskNumElts; ++i) {
3676 if (Idx >= (int)SrcNumElts)
3677 Idx -= SrcNumElts - PaddedMaskNumElts;
3681 SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
3683 // If the concatenated vector was padded, extract a subvector with the
3684 // correct number of elements.
3685 if (MaskNumElts != PaddedMaskNumElts)
3686 Result = DAG.getNode(
3687 ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
3688 DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
3690 setValue(&I, Result);
3694 if (SrcNumElts > MaskNumElts) {
3695 // Analyze the access pattern of the vector to see if we can extract
3696 // two subvectors and do the shuffle.
3697 int StartIdx[2] = { -1, -1 }; // StartIdx to extract from
3698 bool CanExtract = true;
3699 for (int Idx : Mask) {
3704 if (Idx >= (int)SrcNumElts) {
3709 // If all the indices come from the same MaskNumElts sized portion of
3710 // the sources we can use extract. Also make sure the extract wouldn't
3711 // extract past the end of the source.
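      // For example (illustrative), with <8 x i32> sources and the
      // 4-element mask <4, 5, 6, 7>, every index lands in the aligned chunk
      // starting at 4, so a single <4 x i32> subvector extract suffices.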
3712 int NewStartIdx = alignDown(Idx, MaskNumElts);
3713 if (NewStartIdx + MaskNumElts > SrcNumElts ||
3714 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
3716 // Make sure we always update StartIdx as we use it to track if all
3717 // elements are undef.
3718 StartIdx[Input] = NewStartIdx;
3721 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
3722 setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
3726 // Extract appropriate subvector and generate a vector shuffle
3727 for (unsigned Input = 0; Input < 2; ++Input) {
3728 SDValue &Src = Input == 0 ? Src1 : Src2;
3729 if (StartIdx[Input] < 0)
3730 Src = DAG.getUNDEF(VT);
3733 ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
3734 DAG.getConstant(StartIdx[Input], DL,
3735 TLI.getVectorIdxTy(DAG.getDataLayout())));
3739 // Calculate new mask.
3740 SmallVector<int, 8> MappedOps(Mask.begin(), Mask.end());
3741 for (int &Idx : MappedOps) {
3742 if (Idx >= (int)SrcNumElts)
3743 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3748 setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
  // We can't use either concat vectors or extract subvectors so fall back to
  // replacing the shuffle with extract and build vector.
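  // For example (illustrative), mask element 5 over two <4 x i32> sources
  // becomes EXTRACT_VECTOR_ELT(Src2, 1); the per-element results are then
  // recombined with a single BUILD_VECTOR.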
3756 EVT EltVT = VT.getVectorElementType();
3757 EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
3758 SmallVector<SDValue,8> Ops;
3759 for (int Idx : Mask) {
3763 Res = DAG.getUNDEF(EltVT);
3765 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
3766 if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
3768 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
3769 EltVT, Src, DAG.getConstant(Idx, DL, IdxVT));
3775 setValue(&I, DAG.getBuildVector(VT, DL, Ops));
3778 void SelectionDAGBuilder::visitInsertValue(const User &I) {
3779 ArrayRef<unsigned> Indices;
3780 if (const InsertValueInst *IV = dyn_cast<InsertValueInst>(&I))
3781 Indices = IV->getIndices();
3783 Indices = cast<ConstantExpr>(&I)->getIndices();
3785 const Value *Op0 = I.getOperand(0);
3786 const Value *Op1 = I.getOperand(1);
3787 Type *AggTy = I.getType();
3788 Type *ValTy = Op1->getType();
3789 bool IntoUndef = isa<UndefValue>(Op0);
3790 bool FromUndef = isa<UndefValue>(Op1);
3792 unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3794 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3795 SmallVector<EVT, 4> AggValueVTs;
3796 ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
3797 SmallVector<EVT, 4> ValValueVTs;
3798 ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3800 unsigned NumAggValues = AggValueVTs.size();
3801 unsigned NumValValues = ValValueVTs.size();
3802 SmallVector<SDValue, 4> Values(NumAggValues);
3804 // Ignore an insertvalue that produces an empty object
3805 if (!NumAggValues) {
3806 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3810 SDValue Agg = getValue(Op0);
3812 // Copy the beginning value(s) from the original aggregate.
3813 for (; i != LinearIndex; ++i)
3814 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3815 SDValue(Agg.getNode(), Agg.getResNo() + i);
3816 // Copy values from the inserted value(s).
3818 SDValue Val = getValue(Op1);
3819 for (; i != LinearIndex + NumValValues; ++i)
3820 Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3821 SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
3823 // Copy remaining value(s) from the original aggregate.
3824 for (; i != NumAggValues; ++i)
3825 Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
3826 SDValue(Agg.getNode(), Agg.getResNo() + i);
3828 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3829 DAG.getVTList(AggValueVTs), Values));
3832 void SelectionDAGBuilder::visitExtractValue(const User &I) {
3833 ArrayRef<unsigned> Indices;
3834 if (const ExtractValueInst *EV = dyn_cast<ExtractValueInst>(&I))
3835 Indices = EV->getIndices();
3837 Indices = cast<ConstantExpr>(&I)->getIndices();
3839 const Value *Op0 = I.getOperand(0);
3840 Type *AggTy = Op0->getType();
3841 Type *ValTy = I.getType();
3842 bool OutOfUndef = isa<UndefValue>(Op0);
3844 unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
3846 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3847 SmallVector<EVT, 4> ValValueVTs;
3848 ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
3850 unsigned NumValValues = ValValueVTs.size();
  // Ignore an extractvalue that produces an empty object
3853 if (!NumValValues) {
3854 setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
3858 SmallVector<SDValue, 4> Values(NumValValues);
3860 SDValue Agg = getValue(Op0);
3861 // Copy out the selected value(s).
3862 for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
3863 Values[i - LinearIndex] =
3865 DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
3866 SDValue(Agg.getNode(), Agg.getResNo() + i);
3868 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3869 DAG.getVTList(ValValueVTs), Values));
3872 void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
3873 Value *Op0 = I.getOperand(0);
3874 // Note that the pointer operand may be a vector of pointers. Take the scalar
3875 // element which holds a pointer.
3876 unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
3877 SDValue N = getValue(Op0);
3878 SDLoc dl = getCurSDLoc();
3879 auto &TLI = DAG.getTargetLoweringInfo();
3880 MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS);
3881 MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS);
  // Normalize Vector GEP - all scalar operands should be converted to the
  // splat vector.
3885 unsigned VectorWidth = I.getType()->isVectorTy() ?
3886 I.getType()->getVectorNumElements() : 0;
3888 if (VectorWidth && !N.getValueType().isVector()) {
3889 LLVMContext &Context = *DAG.getContext();
3890 EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorWidth);
3891 N = DAG.getSplatBuildVector(VT, dl, N);
3894 for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
3896 const Value *Idx = GTI.getOperand();
3897 if (StructType *StTy = GTI.getStructTypeOrNull()) {
3898 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
3901 uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field);
3903 // In an inbounds GEP with an offset that is nonnegative even when
3904 // interpreted as signed, assume there is no unsigned overflow.
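      // For example (illustrative), adding an inbounds struct-field offset
      // of 8 cannot wrap the address space, so the ADD below may carry the
      // no-unsigned-wrap flag.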
3906 if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
3907 Flags.setNoUnsignedWrap(true);
3909 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
3910 DAG.getConstant(Offset, dl, N.getValueType()), Flags);
3913 unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
3914 MVT IdxTy = MVT::getIntegerVT(IdxSize);
3915 APInt ElementSize(IdxSize, DL->getTypeAllocSize(GTI.getIndexedType()));
3917 // If this is a scalar constant or a splat vector of constants,
3918 // handle it quickly.
3919 const auto *C = dyn_cast<Constant>(Idx);
3920 if (C && isa<VectorType>(C->getType()))
3921 C = C->getSplatValue();
3923 if (const auto *CI = dyn_cast_or_null<ConstantInt>(C)) {
3926 APInt Offs = ElementSize * CI->getValue().sextOrTrunc(IdxSize);
3927 LLVMContext &Context = *DAG.getContext();
3928 SDValue OffsVal = VectorWidth ?
3929 DAG.getConstant(Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorWidth)) :
3930 DAG.getConstant(Offs, dl, IdxTy);
3932 // In an inbounds GEP with an offset that is nonnegative even when
3933 // interpreted as signed, assume there is no unsigned overflow.
3935 if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
3936 Flags.setNoUnsignedWrap(true);
3938 OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType());
3940 N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
3944 // N = N + Idx * ElementSize;
3945 SDValue IdxN = getValue(Idx);
3947 if (!IdxN.getValueType().isVector() && VectorWidth) {
3948 EVT VT = EVT::getVectorVT(*Context, IdxN.getValueType(), VectorWidth);
3949 IdxN = DAG.getSplatBuildVector(VT, dl, IdxN);
      // If the index is smaller or larger than intptr_t, truncate or extend
      // it.
3954 IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
3956 // If this is a multiply by a power of two, turn it into a shl
3957 // immediately. This is a very common case.
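      // For example (illustrative), a GEP over i32 elements scales the
      // index by ElementSize == 4, which is emitted as "shl IdxN, 2" below.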
3958 if (ElementSize != 1) {
3959 if (ElementSize.isPowerOf2()) {
3960 unsigned Amt = ElementSize.logBase2();
3961 IdxN = DAG.getNode(ISD::SHL, dl,
3962 N.getValueType(), IdxN,
3963 DAG.getConstant(Amt, dl, IdxN.getValueType()));
3965 SDValue Scale = DAG.getConstant(ElementSize.getZExtValue(), dl,
3966 IdxN.getValueType());
3967 IdxN = DAG.getNode(ISD::MUL, dl,
3968 N.getValueType(), IdxN, Scale);
3972 N = DAG.getNode(ISD::ADD, dl,
3973 N.getValueType(), N, IdxN);
3977 if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds())
3978 N = DAG.getPtrExtendInReg(N, dl, PtrMemTy);
3983 void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
3984 // If this is a fixed sized alloca in the entry block of the function,
3985 // allocate it statically on the stack.
3986 if (FuncInfo.StaticAllocaMap.count(&I))
3987 return; // getValue will auto-populate this.
3989 SDLoc dl = getCurSDLoc();
3990 Type *Ty = I.getAllocatedType();
3991 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3992 auto &DL = DAG.getDataLayout();
3993 uint64_t TySize = DL.getTypeAllocSize(Ty);
3995 std::max((unsigned)DL.getPrefTypeAlignment(Ty), I.getAlignment());
3997 SDValue AllocSize = getValue(I.getArraySize());
3999 EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout(), DL.getAllocaAddrSpace());
4000 if (AllocSize.getValueType() != IntPtr)
4001 AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4003 AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr,
4005 DAG.getConstant(TySize, dl, IntPtr));
4007 // Handle alignment. If the requested alignment is less than or equal to
4008 // the stack alignment, ignore it. If the size is greater than or equal to
4009 // the stack alignment, we note this in the DYNAMIC_STACKALLOC node.
4010 unsigned StackAlign =
4011 DAG.getSubtarget().getFrameLowering()->getStackAlignment();
4012 if (Align <= StackAlign)
  // Round the size of the allocation up to the stack alignment size
  // by adding StackAlign - 1 to the size. This doesn't overflow because
  // we're computing an address inside an alloca.
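  // For example (illustrative), with StackAlign == 16 the two nodes below
  // compute AllocSize = (AllocSize + 15) & ~15.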
4019 Flags.setNoUnsignedWrap(true);
4020 AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
4021 DAG.getConstant(StackAlign - 1, dl, IntPtr), Flags);
4023 // Mask out the low bits for alignment purposes.
4025 DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
4026 DAG.getConstant(~(uint64_t)(StackAlign - 1), dl, IntPtr));
4028 SDValue Ops[] = {getRoot(), AllocSize, DAG.getConstant(Align, dl, IntPtr)};
4029 SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
4030 SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
4032 DAG.setRoot(DSA.getValue(1));
4034 assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
4037 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
  if (I.isAtomic())
    return visitAtomicLoad(I);
4041 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4042 const Value *SV = I.getOperand(0);
4043 if (TLI.supportSwiftError()) {
4044 // Swifterror values can come from either a function parameter with
4045 // swifterror attribute or an alloca with swifterror attribute.
4046 if (const Argument *Arg = dyn_cast<Argument>(SV)) {
4047 if (Arg->hasSwiftErrorAttr())
4048 return visitLoadFromSwiftError(I);
4051 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
4052 if (Alloca->isSwiftError())
4053 return visitLoadFromSwiftError(I);
4057 SDValue Ptr = getValue(SV);
4059 Type *Ty = I.getType();
4061 bool isVolatile = I.isVolatile();
4062 bool isNonTemporal = I.hasMetadata(LLVMContext::MD_nontemporal);
4063 bool isInvariant = I.hasMetadata(LLVMContext::MD_invariant_load);
4064 bool isDereferenceable =
4065 isDereferenceablePointer(SV, I.getType(), DAG.getDataLayout());
4066 unsigned Alignment = I.getAlignment();
4069 I.getAAMetadata(AAInfo);
4070 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4072 SmallVector<EVT, 4> ValueVTs, MemVTs;
4073 SmallVector<uint64_t, 4> Offsets;
4074 ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets);
4075 unsigned NumValues = ValueVTs.size();
4080 bool ConstantMemory = false;
4082 // Serialize volatile loads with other side effects.
4084 else if (NumValues > MaxParallelChains)
4085 Root = getMemoryRoot();
4087 AA->pointsToConstantMemory(MemoryLocation(
4089 LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4091 // Do not serialize (non-volatile) loads of constant memory with anything.
4092 Root = DAG.getEntryNode();
4093 ConstantMemory = true;
4095 // Do not serialize non-volatile loads against each other.
4096 Root = DAG.getRoot();
4099 SDLoc dl = getCurSDLoc();
4102 Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
4104 // An aggregate load cannot wrap around the address space, so offsets to its
4105 // parts don't wrap either.
4107 Flags.setNoUnsignedWrap(true);
4109 SmallVector<SDValue, 4> Values(NumValues);
4110 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4111 EVT PtrVT = Ptr.getValueType();
4112 unsigned ChainI = 0;
4113 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4114 // Serializing loads here may result in excessive register pressure, and
4115 // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
4116 // could recover a bit by hoisting nodes upward in the chain by recognizing
4117 // they are side-effect free or do not alias. The optimizer should really
4118 // avoid this case by converting large object/array copies to llvm.memcpy
    // (MaxParallelChains should always remain as a failsafe.)
4120 if (ChainI == MaxParallelChains) {
4121 assert(PendingLoads.empty() && "PendingLoads must be serialized first");
4122 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4123 makeArrayRef(Chains.data(), ChainI));
4127 SDValue A = DAG.getNode(ISD::ADD, dl,
4129 DAG.getConstant(Offsets[i], dl, PtrVT),
4131 auto MMOFlags = MachineMemOperand::MONone;
4133 MMOFlags |= MachineMemOperand::MOVolatile;
4135 MMOFlags |= MachineMemOperand::MONonTemporal;
4137 MMOFlags |= MachineMemOperand::MOInvariant;
4138 if (isDereferenceable)
4139 MMOFlags |= MachineMemOperand::MODereferenceable;
4140 MMOFlags |= TLI.getMMOFlags(I);
4142 SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A,
4143 MachinePointerInfo(SV, Offsets[i]), Alignment,
4144 MMOFlags, AAInfo, Ranges);
4145 Chains[ChainI] = L.getValue(1);
4147 if (MemVTs[i] != ValueVTs[i])
4148 L = DAG.getZExtOrTrunc(L, dl, ValueVTs[i]);
4153 if (!ConstantMemory) {
4154 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4155 makeArrayRef(Chains.data(), ChainI));
4159 PendingLoads.push_back(Chain);
4162 setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
4163 DAG.getVTList(ValueVTs), Values));
4166 void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
4167 assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4168 "call visitStoreToSwiftError when backend supports swifterror");
4170 SmallVector<EVT, 4> ValueVTs;
4171 SmallVector<uint64_t, 4> Offsets;
4172 const Value *SrcV = I.getOperand(0);
4173 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4174 SrcV->getType(), ValueVTs, &Offsets);
4175 assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4176 "expect a single EVT for swifterror");
4178 SDValue Src = getValue(SrcV);
  // Create the swifterror virtual register, then copy the value into it.
4181 SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand());
4182 // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
4183 // Chain can be getRoot or getControlRoot.
4184 SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
4185 SDValue(Src.getNode(), Src.getResNo()));
4186 DAG.setRoot(CopyNode);
4189 void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
4190 assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4191 "call visitLoadFromSwiftError when backend supports swifterror");
4193 assert(!I.isVolatile() &&
4194 !I.hasMetadata(LLVMContext::MD_nontemporal) &&
4195 !I.hasMetadata(LLVMContext::MD_invariant_load) &&
         "Volatile, non-temporal and invariant loads are not supported for "
         "load_from_swift_error");
4198 const Value *SV = I.getOperand(0);
4199 Type *Ty = I.getType();
4201 I.getAAMetadata(AAInfo);
4204 !AA->pointsToConstantMemory(MemoryLocation(
4205 SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4207 "load_from_swift_error should not be constant memory");
4209 SmallVector<EVT, 4> ValueVTs;
4210 SmallVector<uint64_t, 4> Offsets;
4211 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
4212 ValueVTs, &Offsets);
4213 assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4214 "expect a single EVT for swifterror");
4216 // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
4217 SDValue L = DAG.getCopyFromReg(
4218 getRoot(), getCurSDLoc(),
4219 SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), ValueVTs[0]);
4224 void SelectionDAGBuilder::visitStore(const StoreInst &I) {
  if (I.isAtomic())
    return visitAtomicStore(I);
4228 const Value *SrcV = I.getOperand(0);
4229 const Value *PtrV = I.getOperand(1);
4231 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4232 if (TLI.supportSwiftError()) {
4233 // Swifterror values can come from either a function parameter with
4234 // swifterror attribute or an alloca with swifterror attribute.
4235 if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4236 if (Arg->hasSwiftErrorAttr())
4237 return visitStoreToSwiftError(I);
4240 if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4241 if (Alloca->isSwiftError())
4242 return visitStoreToSwiftError(I);
4246 SmallVector<EVT, 4> ValueVTs, MemVTs;
4247 SmallVector<uint64_t, 4> Offsets;
4248 ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4249 SrcV->getType(), ValueVTs, &MemVTs, &Offsets);
4250 unsigned NumValues = ValueVTs.size();
  // Get the lowered operands. Note that we do this after checking if
  // NumValues is zero, because with no values the operands won't be in
  // the value map.
4257 SDValue Src = getValue(SrcV);
4258 SDValue Ptr = getValue(PtrV);
4260 SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot();
4261 SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4262 SDLoc dl = getCurSDLoc();
4263 unsigned Alignment = I.getAlignment();
4265 I.getAAMetadata(AAInfo);
4267 auto MMOFlags = MachineMemOperand::MONone;
4269 MMOFlags |= MachineMemOperand::MOVolatile;
4270 if (I.hasMetadata(LLVMContext::MD_nontemporal))
4271 MMOFlags |= MachineMemOperand::MONonTemporal;
4272 MMOFlags |= TLI.getMMOFlags(I);
  // An aggregate store cannot wrap around the address space, so offsets to
  // its parts don't wrap either.
4277 Flags.setNoUnsignedWrap(true);
4279 unsigned ChainI = 0;
4280 for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4281 // See visitLoad comments.
4282 if (ChainI == MaxParallelChains) {
4283 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4284 makeArrayRef(Chains.data(), ChainI));
4288 SDValue Add = DAG.getMemBasePlusOffset(Ptr, Offsets[i], dl, Flags);
4289 SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i);
4290 if (MemVTs[i] != ValueVTs[i])
4291 Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4293 DAG.getStore(Root, dl, Val, Add, MachinePointerInfo(PtrV, Offsets[i]),
4294 Alignment, MMOFlags, AAInfo);
4295 Chains[ChainI] = St;
4298 SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4299 makeArrayRef(Chains.data(), ChainI));
4300 DAG.setRoot(StoreNode);
4303 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
4304 bool IsCompressing) {
4305 SDLoc sdl = getCurSDLoc();
4307 auto getMaskedStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
4308 unsigned& Alignment) {
4309 // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
4310 Src0 = I.getArgOperand(0);
4311 Ptr = I.getArgOperand(1);
4312 Alignment = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
4313 Mask = I.getArgOperand(3);
4315 auto getCompressingStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
4316 unsigned& Alignment) {
4317 // llvm.masked.compressstore.*(Src0, Ptr, Mask)
4318 Src0 = I.getArgOperand(0);
4319 Ptr = I.getArgOperand(1);
4320 Mask = I.getArgOperand(2);
4324 Value *PtrOperand, *MaskOperand, *Src0Operand;
4327 getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4329 getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4331 SDValue Ptr = getValue(PtrOperand);
4332 SDValue Src0 = getValue(Src0Operand);
4333 SDValue Mask = getValue(MaskOperand);
4334 SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4336 EVT VT = Src0.getValueType();
4338 Alignment = DAG.getEVTAlignment(VT);
4341 I.getAAMetadata(AAInfo);
4343 MachineMemOperand *MMO =
4344 DAG.getMachineFunction().
4345 getMachineMemOperand(MachinePointerInfo(PtrOperand),
4346 MachineMemOperand::MOStore,
4347 // TODO: Make MachineMemOperands aware of scalable
4349 VT.getStoreSize().getKnownMinSize(),
4352 DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO,
4353 ISD::UNINDEXED, false /* Truncating */, IsCompressing);
4354 DAG.setRoot(StoreNode);
4355 setValue(&I, StoreNode);
4358 // Get a uniform base for the Gather/Scatter intrinsic.
4359 // The first argument of the Gather/Scatter intrinsic is a vector of pointers.
4360 // We try to represent it as a base pointer + vector of indices.
4361 // Usually, the vector of pointers comes from a 'getelementptr' instruction.
// The first operand of the GEP may be a single pointer or a vector of
// pointers. Example:
4364 // %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
4366 // %gep.ptr = getelementptr i32, i32* %ptr, <8 x i32> %ind
4367 // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
// When the first GEP operand is a single pointer, it is the uniform base we
// are looking for. If the first operand of the GEP is a splat vector, we
// extract the splat value and use it as a uniform base.
4372 // In all other cases the function returns 'false'.
4373 static bool getUniformBase(const Value *&Ptr, SDValue &Base, SDValue &Index,
4374 ISD::MemIndexType &IndexType, SDValue &Scale,
4375 SelectionDAGBuilder *SDB) {
4376 SelectionDAG& DAG = SDB->DAG;
4377 LLVMContext &Context = *DAG.getContext();
  assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
4380 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4384 const Value *GEPPtr = GEP->getPointerOperand();
4385 if (!GEPPtr->getType()->isVectorTy())
4387 else if (!(Ptr = getSplatValue(GEPPtr)))
4390 unsigned FinalIndex = GEP->getNumOperands() - 1;
4391 Value *IndexVal = GEP->getOperand(FinalIndex);
4392 gep_type_iterator GTI = gep_type_begin(*GEP);
4394 // Ensure all the other indices are 0.
4395 for (unsigned i = 1; i < FinalIndex; ++i, ++GTI) {
4396 auto *C = dyn_cast<Constant>(GEP->getOperand(i));
4399 if (isa<VectorType>(C->getType()))
4400 C = C->getSplatValue();
4401 auto *CI = dyn_cast_or_null<ConstantInt>(C);
4402 if (!CI || !CI->isZero())
4406 // The operands of the GEP may be defined in another basic block.
4407 // In this case we'll not find nodes for the operands.
4408 if (!SDB->findValue(Ptr))
4410 Constant *C = dyn_cast<Constant>(IndexVal);
4411 if (!C && !SDB->findValue(IndexVal))
4414 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4415 const DataLayout &DL = DAG.getDataLayout();
4416 StructType *STy = GTI.getStructTypeOrNull();
4419 const StructLayout *SL = DL.getStructLayout(STy);
4420 if (isa<VectorType>(C->getType())) {
4421 C = C->getSplatValue();
        // FIXME: Can getSplatValue return nullptr for a structure?
        // If not, the following check can be removed.
4427 auto *CI = cast<ConstantInt>(C);
4428 Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4429 Index = DAG.getConstant(SL->getElementOffset(CI->getZExtValue()),
4430 SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4432 Scale = DAG.getTargetConstant(
4433 DL.getTypeAllocSize(GEP->getResultElementType()),
4434 SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4435 Index = SDB->getValue(IndexVal);
4437 Base = SDB->getValue(Ptr);
4438 IndexType = ISD::SIGNED_SCALED;
4440 if (STy || !Index.getValueType().isVector()) {
4441 unsigned GEPWidth = GEP->getType()->getVectorNumElements();
4442 EVT VT = EVT::getVectorVT(Context, Index.getValueType(), GEPWidth);
4443 Index = DAG.getSplatBuildVector(VT, SDLoc(Index), Index);
4448 void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
4449 SDLoc sdl = getCurSDLoc();
4451 // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
4452 const Value *Ptr = I.getArgOperand(1);
4453 SDValue Src0 = getValue(I.getArgOperand(0));
4454 SDValue Mask = getValue(I.getArgOperand(3));
4455 EVT VT = Src0.getValueType();
4456 unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
4458 Alignment = DAG.getEVTAlignment(VT);
4459 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4462 I.getAAMetadata(AAInfo);
4466 ISD::MemIndexType IndexType;
4468 const Value *BasePtr = Ptr;
4469 bool UniformBase = getUniformBase(BasePtr, Base, Index, IndexType, Scale,
4472 const Value *MemOpBasePtr = UniformBase ? BasePtr : nullptr;
4473 MachineMemOperand *MMO = DAG.getMachineFunction().
4474 getMachineMemOperand(MachinePointerInfo(MemOpBasePtr),
4475 MachineMemOperand::MOStore,
4476 // TODO: Make MachineMemOperands aware of scalable
4478 VT.getStoreSize().getKnownMinSize(),
4481 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4482 Index = getValue(Ptr);
4483 IndexType = ISD::SIGNED_SCALED;
4484 Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4486 SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale };
4487 SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
4488 Ops, MMO, IndexType);
4489 DAG.setRoot(Scatter);
4490 setValue(&I, Scatter);
4493 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
4494 SDLoc sdl = getCurSDLoc();
4496 auto getMaskedLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
4497 unsigned& Alignment) {
4498 // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
4499 Ptr = I.getArgOperand(0);
4500 Alignment = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
4501 Mask = I.getArgOperand(2);
4502 Src0 = I.getArgOperand(3);
4504 auto getExpandingLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
4505 unsigned& Alignment) {
4506 // @llvm.masked.expandload.*(Ptr, Mask, Src0)
4507 Ptr = I.getArgOperand(0);
4509 Mask = I.getArgOperand(1);
4510 Src0 = I.getArgOperand(2);
4513 Value *PtrOperand, *MaskOperand, *Src0Operand;
4516 getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4518 getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4520 SDValue Ptr = getValue(PtrOperand);
4521 SDValue Src0 = getValue(Src0Operand);
4522 SDValue Mask = getValue(MaskOperand);
4523 SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4525 EVT VT = Src0.getValueType();
4527 Alignment = DAG.getEVTAlignment(VT);
4530 I.getAAMetadata(AAInfo);
4531 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4533 // Do not serialize masked loads of constant memory with anything.
4535 if (VT.isScalableVector())
4536 ML = MemoryLocation(PtrOperand);
4538 ML = MemoryLocation(PtrOperand, LocationSize::precise(
4539 DAG.getDataLayout().getTypeStoreSize(I.getType())),
4541 bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
4543 SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
4545 MachineMemOperand *MMO =
4546 DAG.getMachineFunction().
4547 getMachineMemOperand(MachinePointerInfo(PtrOperand),
4548 MachineMemOperand::MOLoad,
4549 // TODO: Make MachineMemOperands aware of scalable
4551 VT.getStoreSize().getKnownMinSize(),
4552 Alignment, AAInfo, Ranges);
4555 DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
4556 ISD::UNINDEXED, ISD::NON_EXTLOAD, IsExpanding);
4558 PendingLoads.push_back(Load.getValue(1));
4562 void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
4563 SDLoc sdl = getCurSDLoc();
4565 // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
4566 const Value *Ptr = I.getArgOperand(0);
4567 SDValue Src0 = getValue(I.getArgOperand(3));
4568 SDValue Mask = getValue(I.getArgOperand(2));
4570 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4571 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4572 unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
4574 Alignment = DAG.getEVTAlignment(VT);
4577 I.getAAMetadata(AAInfo);
4578 const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);
4580 SDValue Root = DAG.getRoot();
4583 ISD::MemIndexType IndexType;
4585 const Value *BasePtr = Ptr;
4586 bool UniformBase = getUniformBase(BasePtr, Base, Index, IndexType, Scale,
4588 bool ConstantMemory = false;
4589 if (UniformBase && AA &&
4590 AA->pointsToConstantMemory(
4591 MemoryLocation(BasePtr,
4592 LocationSize::precise(
4593 DAG.getDataLayout().getTypeStoreSize(I.getType())),
4595 // Do not serialize (non-volatile) loads of constant memory with anything.
4596 Root = DAG.getEntryNode();
4597 ConstantMemory = true;
4600 MachineMemOperand *MMO =
4601 DAG.getMachineFunction().
4602 getMachineMemOperand(MachinePointerInfo(UniformBase ? BasePtr : nullptr),
4603 MachineMemOperand::MOLoad,
4604 // TODO: Make MachineMemOperands aware of scalable
4606 VT.getStoreSize().getKnownMinSize(),
4607 Alignment, AAInfo, Ranges);
4610 Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4611 Index = getValue(Ptr);
4612 IndexType = ISD::SIGNED_SCALED;
4613 Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4615 SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
4616 SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
4617 Ops, MMO, IndexType);
4619 SDValue OutChain = Gather.getValue(1);
4620 if (!ConstantMemory)
4621 PendingLoads.push_back(OutChain);
4622 setValue(&I, Gather);
4625 void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
4626 SDLoc dl = getCurSDLoc();
4627 AtomicOrdering SuccessOrdering = I.getSuccessOrdering();
4628 AtomicOrdering FailureOrdering = I.getFailureOrdering();
4629 SyncScope::ID SSID = I.getSyncScopeID();
4631 SDValue InChain = getRoot();
4633 MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
4634 SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
4636 auto Alignment = DAG.getEVTAlignment(MemVT);
4638 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
4640 Flags |= MachineMemOperand::MOVolatile;
4641 Flags |= DAG.getTargetLoweringInfo().getMMOFlags(I);
4643 MachineFunction &MF = DAG.getMachineFunction();
4644 MachineMemOperand *MMO =
4645 MF.getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
4646 Flags, MemVT.getStoreSize(), Alignment,
4647 AAMDNodes(), nullptr, SSID, SuccessOrdering,
4650 SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
4651 dl, MemVT, VTs, InChain,
4652 getValue(I.getPointerOperand()),
4653 getValue(I.getCompareOperand()),
4654 getValue(I.getNewValOperand()), MMO);
4656 SDValue OutChain = L.getValue(2);
4659 DAG.setRoot(OutChain);
4662 void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
4663 SDLoc dl = getCurSDLoc();
4665 switch (I.getOperation()) {
4666 default: llvm_unreachable("Unknown atomicrmw operation");
4667 case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
4668 case AtomicRMWInst::Add: NT = ISD::ATOMIC_LOAD_ADD; break;
4669 case AtomicRMWInst::Sub: NT = ISD::ATOMIC_LOAD_SUB; break;
4670 case AtomicRMWInst::And: NT = ISD::ATOMIC_LOAD_AND; break;
4671 case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
4672 case AtomicRMWInst::Or: NT = ISD::ATOMIC_LOAD_OR; break;
4673 case AtomicRMWInst::Xor: NT = ISD::ATOMIC_LOAD_XOR; break;
4674 case AtomicRMWInst::Max: NT = ISD::ATOMIC_LOAD_MAX; break;
4675 case AtomicRMWInst::Min: NT = ISD::ATOMIC_LOAD_MIN; break;
4676 case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
4677 case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
4678 case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
4679 case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
4681 AtomicOrdering Ordering = I.getOrdering();
4682 SyncScope::ID SSID = I.getSyncScopeID();
4684 SDValue InChain = getRoot();
4686 auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
4687 auto Alignment = DAG.getEVTAlignment(MemVT);
4689 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
4691 Flags |= MachineMemOperand::MOVolatile;
4692 Flags |= DAG.getTargetLoweringInfo().getMMOFlags(I);
4694 MachineFunction &MF = DAG.getMachineFunction();
4695 MachineMemOperand *MMO =
4696 MF.getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), Flags,
4697 MemVT.getStoreSize(), Alignment, AAMDNodes(),
4698 nullptr, SSID, Ordering);
4701 DAG.getAtomic(NT, dl, MemVT, InChain,
4702 getValue(I.getPointerOperand()), getValue(I.getValOperand()),
4705 SDValue OutChain = L.getValue(1);
4708 DAG.setRoot(OutChain);
4711 void SelectionDAGBuilder::visitFence(const FenceInst &I) {
4712 SDLoc dl = getCurSDLoc();
4713 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4716 Ops[1] = DAG.getTargetConstant((unsigned)I.getOrdering(), dl,
4717 TLI.getFenceOperandTy(DAG.getDataLayout()));
4718 Ops[2] = DAG.getTargetConstant(I.getSyncScopeID(), dl,
4719 TLI.getFenceOperandTy(DAG.getDataLayout()));
4720 DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
4723 void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
4724 SDLoc dl = getCurSDLoc();
4725 AtomicOrdering Order = I.getOrdering();
4726 SyncScope::ID SSID = I.getSyncScopeID();
4728 SDValue InChain = getRoot();
4730 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4731 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4732 EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
4734 if (!TLI.supportsUnalignedAtomics() &&
4735 I.getAlignment() < MemVT.getSizeInBits() / 8)
4736 report_fatal_error("Cannot generate unaligned atomic load");
4738 auto Flags = MachineMemOperand::MOLoad;
4740 Flags |= MachineMemOperand::MOVolatile;
4741 if (I.hasMetadata(LLVMContext::MD_invariant_load))
4742 Flags |= MachineMemOperand::MOInvariant;
4743 if (isDereferenceablePointer(I.getPointerOperand(), I.getType(),
4744 DAG.getDataLayout()))
4745 Flags |= MachineMemOperand::MODereferenceable;
4747 Flags |= TLI.getMMOFlags(I);
4749 MachineMemOperand *MMO =
4750 DAG.getMachineFunction().
4751 getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
4752 Flags, MemVT.getStoreSize(),
4753 I.getAlignment() ? I.getAlignment() :
4754 DAG.getEVTAlignment(MemVT),
4755 AAMDNodes(), nullptr, SSID, Order);
4757 InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
4759 SDValue Ptr = getValue(I.getPointerOperand());
4761 if (TLI.lowerAtomicLoadAsLoadSDNode(I)) {
4762 // TODO: Once this is better exercised by tests, it should be merged with
4763 // the normal path for loads to prevent future divergence.
4764 SDValue L = DAG.getLoad(MemVT, dl, InChain, Ptr, MMO);
4766 L = DAG.getPtrExtOrTrunc(L, dl, VT);
4769 SDValue OutChain = L.getValue(1);
4770 if (!I.isUnordered())
4771 DAG.setRoot(OutChain);
4773 PendingLoads.push_back(OutChain);
4777 SDValue L = DAG.getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, MemVT, InChain,
4780 SDValue OutChain = L.getValue(1);
4782 L = DAG.getPtrExtOrTrunc(L, dl, VT);
4785 DAG.setRoot(OutChain);
4788 void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
4789 SDLoc dl = getCurSDLoc();
4791 AtomicOrdering Ordering = I.getOrdering();
4792 SyncScope::ID SSID = I.getSyncScopeID();
4794 SDValue InChain = getRoot();
4796 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4798 TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
4800 if (I.getAlignment() < MemVT.getSizeInBits() / 8)
4801 report_fatal_error("Cannot generate unaligned atomic store");
4803 auto Flags = MachineMemOperand::MOStore;
4805 Flags |= MachineMemOperand::MOVolatile;
4806 Flags |= TLI.getMMOFlags(I);
4808 MachineFunction &MF = DAG.getMachineFunction();
4809 MachineMemOperand *MMO =
4810 MF.getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), Flags,
4811 MemVT.getStoreSize(), I.getAlignment(), AAMDNodes(),
4812 nullptr, SSID, Ordering);
4814 SDValue Val = getValue(I.getValueOperand());
4815 if (Val.getValueType() != MemVT)
4816 Val = DAG.getPtrExtOrTrunc(Val, dl, MemVT);
4817 SDValue Ptr = getValue(I.getPointerOperand());
4819 if (TLI.lowerAtomicStoreAsStoreSDNode(I)) {
4820 // TODO: Once this is better exercised by tests, it should be merged with
4821 // the normal path for stores to prevent future divergence.
4822 SDValue S = DAG.getStore(InChain, dl, Val, Ptr, MMO);
4826 SDValue OutChain = DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain,
4830 DAG.setRoot(OutChain);
/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
/// node.
4835 void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
4836 unsigned Intrinsic) {
  // Ignore the callsite's attributes. A specific call site may be marked with
  // readnone, but the lowering code will expect the chain based on the
  // definition.
4840 const Function *F = I.getCalledFunction();
4841 bool HasChain = !F->doesNotAccessMemory();
4842 bool OnlyLoad = HasChain && F->onlyReadsMemory();
4844 // Build the operand list.
4845 SmallVector<SDValue, 8> Ops;
4846 if (HasChain) { // If this intrinsic has side-effects, chainify it.
4848 // We don't need to serialize loads against other loads.
4849 Ops.push_back(DAG.getRoot());
4851 Ops.push_back(getRoot());
  // Info is set by getTgtMemIntrinsic.
4856 TargetLowering::IntrinsicInfo Info;
4857 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4858 bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
4859 DAG.getMachineFunction(),
4862 // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
4863 if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
4864 Info.opc == ISD::INTRINSIC_W_CHAIN)
4865 Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
4866 TLI.getPointerTy(DAG.getDataLayout())));
4868 // Add all operands of the call to the operand list.
4869 for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
4870 const Value *Arg = I.getArgOperand(i);
4871 if (!I.paramHasAttr(i, Attribute::ImmArg)) {
      Ops.push_back(getValue(Arg));
      continue;
    }
4876 // Use TargetConstant instead of a regular constant for immarg.
4877 EVT VT = TLI.getValueType(*DL, Arg->getType(), true);
4878 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
4879 assert(CI->getBitWidth() <= 64 &&
4880 "large intrinsic immediates not handled");
      Ops.push_back(DAG.getTargetConstant(*CI, SDLoc(), VT));
    } else {
      Ops.push_back(
          DAG.getTargetConstantFP(*cast<ConstantFP>(Arg), SDLoc(), VT));
    }
  }
4888 SmallVector<EVT, 4> ValueVTs;
4889 ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
  if (HasChain)
    ValueVTs.push_back(MVT::Other);
4894 SDVTList VTs = DAG.getVTList(ValueVTs);
  // Create the node.
  SDValue Result;
  if (IsTgtIntrinsic) {
    // This is a target intrinsic that touches memory
    AAMDNodes AAInfo;
    I.getAAMetadata(AAInfo);
4902 Result = DAG.getMemIntrinsicNode(
4903 Info.opc, getCurSDLoc(), VTs, Ops, Info.memVT,
4904 MachinePointerInfo(Info.ptrVal, Info.offset),
4905 Info.align ? Info.align->value() : 0, Info.flags, Info.size, AAInfo);
4906 } else if (!HasChain) {
4907 Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
4908 } else if (!I.getType()->isVoidTy()) {
4909 Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
  } else {
    Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
  }
  if (HasChain) {
    SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
    if (OnlyLoad)
      PendingLoads.push_back(Chain);
    else
      DAG.setRoot(Chain);
  }
4922 if (!I.getType()->isVoidTy()) {
4923 if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
4924 EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy);
      Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
    }

    Result = lowerRangeToAssertZExt(DAG, I, Result);
    setValue(&I, Result);
  }
}
4933 /// GetSignificand - Get the significand and build it into a floating-point
4934 /// number with exponent of 1:
4936 /// Op = (Op & 0x007fffff) | 0x3f800000;
/// where Op is the i32 bit pattern of the floating-point value.
4939 static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
4940 SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4941 DAG.getConstant(0x007fffff, dl, MVT::i32));
4942 SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
4943 DAG.getConstant(0x3f800000, dl, MVT::i32));
  return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
}
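
// Illustrative worked example (not part of the lowering): 3.14159265f has bit
// pattern 0x40490FDB, i.e. significand 1.5707963 times 2^1. Masking with
// 0x007fffff and OR-ing in 0x3f800000 yields 0x3FC90FDB == 1.5707964f, the
// significand rebuilt as a number in [1,2).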
4947 /// GetExponent - Get the exponent:
4949 /// (float)(int)(((Op & 0x7f800000) >> 23) - 127);
/// where Op is the i32 bit pattern of the floating-point value.
4952 static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
4953 const TargetLowering &TLI, const SDLoc &dl) {
4954 SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
4955 DAG.getConstant(0x7f800000, dl, MVT::i32));
4956 SDValue t1 = DAG.getNode(
4957 ISD::SRL, dl, MVT::i32, t0,
4958 DAG.getConstant(23, dl, TLI.getPointerTy(DAG.getDataLayout())));
4959 SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
4960 DAG.getConstant(127, dl, MVT::i32));
  return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
}
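
// Illustrative worked example (not part of the lowering): for 3.14159265f
// (bits 0x40490FDB), (0x40490FDB & 0x7f800000) >> 23 gives the biased
// exponent 128; subtracting the bias 127 and converting yields 1.0f, since
// 3.14159265 = 1.5707963 * 2^1.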
4964 /// getF32Constant - Get 32-bit floating point constant.
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
                              const SDLoc &dl) {
  return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
                           MVT::f32);
}
4971 static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
4972 SelectionDAG &DAG) {
4973 // TODO: What fast-math-flags should be set on the floating-point nodes?
  //   IntegerPartOfX = (int32_t)t0;
4976 SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
4978 // FractionalPartOfX = t0 - (float)IntegerPartOfX;
4979 SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
4980 SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
4982 // IntegerPartOfX <<= 23;
4983 IntegerPartOfX = DAG.getNode(
4984 ISD::SHL, dl, MVT::i32, IntegerPartOfX,
4985 DAG.getConstant(23, dl, DAG.getTargetLoweringInfo().getPointerTy(
4986 DAG.getDataLayout())));
4988 SDValue TwoToFractionalPartOfX;
4989 if (LimitFloatPrecision <= 6) {
4990 // For floating-point precision of 6:
    //   TwoToFractionalPartOfX =
    //     0.997535578f +
    //       (0.735607626f + 0.252464424f * x) * x;
4996 // error 0.0144103317, which is 6 bits
4997 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
4998 getF32Constant(DAG, 0x3e814304, dl));
4999 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5000 getF32Constant(DAG, 0x3f3c50c8, dl));
5001 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5002 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5003 getF32Constant(DAG, 0x3f7f5e7e, dl));
5004 } else if (LimitFloatPrecision <= 12) {
5005 // For floating-point precision of 12:
    //   TwoToFractionalPartOfX =
    //     0.999892986f +
    //       (0.696457318f +
    //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
5012 // error 0.000107046256, which is 13 to 14 bits
5013 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5014 getF32Constant(DAG, 0x3da235e3, dl));
5015 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5016 getF32Constant(DAG, 0x3e65b8f3, dl));
5017 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5018 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5019 getF32Constant(DAG, 0x3f324b07, dl));
5020 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5021 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5022 getF32Constant(DAG, 0x3f7ff8fd, dl));
5023 } else { // LimitFloatPrecision <= 18
5024 // For floating-point precision of 18:
    //   TwoToFractionalPartOfX =
    //     0.999999982f +
    //       (0.693148872f +
    //         (0.240227044f +
    //           (0.554906021e-1f +
    //             (0.961591928e-2f +
    //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
5033 // error 2.47208000*10^(-7), which is better than 18 bits
5034 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5035 getF32Constant(DAG, 0x3924b03e, dl));
5036 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5037 getF32Constant(DAG, 0x3ab24b87, dl));
5038 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5039 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5040 getF32Constant(DAG, 0x3c1d8c17, dl));
5041 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5042 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5043 getF32Constant(DAG, 0x3d634a1d, dl));
5044 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5045 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5046 getF32Constant(DAG, 0x3e75fe14, dl));
5047 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5048 SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
5049 getF32Constant(DAG, 0x3f317234, dl));
5050 SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
5051 TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
                                         getF32Constant(DAG, 0x3f800000, dl));
  }
5055 // Add the exponent into the result in integer domain.
5056 SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
5057 return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
                     DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
}
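
// A scalar sketch of the trick above (illustration only; poly() stands in
// for one of the minimax polynomials selected by LimitFloatPrecision, and
// memcpy is the usual bit-cast idiom from <cstring>):
//
//   float exp2_sketch(float t0) {
//     int32_t n = (int32_t)t0;          // IntegerPartOfX
//     float f = t0 - (float)n;          // FractionalPartOfX
//     float p = poly(f);                // approximates 2^f on [0,1)
//     int32_t bits;
//     memcpy(&bits, &p, sizeof(bits));
//     bits += n << 23;                  // scale by 2^n via the exponent field
//     memcpy(&p, &bits, sizeof(p));
//     return p;
//   }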
5061 /// expandExp - Lower an exp intrinsic. Handles the special sequences for
5062 /// limited-precision mode.
5063 static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5064 const TargetLowering &TLI) {
5065 if (Op.getValueType() == MVT::f32 &&
5066 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
    // Put the exponent in the right bit position for later addition to the
    // final result:
    //
    //   t0 = Op * log2(e)
5073 // TODO: What fast-math-flags should be set here?
5074 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
5075 DAG.getConstantFP(numbers::log2ef, dl, MVT::f32));
    return getLimitedPrecisionExp2(t0, dl, DAG);
  }
5079 // No special expansion.
  return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
}
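
// Numeric sanity check for the identity used above (illustration only):
// exp(x) == 2^(x * log2(e)); for x = 1, t0 = 1.4426950f and
// 2^1.4426950 = 2.7182818, which is e to within rounding.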
5083 /// expandLog - Lower a log intrinsic. Handles the special sequences for
5084 /// limited-precision mode.
5085 static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5086 const TargetLowering &TLI) {
5087 // TODO: What fast-math-flags should be set on the floating-point nodes?
5089 if (Op.getValueType() == MVT::f32 &&
5090 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5091 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5093 // Scale the exponent by log(2).
5094 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5095 SDValue LogOfExponent =
5096 DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5097 DAG.getConstantFP(numbers::ln2f, dl, MVT::f32));
    // Get the significand and build it into a floating-point number with
    // exponent of 1.
    SDValue X = GetSignificand(DAG, Op1, dl);
5103 SDValue LogOfMantissa;
5104 if (LimitFloatPrecision <= 6) {
      // For floating-point precision of 6:
      //
      //   LogofMantissa =
      //     -1.1609546f +
      //       (1.4034025f - 0.23903021f * x) * x;
5111 // error 0.0034276066, which is better than 8 bits
5112 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5113 getF32Constant(DAG, 0xbe74c456, dl));
5114 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5115 getF32Constant(DAG, 0x3fb3a2b1, dl));
5116 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5117 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5118 getF32Constant(DAG, 0x3f949a29, dl));
5119 } else if (LimitFloatPrecision <= 12) {
      // For floating-point precision of 12:
      //
      //   LogOfMantissa =
      //     -1.7417939f +
      //       (2.8212026f +
      //         (-1.4699568f +
      //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
5128 // error 0.000061011436, which is 14 bits
5129 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5130 getF32Constant(DAG, 0xbd67b6d6, dl));
5131 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5132 getF32Constant(DAG, 0x3ee4f4b8, dl));
5133 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5134 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5135 getF32Constant(DAG, 0x3fbc278b, dl));
5136 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5137 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5138 getF32Constant(DAG, 0x40348e95, dl));
5139 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5140 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5141 getF32Constant(DAG, 0x3fdef31a, dl));
5142 } else { // LimitFloatPrecision <= 18
      // For floating-point precision of 18:
      //
      //   LogOfMantissa =
      //     -2.1072184f +
      //       (4.2372794f +
      //         (-3.7029485f +
      //           (2.2781945f +
      //             (-0.87823314f +
      //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
5153 // error 0.0000023660568, which is better than 18 bits
5154 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5155 getF32Constant(DAG, 0xbc91e5ac, dl));
5156 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5157 getF32Constant(DAG, 0x3e4350aa, dl));
5158 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5159 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5160 getF32Constant(DAG, 0x3f60d3e3, dl));
5161 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5162 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5163 getF32Constant(DAG, 0x4011cdf0, dl));
5164 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5165 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5166 getF32Constant(DAG, 0x406cfd1c, dl));
5167 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5168 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5169 getF32Constant(DAG, 0x408797cb, dl));
5170 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5171 LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5172 getF32Constant(DAG, 0x4006dcab, dl));
    return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
  }
5178 // No special expansion.
  return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
}
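
// Illustrative worked example (not derived from the code): log(8.0f). The
// bits of 8.0f give exponent 3 and significand 1.0, so LogOfExponent =
// 3 * 0.6931472 = 2.0794415 and LogOfMantissa(1.0) ~= 0, matching ln(8) to
// within the stated polynomial error.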
5182 /// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
5183 /// limited-precision mode.
5184 static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5185 const TargetLowering &TLI) {
5186 // TODO: What fast-math-flags should be set on the floating-point nodes?
5188 if (Op.getValueType() == MVT::f32 &&
5189 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5190 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5192 // Get the exponent.
5193 SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
    // Get the significand and build it into a floating-point number with
    // exponent of 1.
    SDValue X = GetSignificand(DAG, Op1, dl);
5199 // Different possible minimax approximations of significand in
5200 // floating-point for various degrees of accuracy over [1,2].
5201 SDValue Log2ofMantissa;
5202 if (LimitFloatPrecision <= 6) {
5203 // For floating-point precision of 6:
5205 // Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
5207 // error 0.0049451742, which is more than 7 bits
5208 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5209 getF32Constant(DAG, 0xbeb08fe0, dl));
5210 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5211 getF32Constant(DAG, 0x40019463, dl));
5212 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5213 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5214 getF32Constant(DAG, 0x3fd6633d, dl));
5215 } else if (LimitFloatPrecision <= 12) {
      // For floating-point precision of 12:
      //
      //   Log2ofMantissa =
      //     -2.51285454f +
      //       (4.07009056f +
      //         (-2.12067489f +
      //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
5224 // error 0.0000876136000, which is better than 13 bits
5225 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5226 getF32Constant(DAG, 0xbda7262e, dl));
5227 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5228 getF32Constant(DAG, 0x3f25280b, dl));
5229 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5230 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5231 getF32Constant(DAG, 0x4007b923, dl));
5232 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5233 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5234 getF32Constant(DAG, 0x40823e2f, dl));
5235 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5236 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5237 getF32Constant(DAG, 0x4020d29c, dl));
5238 } else { // LimitFloatPrecision <= 18
      // For floating-point precision of 18:
      //
      //   Log2ofMantissa =
      //     -3.0400495f +
      //       (6.1129976f +
      //         (-5.3420409f +
      //           (3.2865683f +
      //             (-1.2669343f +
      //               (0.27515199f -
      //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
5250 // error 0.0000018516, which is better than 18 bits
5251 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5252 getF32Constant(DAG, 0xbcd2769e, dl));
5253 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5254 getF32Constant(DAG, 0x3e8ce0b9, dl));
5255 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5256 SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5257 getF32Constant(DAG, 0x3fa22ae7, dl));
5258 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5259 SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5260 getF32Constant(DAG, 0x40525723, dl));
5261 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5262 SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5263 getF32Constant(DAG, 0x40aaf200, dl));
5264 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5265 SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5266 getF32Constant(DAG, 0x40c39dad, dl));
5267 SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5268 Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5269 getF32Constant(DAG, 0x4042902c, dl));
    return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
  }
5275 // No special expansion.
  return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op);
}
5279 /// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
5280 /// limited-precision mode.
5281 static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5282 const TargetLowering &TLI) {
5283 // TODO: What fast-math-flags should be set on the floating-point nodes?
5285 if (Op.getValueType() == MVT::f32 &&
5286 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5287 SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5289 // Scale the exponent by log10(2) [0.30102999f].
5290 SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5291 SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5292 getF32Constant(DAG, 0x3e9a209a, dl));
    // Get the significand and build it into a floating-point number with
    // exponent of 1.
    SDValue X = GetSignificand(DAG, Op1, dl);
5298 SDValue Log10ofMantissa;
5299 if (LimitFloatPrecision <= 6) {
5300 // For floating-point precision of 6:
      //   Log10ofMantissa =
      //     -0.50419619f +
      //       (0.60948995f - 0.10380950f * x) * x;
5306 // error 0.0014886165, which is 6 bits
5307 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5308 getF32Constant(DAG, 0xbdd49a13, dl));
5309 SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5310 getF32Constant(DAG, 0x3f1c0789, dl));
5311 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5312 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5313 getF32Constant(DAG, 0x3f011300, dl));
5314 } else if (LimitFloatPrecision <= 12) {
5315 // For floating-point precision of 12:
      //   Log10ofMantissa =
      //     -0.64831180f +
      //       (0.91751397f +
      //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
5322 // error 0.00019228036, which is better than 12 bits
5323 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5324 getF32Constant(DAG, 0x3d431f31, dl));
5325 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5326 getF32Constant(DAG, 0x3ea21fb2, dl));
5327 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5328 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5329 getF32Constant(DAG, 0x3f6ae232, dl));
5330 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5331 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5332 getF32Constant(DAG, 0x3f25f7c3, dl));
5333 } else { // LimitFloatPrecision <= 18
5334 // For floating-point precision of 18:
      //   Log10ofMantissa =
      //     -0.84299375f +
      //       (1.5327582f +
      //         (-1.0688956f +
      //           (0.49102474f +
      //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
5343 // error 0.0000037995730, which is better than 18 bits
5344 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5345 getF32Constant(DAG, 0x3c5d51ce, dl));
5346 SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5347 getF32Constant(DAG, 0x3e00685a, dl));
5348 SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5349 SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5350 getF32Constant(DAG, 0x3efb6798, dl));
5351 SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5352 SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5353 getF32Constant(DAG, 0x3f88d192, dl));
5354 SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5355 SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5356 getF32Constant(DAG, 0x3fc4316c, dl));
5357 SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5358 Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
5359 getF32Constant(DAG, 0x3f57ce70, dl));
    return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
  }
5365 // No special expansion.
  return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
}
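
// Illustrative worked example (not derived from the code): log10(100.0f).
// 100.0 = 1.5625 * 2^6, so LogOfExponent = 6 * 0.30103 = 1.80618 and
// Log10ofMantissa(1.5625) ~= 0.19382; the sum is 2.0 as expected.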
5369 /// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
5370 /// limited-precision mode.
5371 static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5372 const TargetLowering &TLI) {
5373 if (Op.getValueType() == MVT::f32 &&
5374 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
5375 return getLimitedPrecisionExp2(Op, dl, DAG);
5377 // No special expansion.
  return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
}
/// expandPow - Lower a pow intrinsic. Handles the special sequences for
5382 /// limited-precision mode with x == 10.0f.
5383 static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
5384 SelectionDAG &DAG, const TargetLowering &TLI) {
5385 bool IsExp10 = false;
5386 if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
5387 LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5388 if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
      APFloat Ten(10.0f);
      IsExp10 = LHSC->isExactlyValue(Ten);
    }
  }

  // TODO: What fast-math-flags should be set on the FMUL node?
  if (IsExp10) {
    // Put the exponent in the right bit position for later addition to the
    // final result:
    //
    //   #define LOG2OF10 3.3219281f
5400 // t0 = Op * LOG2OF10;
5401 SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
5402 getF32Constant(DAG, 0x40549a78, dl));
    return getLimitedPrecisionExp2(t0, dl, DAG);
  }
5406 // No special expansion.
  return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
}
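
// Numeric sanity check (illustration only): pow(10.0f, 2.0f) becomes
// t0 = 2 * 3.3219281 = 6.6438562, and 2^6.6438562 = 64 * 2^0.6438562
// = 64 * 1.5625 = 100.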
5410 /// ExpandPowI - Expand a llvm.powi intrinsic.
5411 static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
5412 SelectionDAG &DAG) {
5413 // If RHS is a constant, we can expand this out to a multiplication tree,
5414 // otherwise we end up lowering to a call to __powidf2 (for example). When
5415 // optimizing for size, we only want to do this if the expansion would produce
5416 // a small number of multiplies, otherwise we do the full expansion.
5417 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
5418 // Get the exponent as a positive value.
5419 unsigned Val = RHSC->getSExtValue();
5420 if ((int)Val < 0) Val = -Val;
    // powi(x, 0) -> 1.0
    if (Val == 0)
      return DAG.getConstantFP(1.0, DL, LHS.getValueType());
5426 bool OptForSize = DAG.shouldOptForSize();
    if (!OptForSize ||
        // If optimizing for size, don't insert too many multiplies.
        // This inserts up to 5 multiplies.
        countPopulation(Val) + Log2_32(Val) < 7) {
5431 // We use the simple binary decomposition method to generate the multiply
5432 // sequence. There are more optimal ways to do this (for example,
5433 // powi(x,15) generates one more multiply than it should), but this has
5434 // the benefit of being both really simple and much better than a libcall.
5435 SDValue Res; // Logically starts equal to 1.0
5436 SDValue CurSquare = LHS;
      // TODO: Intrinsics should have fast-math-flags that propagate to these
      // nodes.
      while (Val) {
        if (Val & 1) {
          if (Res.getNode())
            Res = DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare);
          else
            Res = CurSquare;  // 1.0*CurSquare.
        }

        CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
                                CurSquare, CurSquare);
        Val >>= 1;
      }
5452 // If the original was negative, invert the result, producing 1/(x*x*x).
5453 if (RHSC->getSExtValue() < 0)
5454 Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
                          DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
      return Res;
    }
  }
5460 // Otherwise, expand to a libcall.
  return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
}
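
// Illustrative trace of the binary decomposition (not from the code): for
// powi(x, 13), 13 = 0b1101, so the loop forms squarings x^2, x^4, x^8 and
// accumulates Res = x, then x * x^4 = x^5, then x^5 * x^8 = x^13 -- five
// live multiplies. The size gate admits it since
// popcount(13) + log2(13) = 6 < 7.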
5464 static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
5465 SDValue LHS, SDValue RHS, SDValue Scale,
5466 SelectionDAG &DAG, const TargetLowering &TLI) {
5467 EVT VT = LHS.getValueType();
5468 bool Signed = Opcode == ISD::SDIVFIX;
5469 LLVMContext &Ctx = *DAG.getContext();
5471 // If the type is legal but the operation isn't, this node might survive all
5472 // the way to operation legalization. If we end up there and we do not have
// the ability to widen the type (if VT*2 is not legal), we cannot expand the
// division.
//
5476 // Coax the legalizer into expanding the node during type legalization instead
5477 // by bumping the size by one bit. This will force it to Promote, enabling the
5478 // early expansion and avoiding the need to expand later.
5480 // We don't have to do this if Scale is 0; that can always be expanded.
5482 // FIXME: We wouldn't have to do this (or any of the early
5483 // expansion/promotion) if it was possible to expand a libcall of an
  // illegal type during operation legalization. But it's not, so things
  // get a bit messy.
5486 unsigned ScaleInt = cast<ConstantSDNode>(Scale)->getZExtValue();
  if (ScaleInt > 0 &&
      (TLI.isTypeLegal(VT) ||
5489 (VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
5490 TargetLowering::LegalizeAction Action = TLI.getFixedPointOperationAction(
5491 Opcode, VT, ScaleInt);
5492 if (Action != TargetLowering::Legal && Action != TargetLowering::Custom) {
      EVT PromVT;
      if (VT.isScalarInteger())
5495 PromVT = EVT::getIntegerVT(Ctx, VT.getSizeInBits() + 1);
5496 else if (VT.isVector()) {
5497 PromVT = VT.getVectorElementType();
5498 PromVT = EVT::getIntegerVT(Ctx, PromVT.getSizeInBits() + 1);
5499 PromVT = EVT::getVectorVT(Ctx, PromVT, VT.getVectorElementCount());
      } else
        llvm_unreachable("Wrong VT for DIVFIX?");
      if (Signed) {
        LHS = DAG.getSExtOrTrunc(LHS, DL, PromVT);
        RHS = DAG.getSExtOrTrunc(RHS, DL, PromVT);
      } else {
        LHS = DAG.getZExtOrTrunc(LHS, DL, PromVT);
        RHS = DAG.getZExtOrTrunc(RHS, DL, PromVT);
      }
5509 // TODO: Saturation.
5510 SDValue Res = DAG.getNode(Opcode, DL, PromVT, LHS, RHS, Scale);
      return DAG.getZExtOrTrunc(Res, DL, VT);
    }
  }
  return DAG.getNode(Opcode, DL, VT, LHS, RHS, Scale);
}
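
// Illustrative example (not from the code): an i32 udiv.fix with Scale 31 on
// a target where i32 is legal but UDIVFIX is not would otherwise reach
// operation legalization unexpanded; widening to the illegal type i33 makes
// the type legalizer Promote it (typically to i64), where the scale shift
// fits and the early expansion succeeds.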
5518 // getUnderlyingArgRegs - Find underlying registers used for a truncated,
// bitcasted, or split argument. Returns a list of <Register, size in bits>
// pairs.
static void
getUnderlyingArgRegs(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
                     const SDValue &N) {
5523 switch (N.getOpcode()) {
5524 case ISD::CopyFromReg: {
5525 SDValue Op = N.getOperand(1);
5526 Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(),
                      Op.getValueType().getSizeInBits());
    return;
  }
  case ISD::BITCAST:
  case ISD::AssertZext:
  case ISD::AssertSext:
  case ISD::TRUNCATE:
    getUnderlyingArgRegs(Regs, N.getOperand(0));
    return;
5536 case ISD::BUILD_PAIR:
5537 case ISD::BUILD_VECTOR:
5538 case ISD::CONCAT_VECTORS:
5539 for (SDValue Op : N->op_values())
      getUnderlyingArgRegs(Regs, Op);
    return;
  default:
    return;
  }
}
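
// For example (illustration only): on a 32-bit target an i64 argument arrives
// as BUILD_PAIR(CopyFromReg %R0, CopyFromReg %R1), and this collects
// {(R0, 32), (R1, 32)}.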
5547 /// If the DbgValueInst is a dbg_value of a function argument, create the
5548 /// corresponding DBG_VALUE machine instruction for it now. At the end of
/// instruction selection, they will be inserted into the entry BB.
5550 bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
5551 const Value *V, DILocalVariable *Variable, DIExpression *Expr,
5552 DILocation *DL, bool IsDbgDeclare, const SDValue &N) {
  const Argument *Arg = dyn_cast<Argument>(V);
  if (!Arg)
    return false;
5557 if (!IsDbgDeclare) {
5558 // ArgDbgValues are hoisted to the beginning of the entry block. So we
    // should only emit as ArgDbgValue if the dbg.value intrinsic is found in
    // the entry block.
5561 bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
    if (!IsInEntryBlock)
      return false;
5565 // ArgDbgValues are hoisted to the beginning of the entry block. So we
5566 // should only emit as ArgDbgValue if the dbg.value intrinsic describes a
5567 // variable that also is a param.
5569 // Although, if we are at the top of the entry block already, we can still
5570 // emit using ArgDbgValue. This might catch some situations when the
5571 // dbg.value refers to an argument that isn't used in the entry block, so
5572 // any CopyToReg node would be optimized out and the only way to express
5573 // this DBG_VALUE is by using the physical reg (or FI) as done in this
5574 // method. ArgDbgValues are hoisted to the beginning of the entry block. So
5575 // we should only emit as ArgDbgValue if the Variable is an argument to the
    // current function, and the dbg.value intrinsic is found in the entry
    // block.
5578 bool VariableIsFunctionInputArg = Variable->isParameter() &&
5579 !DL->getInlinedAt();
5580 bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder;
5581 if (!IsInPrologue && !VariableIsFunctionInputArg)
5584 // Here we assume that a function argument on IR level only can be used to
5585 // describe one input parameter on source level. If we for example have
5586 // source code like this
5588 // struct A { long x, y; };
    //    void foo(struct A a, long b) {
    //      ...
    //    }
    //
    // and IR like this
    //
    //  define void @foo(i32 %a1, i32 %a2, i32 %b) {
    //  entry:
5599 // call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment
5600 // call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment
5601 // call void @llvm.dbg.value(metadata i32 %b, "b",
    //    ...
    //    call void @llvm.dbg.value(metadata i32 %a1, "b"
    //    ...
    //
5606 // then the last dbg.value is describing a parameter "b" using a value that
    // is an argument. But since we have already used %a1 to describe a parameter
5608 // we should not handle that last dbg.value here (that would result in an
5609 // incorrect hoisting of the DBG_VALUE to the function entry).
    // Notice that we allow one dbg.value per IR level argument, to accommodate
    // the situation with fragments above.
5612 if (VariableIsFunctionInputArg) {
5613 unsigned ArgNo = Arg->getArgNo();
5614 if (ArgNo >= FuncInfo.DescribedArgs.size())
5615 FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
      else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
        return false;
      FuncInfo.DescribedArgs.set(ArgNo);
    }
  }
5622 MachineFunction &MF = DAG.getMachineFunction();
5623 const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
5625 bool IsIndirect = false;
5626 Optional<MachineOperand> Op;
5627 // Some arguments' frame index is recorded during argument lowering.
5628 int FI = FuncInfo.getArgumentFrameIndex(Arg);
5629 if (FI != std::numeric_limits<int>::max())
5630 Op = MachineOperand::CreateFI(FI);
5632 SmallVector<std::pair<unsigned, unsigned>, 8> ArgRegsAndSizes;
5633 if (!Op && N.getNode()) {
5634 getUnderlyingArgRegs(ArgRegsAndSizes, N);
    Register Reg;
    if (ArgRegsAndSizes.size() == 1)
5637 Reg = ArgRegsAndSizes.front().first;
5639 if (Reg && Reg.isVirtual()) {
5640 MachineRegisterInfo &RegInfo = MF.getRegInfo();
      Register PR = RegInfo.getLiveInPhysReg(Reg);
      if (PR)
        Reg = PR;
    }
    if (Reg) {
      Op = MachineOperand::CreateReg(Reg, false);
      IsIndirect = IsDbgDeclare;
    }
  }
5651 if (!Op && N.getNode()) {
5652 // Check if frame index is available.
5653 SDValue LCandidate = peekThroughBitcasts(N);
5654 if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(LCandidate.getNode()))
5655 if (FrameIndexSDNode *FINode =
5656 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
          Op = MachineOperand::CreateFI(FINode->getIndex());
  }

  if (!Op) {
    // Create a DBG_VALUE for each decomposed value in ArgRegs to cover Reg
5662 auto splitMultiRegDbgValue
5663 = [&](ArrayRef<std::pair<unsigned, unsigned>> SplitRegs) {
5664 unsigned Offset = 0;
5665 for (auto RegAndSize : SplitRegs) {
5666 // If the expression is already a fragment, the current register
5667 // offset+size might extend beyond the fragment. In this case, only
5668 // the register bits that are inside the fragment are relevant.
5669 int RegFragmentSizeInBits = RegAndSize.second;
5670 if (auto ExprFragmentInfo = Expr->getFragmentInfo()) {
5671 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
5672 // The register is entirely outside the expression fragment,
5673 // so is irrelevant for debug info.
          if (Offset >= ExprFragmentSizeInBits)
            break;
5676 // The register is partially outside the expression fragment, only
5677 // the low bits within the fragment are relevant for debug info.
          if (Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
            RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset;
          }
        }
5683 auto FragmentExpr = DIExpression::createFragmentExpression(
5684 Expr, Offset, RegFragmentSizeInBits);
5685 Offset += RegAndSize.second;
5686 // If a valid fragment expression cannot be created, the variable's
5687 // correct value cannot be determined and so it is set as Undef.
5688 if (!FragmentExpr) {
5689 SDDbgValue *SDV = DAG.getConstantDbgValue(
5690 Variable, Expr, UndefValue::get(V->getType()), DL, SDNodeOrder);
          DAG.AddDbgValue(SDV, nullptr, false);
          continue;
        }
5694 assert(!IsDbgDeclare && "DbgDeclare operand is not in memory?");
5695 FuncInfo.ArgDbgValues.push_back(
5696 BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsDbgDeclare,
                    RegAndSize.first, Variable, *FragmentExpr));
      }
    };
5701 // Check if ValueMap has reg number.
5702 DenseMap<const Value *, unsigned>::const_iterator
5703 VMI = FuncInfo.ValueMap.find(V);
5704 if (VMI != FuncInfo.ValueMap.end()) {
5705 const auto &TLI = DAG.getTargetLoweringInfo();
5706 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
5707 V->getType(), getABIRegCopyCC(V));
5708 if (RFV.occupiesMultipleRegs()) {
        splitMultiRegDbgValue(RFV.getRegsAndSizes());
        return true;
      }
5713 Op = MachineOperand::CreateReg(VMI->second, false);
5714 IsIndirect = IsDbgDeclare;
5715 } else if (ArgRegsAndSizes.size() > 1) {
5716 // This was split due to the calling convention, and no virtual register
5717 // mapping exists for the value.
      splitMultiRegDbgValue(ArgRegsAndSizes);
      return true;
    }
  }

  if (!Op)
    return false;
5726 assert(Variable->isValidLocationForIntrinsic(DL) &&
5727 "Expected inlined-at fields to agree");
5728 IsIndirect = (Op->isReg()) ? IsIndirect : true;
5729 FuncInfo.ArgDbgValues.push_back(
5730 BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
              *Op, Variable, Expr));

  return true;
}
5736 /// Return the appropriate SDDbgValue based on N.
5737 SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
                                             DILocalVariable *Variable,
                                             DIExpression *Expr,
                                             const DebugLoc &dl,
                                             unsigned DbgSDNodeOrder) {
5742 if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
5743 // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
5744 // stack slot locations.
5746 // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
5747 // debug values here after optimization:
5749 // dbg.value(i32* %px, !"int *px", !DIExpression()), and
5750 // dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
5752 // Both describe the direct values of their associated variables.
5753 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
                                     /*IsIndirect*/ false, dl, DbgSDNodeOrder);
  }
  return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
                         /*IsIndirect*/ false, dl, DbgSDNodeOrder);
}
5760 static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) {
5761 switch (Intrinsic) {
5762 case Intrinsic::smul_fix:
5763 return ISD::SMULFIX;
5764 case Intrinsic::umul_fix:
5765 return ISD::UMULFIX;
5766 case Intrinsic::smul_fix_sat:
5767 return ISD::SMULFIXSAT;
5768 case Intrinsic::umul_fix_sat:
5769 return ISD::UMULFIXSAT;
5770 case Intrinsic::sdiv_fix:
5771 return ISD::SDIVFIX;
5772 case Intrinsic::udiv_fix:
5773 return ISD::UDIVFIX;
  }
  llvm_unreachable("Unhandled fixed point intrinsic");
}
5779 void SelectionDAGBuilder::lowerCallToExternalSymbol(const CallInst &I,
5780 const char *FunctionName) {
5781 assert(FunctionName && "FunctionName must not be nullptr");
5782 SDValue Callee = DAG.getExternalSymbol(
      FunctionName,
      DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
  LowerCallTo(&I, Callee, I.isTailCall());
}
5788 /// Lower the call to the specified intrinsic function.
5789 void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
5790 unsigned Intrinsic) {
5791 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5792 SDLoc sdl = getCurSDLoc();
5793 DebugLoc dl = getCurDebugLoc();
  SDValue Res;
  switch (Intrinsic) {
  default:
    // By default, turn this into a target intrinsic node.
    visitTargetIntrinsic(I, Intrinsic);
    return;
5801 case Intrinsic::vastart: visitVAStart(I); return;
5802 case Intrinsic::vaend: visitVAEnd(I); return;
5803 case Intrinsic::vacopy: visitVACopy(I); return;
5804 case Intrinsic::returnaddress:
5805 setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
5806 TLI.getPointerTy(DAG.getDataLayout()),
                             getValue(I.getArgOperand(0))));
    return;
5809 case Intrinsic::addressofreturnaddress:
5810 setValue(&I, DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
                             TLI.getPointerTy(DAG.getDataLayout())));
    return;
5813 case Intrinsic::sponentry:
5814 setValue(&I, DAG.getNode(ISD::SPONENTRY, sdl,
                             TLI.getFrameIndexTy(DAG.getDataLayout())));
    return;
5817 case Intrinsic::frameaddress:
5818 setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
5819 TLI.getFrameIndexTy(DAG.getDataLayout()),
                             getValue(I.getArgOperand(0))));
    return;
5822 case Intrinsic::read_register: {
5823 Value *Reg = I.getArgOperand(0);
5824 SDValue Chain = getRoot();
    SDValue RegName =
        DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5827 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
5828 Res = DAG.getNode(ISD::READ_REGISTER, sdl,
5829 DAG.getVTList(VT, MVT::Other), Chain, RegName);
    setValue(&I, Res);
    DAG.setRoot(Res.getValue(1));
    return;
  }
5834 case Intrinsic::write_register: {
5835 Value *Reg = I.getArgOperand(0);
5836 Value *RegValue = I.getArgOperand(1);
5837 SDValue Chain = getRoot();
    SDValue RegName =
        DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
5840 DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
                            RegName, getValue(RegValue)));
    return;
  }
5844 case Intrinsic::memcpy: {
5845 const auto &MCI = cast<MemCpyInst>(I);
5846 SDValue Op1 = getValue(I.getArgOperand(0));
5847 SDValue Op2 = getValue(I.getArgOperand(1));
5848 SDValue Op3 = getValue(I.getArgOperand(2));
5849 // @llvm.memcpy defines 0 and 1 to both mean no alignment.
5850 unsigned DstAlign = std::max<unsigned>(MCI.getDestAlignment(), 1);
5851 unsigned SrcAlign = std::max<unsigned>(MCI.getSourceAlignment(), 1);
5852 unsigned Align = MinAlign(DstAlign, SrcAlign);
5853 bool isVol = MCI.isVolatile();
5854 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
    // FIXME: Support passing different dest/src alignments to the memcpy DAG
    // node.
5857 SDValue Root = isVol ? getRoot() : getMemoryRoot();
    SDValue MC = DAG.getMemcpy(Root, sdl, Op1, Op2, Op3, Align, isVol,
                               false /* AlwaysInline */, isTC,
                               MachinePointerInfo(I.getArgOperand(0)),
5861 MachinePointerInfo(I.getArgOperand(1)));
    updateDAGForMaybeTailCall(MC);
    return;
  }
5865 case Intrinsic::memset: {
5866 const auto &MSI = cast<MemSetInst>(I);
5867 SDValue Op1 = getValue(I.getArgOperand(0));
5868 SDValue Op2 = getValue(I.getArgOperand(1));
5869 SDValue Op3 = getValue(I.getArgOperand(2));
5870 // @llvm.memset defines 0 and 1 to both mean no alignment.
5871 unsigned Align = std::max<unsigned>(MSI.getDestAlignment(), 1);
5872 bool isVol = MSI.isVolatile();
5873 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5874 SDValue Root = isVol ? getRoot() : getMemoryRoot();
5875 SDValue MS = DAG.getMemset(Root, sdl, Op1, Op2, Op3, Align, isVol,
5876 isTC, MachinePointerInfo(I.getArgOperand(0)));
    updateDAGForMaybeTailCall(MS);
    return;
  }
5880 case Intrinsic::memmove: {
5881 const auto &MMI = cast<MemMoveInst>(I);
5882 SDValue Op1 = getValue(I.getArgOperand(0));
5883 SDValue Op2 = getValue(I.getArgOperand(1));
5884 SDValue Op3 = getValue(I.getArgOperand(2));
5885 // @llvm.memmove defines 0 and 1 to both mean no alignment.
5886 unsigned DstAlign = std::max<unsigned>(MMI.getDestAlignment(), 1);
5887 unsigned SrcAlign = std::max<unsigned>(MMI.getSourceAlignment(), 1);
5888 unsigned Align = MinAlign(DstAlign, SrcAlign);
5889 bool isVol = MMI.isVolatile();
5890 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
    // FIXME: Support passing different dest/src alignments to the memmove DAG
    // node.
5893 SDValue Root = isVol ? getRoot() : getMemoryRoot();
5894 SDValue MM = DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Align, isVol,
5895 isTC, MachinePointerInfo(I.getArgOperand(0)),
5896 MachinePointerInfo(I.getArgOperand(1)));
    updateDAGForMaybeTailCall(MM);
    return;
  }
5900 case Intrinsic::memcpy_element_unordered_atomic: {
5901 const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
5902 SDValue Dst = getValue(MI.getRawDest());
5903 SDValue Src = getValue(MI.getRawSource());
5904 SDValue Length = getValue(MI.getLength());
5906 unsigned DstAlign = MI.getDestAlignment();
5907 unsigned SrcAlign = MI.getSourceAlignment();
5908 Type *LengthTy = MI.getLength()->getType();
5909 unsigned ElemSz = MI.getElementSizeInBytes();
5910 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5911 SDValue MC = DAG.getAtomicMemcpy(getRoot(), sdl, Dst, DstAlign, Src,
5912 SrcAlign, Length, LengthTy, ElemSz, isTC,
5913 MachinePointerInfo(MI.getRawDest()),
5914 MachinePointerInfo(MI.getRawSource()));
    updateDAGForMaybeTailCall(MC);
    return;
  }
5918 case Intrinsic::memmove_element_unordered_atomic: {
5919 auto &MI = cast<AtomicMemMoveInst>(I);
5920 SDValue Dst = getValue(MI.getRawDest());
5921 SDValue Src = getValue(MI.getRawSource());
5922 SDValue Length = getValue(MI.getLength());
5924 unsigned DstAlign = MI.getDestAlignment();
5925 unsigned SrcAlign = MI.getSourceAlignment();
5926 Type *LengthTy = MI.getLength()->getType();
5927 unsigned ElemSz = MI.getElementSizeInBytes();
5928 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5929 SDValue MC = DAG.getAtomicMemmove(getRoot(), sdl, Dst, DstAlign, Src,
5930 SrcAlign, Length, LengthTy, ElemSz, isTC,
5931 MachinePointerInfo(MI.getRawDest()),
5932 MachinePointerInfo(MI.getRawSource()));
    updateDAGForMaybeTailCall(MC);
    return;
  }
5936 case Intrinsic::memset_element_unordered_atomic: {
5937 auto &MI = cast<AtomicMemSetInst>(I);
5938 SDValue Dst = getValue(MI.getRawDest());
5939 SDValue Val = getValue(MI.getValue());
5940 SDValue Length = getValue(MI.getLength());
5942 unsigned DstAlign = MI.getDestAlignment();
5943 Type *LengthTy = MI.getLength()->getType();
5944 unsigned ElemSz = MI.getElementSizeInBytes();
5945 bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
5946 SDValue MC = DAG.getAtomicMemset(getRoot(), sdl, Dst, DstAlign, Val, Length,
5947 LengthTy, ElemSz, isTC,
5948 MachinePointerInfo(MI.getRawDest()));
    updateDAGForMaybeTailCall(MC);
    return;
  }
5952 case Intrinsic::dbg_addr:
5953 case Intrinsic::dbg_declare: {
5954 const auto &DI = cast<DbgVariableIntrinsic>(I);
5955 DILocalVariable *Variable = DI.getVariable();
5956 DIExpression *Expression = DI.getExpression();
5957 dropDanglingDebugInfo(Variable, Expression);
5958 assert(Variable && "Missing variable");
5960 // Check if address has undef value.
5961 const Value *Address = DI.getVariableLocation();
5962 if (!Address || isa<UndefValue>(Address) ||
5963 (Address->use_empty() && !isa<Argument>(Address))) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return;
    }
5968 bool isParameter = Variable->isParameter() || isa<Argument>(Address);
5970 // Check if this variable can be described by a frame index, typically
5971 // either as a static alloca or a byval parameter.
5972 int FI = std::numeric_limits<int>::max();
5973 if (const auto *AI =
5974 dyn_cast<AllocaInst>(Address->stripInBoundsConstantOffsets())) {
5975 if (AI->isStaticAlloca()) {
5976 auto I = FuncInfo.StaticAllocaMap.find(AI);
        if (I != FuncInfo.StaticAllocaMap.end())
          FI = I->second;
      }
5980 } else if (const auto *Arg = dyn_cast<Argument>(
5981 Address->stripInBoundsConstantOffsets())) {
      FI = FuncInfo.getArgumentFrameIndex(Arg);
    }
5985 // llvm.dbg.addr is control dependent and always generates indirect
5986 // DBG_VALUE instructions. llvm.dbg.declare is handled as a frame index in
5987 // the MachineFunction variable table.
5988 if (FI != std::numeric_limits<int>::max()) {
5989 if (Intrinsic == Intrinsic::dbg_addr) {
5990 SDDbgValue *SDV = DAG.getFrameIndexDbgValue(
5991 Variable, Expression, FI, /*IsIndirect*/ true, dl, SDNodeOrder);
        DAG.AddDbgValue(SDV, getRoot().getNode(), isParameter);
      }
      return;
    }
5997 SDValue &N = NodeMap[Address];
5998 if (!N.getNode() && isa<Argument>(Address))
5999 // Check unused arguments map.
      N = UnusedArgNodeMap[Address];
    SDDbgValue *SDV;
    if (N.getNode()) {
6003 if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
6004 Address = BCI->getOperand(0);
6005 // Parameters are handled specially.
6006 auto FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
6007 if (isParameter && FINode) {
6008 // Byval parameter. We have a frame index at this point.
        SDV =
            DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
                                      /*IsIndirect*/ true, dl, SDNodeOrder);
6012 } else if (isa<Argument>(Address)) {
6013 // Address is an argument, so try to emit its dbg value using
6014 // virtual register info from the FuncInfo.ValueMap.
        EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true, N);
        return;
      } else {
6018 SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
                              true, dl, SDNodeOrder);
      }
      DAG.AddDbgValue(SDV, N.getNode(), isParameter);
    } else {
6023 // If Address is an argument then try to emit its dbg value using
6024 // virtual register info from the FuncInfo.ValueMap.
      if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true,
                                    N)) {
        LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      }
    }
    return;
  }
6032 case Intrinsic::dbg_label: {
6033 const DbgLabelInst &DI = cast<DbgLabelInst>(I);
6034 DILabel *Label = DI.getLabel();
6035 assert(Label && "Missing label");
    SDDbgLabel *SDV;
    SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder);
    DAG.AddDbgLabel(SDV);
    return;
  }
6042 case Intrinsic::dbg_value: {
6043 const DbgValueInst &DI = cast<DbgValueInst>(I);
6044 assert(DI.getVariable() && "Missing variable");
6046 DILocalVariable *Variable = DI.getVariable();
6047 DIExpression *Expression = DI.getExpression();
6048 dropDanglingDebugInfo(Variable, Expression);
    const Value *V = DI.getValue();
    if (!V)
      return;

    if (handleDebugValue(V, Variable, Expression, dl, DI.getDebugLoc(),
                         SDNodeOrder))
      return;
6057 // TODO: Dangling debug info will eventually either be resolved or produce
6058 // an Undef DBG_VALUE. However in the resolution case, a gap may appear
6059 // between the original dbg.value location and its resolved DBG_VALUE, which
6060 // we should ideally fill with an extra Undef DBG_VALUE.
    DanglingDebugInfoMap[V].emplace_back(&DI, dl, SDNodeOrder);
    return;
  }
6066 case Intrinsic::eh_typeid_for: {
6067 // Find the type id for the given typeinfo.
6068 GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
6069 unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
    Res = DAG.getConstant(TypeID, sdl, MVT::i32);
    setValue(&I, Res);
    return;
  }
6075 case Intrinsic::eh_return_i32:
6076 case Intrinsic::eh_return_i64:
6077 DAG.getMachineFunction().setCallsEHReturn(true);
    DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
                            MVT::Other,
                            getControlRoot(),
                            getValue(I.getArgOperand(0)),
                            getValue(I.getArgOperand(1))));
    return;
6084 case Intrinsic::eh_unwind_init:
    DAG.getMachineFunction().setCallsUnwindInit(true);
    return;
6087 case Intrinsic::eh_dwarf_cfa:
6088 setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
6089 TLI.getPointerTy(DAG.getDataLayout()),
                             getValue(I.getArgOperand(0))));
    return;
6092 case Intrinsic::eh_sjlj_callsite: {
6093 MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
6094 ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0));
6095 assert(CI && "Non-constant call site value in eh.sjlj.callsite!");
6096 assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
    MMI.setCurrentCallSite(CI->getZExtValue());
    return;
  }
6101 case Intrinsic::eh_sjlj_functioncontext: {
6102 // Get and store the index of the function context.
6103 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    AllocaInst *FnCtx =
        cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
6106 int FI = FuncInfo.StaticAllocaMap[FnCtx];
    MFI.setFunctionContextIndex(FI);
    return;
  }
6110 case Intrinsic::eh_sjlj_setjmp: {
    SDValue Ops[2];
    Ops[0] = getRoot();
    Ops[1] = getValue(I.getArgOperand(0));
6114 SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
6115 DAG.getVTList(MVT::i32, MVT::Other), Ops);
6116 setValue(&I, Op.getValue(0));
    DAG.setRoot(Op.getValue(1));
    return;
  }
6120 case Intrinsic::eh_sjlj_longjmp:
6121 DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
                            getRoot(), getValue(I.getArgOperand(0))));
    return;
6124 case Intrinsic::eh_sjlj_setup_dispatch:
    DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
                            getRoot()));
    return;
  case Intrinsic::masked_gather:
    visitMaskedGather(I);
    return;
  case Intrinsic::masked_load:
    visitMaskedLoad(I);
    return;
  case Intrinsic::masked_scatter:
    visitMaskedScatter(I);
    return;
  case Intrinsic::masked_store:
    visitMaskedStore(I);
    return;
  case Intrinsic::masked_expandload:
    visitMaskedLoad(I, true /* IsExpanding */);
    return;
  case Intrinsic::masked_compressstore:
    visitMaskedStore(I, true /* IsCompressing */);
    return;
  case Intrinsic::powi:
    setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
                            getValue(I.getArgOperand(1)), DAG));
    return;
  case Intrinsic::log:
    setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
    return;
  case Intrinsic::log2:
    setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
    return;
  case Intrinsic::log10:
    setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
    return;
  case Intrinsic::exp:
    setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
    return;
  case Intrinsic::exp2:
    setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI));
    return;
  case Intrinsic::pow:
    setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
                           getValue(I.getArgOperand(1)), DAG, TLI));
    return;
6169 case Intrinsic::sqrt:
6170 case Intrinsic::fabs:
6171 case Intrinsic::sin:
6172 case Intrinsic::cos:
6173 case Intrinsic::floor:
6174 case Intrinsic::ceil:
6175 case Intrinsic::trunc:
6176 case Intrinsic::rint:
6177 case Intrinsic::nearbyint:
6178 case Intrinsic::round:
  case Intrinsic::canonicalize: {
    unsigned Opcode;
    switch (Intrinsic) {
6182 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
6183 case Intrinsic::sqrt: Opcode = ISD::FSQRT; break;
6184 case Intrinsic::fabs: Opcode = ISD::FABS; break;
6185 case Intrinsic::sin: Opcode = ISD::FSIN; break;
6186 case Intrinsic::cos: Opcode = ISD::FCOS; break;
6187 case Intrinsic::floor: Opcode = ISD::FFLOOR; break;
6188 case Intrinsic::ceil: Opcode = ISD::FCEIL; break;
6189 case Intrinsic::trunc: Opcode = ISD::FTRUNC; break;
6190 case Intrinsic::rint: Opcode = ISD::FRINT; break;
6191 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
6192 case Intrinsic::round: Opcode = ISD::FROUND; break;
6193 case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
    }

    setValue(&I, DAG.getNode(Opcode, sdl,
                             getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0))));
    return;
  }
6201 case Intrinsic::lround:
6202 case Intrinsic::llround:
6203 case Intrinsic::lrint:
  case Intrinsic::llrint: {
    unsigned Opcode;
    switch (Intrinsic) {
6207 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
6208 case Intrinsic::lround: Opcode = ISD::LROUND; break;
6209 case Intrinsic::llround: Opcode = ISD::LLROUND; break;
6210 case Intrinsic::lrint: Opcode = ISD::LRINT; break;
6211 case Intrinsic::llrint: Opcode = ISD::LLRINT; break;
    }

    EVT RetVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
    setValue(&I, DAG.getNode(Opcode, sdl, RetVT,
                             getValue(I.getArgOperand(0))));
    return;
  }
6219 case Intrinsic::minnum:
6220 setValue(&I, DAG.getNode(ISD::FMINNUM, sdl,
6221 getValue(I.getArgOperand(0)).getValueType(),
6222 getValue(I.getArgOperand(0)),
                             getValue(I.getArgOperand(1))));
    return;
6225 case Intrinsic::maxnum:
6226 setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl,
6227 getValue(I.getArgOperand(0)).getValueType(),
6228 getValue(I.getArgOperand(0)),
                             getValue(I.getArgOperand(1))));
    return;
6231 case Intrinsic::minimum:
6232 setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl,
6233 getValue(I.getArgOperand(0)).getValueType(),
6234 getValue(I.getArgOperand(0)),
                             getValue(I.getArgOperand(1))));
    return;
6237 case Intrinsic::maximum:
6238 setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl,
6239 getValue(I.getArgOperand(0)).getValueType(),
6240 getValue(I.getArgOperand(0)),
                             getValue(I.getArgOperand(1))));
    return;
6243 case Intrinsic::copysign:
6244 setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
6245 getValue(I.getArgOperand(0)).getValueType(),
6246 getValue(I.getArgOperand(0)),
                             getValue(I.getArgOperand(1))));
    return;
6249 case Intrinsic::fma:
6250 setValue(&I, DAG.getNode(ISD::FMA, sdl,
6251 getValue(I.getArgOperand(0)).getValueType(),
6252 getValue(I.getArgOperand(0)),
6253 getValue(I.getArgOperand(1)),
                             getValue(I.getArgOperand(2))));
    return;
6256 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
6257 case Intrinsic::INTRINSIC:
6258 #include "llvm/IR/ConstrainedOps.def"
    visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
    return;
6261 case Intrinsic::fmuladd: {
6262 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6263 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
6264 TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
6265 setValue(&I, DAG.getNode(ISD::FMA, sdl,
6266 getValue(I.getArgOperand(0)).getValueType(),
6267 getValue(I.getArgOperand(0)),
6268 getValue(I.getArgOperand(1)),
                               getValue(I.getArgOperand(2))));
    } else {
      // TODO: Intrinsic calls should have fast-math-flags.
6272 SDValue Mul = DAG.getNode(ISD::FMUL, sdl,
6273 getValue(I.getArgOperand(0)).getValueType(),
6274 getValue(I.getArgOperand(0)),
6275 getValue(I.getArgOperand(1)));
6276 SDValue Add = DAG.getNode(ISD::FADD, sdl,
6277 getValue(I.getArgOperand(0)).getValueType(),
                                Mul,
                                getValue(I.getArgOperand(2)));
      setValue(&I, Add);
    }
    return;
  }
6284 case Intrinsic::convert_to_fp16:
6285 setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
6286 DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
6287 getValue(I.getArgOperand(0)),
                                         DAG.getTargetConstant(0, sdl,
                                                               MVT::i32))));
    return;
6291 case Intrinsic::convert_from_fp16:
6292 setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
6293 TLI.getValueType(DAG.getDataLayout(), I.getType()),
6294 DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
                                         getValue(I.getArgOperand(0)))));
    return;
6297 case Intrinsic::pcmarker: {
6298 SDValue Tmp = getValue(I.getArgOperand(0));
    DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
    return;
  }
6302 case Intrinsic::readcyclecounter: {
6303 SDValue Op = getRoot();
6304 Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
6305 DAG.getVTList(MVT::i64, MVT::Other), Op);
    setValue(&I, Res);
    DAG.setRoot(Res.getValue(1));
    return;
  }
6310 case Intrinsic::bitreverse:
6311 setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
6312 getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0))));
    return;
6315 case Intrinsic::bswap:
6316 setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
6317 getValue(I.getArgOperand(0)).getValueType(),
                             getValue(I.getArgOperand(0))));
    return;
6320 case Intrinsic::cttz: {
6321 SDValue Arg = getValue(I.getArgOperand(0));
6322 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6323 EVT Ty = Arg.getValueType();
    setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
                             sdl, Ty, Arg));
    return;
  }
6328 case Intrinsic::ctlz: {
6329 SDValue Arg = getValue(I.getArgOperand(0));
6330 ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6331 EVT Ty = Arg.getValueType();
    setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
                             sdl, Ty, Arg));
    return;
  }
6336 case Intrinsic::ctpop: {
6337 SDValue Arg = getValue(I.getArgOperand(0));
6338 EVT Ty = Arg.getValueType();
    setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
    return;
  }
6342 case Intrinsic::fshl:
6343 case Intrinsic::fshr: {
6344 bool IsFSHL = Intrinsic == Intrinsic::fshl;
6345 SDValue X = getValue(I.getArgOperand(0));
6346 SDValue Y = getValue(I.getArgOperand(1));
6347 SDValue Z = getValue(I.getArgOperand(2));
6348 EVT VT = X.getValueType();
6349 SDValue BitWidthC = DAG.getConstant(VT.getScalarSizeInBits(), sdl, VT);
6350 SDValue Zero = DAG.getConstant(0, sdl, VT);
6351 SDValue ShAmt = DAG.getNode(ISD::UREM, sdl, VT, Z, BitWidthC);
6353 auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
6354 if (TLI.isOperationLegalOrCustom(FunnelOpcode, VT)) {
      setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z));
      return;
    }
6359 // When X == Y, this is rotate. If the data type has a power-of-2 size, we
6360 // avoid the select that is necessary in the general case to filter out
6361 // the 0-shift possibility that leads to UB.
6362 if (X == Y && isPowerOf2_32(VT.getScalarSizeInBits())) {
6363 auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
6364 if (TLI.isOperationLegalOrCustom(RotateOpcode, VT)) {
        setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
        return;
      }
6369 // Some targets only rotate one way. Try the opposite direction.
6370 RotateOpcode = IsFSHL ? ISD::ROTR : ISD::ROTL;
6371 if (TLI.isOperationLegalOrCustom(RotateOpcode, VT)) {
6372 // Negate the shift amount because it is safe to ignore the high bits.
6373 SDValue NegShAmt = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z);
        setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, NegShAmt));
        return;
      }
6378 // fshl (rotl): (X << (Z % BW)) | (X >> ((0 - Z) % BW))
6379 // fshr (rotr): (X << ((0 - Z) % BW)) | (X >> (Z % BW))
6380 SDValue NegZ = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z);
6381 SDValue NShAmt = DAG.getNode(ISD::UREM, sdl, VT, NegZ, BitWidthC);
6382 SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : NShAmt);
6383 SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, X, IsFSHL ? NShAmt : ShAmt);
      setValue(&I, DAG.getNode(ISD::OR, sdl, VT, ShX, ShY));
      return;
    }
6388 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
6389 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
6390 SDValue InvShAmt = DAG.getNode(ISD::SUB, sdl, VT, BitWidthC, ShAmt);
6391 SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : InvShAmt);
6392 SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, Y, IsFSHL ? InvShAmt : ShAmt);
6393 SDValue Or = DAG.getNode(ISD::OR, sdl, VT, ShX, ShY);
6395 // If (Z % BW == 0), then the opposite direction shift is shift-by-bitwidth,
6396 // and that is undefined. We must compare and select to avoid UB.
    EVT CCVT = MVT::i1;
    if (VT.isVector())
      CCVT = EVT::getVectorVT(*Context, CCVT, VT.getVectorNumElements());
6401 // For fshl, 0-shift returns the 1st arg (X).
6402 // For fshr, 0-shift returns the 2nd arg (Y).
6403 SDValue IsZeroShift = DAG.getSetCC(sdl, CCVT, ShAmt, Zero, ISD::SETEQ);
    setValue(&I, DAG.getSelect(sdl, VT, IsZeroShift, IsFSHL ? X : Y, Or));
    return;
  }
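
  // Illustrative example (not from the code): fshl i8 %x, i8 %y, i8 3 lowers
  // to (x << 3) | (y >> 5), with a select returning x when the modular shift
  // amount is 0; fshr i8 %x, i8 %y, i8 3 is (x << 5) | (y >> 3), selecting y
  // on a 0-shift.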
6407 case Intrinsic::sadd_sat: {
6408 SDValue Op1 = getValue(I.getArgOperand(0));
6409 SDValue Op2 = getValue(I.getArgOperand(1));
    setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2));
    return;
  }
6413 case Intrinsic::uadd_sat: {
6414 SDValue Op1 = getValue(I.getArgOperand(0));
6415 SDValue Op2 = getValue(I.getArgOperand(1));
    setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
    return;
  }
6419 case Intrinsic::ssub_sat: {
6420 SDValue Op1 = getValue(I.getArgOperand(0));
6421 SDValue Op2 = getValue(I.getArgOperand(1));
6422 setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6425 case Intrinsic::usub_sat: {
6426 SDValue Op1 = getValue(I.getArgOperand(0));
6427 SDValue Op2 = getValue(I.getArgOperand(1));
6428 setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
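  // Saturating ops clamp to the type's limits instead of wrapping: e.g. for
  // i8, uadd.sat(200, 100) == 255 and ssub.sat(-100, 100) == -128.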
  case Intrinsic::smul_fix:
  case Intrinsic::umul_fix:
  case Intrinsic::smul_fix_sat:
  case Intrinsic::umul_fix_sat: {
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    SDValue Op3 = getValue(I.getArgOperand(2));
    setValue(&I, DAG.getNode(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
                             Op1.getValueType(), Op1, Op2, Op3));
    return;
  }
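  // The third operand is the fixed-point scale: with scale 8 (a Q8.8 format),
  // smul.fix(384, 512, 8) == (384 * 512) >> 8 == 768, i.e. 1.5 * 2.0 == 3.0.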
  case Intrinsic::sdiv_fix:
  case Intrinsic::udiv_fix: {
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));
    SDValue Op3 = getValue(I.getArgOperand(2));
    setValue(&I, expandDivFix(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
                              Op1, Op2, Op3, DAG, TLI));
    return;
  }
  case Intrinsic::stacksave: {
    SDValue Op = getRoot();
    Res = DAG.getNode(
        ISD::STACKSAVE, sdl,
        DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Op);
    setValue(&I, Res);
    DAG.setRoot(Res.getValue(1));
    return;
  }
  case Intrinsic::stackrestore:
    Res = getValue(I.getArgOperand(0));
    DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
    return;
  case Intrinsic::get_dynamic_area_offset: {
    SDValue Op = getRoot();
    EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
    EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
    // Result type for @llvm.get.dynamic.area.offset should match PtrTy for
    // the target.
    if (PtrTy.getSizeInBits() < ResTy.getSizeInBits())
      report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
                         " intrinsic!");
    Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
                      Op);
    DAG.setRoot(Op);
    setValue(&I, Res);
    return;
  }
  case Intrinsic::stackguard: {
    EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
    MachineFunction &MF = DAG.getMachineFunction();
    const Module &M = *MF.getFunction().getParent();
    SDValue Chain = getRoot();
    if (TLI.useLoadStackGuardNode()) {
      Res = getLoadStackGuard(DAG, sdl, Chain);
    } else {
      const Value *Global = TLI.getSDagStackGuard(M);
      unsigned Align = DL->getPrefTypeAlignment(Global->getType());
      Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
                        MachinePointerInfo(Global, 0), Align,
                        MachineMemOperand::MOVolatile);
    }
    if (TLI.useStackGuardXorFP())
      Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
    DAG.setRoot(Chain);
    setValue(&I, Res);
    return;
  }
  case Intrinsic::stackprotector: {
    // Emit code into the DAG to store the stack guard onto the stack.
    MachineFunction &MF = DAG.getMachineFunction();
    MachineFrameInfo &MFI = MF.getFrameInfo();
    EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
    SDValue Src, Chain = getRoot();

    if (TLI.useLoadStackGuardNode())
      Src = getLoadStackGuard(DAG, sdl, Chain);
    else
      Src = getValue(I.getArgOperand(0)); // The guard's value.

    AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));

    int FI = FuncInfo.StaticAllocaMap[Slot];
    MFI.setStackProtectorIndex(FI);

    SDValue FIN = DAG.getFrameIndex(FI, PtrTy);

    // Store the stack protector onto the stack.
    Res = DAG.getStore(Chain, sdl, Src, FIN, MachinePointerInfo::getFixedStack(
                                                 DAG.getMachineFunction(), FI),
                       /* Alignment = */ 0, MachineMemOperand::MOVolatile);
    setValue(&I, Res);
    DAG.setRoot(Res);
    return;
  }
  case Intrinsic::objectsize:
    llvm_unreachable("llvm.objectsize.* should have been lowered already");

  case Intrinsic::is_constant:
    llvm_unreachable("llvm.is.constant.* should have been lowered already");

  case Intrinsic::annotation:
  case Intrinsic::ptr_annotation:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
    // Drop the intrinsic, but forward the value.
    setValue(&I, getValue(I.getOperand(0)));
    return;
  case Intrinsic::assume:
  case Intrinsic::var_annotation:
  case Intrinsic::sideeffect:
    // Discard annotate attributes, assumptions, and artificial side-effects.
    return;
  case Intrinsic::codeview_annotation: {
    // Emit a label associated with this metadata.
    MachineFunction &MF = DAG.getMachineFunction();
    MCSymbol *Label =
        MF.getMMI().getContext().createTempSymbol("annotation", true);
    Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
    MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
    Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
    DAG.setRoot(Res);
    return;
  }
  case Intrinsic::init_trampoline: {
    const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());

    SDValue Ops[6];
    Ops[0] = getRoot();
    Ops[1] = getValue(I.getArgOperand(0));
    Ops[2] = getValue(I.getArgOperand(1));
    Ops[3] = getValue(I.getArgOperand(2));
    Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
    Ops[5] = DAG.getSrcValue(F);

    Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);

    DAG.setRoot(Res);
    return;
  }
  case Intrinsic::adjust_trampoline:
    setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
                             TLI.getPointerTy(DAG.getDataLayout()),
                             getValue(I.getArgOperand(0))));
    return;
  case Intrinsic::gcroot: {
    assert(DAG.getMachineFunction().getFunction().hasGC() &&
           "only valid in functions with gc specified, enforced by Verifier");
    assert(GFI && "implied by previous");
    const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
    const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));

    FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
    GFI->addStackRoot(FI->getIndex(), TypeMap);
    return;
  }
  case Intrinsic::gcread:
  case Intrinsic::gcwrite:
    llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
  case Intrinsic::flt_rounds:
    setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, sdl, MVT::i32));
    return;
  case Intrinsic::expect:
    // Just replace __builtin_expect(exp, c) with EXP.
    setValue(&I, getValue(I.getArgOperand(0)));
    return;
  case Intrinsic::debugtrap:
  case Intrinsic::trap: {
    StringRef TrapFuncName =
        I.getAttributes()
            .getAttribute(AttributeList::FunctionIndex, "trap-func-name")
            .getValueAsString();
    if (TrapFuncName.empty()) {
      ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ?
        ISD::TRAP : ISD::DEBUGTRAP;
      DAG.setRoot(DAG.getNode(Op, sdl, MVT::Other, getRoot()));
      return;
    }
    TargetLowering::ArgListTy Args;

    TargetLowering::CallLoweringInfo CLI(DAG);
    CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
        CallingConv::C, I.getType(),
        DAG.getExternalSymbol(TrapFuncName.data(),
                              TLI.getPointerTy(DAG.getDataLayout())),
        std::move(Args));

    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
    DAG.setRoot(Result.second);
    return;
  }
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow: {
    ISD::NodeType Op;
    switch (Intrinsic) {
    default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
    case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
    case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
    case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
    case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
    case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
    case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
    }
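    // Each node produces two results: the (wrapped) value and an i1 overflow
    // flag, e.g. for i8, uadd.with.overflow(200, 100) yields {44, true}
    // because 300 wraps to 44 modulo 256.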
    SDValue Op1 = getValue(I.getArgOperand(0));
    SDValue Op2 = getValue(I.getArgOperand(1));

    EVT ResultVT = Op1.getValueType();
    EVT OverflowVT = MVT::i1;
    if (ResultVT.isVector())
      OverflowVT = EVT::getVectorVT(
          *Context, OverflowVT, ResultVT.getVectorNumElements());

    SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT);
    setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
    return;
  }
  case Intrinsic::prefetch: {
    SDValue Ops[5];
    unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
    auto Flags = rw == 0 ? MachineMemOperand::MOLoad
                         : MachineMemOperand::MOStore;
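    // @llvm.prefetch takes (address, rw, locality, cache type); rw == 0
    // requests a read prefetch, rw == 1 a write prefetch.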
    Ops[0] = DAG.getRoot();
    Ops[1] = getValue(I.getArgOperand(0));
    Ops[2] = getValue(I.getArgOperand(1));
    Ops[3] = getValue(I.getArgOperand(2));
    Ops[4] = getValue(I.getArgOperand(3));
    SDValue Result = DAG.getMemIntrinsicNode(ISD::PREFETCH, sdl,
                                             DAG.getVTList(MVT::Other), Ops,
                                             EVT::getIntegerVT(*Context, 8),
                                             MachinePointerInfo(I.getArgOperand(0)),
                                             /* align */ 0, Flags);

    // Chain the prefetch in parallel with any pending loads, to stay out of
    // the way of later optimizations.
    PendingLoads.push_back(Result);
    Result = getRoot();
    DAG.setRoot(Result);
    return;
  }
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end: {
    bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
    // Stack coloring is not enabled in O0, discard region information.
    if (TM.getOptLevel() == CodeGenOpt::None)
      return;

    const int64_t ObjectSize =
        cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
    Value *const ObjectPtr = I.getArgOperand(1);
    SmallVector<const Value *, 4> Allocas;
    GetUnderlyingObjects(ObjectPtr, Allocas, *DL);

    for (SmallVectorImpl<const Value *>::iterator Object = Allocas.begin(),
                                                  E = Allocas.end();
         Object != E; ++Object) {
      const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object);

      // Could not find an Alloca.
      if (!LifetimeObject)
        continue;

      // First check that the Alloca is static, otherwise it won't have a
      // valid frame index.
      auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
      if (SI == FuncInfo.StaticAllocaMap.end())
        return;

      const int FrameIndex = SI->second;
      int64_t Offset;
      if (GetPointerBaseWithConstantOffset(
              ObjectPtr, Offset, DAG.getDataLayout()) != LifetimeObject)
        Offset = -1; // Cannot determine offset from alloca to lifetime object.
      Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize,
                                Offset);
      DAG.setRoot(Res);
    }
    return;
  }
  case Intrinsic::invariant_start:
    // Discard region information.
    setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout())));
    return;
  case Intrinsic::invariant_end:
    // Discard region information.
    return;
  case Intrinsic::clear_cache:
    /// FunctionName may be null.
    if (const char *FunctionName = TLI.getClearCacheBuiltinName())
      lowerCallToExternalSymbol(I, FunctionName);
    return;
  case Intrinsic::donothing:
    // Ignore.
    return;
  case Intrinsic::experimental_stackmap:
    visitStackmap(I);
    return;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    visitPatchpoint(&I);
    return;
  case Intrinsic::experimental_gc_statepoint:
    LowerStatepoint(ImmutableStatepoint(&I));
    return;
  case Intrinsic::experimental_gc_result:
    visitGCResult(cast<GCResultInst>(I));
    return;
  case Intrinsic::experimental_gc_relocate:
    visitGCRelocate(cast<GCRelocateInst>(I));
    return;
  case Intrinsic::instrprof_increment:
    llvm_unreachable("instrprof failed to lower an increment");
  case Intrinsic::instrprof_value_profile:
    llvm_unreachable("instrprof failed to lower a value profiling call");
  case Intrinsic::localescape: {
    MachineFunction &MF = DAG.getMachineFunction();
    const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();

    // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
    // is the same on all targets.
    for (unsigned Idx = 0, E = I.getNumArgOperands(); Idx < E; ++Idx) {
      Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
      if (isa<ConstantPointerNull>(Arg))
        continue; // Skip null pointers. They represent a hole in index space.
      AllocaInst *Slot = cast<AllocaInst>(Arg);
      assert(FuncInfo.StaticAllocaMap.count(Slot) &&
             "can only escape static allocas");
      int FI = FuncInfo.StaticAllocaMap[Slot];
      MCSymbol *FrameAllocSym =
          MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
              GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
              TII->get(TargetOpcode::LOCAL_ESCAPE))
          .addSym(FrameAllocSym)
          .addFrameIndex(FI);
    }

    return;
  }
  case Intrinsic::localrecover: {
    // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
    MachineFunction &MF = DAG.getMachineFunction();
    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout(), 0);

    // Get the symbol that defines the frame offset.
    auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
    auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
    unsigned IdxVal =
        unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
    MCSymbol *FrameAllocSym =
        MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
            GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal);

    // Create a MCSymbol for the label to avoid any target lowering
    // that would make this PC relative.
    SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
    SDValue OffsetVal =
        DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);

    // Add the offset to the FP.
    Value *FP = I.getArgOperand(1);
    SDValue FPVal = getValue(FP);
    SDValue Add = DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
    setValue(&I, Add);

    return;
  }
  case Intrinsic::eh_exceptionpointer:
  case Intrinsic::eh_exceptioncode: {
    // Get the exception pointer vreg, copy from it, and resize it to fit.
    const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
    unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
    SDValue N =
        DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), VReg, PtrVT);
    if (Intrinsic == Intrinsic::eh_exceptioncode)
      N = DAG.getZExtOrTrunc(N, getCurSDLoc(), MVT::i32);
    setValue(&I, N);
    return;
  }
  case Intrinsic::xray_customevent: {
    // Here we want to make sure that the intrinsic behaves as if it has a
    // specific calling convention, and only for x86_64.
    // FIXME: Support other platforms later.
    const auto &Triple = DAG.getTarget().getTargetTriple();
    if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
      return;

    SDLoc DL = getCurSDLoc();
    SmallVector<SDValue, 8> Ops;

    // We want to say that we always want the arguments in registers.
    SDValue LogEntryVal = getValue(I.getArgOperand(0));
    SDValue StrSizeVal = getValue(I.getArgOperand(1));
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Chain = getRoot();
    Ops.push_back(LogEntryVal);
    Ops.push_back(StrSizeVal);
    Ops.push_back(Chain);

    // We need to enforce the calling convention for the callsite, so that
    // argument ordering is enforced correctly, and that register allocation can
    // see that some registers may be assumed clobbered and have to preserve
    // them across calls to the intrinsic.
    MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
                                           DL, NodeTys, Ops);
    SDValue patchableNode = SDValue(MN, 0);
    DAG.setRoot(patchableNode);
    setValue(&I, patchableNode);
    return;
  }
  case Intrinsic::xray_typedevent: {
    // Here we want to make sure that the intrinsic behaves as if it has a
    // specific calling convention, and only for x86_64.
    // FIXME: Support other platforms later.
    const auto &Triple = DAG.getTarget().getTargetTriple();
    if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
      return;

    SDLoc DL = getCurSDLoc();
    SmallVector<SDValue, 8> Ops;

    // We want to say that we always want the arguments in registers.
    // It's unclear to me how manipulating the selection DAG here forces callers
    // to provide arguments in registers instead of on the stack.
    SDValue LogTypeId = getValue(I.getArgOperand(0));
    SDValue LogEntryVal = getValue(I.getArgOperand(1));
    SDValue StrSizeVal = getValue(I.getArgOperand(2));
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue Chain = getRoot();
    Ops.push_back(LogTypeId);
    Ops.push_back(LogEntryVal);
    Ops.push_back(StrSizeVal);
    Ops.push_back(Chain);

    // We need to enforce the calling convention for the callsite, so that
    // argument ordering is enforced correctly, and that register allocation can
    // see that some registers may be assumed clobbered and have to preserve
    // them across calls to the intrinsic.
    MachineSDNode *MN = DAG.getMachineNode(
        TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, DL, NodeTys, Ops);
    SDValue patchableNode = SDValue(MN, 0);
    DAG.setRoot(patchableNode);
    setValue(&I, patchableNode);
    return;
  }
  case Intrinsic::experimental_deoptimize:
    LowerDeoptimizeCall(&I);
    return;

  case Intrinsic::experimental_vector_reduce_v2_fadd:
  case Intrinsic::experimental_vector_reduce_v2_fmul:
  case Intrinsic::experimental_vector_reduce_add:
  case Intrinsic::experimental_vector_reduce_mul:
  case Intrinsic::experimental_vector_reduce_and:
  case Intrinsic::experimental_vector_reduce_or:
  case Intrinsic::experimental_vector_reduce_xor:
  case Intrinsic::experimental_vector_reduce_smax:
  case Intrinsic::experimental_vector_reduce_smin:
  case Intrinsic::experimental_vector_reduce_umax:
  case Intrinsic::experimental_vector_reduce_umin:
  case Intrinsic::experimental_vector_reduce_fmax:
  case Intrinsic::experimental_vector_reduce_fmin:
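    // Each of these reduces its vector operand to a scalar, e.g.
    // vector.reduce.add on <4 x i32> <1, 2, 3, 4> produces i32 10.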
    visitVectorReduce(I, Intrinsic);
    return;

  case Intrinsic::icall_branch_funnel: {
    SmallVector<SDValue, 16> Ops;
    Ops.push_back(getValue(I.getArgOperand(0)));

    int64_t Offset;
    auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
        I.getArgOperand(1), Offset, DAG.getDataLayout()));
    if (!Base)
      report_fatal_error(
          "llvm.icall.branch.funnel operand must be a GlobalValue");
    Ops.push_back(DAG.getTargetGlobalAddress(Base, getCurSDLoc(), MVT::i64, 0));
    struct BranchFunnelTarget {
      int64_t Offset;
      SDValue Target;
    };
    SmallVector<BranchFunnelTarget, 8> Targets;

    for (unsigned Op = 1, N = I.getNumArgOperands(); Op != N; Op += 2) {
      auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
          I.getArgOperand(Op), Offset, DAG.getDataLayout()));
      if (ElemBase != Base)
        report_fatal_error("all llvm.icall.branch.funnel operands must refer "
                           "to the same GlobalValue");

      SDValue Val = getValue(I.getArgOperand(Op + 1));
      auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
      if (!GA)
        report_fatal_error(
            "llvm.icall.branch.funnel operand must be a GlobalValue");
      Targets.push_back({Offset, DAG.getTargetGlobalAddress(
                                     GA->getGlobal(), getCurSDLoc(),
                                     Val.getValueType(), GA->getOffset())});
    }

    llvm::sort(Targets,
               [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
                 return T1.Offset < T2.Offset;
               });

    for (auto &T : Targets) {
      Ops.push_back(DAG.getTargetConstant(T.Offset, getCurSDLoc(), MVT::i32));
      Ops.push_back(T.Target);
    }

    Ops.push_back(DAG.getRoot()); // Chain
    SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL,
                                 getCurSDLoc(), MVT::Other, Ops),
              0);
    DAG.setRoot(N);
    setValue(&I, N);
    HasTailCall = true;
    return;
  }
  case Intrinsic::wasm_landingpad_index:
    // Information this intrinsic contained has been transferred to
    // MachineFunction in SelectionDAGISel::PrepareEHLandingPad. We can safely
    // delete it now.
    return;

  case Intrinsic::aarch64_settag:
  case Intrinsic::aarch64_settag_zero: {
    const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
    bool ZeroMemory = Intrinsic == Intrinsic::aarch64_settag_zero;
    SDValue Val = TSI.EmitTargetCodeForSetTag(
        DAG, getCurSDLoc(), getRoot(), getValue(I.getArgOperand(0)),
        getValue(I.getArgOperand(1)), MachinePointerInfo(I.getArgOperand(0)),
        ZeroMemory);
    DAG.setRoot(Val);
    setValue(&I, Val);
    return;
  }
  case Intrinsic::ptrmask: {
    SDValue Ptr = getValue(I.getOperand(0));
    SDValue Const = getValue(I.getOperand(1));

    EVT DestVT =
        EVT(DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
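    // E.g. llvm.ptrmask(p, -64) clears the low six bits of p, aligning the
    // pointer down to a 64-byte boundary.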
    setValue(&I, DAG.getNode(ISD::AND, getCurSDLoc(), DestVT, Ptr,
                             DAG.getZExtOrTrunc(Const, getCurSDLoc(), DestVT)));
    return;
  }
  }
}
void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
    const ConstrainedFPIntrinsic &FPI) {
  SDLoc sdl = getCurSDLoc();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), FPI.getType(), ValueVTs);
  ValueVTs.push_back(MVT::Other); // Out chain

  // We do not need to serialize constrained FP intrinsics against
  // each other or against (nonvolatile) loads, so they can be
  // chained like loads.
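  // For example, @llvm.experimental.constrained.fadd lowers to a STRICT_FADD
  // node whose results are the FP value plus this out chain.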
  SDValue Chain = DAG.getRoot();
  SmallVector<SDValue, 4> Opers;
  Opers.push_back(Chain);
  if (FPI.isUnaryOp()) {
    Opers.push_back(getValue(FPI.getArgOperand(0)));
  } else if (FPI.isTernaryOp()) {
    Opers.push_back(getValue(FPI.getArgOperand(0)));
    Opers.push_back(getValue(FPI.getArgOperand(1)));
    Opers.push_back(getValue(FPI.getArgOperand(2)));
  } else {
    Opers.push_back(getValue(FPI.getArgOperand(0)));
    Opers.push_back(getValue(FPI.getArgOperand(1)));
  }

  unsigned Opcode;
  switch (FPI.getIntrinsicID()) {
  default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)                   \
  case Intrinsic::INTRINSIC:                                                   \
    Opcode = ISD::STRICT_##DAGN;                                               \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  // A few strict DAG nodes carry additional operands that are not
  // set up by the default code above.
  switch (Opcode) {
  default: break;
  case ISD::STRICT_FP_ROUND:
    Opers.push_back(
        DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())));
    break;
  case ISD::STRICT_FSETCC:
  case ISD::STRICT_FSETCCS: {
    auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
    Opers.push_back(DAG.getCondCode(getFCmpCondCode(FPCmp->getPredicate())));
    break;
  }
  }
  SDVTList VTs = DAG.getVTList(ValueVTs);
  SDValue Result = DAG.getNode(Opcode, sdl, VTs, Opers);

  assert(Result.getNode()->getNumValues() == 2);

  // Push node to the appropriate list so that future instructions can be
  // chained up correctly.
  SDValue OutChain = Result.getValue(1);
  switch (FPI.getExceptionBehavior().getValue()) {
  case fp::ExceptionBehavior::ebIgnore:
    // The only reason why ebIgnore nodes still need to be chained is that
    // they might depend on the current rounding mode, and therefore must
    // not be moved across instructions that may change that mode.
    LLVM_FALLTHROUGH;
  case fp::ExceptionBehavior::ebMayTrap:
    // These must not be moved across calls or instructions that may change
    // floating-point exception masks.
    PendingConstrainedFP.push_back(OutChain);
    break;
  case fp::ExceptionBehavior::ebStrict:
    // These must not be moved across calls or instructions that may change
    // floating-point exception masks or read floating-point exception flags.
    // In addition, they cannot be optimized out even if unused.
    PendingConstrainedFPStrict.push_back(OutChain);
    break;
  }

  SDValue FPResult = Result.getValue(0);
  setValue(&FPI, FPResult);
}
std::pair<SDValue, SDValue>
SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
                                    const BasicBlock *EHPadBB) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineModuleInfo &MMI = MF.getMMI();
  MCSymbol *BeginLabel = nullptr;

  if (EHPadBB) {
    // Insert a label before the invoke call to mark the try range. This can be
    // used to detect deletion of the invoke via the MachineModuleInfo.
    BeginLabel = MMI.getContext().createTempSymbol();

    // For SjLj, keep track of which landing pads go with which invokes
    // so as to maintain the ordering of pads in the LSDA.
    unsigned CallSiteIndex = MMI.getCurrentCallSite();
    if (CallSiteIndex) {
      MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
      LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);

      // Now that the call site is handled, stop tracking it.
      MMI.setCurrentCallSite(0);
    }

    // Both PendingLoads and PendingExports must be flushed here;
    // this call might not return.
    (void)getRoot();
    DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel));

    CLI.setChain(getRoot());
  }
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);

  assert((CLI.IsTailCall || Result.second.getNode()) &&
         "Non-null chain expected with non-tail call!");
  assert((Result.second.getNode() || !Result.first.getNode()) &&
         "Null value expected with tail call!");

  if (!Result.second.getNode()) {
    // As a special case, a null chain means that a tail call has been emitted
    // and the DAG root is already updated.
    HasTailCall = true;

    // Since there's no actual continuation from this block, nothing can be
    // relying on us setting vregs for them.
    PendingExports.clear();
  } else {
    DAG.setRoot(Result.second);
  }
  if (EHPadBB) {
    // Insert a label at the end of the invoke call to mark the try range. This
    // can be used to detect deletion of the invoke via the MachineModuleInfo.
    MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
    DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel));

    // Inform MachineModuleInfo of the range.
    auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
    // There is a platform (e.g. wasm) that uses funclet style IR but does not
    // actually use outlined funclets and their LSDA info style.
    if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
      assert(CLI.CS);
      WinEHFuncInfo *EHInfo = DAG.getMachineFunction().getWinEHFuncInfo();
      EHInfo->addIPToStateRange(cast<InvokeInst>(CLI.CS.getInstruction()),
                                BeginLabel, EndLabel);
    } else if (!isScopedEHPersonality(Pers)) {
      MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
    }
  }

  return Result;
}
void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
                                      bool isTailCall,
                                      const BasicBlock *EHPadBB) {
  auto &DL = DAG.getDataLayout();
  FunctionType *FTy = CS.getFunctionType();
  Type *RetTy = CS.getType();

  TargetLowering::ArgListTy Args;
  Args.reserve(CS.arg_size());

  const Value *SwiftErrorVal = nullptr;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (isTailCall) {
    // Avoid emitting tail calls in functions with the disable-tail-calls
    // attribute.
    auto *Caller = CS.getInstruction()->getParent()->getParent();
    if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() ==
        "true")
      isTailCall = false;

    // We can't tail call inside a function with a swifterror argument. Lowering
    // does not support this yet. It would have to move into the swifterror
    // register before the call.
    if (TLI.supportSwiftError() &&
        Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
      isTailCall = false;
  }
  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    TargetLowering::ArgListEntry Entry;
    const Value *V = *i;

    // Skip empty types.
    if (V->getType()->isEmptyTy())
      continue;

    SDValue ArgNode = getValue(V);
    Entry.Node = ArgNode; Entry.Ty = V->getType();

    Entry.setAttributes(&CS, i - CS.arg_begin());

    // Use the swifterror virtual register as input to the call.
    if (Entry.IsSwiftError && TLI.supportSwiftError()) {
      SwiftErrorVal = V;
      // We find the virtual register for the actual swifterror argument.
      // Instead of using the Value, we use the virtual register.
      Entry.Node = DAG.getRegister(
          SwiftError.getOrCreateVRegUseAt(CS.getInstruction(), FuncInfo.MBB, V),
          EVT(TLI.getPointerTy(DL)));
    }

    Args.push_back(Entry);

    // If we have an explicit sret argument that is an Instruction, (i.e., it
    // might point to function-local memory), we can't meaningfully tail-call.
    if (Entry.IsSRet && isa<Instruction>(V))
      isTailCall = false;
  }
  // If the call site has a cfguardtarget operand bundle, create and add an
  // additional ArgListEntry.
  if (auto Bundle = CS.getOperandBundle(LLVMContext::OB_cfguardtarget)) {
    TargetLowering::ArgListEntry Entry;
    Value *V = Bundle->Inputs[0];
    SDValue ArgNode = getValue(V);
    Entry.Node = ArgNode;
    Entry.Ty = V->getType();
    Entry.IsCFGuardTarget = true;
    Args.push_back(Entry);
  }
  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within TLI->LowerCallTo.
  if (isTailCall && !isInTailCallPosition(CS, DAG.getTarget()))
    isTailCall = false;

  // Disable tail calls if there is a swifterror argument. Targets have not
  // been updated to support tail calls.
  if (TLI.supportSwiftError() && SwiftErrorVal)
    isTailCall = false;

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(getCurSDLoc())
      .setChain(getRoot())
      .setCallee(RetTy, FTy, Callee, std::move(Args), CS)
      .setTailCall(isTailCall)
      .setConvergent(CS.isConvergent());
  std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
  if (Result.first.getNode()) {
    const Instruction *Inst = CS.getInstruction();
    Result.first = lowerRangeToAssertZExt(DAG, *Inst, Result.first);
    setValue(Inst, Result.first);
  }

  // The last element of CLI.InVals has the SDValue for the swifterror return.
  // Here we copy it to a virtual register and update SwiftErrorMap for the
  // function's frame lowering.
  if (SwiftErrorVal && TLI.supportSwiftError()) {
    // Get the last element of InVals.
    SDValue Src = CLI.InVals.back();
    Register VReg = SwiftError.getOrCreateVRegDefAt(
        CS.getInstruction(), FuncInfo.MBB, SwiftErrorVal);
    SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
    DAG.setRoot(CopyNode);
  }
}
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
                             SelectionDAGBuilder &Builder) {
  // Check to see if this load can be trivially constant folded, e.g. if the
  // input is from a string literal.
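  // E.g. in memcmp(p, "ab", 2), the load from the string literal folds to
  // the constant 0x6261 on a little-endian target, with no code emitted.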
  if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
    // Cast the pointer to the type we really want to load.
    Type *LoadTy =
        Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
    if (LoadVT.isVector())
      LoadTy = VectorType::get(LoadTy, LoadVT.getVectorNumElements());

    LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
                                         PointerType::getUnqual(LoadTy));
    if (const Constant *LoadCst = ConstantFoldLoadFromConstPtr(
            const_cast<Constant *>(LoadInput), LoadTy, *Builder.DL))
      return Builder.getValue(LoadCst);
  }

  // Otherwise, we have to emit the load. If the pointer is to unfoldable but
  // still constant memory, the input chain can be the entry node.
  SDValue Root;
  bool ConstantMemory = false;

  // Do not serialize (non-volatile) loads of constant memory with anything.
  if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) {
    Root = Builder.DAG.getEntryNode();
    ConstantMemory = true;
  } else {
    // Do not serialize non-volatile loads against each other.
    Root = Builder.DAG.getRoot();
  }

  SDValue Ptr = Builder.getValue(PtrVal);
  SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root,
                                        Ptr, MachinePointerInfo(PtrVal),
                                        /* Alignment = */ 1);

  if (!ConstantMemory)
    Builder.PendingLoads.push_back(LoadVal.getValue(1));
  return LoadVal;
}
/// Record the value for an instruction that produces an integer result,
/// converting the type where necessary.
void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
                                                  SDValue Value,
                                                  bool IsSigned) {
  EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                    I.getType(), true);
  if (IsSigned)
    Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT);
  else
    Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT);
  setValue(&I, Value);
}
/// See if we can lower a memcmp call into an optimized form. If so, return
/// true and lower it. Otherwise return false, and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
  const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
  const Value *Size = I.getArgOperand(2);
  const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);
  if (CSize && CSize->getZExtValue() == 0) {
    EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                          I.getType(), true);
    setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
    return true;
  }

  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
  std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
      DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
      getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
  if (Res.first.getNode()) {
    processIntegerCallValue(I, Res.first, true);
    PendingLoads.push_back(Res.second);
    return true;
  }
  // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS) != 0
  // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS) != 0
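  // This transform is only sound when the result is compared against zero:
  // a wide integer equality test preserves "equal vs. not equal", but not
  // the byte-order-sensitive sign that memcmp returns.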
  if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
    return false;
  // If the target has a fast compare for the given size, it will return a
  // preferred load type for that size. Require that the load VT is legal and
  // that the target supports unaligned loads of that type. Otherwise, return
  // INVALID.
  auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    MVT LVT = TLI.hasFastEqualityCompare(NumBits);
    if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
      // TODO: Handle 5 byte compare as 4-byte + 1 byte.
      // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
      // TODO: Check alignment of src and dest ptrs.
      unsigned DstAS = LHS->getType()->getPointerAddressSpace();
      unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
      if (!TLI.isTypeLegal(LVT) ||
          !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
          !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
        LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
    }

    return LVT;
  };
  // This turns into unaligned loads. We only do this if the target natively
  // supports the MVT we'll be loading or if it is small enough (<= 4) that
  // we'll only produce a small number of byte loads.
  MVT LoadVT;
  unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
  switch (NumBitsToCompare) {
  default:
    return false;
  case 16:
    LoadVT = MVT::i16;
    break;
  case 32:
    LoadVT = MVT::i32;
    break;
  case 64:
  case 128:
  case 256:
    LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
    break;
  }

  if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
    return false;
  SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
  SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);

  // Bitcast to a wide integer type if the loads are vectors.
  if (LoadVT.isVector()) {
    EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
    LoadL = DAG.getBitcast(CmpVT, LoadL);
    LoadR = DAG.getBitcast(CmpVT, LoadR);
  }

  SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
  processIntegerCallValue(I, Cmp, false);
  return true;
}
/// See if we can lower a memchr call into an optimized form. If so, return
/// true and lower it. Otherwise return false, and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
  const Value *Src = I.getArgOperand(0);
  const Value *Char = I.getArgOperand(1);
  const Value *Length = I.getArgOperand(2);

  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
  std::pair<SDValue, SDValue> Res =
      TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
                                  getValue(Src), getValue(Char),
                                  getValue(Length), MachinePointerInfo(Src));
  if (Res.first.getNode()) {
    setValue(&I, Res.first);
    PendingLoads.push_back(Res.second);
    return true;
  }

  return false;
}
/// See if we can lower a mempcpy call into an optimized form. If so, return
/// true and lower it. Otherwise return false, and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
  SDValue Dst = getValue(I.getArgOperand(0));
  SDValue Src = getValue(I.getArgOperand(1));
  SDValue Size = getValue(I.getArgOperand(2));

  unsigned DstAlign = DAG.InferPtrAlignment(Dst);
  unsigned SrcAlign = DAG.InferPtrAlignment(Src);
  unsigned Align = std::min(DstAlign, SrcAlign);
  if (Align == 0) // Alignment of one or both could not be inferred.
    Align = 1;    // 0 and 1 both specify no alignment, but 0 is reserved.

  bool isVol = false;
  SDLoc sdl = getCurSDLoc();
  // In the mempcpy context we need to pass in a false value for isTailCall
  // because the return pointer needs to be adjusted by the size of
  // the copied memory.
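  // Unlike memcpy, mempcpy(dst, src, n) returns dst + n rather than dst, so
  // a tail-called memcpy would drop the required pointer adjustment.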
  SDValue Root = isVol ? getRoot() : getMemoryRoot();
  SDValue MC = DAG.getMemcpy(Root, sdl, Dst, Src, Size, Align, isVol,
                             false, /*isTailCall=*/false,
                             MachinePointerInfo(I.getArgOperand(0)),
                             MachinePointerInfo(I.getArgOperand(1)));
  assert(MC.getNode() != nullptr &&
         "** memcpy should not be lowered as TailCall in mempcpy context **");
  DAG.setRoot(MC);

  // Check if Size needs to be truncated or extended.
  Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());

  // Adjust the return pointer to point just past the last dst byte.
  SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
                                    Dst, Size);
  setValue(&I, DstPlusSize);
  return true;
}
/// See if we can lower a strcpy call into an optimized form. If so, return
/// true and lower it, otherwise return false and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
  const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);

  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
  std::pair<SDValue, SDValue> Res =
      TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
                                  getValue(Arg0), getValue(Arg1),
                                  MachinePointerInfo(Arg0),
                                  MachinePointerInfo(Arg1), isStpcpy);
  if (Res.first.getNode()) {
    setValue(&I, Res.first);
    DAG.setRoot(Res.second);
    return true;
  }

  return false;
}
/// See if we can lower a strcmp call into an optimized form. If so, return
/// true and lower it, otherwise return false and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
  const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);

  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
  std::pair<SDValue, SDValue> Res =
      TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
                                  getValue(Arg0), getValue(Arg1),
                                  MachinePointerInfo(Arg0),
                                  MachinePointerInfo(Arg1));
  if (Res.first.getNode()) {
    processIntegerCallValue(I, Res.first, true);
    PendingLoads.push_back(Res.second);
    return true;
  }

  return false;
}
/// See if we can lower a strlen call into an optimized form. If so, return
/// true and lower it, otherwise return false and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
  const Value *Arg0 = I.getArgOperand(0);

  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
  std::pair<SDValue, SDValue> Res =
      TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
                                  getValue(Arg0), MachinePointerInfo(Arg0));
  if (Res.first.getNode()) {
    processIntegerCallValue(I, Res.first, false);
    PendingLoads.push_back(Res.second);
    return true;
  }

  return false;
}
/// See if we can lower a strnlen call into an optimized form. If so, return
/// true and lower it, otherwise return false and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
  const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);

  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
  std::pair<SDValue, SDValue> Res =
      TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
                                   getValue(Arg0), getValue(Arg1),
                                   MachinePointerInfo(Arg0));
  if (Res.first.getNode()) {
    processIntegerCallValue(I, Res.first, false);
    PendingLoads.push_back(Res.second);
    return true;
  }

  return false;
}
/// See if we can lower a unary floating-point operation into an SDNode with
/// the specified Opcode. If so, return true and lower it, otherwise return
/// false and it will be lowered like a normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
                                              unsigned Opcode) {
  // We already checked this call's prototype; verify it doesn't modify errno.
  if (!I.onlyReadsMemory())
    return false;

  SDValue Tmp = getValue(I.getArgOperand(0));
  setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp));
  return true;
}
/// See if we can lower a binary floating-point operation into an SDNode with
/// the specified Opcode. If so, return true and lower it. Otherwise return
/// false, and it will be lowered like a normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
                                               unsigned Opcode) {
  // We already checked this call's prototype; verify it doesn't modify errno.
  if (!I.onlyReadsMemory())
    return false;

  SDValue Tmp0 = getValue(I.getArgOperand(0));
  SDValue Tmp1 = getValue(I.getArgOperand(1));
  EVT VT = Tmp0.getValueType();
  setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1));
  return true;
}
void SelectionDAGBuilder::visitCall(const CallInst &I) {
  // Handle inline assembly differently.
  if (isa<InlineAsm>(I.getCalledValue())) {
    visitInlineAsm(&I);
    return;
  }

  if (Function *F = I.getCalledFunction()) {
    if (F->isDeclaration()) {
      // Is this an LLVM intrinsic or a target-specific intrinsic?
      unsigned IID = F->getIntrinsicID();
      if (!IID)
        if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo())
          IID = II->getIntrinsicID(F);

      if (IID) {
        visitIntrinsicCall(I, IID);
        return;
      }
    }

    // Check for well-known libc/libm calls. If the function is internal, it
    // can't be a library call. Don't do the check if marked as nobuiltin for
    // some reason or the call site requires strict floating point semantics.
    LibFunc Func;
    if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
        F->hasName() && LibInfo->getLibFunc(*F, Func) &&
        LibInfo->hasOptimizedCodeGen(Func)) {
      switch (Func) {
      default: break;
      case LibFunc_copysign:
      case LibFunc_copysignf:
      case LibFunc_copysignl:
        // We already checked this call's prototype; verify it doesn't modify
        // errno.
        if (I.onlyReadsMemory()) {
          SDValue LHS = getValue(I.getArgOperand(0));
          SDValue RHS = getValue(I.getArgOperand(1));
          setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
                                   LHS.getValueType(), LHS, RHS));
          return;
        }
        break;
      case LibFunc_fabs:
      case LibFunc_fabsf:
      case LibFunc_fabsl:
        if (visitUnaryFloatCall(I, ISD::FABS))
          return;
        break;
      case LibFunc_fmin:
      case LibFunc_fminf:
      case LibFunc_fminl:
        if (visitBinaryFloatCall(I, ISD::FMINNUM))
          return;
        break;
      case LibFunc_fmax:
      case LibFunc_fmaxf:
      case LibFunc_fmaxl:
        if (visitBinaryFloatCall(I, ISD::FMAXNUM))
          return;
        break;
      case LibFunc_sin:
      case LibFunc_sinf:
      case LibFunc_sinl:
        if (visitUnaryFloatCall(I, ISD::FSIN))
          return;
        break;
      case LibFunc_cos:
      case LibFunc_cosf:
      case LibFunc_cosl:
        if (visitUnaryFloatCall(I, ISD::FCOS))
          return;
        break;
      case LibFunc_sqrt:
      case LibFunc_sqrtf:
      case LibFunc_sqrtl:
      case LibFunc_sqrt_finite:
      case LibFunc_sqrtf_finite:
      case LibFunc_sqrtl_finite:
        if (visitUnaryFloatCall(I, ISD::FSQRT))
          return;
        break;
      case LibFunc_floor:
      case LibFunc_floorf:
      case LibFunc_floorl:
        if (visitUnaryFloatCall(I, ISD::FFLOOR))
          return;
        break;
      case LibFunc_nearbyint:
      case LibFunc_nearbyintf:
      case LibFunc_nearbyintl:
        if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
          return;
        break;
      case LibFunc_ceil:
      case LibFunc_ceilf:
      case LibFunc_ceill:
        if (visitUnaryFloatCall(I, ISD::FCEIL))
          return;
        break;
      case LibFunc_rint:
      case LibFunc_rintf:
      case LibFunc_rintl:
        if (visitUnaryFloatCall(I, ISD::FRINT))
          return;
        break;
      case LibFunc_round:
      case LibFunc_roundf:
      case LibFunc_roundl:
        if (visitUnaryFloatCall(I, ISD::FROUND))
          return;
        break;
      case LibFunc_trunc:
      case LibFunc_truncf:
      case LibFunc_truncl:
        if (visitUnaryFloatCall(I, ISD::FTRUNC))
          return;
        break;
      case LibFunc_log2:
      case LibFunc_log2f:
      case LibFunc_log2l:
        if (visitUnaryFloatCall(I, ISD::FLOG2))
          return;
        break;
      case LibFunc_exp2:
      case LibFunc_exp2f:
      case LibFunc_exp2l:
        if (visitUnaryFloatCall(I, ISD::FEXP2))
          return;
        break;
      case LibFunc_memcmp:
        if (visitMemCmpCall(I))
          return;
        break;
      case LibFunc_mempcpy:
        if (visitMemPCpyCall(I))
          return;
        break;
      case LibFunc_memchr:
        if (visitMemChrCall(I))
          return;
        break;
      case LibFunc_strcpy:
        if (visitStrCpyCall(I, false))
          return;
        break;
      case LibFunc_stpcpy:
        if (visitStrCpyCall(I, true))
          return;
        break;
      case LibFunc_strcmp:
        if (visitStrCmpCall(I))
          return;
        break;
      case LibFunc_strlen:
        if (visitStrLenCall(I))
          return;
        break;
      case LibFunc_strnlen:
        if (visitStrNLenCall(I))
          return;
        break;
      }
    }
  }

  // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
  // have to do anything here to lower funclet bundles.
  // CFGuardTarget bundles are lowered in LowerCallTo.
  assert(!I.hasOperandBundlesOtherThan({LLVMContext::OB_deopt,
                                        LLVMContext::OB_funclet,
                                        LLVMContext::OB_cfguardtarget}) &&
         "Cannot lower calls with arbitrary operand bundles!");
  SDValue Callee = getValue(I.getCalledValue());

  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
  else
    // Check if we can potentially perform a tail call. More detailed checking
    // is done within LowerCallTo, after more information about the call is
    // known.
    LowerCallTo(&I, Callee, I.isTailCall());
}

namespace {

/// AsmOperandInfo - This contains information for each constraint that we are
/// lowering.
class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
public:
  /// CallOperand - If this is the result output operand or a clobber
  /// this is null, otherwise it is the incoming operand to the CallInst.
  /// This gets modified as the asm is processed.
  SDValue CallOperand;

  /// AssignedRegs - If this is a register or register class operand, this
  /// contains the set of registers corresponding to the operand.
  RegsForValue AssignedRegs;

  explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
      : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {}
  /// Whether or not this operand accesses memory.
  bool hasMemory(const TargetLowering &TLI) const {
    // Indirect operand accesses access memory.
    if (isIndirect)
      return true;

    for (const auto &Code : Codes)
      if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
        return true;

    return false;
  }
  /// getCallOperandValEVT - Return the EVT of the Value* that this operand
  /// corresponds to. If there is no Value* for this operand, it returns
  /// MVT::Other.
  EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL) const {
    if (!CallOperandVal) return MVT::Other;

    if (isa<BasicBlock>(CallOperandVal))
      return TLI.getPointerTy(DL);

    llvm::Type *OpTy = CallOperandVal->getType();

    // FIXME: code duplicated from TargetLowering::ParseConstraints().
    // If this is an indirect operand, the operand is a pointer to the
    // accessed type.
    if (isIndirect) {
      PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
      if (!PtrTy)
        report_fatal_error("Indirect operand for inline asm not a pointer!");
      OpTy = PtrTy->getElementType();
    }

    // Look for a vector wrapped in a struct, e.g. { <16 x i8> }.
    if (StructType *STy = dyn_cast<StructType>(OpTy))
      if (STy->getNumElements() == 1)
        OpTy = STy->getElementType(0);
    // If OpTy is not a single value, it may be a struct/union that we
    // can tile with integers.
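    // E.g. a struct of two i16 fields occupies 32 bits and is handed to the
    // register constraint as an i32.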
    if (!OpTy->isSingleValueType() && OpTy->isSized()) {
      unsigned BitSize = DL.getTypeSizeInBits(OpTy);
      switch (BitSize) {
      default: break;
      case 1:
      case 8:
      case 16:
      case 32:
      case 64:
      case 128:
        OpTy = IntegerType::get(Context, BitSize);
        break;
      }
    }

    return TLI.getValueType(DL, OpTy, true);
  }
};
using SDISelAsmOperandInfoVector = SmallVector<SDISelAsmOperandInfo, 16>;

} // end anonymous namespace
/// Make sure that the output operand \p OpInfo and its corresponding input
/// operand \p MatchingOpInfo have compatible constraint types (otherwise error
/// out).
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
                               SDISelAsmOperandInfo &MatchingOpInfo,
                               SelectionDAG &DAG) {
  if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
    return;

  const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
  const auto &TLI = DAG.getTargetLoweringInfo();

  std::pair<unsigned, const TargetRegisterClass *> MatchRC =
      TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
                                       OpInfo.ConstraintVT);
  std::pair<unsigned, const TargetRegisterClass *> InputRC =
      TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
                                       MatchingOpInfo.ConstraintVT);
  if ((OpInfo.ConstraintVT.isInteger() !=
       MatchingOpInfo.ConstraintVT.isInteger()) ||
      (MatchRC.second != InputRC.second)) {
    // FIXME: error out in a more elegant fashion.
    report_fatal_error("Unsupported asm: input constraint"
                       " with a matching output constraint of"
                       " incompatible type!");
  }
  MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
}
/// Get a direct memory input to behave well as an indirect operand.
/// This may introduce stores, hence the need for a \p Chain.
/// \return The (possibly updated) chain.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
                                        SDISelAsmOperandInfo &OpInfo,
                                        SelectionDAG &DAG) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // If we don't have an indirect input, put it in the constpool if we can,
  // otherwise spill it to a stack slot.
  // TODO: This isn't quite right. We need to handle these according to
  // the addressing mode that the constraint wants. Also, this may take
  // an additional register for the computation and we don't want that
  // either.

  // If the operand is a float, integer, or vector constant, spill to a
  // constant pool entry to get its address.
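  // E.g. an i32 constant passed to an "m" operand is materialized in the
  // constant pool and the asm is handed the pool entry's address.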
  const Value *OpVal = OpInfo.CallOperandVal;
  if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
      isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
    OpInfo.CallOperand = DAG.getConstantPool(
        cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
    return Chain;
  }

  // Otherwise, create a stack slot and emit a store to it before the asm.
  Type *Ty = OpVal->getType();
  auto &DL = DAG.getDataLayout();
  uint64_t TySize = DL.getTypeAllocSize(Ty);
  unsigned Align = DL.getPrefTypeAlignment(Ty);
  MachineFunction &MF = DAG.getMachineFunction();
  int SSFI = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
  Chain = DAG.getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
                            MachinePointerInfo::getFixedStack(MF, SSFI),
                            TLI.getMemValueType(DL, Ty));
  OpInfo.CallOperand = StackSlot;

  return Chain;
}
/// GetRegistersForValue - Assign registers (virtual or physical) for the
/// specified operand. We prefer to assign virtual registers, to allow the
/// register allocator to handle the assignment process. However, if the asm
/// uses features that we can't model on machineinstrs, we have SDISel do the
/// allocation. This produces generally horrible, but correct, code.
///
///   OpInfo describes the operand
///   RefOpInfo describes the matching operand if any, the operand otherwise
static void GetRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
                                 SDISelAsmOperandInfo &OpInfo,
                                 SDISelAsmOperandInfo &RefOpInfo) {
  LLVMContext &Context = *DAG.getContext();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  MachineFunction &MF = DAG.getMachineFunction();
  SmallVector<unsigned, 4> Regs;
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  // No work to do for memory operations.
  if (OpInfo.ConstraintType == TargetLowering::C_Memory)
    return;

  // If this is a constraint for a single physreg, or a constraint for a
  // register class, find it.
  unsigned AssignedReg;
  const TargetRegisterClass *RC;
  std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
      &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
  // RC is unset only on failure. Return immediately.
  if (!RC)
    return;

  // Get the actual register value type. This is important, because the user
  // may have asked for (e.g.) the AX register in i32 type. We need to
  // remember that AX is actually i16 to get the right extension.
  const MVT RegVT = *TRI.legalclasstypes_begin(*RC);
  if (OpInfo.ConstraintVT != MVT::Other) {
    // If this is an FP operand in an integer register (or vice versa), or more
    // generally if the operand value disagrees with the register class we plan
    // to stick it in, fix the operand type.
    //
    // If this is an input value, the bitcast to the new type is done now.
    // Bitcast for output value is done at the end of visitInlineAsm().
    if ((OpInfo.Type == InlineAsm::isOutput ||
         OpInfo.Type == InlineAsm::isInput) &&
        !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
      // Try to convert to the first EVT that the reg class contains. If the
      // types are identical size, use a bitcast to convert (e.g. two differing
      // vector types). Note: output bitcast is done at the end of
      // visitInlineAsm().
      if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
        // Exclude indirect inputs while they are unsupported because the code
        // to perform the load is missing and thus OpInfo.CallOperand still
        // refers to the input address rather than the pointed-to value.
        if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
          OpInfo.CallOperand =
              DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
        OpInfo.ConstraintVT = RegVT;
        // If the operand is an FP value and we want it in integer registers,
        // use the corresponding integer type. This turns an f64 value into
        // i64, which can be passed with two i32 values on a 32-bit machine.
      } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
        MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
        if (OpInfo.Type == InlineAsm::isInput)
          OpInfo.CallOperand =
              DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
        OpInfo.ConstraintVT = VT;
      }
    }
  }
  // No need to allocate a matching input constraint since the constraint it's
  // matching to has already been allocated.
  if (OpInfo.isMatchingInputConstraint())
    return;

  EVT ValueVT = OpInfo.ConstraintVT;
  if (OpInfo.ConstraintVT == MVT::Other)
    ValueVT = RegVT;

  // Initialize NumRegs.
  unsigned NumRegs = 1;
  if (OpInfo.ConstraintVT != MVT::Other)
    NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);
  // If this is a constraint for a specific physical register, like {r17},
  // assign it now.

  // If this is associated with a specific register, initialize the iterator
  // to the correct place. If virtual, make sure we have enough registers.

  // Initialize the iterator if necessary.
  TargetRegisterClass::iterator I = RC->begin();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Do not check for single registers.
  if (AssignedReg) {
    for (; *I != AssignedReg; ++I)
      assert(I != RC->end() && "AssignedReg should be member of RC");
  }

  for (; NumRegs; --NumRegs, ++I) {
    assert(I != RC->end() && "Ran out of registers to allocate!");
    Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
    Regs.push_back(R);
  }

  OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
}
static unsigned
findMatchingInlineAsmOperand(unsigned OperandNo,
                             const std::vector<SDValue> &AsmNodeOperands) {
  // Scan until we find the definition we already emitted of this operand.
  unsigned CurOp = InlineAsm::Op_FirstOperand;
  for (; OperandNo; --OperandNo) {
    // Advance to the next operand.
    unsigned OpFlag =
        cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
    assert((InlineAsm::isRegDefKind(OpFlag) ||
            InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
            InlineAsm::isMemKind(OpFlag)) &&
           "Skipped past definitions?");
    CurOp += InlineAsm::getNumOperandRegisters(OpFlag) + 1;
  }
  return CurOp;
}

namespace {

class ExtraFlags {
  unsigned Flags = 0;

public:
  explicit ExtraFlags(ImmutableCallSite CS) {
    const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
    if (IA->hasSideEffects())
      Flags |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      Flags |= InlineAsm::Extra_IsAlignStack;
    if (CS.isConvergent())
      Flags |= InlineAsm::Extra_IsConvergent;
    Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
  }
  void update(const TargetLowering::AsmOperandInfo &OpInfo) {
    // Ideally, we would only check against memory constraints. However, the
    // meaning of an Other constraint can be target-specific and we can't easily
    // reason about it. Therefore, be conservative and set MayLoad/MayStore
    // for Other constraints as well.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
        OpInfo.ConstraintType == TargetLowering::C_Other) {
      if (OpInfo.Type == InlineAsm::isInput)
        Flags |= InlineAsm::Extra_MayLoad;
      else if (OpInfo.Type == InlineAsm::isOutput)
        Flags |= InlineAsm::Extra_MayStore;
      else if (OpInfo.Type == InlineAsm::isClobber)
        Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
    }
  }

  unsigned get() const { return Flags; }
};
} // end anonymous namespace

/// visitInlineAsm - Handle a call to an InlineAsm object.
void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
  const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());

  /// ConstraintOperands - Information about all of the constraints.
  SDISelAsmOperandInfoVector ConstraintOperands;

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
      DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), CS);

  // First Pass: Calculate HasSideEffects and ExtraFlags (AlignStack,
  // AsmDialect, MayLoad, MayStore).
  bool HasSideEffect = IA->hasSideEffects();
  ExtraFlags ExtraInfo(CS);
8118 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
8119 unsigned ResNo = 0; // ResNo - The result number of the next output.
8120 for (auto &T : TargetConstraints) {
8121 ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
8122 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
8124 // Compute the value type for each operand.
8125 if (OpInfo.Type == InlineAsm::isInput ||
8126 (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {
8127 OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));
8129 // Process the call argument. BasicBlocks are labels, currently appearing
8130 // only in asm's.
8131 const Instruction *I = CS.getInstruction();
8132 if (isa<CallBrInst>(I) &&
8133 (ArgNo - 1) >= (cast<CallBrInst>(I)->getNumArgOperands() -
8134 cast<CallBrInst>(I)->getNumIndirectDests())) {
8135 const auto *BA = cast<BlockAddress>(OpInfo.CallOperandVal);
8136 EVT VT = TLI.getValueType(DAG.getDataLayout(), BA->getType(), true);
8137 OpInfo.CallOperand = DAG.getTargetBlockAddress(BA, VT);
8138 } else if (const auto *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
8139 OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
8141 OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
8144 OpInfo.ConstraintVT =
8145 OpInfo
8146 .getCallOperandValEVT(*DAG.getContext(), TLI, DAG.getDataLayout())
8147 .getSimpleVT();
8148 } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
8149 // The return value of the call is this value. As such, there is no
8150 // corresponding argument.
8151 assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
8152 if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
8153 OpInfo.ConstraintVT = TLI.getSimpleValueType(
8154 DAG.getDataLayout(), STy->getElementType(ResNo));
8156 assert(ResNo == 0 && "Asm only has one result!");
8157 OpInfo.ConstraintVT =
8158 TLI.getSimpleValueType(DAG.getDataLayout(), CS.getType());
8162 OpInfo.ConstraintVT = MVT::Other;
8165 if (!HasSideEffect)
8166 HasSideEffect = OpInfo.hasMemory(TLI);
8168 // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
8169 // FIXME: Could we compute this on OpInfo rather than T?
8171 // Compute the constraint code and ConstraintType to use.
8172 TLI.ComputeConstraintToUse(T, SDValue());
8174 if (T.ConstraintType == TargetLowering::C_Immediate &&
8175 OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
8176 // We've delayed emitting a diagnostic, as we do for the "n" constraint,
8177 // because inlining could cause an integer to show up.
8178 return emitInlineAsmError(
8179 CS, "constraint '" + Twine(T.ConstraintCode) + "' expects an "
8180 "integer constant expression");
8182 ExtraInfo.update(T);
8186 // We won't need to flush pending loads if this asm doesn't touch
8187 // memory and is nonvolatile.
8188 SDValue Flag, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot();
8190 bool IsCallBr = isa<CallBrInst>(CS.getInstruction());
8191 if (IsCallBr) {
8192 // If this is a callbr we need to flush pending exports since inlineasm_br
8193 // is a terminator. We need to do this before nodes are glued to
8194 // the inlineasm_br node.
8195 Chain = getControlRoot();
8196 }
8198 // Second pass over the constraints: compute which constraint option to use.
8199 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
8200 // If this is an output operand with a matching input operand, look up the
8201 // matching input. If their types mismatch, e.g. one is an integer, the
8202 // other is floating point, or their sizes are different, flag it as an
8203 // error.
8204 if (OpInfo.hasMatchingInput()) {
8205 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
8206 patchMatchingInput(OpInfo, Input, DAG);
8209 // Compute the constraint code and ConstraintType to use.
8210 TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
8212 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
8213 OpInfo.Type == InlineAsm::isClobber)
8216 // If this is a memory input, and if the operand is not indirect, do what we
8217 // need to provide an address for the memory input.
8218 if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
8219 !OpInfo.isIndirect) {
8220 assert((OpInfo.isMultipleAlternative ||
8221 (OpInfo.Type == InlineAsm::isInput)) &&
8222 "Can only indirectify direct input operands!");
8224 // Memory operands really want the address of the value.
8225 Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
8227 // There is no longer a Value* corresponding to this operand.
8228 OpInfo.CallOperandVal = nullptr;
8230 // It is now an indirect operand.
8231 OpInfo.isIndirect = true;
8236 // AsmNodeOperands - The operands for the ISD::INLINEASM node.
8237 std::vector<SDValue> AsmNodeOperands;
8238 AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
8239 AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
8240 IA->getAsmString().c_str(), TLI.getPointerTy(DAG.getDataLayout())));
8242 // If we have a !srcloc metadata node associated with it, we want to attach
8243 // this to the ultimately generated inline asm machineinstr. To do this, we
8244 // pass in the third operand as this (potentially null) inline asm MDNode.
8245 const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
8246 AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
8248 // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
8249 // bits as operand 3.
8250 AsmNodeOperands.push_back(DAG.getTargetConstant(
8251 ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
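// At this point the operand list is laid out as: 0 = input chain (patched in
// at the end), 1 = asm string, 2 = !srcloc MDNode, 3 = extra-info word; the
// per-operand flag/value groups built below start at
// InlineAsm::Op_FirstOperand.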
8253 // Third pass: Loop over operands to prepare DAG-level operands. As part of
8254 // this, assign virtual and physical registers for inputs and outputs.
8255 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
8256 // Assign Registers.
8257 SDISelAsmOperandInfo &RefOpInfo =
8258 OpInfo.isMatchingInputConstraint()
8259 ? ConstraintOperands[OpInfo.getMatchedOperand()]
8261 GetRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);
8263 switch (OpInfo.Type) {
8264 case InlineAsm::isOutput:
8265 if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
8266 unsigned ConstraintID =
8267 TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
8268 assert(ConstraintID != InlineAsm::Constraint_Unknown &&
8269 "Failed to convert memory constraint code to constraint id.");
8271 // Add information to the INLINEASM node to know about this output.
8272 unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
8273 OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
8274 AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
8275 MVT::i32));
8276 AsmNodeOperands.push_back(OpInfo.CallOperand);
8278 // Otherwise, this outputs to a register (directly for C_Register /
8279 // C_RegisterClass, and in a target-defined fashion for
8280 // C_Immediate/C_Other). Find a register that we can use.
8281 if (OpInfo.AssignedRegs.Regs.empty()) {
8283 CS, "couldn't allocate output register for constraint '" +
8284 Twine(OpInfo.ConstraintCode) + "'");
8288 // Add information to the INLINEASM node to know that this register is
8289 // set.
8290 OpInfo.AssignedRegs.AddInlineAsmOperands(
8291 OpInfo.isEarlyClobber ? InlineAsm::Kind_RegDefEarlyClobber
8292 : InlineAsm::Kind_RegDef,
8293 false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
8297 case InlineAsm::isInput: {
8298 SDValue InOperandVal = OpInfo.CallOperand;
8300 if (OpInfo.isMatchingInputConstraint()) {
8301 // If this is required to match an output register we have already set,
8302 // just use its register.
8303 auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
8305 unsigned OpFlag =
8306 cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
8307 if (InlineAsm::isRegDefKind(OpFlag) ||
8308 InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
8309 // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
8310 if (OpInfo.isIndirect) {
8311 // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
8312 emitInlineAsmError(CS, "inline asm not supported yet:"
8313 " don't know how to handle tied "
8314 "indirect register inputs");
8318 MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
8319 SmallVector<unsigned, 4> Regs;
8321 if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT)) {
8322 unsigned NumRegs = InlineAsm::getNumOperandRegisters(OpFlag);
8323 MachineRegisterInfo &RegInfo =
8324 DAG.getMachineFunction().getRegInfo();
8325 for (unsigned i = 0; i != NumRegs; ++i)
8326 Regs.push_back(RegInfo.createVirtualRegister(RC));
8328 emitInlineAsmError(CS, "inline asm error: This value type register "
8329 "class is not natively supported!");
8333 RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
8335 SDLoc dl = getCurSDLoc();
8336 // Use the produced MatchedRegs object to copy the input value into the
8337 // matched registers.
8337 MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag,
8338 CS.getInstruction());
8339 MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
8340 true, OpInfo.getMatchedOperand(), dl,
8341 DAG, AsmNodeOperands);
8345 assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
8346 assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
8347 "Unexpected number of operands");
8348 // Add information to the INLINEASM node to know about this input.
8349 // See InlineAsm.h isUseOperandTiedToDef.
8350 OpFlag = InlineAsm::convertMemFlagWordToMatchingFlagWord(OpFlag);
8351 OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
8352 OpInfo.getMatchedOperand());
8353 AsmNodeOperands.push_back(DAG.getTargetConstant(
8354 OpFlag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
8355 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
8359 // Treat indirect 'X' constraint as memory.
8360 if (OpInfo.ConstraintType == TargetLowering::C_Other &&
8361 OpInfo.isIndirect)
8362 OpInfo.ConstraintType = TargetLowering::C_Memory;
8364 if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
8365 OpInfo.ConstraintType == TargetLowering::C_Other) {
8366 std::vector<SDValue> Ops;
8367 TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
8370 if (OpInfo.ConstraintType == TargetLowering::C_Immediate)
8371 if (isa<ConstantSDNode>(InOperandVal)) {
8372 emitInlineAsmError(CS, "value out of range for constraint '" +
8373 Twine(OpInfo.ConstraintCode) + "'");
8377 emitInlineAsmError(CS, "invalid operand for inline asm constraint '" +
8378 Twine(OpInfo.ConstraintCode) + "'");
8382 // Add information to the INLINEASM node to know about this input.
8383 unsigned ResOpType =
8384 InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
8385 AsmNodeOperands.push_back(DAG.getTargetConstant(
8386 ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
8387 AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
8391 if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
8392 assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
8393 assert(InOperandVal.getValueType() ==
8394 TLI.getPointerTy(DAG.getDataLayout()) &&
8395 "Memory operands expect pointer values");
8397 unsigned ConstraintID =
8398 TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
8399 assert(ConstraintID != InlineAsm::Constraint_Unknown &&
8400 "Failed to convert memory constraint code to constraint id.");
8402 // Add information to the INLINEASM node to know about this input.
8403 unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
8404 ResOpType = InlineAsm::getFlagWordForMem(ResOpType, ConstraintID);
8405 AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
8406 getCurSDLoc(),
8407 MVT::i32));
8408 AsmNodeOperands.push_back(InOperandVal);
8412 assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
8413 OpInfo.ConstraintType == TargetLowering::C_Register) &&
8414 "Unknown constraint type!");
8416 // TODO: Support this.
8417 if (OpInfo.isIndirect) {
8419 CS, "Don't know how to handle indirect register inputs yet "
8420 "for constraint '" +
8421 Twine(OpInfo.ConstraintCode) + "'");
8425 // Copy the input into the appropriate registers.
8426 if (OpInfo.AssignedRegs.Regs.empty()) {
8427 emitInlineAsmError(CS, "couldn't allocate input reg for constraint '" +
8428 Twine(OpInfo.ConstraintCode) + "'");
8432 SDLoc dl = getCurSDLoc();
8434 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl,
8435 Chain, &Flag, CS.getInstruction());
8437 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
8438 dl, DAG, AsmNodeOperands);
8441 case InlineAsm::isClobber:
8442 // Add the clobbered value to the operand list, so that the register
8443 // allocator is aware that the physreg got clobbered.
8444 if (!OpInfo.AssignedRegs.Regs.empty())
8445 OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
8446 false, 0, getCurSDLoc(), DAG,
8452 // Finish up input operands. Set the input chain and add the flag last.
8453 AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
8454 if (Flag.getNode()) AsmNodeOperands.push_back(Flag);
8456 unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
8457 Chain = DAG.getNode(ISDOpc, getCurSDLoc(),
8458 DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
8459 Flag = Chain.getValue(1);
8461 // Do additional work to generate outputs.
8463 SmallVector<EVT, 1> ResultVTs;
8464 SmallVector<SDValue, 1> ResultValues;
8465 SmallVector<SDValue, 8> OutChains;
8467 llvm::Type *CSResultType = CS.getType();
8468 ArrayRef<Type *> ResultTypes;
8469 if (StructType *StructResult = dyn_cast<StructType>(CSResultType))
8470 ResultTypes = StructResult->elements();
8471 else if (!CSResultType->isVoidTy())
8472 ResultTypes = makeArrayRef(CSResultType);
8474 auto CurResultType = ResultTypes.begin();
8475 auto handleRegAssign = [&](SDValue V) {
8476 assert(CurResultType != ResultTypes.end() && "Unexpected value");
8477 assert((*CurResultType)->isSized() && "Unexpected unsized type");
8478 EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType);
8480 // If the type of the inline asm call site return value is different but has
8481 // the same size as the type of the asm output, bitcast it. One example of
8482 // this is for vectors with different width / number of elements. This can
8483 // happen for register classes that can contain multiple different value
8484 // types. The preg or vreg allocated may not have the same VT as was
8485 // expected.
8487 // This can also happen for a return value that disagrees with the register
8488 // class it is put in, e.g. a double in a general-purpose register on a
8489 // 32-bit machine.
8490 if (ResultVT != V.getValueType() &&
8491 ResultVT.getSizeInBits() == V.getValueSizeInBits())
8492 V = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, V);
8493 else if (ResultVT != V.getValueType() && ResultVT.isInteger() &&
8494 V.getValueType().isInteger()) {
8495 // If a result value was tied to an input value, the computed result
8496 // may have a wider width than the expected result. Extract the
8497 // relevant portion.
8498 V = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, V);
8500 assert(ResultVT == V.getValueType() && "Asm result value mismatch!");
8501 ResultVTs.push_back(ResultVT);
8502 ResultValues.push_back(V);
8505 // Deal with output operands.
8506 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
8507 if (OpInfo.Type == InlineAsm::isOutput) {
8509 // Skip trivial output operands.
8510 if (OpInfo.AssignedRegs.Regs.empty())
8513 switch (OpInfo.ConstraintType) {
8514 case TargetLowering::C_Register:
8515 case TargetLowering::C_RegisterClass:
8516 Val = OpInfo.AssignedRegs.getCopyFromRegs(
8517 DAG, FuncInfo, getCurSDLoc(), Chain, &Flag, CS.getInstruction());
8519 case TargetLowering::C_Immediate:
8520 case TargetLowering::C_Other:
8521 Val = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(),
8524 case TargetLowering::C_Memory:
8525 break; // Already handled.
8526 case TargetLowering::C_Unknown:
8527 assert(false && "Unexpected unknown constraint");
8530 // Indirect outputs manifest as stores. Record output chains.
8531 if (OpInfo.isIndirect) {
8532 const Value *Ptr = OpInfo.CallOperandVal;
8533 assert(Ptr && "Expected value CallOperandVal for indirect asm operand");
8534 SDValue Store = DAG.getStore(Chain, getCurSDLoc(), Val, getValue(Ptr),
8535 MachinePointerInfo(Ptr));
8536 OutChains.push_back(Store);
8538 // Generate CopyFromRegs to the associated registers.
8539 assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
8540 if (Val.getOpcode() == ISD::MERGE_VALUES) {
8541 for (const SDValue &V : Val->op_values())
8544 handleRegAssign(Val);
8550 if (!ResultValues.empty()) {
8551 assert(CurResultType == ResultTypes.end() &&
8552 "Mismatch in number of ResultTypes");
8553 assert(ResultValues.size() == ResultTypes.size() &&
8554 "Mismatch in number of output operands in asm result");
8556 SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
8557 DAG.getVTList(ResultVTs), ResultValues);
8558 setValue(CS.getInstruction(), V);
8561 // Collect store chains.
8562 if (!OutChains.empty())
8563 Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
8565 // Only update the root if the inline assembly has a memory effect.
8566 if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr)
8570 void SelectionDAGBuilder::emitInlineAsmError(ImmutableCallSite CS,
8571 const Twine &Message) {
8572 LLVMContext &Ctx = *DAG.getContext();
8573 Ctx.emitError(CS.getInstruction(), Message);
8575 // Make sure we leave the DAG in a valid state
8576 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8577 SmallVector<EVT, 1> ValueVTs;
8578 ComputeValueVTs(TLI, DAG.getDataLayout(), CS->getType(), ValueVTs);
8580 if (ValueVTs.empty())
8583 SmallVector<SDValue, 1> Ops;
8584 for (unsigned i = 0, e = ValueVTs.size(); i != e; ++i)
8585 Ops.push_back(DAG.getUNDEF(ValueVTs[i]));
8587 setValue(CS.getInstruction(), DAG.getMergeValues(Ops, getCurSDLoc()));
8590 void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
8591 DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
8592 MVT::Other, getRoot(),
8593 getValue(I.getArgOperand(0)),
8594 DAG.getSrcValue(I.getArgOperand(0))));
8597 void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
8598 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8599 const DataLayout &DL = DAG.getDataLayout();
8600 SDValue V = DAG.getVAArg(
8601 TLI.getMemValueType(DAG.getDataLayout(), I.getType()), getCurSDLoc(),
8602 getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)),
8603 DL.getABITypeAlignment(I.getType()));
8604 DAG.setRoot(V.getValue(1));
8606 if (I.getType()->isPointerTy())
8607 V = DAG.getPtrExtOrTrunc(
8608 V, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()));
8612 void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
8613 DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
8614 MVT::Other, getRoot(),
8615 getValue(I.getArgOperand(0)),
8616 DAG.getSrcValue(I.getArgOperand(0))));
8619 void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
8620 DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
8621 MVT::Other, getRoot(),
8622 getValue(I.getArgOperand(0)),
8623 getValue(I.getArgOperand(1)),
8624 DAG.getSrcValue(I.getArgOperand(0)),
8625 DAG.getSrcValue(I.getArgOperand(1))));
8628 SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
8629 const Instruction &I,
8631 const MDNode *Range = I.getMetadata(LLVMContext::MD_range);
8635 ConstantRange CR = getConstantRangeFromMetadata(*Range);
8636 if (CR.isFullSet() || CR.isEmptySet() || CR.isUpperWrapped())
8639 APInt Lo = CR.getUnsignedMin();
8640 if (!Lo.isMinValue())
8643 APInt Hi = CR.getUnsignedMax();
8644 unsigned Bits = std::max(Hi.getActiveBits(),
8645 static_cast<unsigned>(IntegerType::MIN_INT_BITS));
8647 EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
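// Worked example: !range !{i64 0, i64 200} yields CR = [0, 200), so Lo == 0,
// Hi == 199, Hi.getActiveBits() == 8, and the value is annotated with an
// AssertZext of i8.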
8649 SDLoc SL = getCurSDLoc();
8651 SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
8652 DAG.getValueType(SmallVT));
8653 unsigned NumVals = Op.getNode()->getNumValues();
8654 if (NumVals == 1)
8655 return ZExt;
8657 SmallVector<SDValue, 4> Ops;
8659 Ops.push_back(ZExt);
8660 for (unsigned I = 1; I != NumVals; ++I)
8661 Ops.push_back(Op.getValue(I));
8663 return DAG.getMergeValues(Ops, SL);
8666 /// Populate a CallLoweringInfo (into \p CLI) based on the properties of
8667 /// the call being lowered.
8669 /// This is a helper for lowering intrinsics that follow a target calling
8670 /// convention or require stack pointer adjustment. Only a subset of the
8671 /// intrinsic's operands need to participate in the calling convention.
8672 void SelectionDAGBuilder::populateCallLoweringInfo(
8673 TargetLowering::CallLoweringInfo &CLI, const CallBase *Call,
8674 unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
8675 bool IsPatchPoint) {
8676 TargetLowering::ArgListTy Args;
8677 Args.reserve(NumArgs);
8679 // Populate the argument list.
8680 // Attributes for args start at offset 1, after the return attribute.
8681 for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
8682 ArgI != ArgE; ++ArgI) {
8683 const Value *V = Call->getOperand(ArgI);
8685 assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
8687 TargetLowering::ArgListEntry Entry;
8688 Entry.Node = getValue(V);
8689 Entry.Ty = V->getType();
8690 Entry.setAttributes(Call, ArgI);
8691 Args.push_back(Entry);
8694 CLI.setDebugLoc(getCurSDLoc())
8695 .setChain(getRoot())
8696 .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args))
8697 .setDiscardResult(Call->use_empty())
8698 .setIsPatchPoint(IsPatchPoint);
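// Typical use (see visitPatchpoint below): lower only the calling-convention
// operands of a patchpoint, skipping its meta operands:
//   populateCallLoweringInfo(CLI, Call, NumMetaOpers, NumCallArgs, Callee,
//                            ReturnTy, /*IsPatchPoint=*/true);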
8701 /// Add a stack map intrinsic call's live variable operands to a stackmap
8702 /// or patchpoint target node's operand list.
8704 /// Constants are converted to TargetConstants purely as an optimization to
8705 /// avoid constant materialization and register allocation.
8707 /// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
8708 /// generate address computation nodes, and so FinalizeISel can convert the
8709 /// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
8710 /// address materialization and register allocation, but may also be required
8711 /// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
8712 /// alloca in the entry block, then the runtime may assume that the alloca's
8713 /// StackMap location can be read immediately after compilation and that the
8714 /// location is valid at any point during execution (this is similar to the
8715 /// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
8716 /// only available in a register, then the runtime would need to trap when
8717 /// execution reaches the StackMap in order to read the alloca's location.
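/// For example, a live i32 constant 42 is pushed as the operand pair
/// (StackMaps::ConstantOp, 42) instead of being materialized in a register,
/// and an alloca's frame index becomes a TargetFrameIndex that FinalizeISel
/// later turns into a DirectMemRefOp location.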
8718 static void addStackMapLiveVars(ImmutableCallSite CS, unsigned StartIdx,
8719 const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
8720 SelectionDAGBuilder &Builder) {
8721 for (unsigned i = StartIdx, e = CS.arg_size(); i != e; ++i) {
8722 SDValue OpVal = Builder.getValue(CS.getArgument(i));
8723 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
8725 Builder.DAG.getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
8727 Builder.DAG.getTargetConstant(C->getSExtValue(), DL, MVT::i64));
8728 } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
8729 const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
8730 Ops.push_back(Builder.DAG.getTargetFrameIndex(
8731 FI->getIndex(), TLI.getFrameIndexTy(Builder.DAG.getDataLayout())));
8733 Ops.push_back(OpVal);
8737 /// Lower llvm.experimental.stackmap directly to its target opcode.
8738 void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
8739 // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
8740 // [live variables...])
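// For example (illustrative values):
//   call void (i64, i32, ...) @llvm.experimental.stackmap(i64 42, i32 8,
//                                                         i32 %x, i64 %y)
// records the locations of %x and %y under ID 42 and reserves 8 bytes of
// shadow nops at the call point.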
8742 assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
8744 SDValue Chain, InFlag, Callee, NullPtr;
8745 SmallVector<SDValue, 32> Ops;
8747 SDLoc DL = getCurSDLoc();
8748 Callee = getValue(CI.getCalledValue());
8749 NullPtr = DAG.getIntPtrConstant(0, DL, true);
8751 // The stackmap intrinsic only records the live variables (the arguments
8752 // passed to it) and emits NOPs (if requested). Unlike the patchpoint
8753 // intrinsic, this won't be lowered to a function call. This means we don't
8754 // have to worry about calling conventions and target specific lowering code.
8755 // Instead we perform the call lowering right here.
8757 // chain, flag = CALLSEQ_START(chain, 0, 0)
8758 // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
8759 // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
8761 Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
8762 InFlag = Chain.getValue(1);
8764 // Add the <id> and <numBytes> constants.
8765 SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
8766 Ops.push_back(DAG.getTargetConstant(
8767 cast<ConstantSDNode>(IDVal)->getZExtValue(), DL, MVT::i64));
8768 SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
8769 Ops.push_back(DAG.getTargetConstant(
8770 cast<ConstantSDNode>(NBytesVal)->getZExtValue(), DL,
8771 MVT::i32));
8773 // Push live variables for the stack map.
8774 addStackMapLiveVars(&CI, 2, DL, Ops, *this);
8776 // We are not pushing any register mask info here on the operands list,
8777 // because the stackmap doesn't clobber anything.
8779 // Push the chain and the glue flag.
8780 Ops.push_back(Chain);
8781 Ops.push_back(InFlag);
8783 // Create the STACKMAP node.
8784 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8785 SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
8786 Chain = SDValue(SM, 0);
8787 InFlag = Chain.getValue(1);
8789 Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);
8791 // Stackmaps don't generate values, so nothing goes into the NodeMap.
8793 // Set the root to the target-lowered call chain.
8794 DAG.setRoot(Chain);
8796 // Inform the Frame Information that we have a stackmap in this function.
8797 FuncInfo.MF->getFrameInfo().setHasStackMap();
8800 /// Lower llvm.experimental.patchpoint directly to its target opcode.
8801 void SelectionDAGBuilder::visitPatchpoint(ImmutableCallSite CS,
8802 const BasicBlock *EHPadBB) {
8803 // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
8804 //                                                  i32 <numBytes>,
8805 //                                                  i8* <target>,
8806 //                                                  i32 <numArgs>,
8807 //                                                  [Args...],
8808 //                                                  [live variables...])
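// For example (illustrative values):
//   %r = call i64 (i64, i32, i8*, i32, ...)
//          @llvm.experimental.patchpoint.i64(i64 3, i32 12, i8* %target,
//                                            i32 2, i64 %a, i64 %b)
// calls %target with %a and %b under the usual CC and reserves 12 bytes at
// the call site for later patching.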
8810 CallingConv::ID CC = CS.getCallingConv();
8811 bool IsAnyRegCC = CC == CallingConv::AnyReg;
8812 bool HasDef = !CS->getType()->isVoidTy();
8813 SDLoc dl = getCurSDLoc();
8814 SDValue Callee = getValue(CS->getOperand(PatchPointOpers::TargetPos));
8816 // Handle immediate and symbolic callees.
8817 if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
8818 Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
8820 else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
8821 Callee = DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
8822 SDLoc(SymbolicCallee),
8823 SymbolicCallee->getValueType(0));
8825 // Get the real number of arguments participating in the call <numArgs>
8826 SDValue NArgVal = getValue(CS.getArgument(PatchPointOpers::NArgPos));
8827 unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();
8829 // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
8830 // Intrinsics include all meta-operands up to but not including CC.
8831 unsigned NumMetaOpers = PatchPointOpers::CCPos;
8832 assert(CS.arg_size() >= NumMetaOpers + NumArgs &&
8833 "Not enough arguments provided to the patchpoint intrinsic");
8835 // For AnyRegCC the arguments are lowered later on manually.
8836 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
8837 Type *ReturnTy =
8838 IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CS->getType();
8840 TargetLowering::CallLoweringInfo CLI(DAG);
8841 populateCallLoweringInfo(CLI, cast<CallBase>(CS.getInstruction()),
8842 NumMetaOpers, NumCallArgs, Callee, ReturnTy, true);
8843 std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
8845 SDNode *CallEnd = Result.second.getNode();
8846 if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
8847 CallEnd = CallEnd->getOperand(0).getNode();
8849 // Get a call instruction from the call sequence chain.
8850 // Tail calls are not allowed.
8851 assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
8852 "Expected a callseq node.");
8853 SDNode *Call = CallEnd->getOperand(0).getNode();
8854 bool HasGlue = Call->getGluedNode();
8856 // Replace the target specific call node with the patchable intrinsic.
8857 SmallVector<SDValue, 8> Ops;
8859 // Add the <id> and <numBytes> constants.
8860 SDValue IDVal = getValue(CS->getOperand(PatchPointOpers::IDPos));
8861 Ops.push_back(DAG.getTargetConstant(
8862 cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
8863 SDValue NBytesVal = getValue(CS->getOperand(PatchPointOpers::NBytesPos));
8864 Ops.push_back(DAG.getTargetConstant(
8865 cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
8866 MVT::i32));
8869 Ops.push_back(Callee);
8871 // Adjust <numArgs> to account for any arguments that have been passed on
8872 // the stack instead.
8873 // Call Node: Chain, Target, {Args}, RegMask, [Glue]
8874 unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
8875 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
8876 Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
8878 // Add the calling convention
8879 Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
8881 // Add the arguments we omitted previously. The register allocator should
8882 // place these in any free register.
8884 for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
8885 Ops.push_back(getValue(CS.getArgument(i)));
8887 // Push the arguments from the call instruction up to the register mask.
8888 SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
8889 Ops.append(Call->op_begin() + 2, e);
8891 // Push live variables for the stack map.
8892 addStackMapLiveVars(CS, NumMetaOpers + NumArgs, dl, Ops, *this);
8894 // Push the register mask info.
8895 if (HasGlue)
8896 Ops.push_back(*(Call->op_end()-2));
8897 else
8898 Ops.push_back(*(Call->op_end()-1));
8900 // Push the chain (this is originally the first operand of the call, but
8901 // now becomes the last or second-to-last operand).
8902 Ops.push_back(*(Call->op_begin()));
8904 // Push the glue flag (last operand).
8905 if (HasGlue)
8906 Ops.push_back(*(Call->op_end()-1));
8909 if (IsAnyRegCC && HasDef) {
8910 // Create the return types based on the intrinsic definition
8911 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8912 SmallVector<EVT, 3> ValueVTs;
8913 ComputeValueVTs(TLI, DAG.getDataLayout(), CS->getType(), ValueVTs);
8914 assert(ValueVTs.size() == 1 && "Expected only one return value type.");
8916 // There is always a chain and a glue type at the end
8917 ValueVTs.push_back(MVT::Other);
8918 ValueVTs.push_back(MVT::Glue);
8919 NodeTys = DAG.getVTList(ValueVTs);
8921 NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8923 // Replace the target specific call node with a PATCHPOINT node.
8924 MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT,
8927 // Update the NodeMap.
8930 setValue(CS.getInstruction(), SDValue(MN, 0));
8932 setValue(CS.getInstruction(), Result.first);
8935 // Fixup the consumers of the intrinsic. The chain and glue may be used in the
8936 // call sequence. Furthermore the location of the chain and glue can change
8937 // when the AnyReg calling convention is used and the intrinsic returns a
8938 // value.
8939 if (IsAnyRegCC && HasDef) {
8940 SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
8941 SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)};
8942 DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
8944 DAG.ReplaceAllUsesWith(Call, MN);
8945 DAG.DeleteNode(Call);
8947 // Inform the Frame Information that we have a patchpoint in this function.
8948 FuncInfo.MF->getFrameInfo().setHasPatchPoint();
8951 void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
8952 unsigned Intrinsic) {
8953 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8954 SDValue Op1 = getValue(I.getArgOperand(0));
8955 SDValue Op2;
8956 if (I.getNumArgOperands() > 1)
8957 Op2 = getValue(I.getArgOperand(1));
8958 SDLoc dl = getCurSDLoc();
8959 EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
8960 SDValue Res;
8961 FastMathFlags FMF;
8962 if (isa<FPMathOperator>(I))
8963 FMF = I.getFastMathFlags();
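// For example (a sketch of the fadd case below): a reassoc-flagged
//   call float @llvm.experimental.vector.reduce.v2.fadd.f32.v4f32(
//            float %acc, <4 x float> %v)
// becomes FADD(%acc, VECREDUCE_FADD(%v)), while without reassoc it is kept
// as the strict, in-order VECREDUCE_STRICT_FADD(%acc, %v).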
8965 switch (Intrinsic) {
8966 case Intrinsic::experimental_vector_reduce_v2_fadd:
8967 if (FMF.allowReassoc())
8968 Res = DAG.getNode(ISD::FADD, dl, VT, Op1,
8969 DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2));
8971 Res = DAG.getNode(ISD::VECREDUCE_STRICT_FADD, dl, VT, Op1, Op2);
8973 case Intrinsic::experimental_vector_reduce_v2_fmul:
8974 if (FMF.allowReassoc())
8975 Res = DAG.getNode(ISD::FMUL, dl, VT, Op1,
8976 DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2));
8978 Res = DAG.getNode(ISD::VECREDUCE_STRICT_FMUL, dl, VT, Op1, Op2);
8980 case Intrinsic::experimental_vector_reduce_add:
8981 Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
8983 case Intrinsic::experimental_vector_reduce_mul:
8984 Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
8986 case Intrinsic::experimental_vector_reduce_and:
8987 Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
8989 case Intrinsic::experimental_vector_reduce_or:
8990 Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
8992 case Intrinsic::experimental_vector_reduce_xor:
8993 Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
8995 case Intrinsic::experimental_vector_reduce_smax:
8996 Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
8998 case Intrinsic::experimental_vector_reduce_smin:
8999 Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
9001 case Intrinsic::experimental_vector_reduce_umax:
9002 Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
9004 case Intrinsic::experimental_vector_reduce_umin:
9005 Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
9007 case Intrinsic::experimental_vector_reduce_fmax:
9008 Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1);
9010 case Intrinsic::experimental_vector_reduce_fmin:
9011 Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1);
9014 llvm_unreachable("Unhandled vector reduce intrinsic");
9019 /// Returns an AttributeList representing the attributes applied to the return
9020 /// value of the given call.
9021 static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
9022 SmallVector<Attribute::AttrKind, 2> Attrs;
9023 if (CLI.RetSExt)
9024 Attrs.push_back(Attribute::SExt);
9025 if (CLI.RetZExt)
9026 Attrs.push_back(Attribute::ZExt);
9027 if (CLI.IsInReg)
9028 Attrs.push_back(Attribute::InReg);
9030 return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
9031 Attrs);
9034 /// TargetLowering::LowerCallTo - This is the default LowerCallTo
9035 /// implementation, which just calls LowerCall.
9036 /// FIXME: When all targets are
9037 /// migrated to using LowerCall, this hook should be integrated into SDISel.
9038 std::pair<SDValue, SDValue>
9039 TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
9040 // Handle the incoming return values from the call.
9042 Type *OrigRetTy = CLI.RetTy;
9043 SmallVector<EVT, 4> RetTys;
9044 SmallVector<uint64_t, 4> Offsets;
9045 auto &DL = CLI.DAG.getDataLayout();
9046 ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);
9048 if (CLI.IsPostTypeLegalization) {
9049 // If we are lowering a libcall after legalization, split the return type.
9050 SmallVector<EVT, 4> OldRetTys;
9051 SmallVector<uint64_t, 4> OldOffsets;
9052 RetTys.swap(OldRetTys);
9053 Offsets.swap(OldOffsets);
9055 for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
9056 EVT RetVT = OldRetTys[i];
9057 uint64_t Offset = OldOffsets[i];
9058 MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
9059 unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
9060 unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
9061 RetTys.append(NumRegs, RegisterVT);
9062 for (unsigned j = 0; j != NumRegs; ++j)
9063 Offsets.push_back(Offset + j * RegisterVTByteSZ);
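// Worked example: an i128 libcall return on a target with 64-bit registers
// is rewritten here as RetTys = {i64, i64} with Offsets = {0, 8}.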
9067 SmallVector<ISD::OutputArg, 4> Outs;
9068 GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
9070 bool CanLowerReturn =
9071 this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
9072 CLI.IsVarArg, Outs, CLI.RetTy->getContext());
9074 SDValue DemoteStackSlot;
9075 int DemoteStackIdx = -100;
9076 if (!CanLowerReturn) {
9077 // FIXME: equivalent assert?
9078 // assert(!CS.hasInAllocaArgument() &&
9079 // "sret demotion is incompatible with inalloca");
9080 uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
9081 unsigned Align = DL.getPrefTypeAlignment(CLI.RetTy);
9082 MachineFunction &MF = CLI.DAG.getMachineFunction();
9083 DemoteStackIdx = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
9084 Type *StackSlotPtrType = PointerType::get(CLI.RetTy,
9085 DL.getAllocaAddrSpace());
9087 DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
9088 ArgListEntry Entry;
9089 Entry.Node = DemoteStackSlot;
9090 Entry.Ty = StackSlotPtrType;
9091 Entry.IsSExt = false;
9092 Entry.IsZExt = false;
9093 Entry.IsInReg = false;
9094 Entry.IsSRet = true;
9095 Entry.IsNest = false;
9096 Entry.IsByVal = false;
9097 Entry.IsReturned = false;
9098 Entry.IsSwiftSelf = false;
9099 Entry.IsSwiftError = false;
9100 Entry.IsCFGuardTarget = false;
9101 Entry.Alignment = Align;
9102 CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
9103 CLI.NumFixedArgs += 1;
9104 CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
9106 // sret demotion isn't compatible with tail-calls, since the sret argument
9107 // points into the caller's stack frame.
9108 CLI.IsTailCall = false;
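// Sketch of the demotion: a call "%s = call %big @f()" whose result cannot
// be returned in registers is lowered as if it were
// "call void @f(%big* sret %slot)", and the result is reassembled below by
// loading from the demoted stack slot.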
9110 bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
9111 CLI.RetTy, CLI.CallConv, CLI.IsVarArg);
9112 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
9113 ISD::ArgFlagsTy Flags;
9114 if (NeedsRegBlock) {
9115 Flags.setInConsecutiveRegs();
9116 if (I == RetTys.size() - 1)
9117 Flags.setInConsecutiveRegsLast();
9119 EVT VT = RetTys[I];
9120 MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
9121 CLI.CallConv, VT);
9122 unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
9123 CLI.CallConv, VT);
9124 for (unsigned i = 0; i != NumRegs; ++i) {
9125 ISD::InputArg MyFlags;
9126 MyFlags.Flags = Flags;
9127 MyFlags.VT = RegisterVT;
9128 MyFlags.ArgVT = VT;
9129 MyFlags.Used = CLI.IsReturnValueUsed;
9130 if (CLI.RetTy->isPointerTy()) {
9131 MyFlags.Flags.setPointer();
9132 MyFlags.Flags.setPointerAddrSpace(
9133 cast<PointerType>(CLI.RetTy)->getAddressSpace());
9135 if (CLI.RetSExt)
9136 MyFlags.Flags.setSExt();
9137 if (CLI.RetZExt)
9138 MyFlags.Flags.setZExt();
9139 if (CLI.IsInReg)
9140 MyFlags.Flags.setInReg();
9141 CLI.Ins.push_back(MyFlags);
9146 // We push in swifterror return as the last element of CLI.Ins.
9147 ArgListTy &Args = CLI.getArgs();
9148 if (supportSwiftError()) {
9149 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
9150 if (Args[i].IsSwiftError) {
9151 ISD::InputArg MyFlags;
9152 MyFlags.VT = getPointerTy(DL);
9153 MyFlags.ArgVT = EVT(getPointerTy(DL));
9154 MyFlags.Flags.setSwiftError();
9155 CLI.Ins.push_back(MyFlags);
9160 // Handle all of the outgoing arguments.
9161 CLI.Outs.clear();
9162 CLI.OutVals.clear();
9163 for (unsigned i = 0, e = Args.size(); i != e; ++i) {
9164 SmallVector<EVT, 4> ValueVTs;
9165 ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
9166 // FIXME: Split arguments if CLI.IsPostTypeLegalization
9167 Type *FinalType = Args[i].Ty;
9168 if (Args[i].IsByVal)
9169 FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
9170 bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
9171 FinalType, CLI.CallConv, CLI.IsVarArg);
9172 for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
9174 EVT VT = ValueVTs[Value];
9175 Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
9176 SDValue Op = SDValue(Args[i].Node.getNode(),
9177 Args[i].Node.getResNo() + Value);
9178 ISD::ArgFlagsTy Flags;
9180 // Certain targets (such as MIPS) may have a different ABI alignment
9181 // for a type depending on the context. Give the target a chance to
9182 // specify the alignment it wants.
9183 const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL));
9185 if (Args[i].Ty->isPointerTy()) {
9186 Flags.setPointer();
9187 Flags.setPointerAddrSpace(
9188 cast<PointerType>(Args[i].Ty)->getAddressSpace());
9194 if (Args[i].IsInReg) {
9195 // If we are using vectorcall calling convention, a structure that is
9196 // passed InReg is surely an HVA (homogeneous vector aggregate).
9197 if (CLI.CallConv == CallingConv::X86_VectorCall &&
9198 isa<StructType>(FinalType)) {
9199 // The first value of a structure is marked as the start of the HVA.
9200 if (0 == Value)
9201 Flags.setHvaStart();
9209 if (Args[i].IsSwiftSelf)
9210 Flags.setSwiftSelf();
9211 if (Args[i].IsSwiftError)
9212 Flags.setSwiftError();
9213 if (Args[i].IsCFGuardTarget)
9214 Flags.setCFGuardTarget();
9215 if (Args[i].IsByVal)
9216 Flags.setByVal();
9217 if (Args[i].IsInAlloca) {
9218 Flags.setInAlloca();
9219 // Set the byval flag for CCAssignFn callbacks that don't know about
9220 // inalloca. This way we can know how many bytes we should've allocated
9221 // and how many bytes a callee cleanup function will pop. If we port
9222 // inalloca to more targets, we'll have to add custom inalloca handling
9223 // in the various CC lowering callbacks.
9226 if (Args[i].IsByVal || Args[i].IsInAlloca) {
9227 PointerType *Ty = cast<PointerType>(Args[i].Ty);
9228 Type *ElementTy = Ty->getElementType();
9230 unsigned FrameSize = DL.getTypeAllocSize(
9231 Args[i].ByValType ? Args[i].ByValType : ElementTy);
9232 Flags.setByValSize(FrameSize);
9233 // For ByVal, size and alignment should be passed from FE. BE will guess if
9234 // this info is not there but there are cases it cannot get right.
9235 unsigned FrameAlign;
9236 if (Args[i].Alignment)
9237 FrameAlign = Args[i].Alignment;
9239 FrameAlign = getByValTypeAlignment(ElementTy, DL);
9240 Flags.setByValAlign(Align(FrameAlign));
9244 if (NeedsRegBlock)
9245 Flags.setInConsecutiveRegs();
9246 Flags.setOrigAlign(OriginalAlignment);
9248 MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
9249 CLI.CallConv, VT);
9250 unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
9251 CLI.CallConv, VT);
9252 SmallVector<SDValue, 4> Parts(NumParts);
9253 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
9255 if (Args[i].IsSExt)
9256 ExtendKind = ISD::SIGN_EXTEND;
9257 else if (Args[i].IsZExt)
9258 ExtendKind = ISD::ZERO_EXTEND;
9260 // Conservatively only handle 'returned' on non-vectors that can be lowered,
9261 // for now.
9262 if (Args[i].IsReturned && !Op.getValueType().isVector() &&
9264 assert((CLI.RetTy == Args[i].Ty ||
9265 (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() &&
9266 CLI.RetTy->getPointerAddressSpace() ==
9267 Args[i].Ty->getPointerAddressSpace())) &&
9268 RetTys.size() == NumValues && "unexpected use of 'returned'");
9269 // Before passing 'returned' to the target lowering code, ensure that
9270 // either the register MVT and the actual EVT are the same size or that
9271 // the return value and argument are extended in the same way; in these
9272 // cases it's safe to pass the argument register value unchanged as the
9273 // return register value (although it's at the target's option whether
9274 // to do so).
9275 // TODO: allow code generation to take advantage of partially preserved
9276 // registers rather than clobbering the entire register when the
9277 // parameter extension method is not compatible with the return
9278 // extension method.
9279 if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
9280 (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
9281 CLI.RetZExt == Args[i].IsZExt))
9282 Flags.setReturned();
9285 getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
9286 CLI.CS.getInstruction(), CLI.CallConv, ExtendKind);
9288 for (unsigned j = 0; j != NumParts; ++j) {
9289 // If it isn't the first piece, the alignment must be 1.
9290 // For scalable vectors the scalable part is currently handled
9291 // by individual targets, so we just use the known minimum size here.
9292 ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT,
9293 i < CLI.NumFixedArgs, i,
9294 j*Parts[j].getValueType().getStoreSize().getKnownMinSize());
9295 if (NumParts > 1 && j == 0)
9296 MyFlags.Flags.setSplit();
9298 MyFlags.Flags.setOrigAlign(Align::None());
9299 if (j == NumParts - 1)
9300 MyFlags.Flags.setSplitEnd();
9303 CLI.Outs.push_back(MyFlags);
9304 CLI.OutVals.push_back(Parts[j]);
9307 if (NeedsRegBlock && Value == NumValues - 1)
9308 CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
9312 SmallVector<SDValue, 4> InVals;
9313 CLI.Chain = LowerCall(CLI, InVals);
9315 // Update CLI.InVals to use outside of this function.
9316 CLI.InVals = InVals;
9318 // Verify that the target's LowerCall behaved as expected.
9319 assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
9320 "LowerCall didn't return a valid chain!");
9321 assert((!CLI.IsTailCall || InVals.empty()) &&
9322 "LowerCall emitted a return value for a tail call!");
9323 assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
9324 "LowerCall didn't emit the correct number of values!");
9326 // For a tail call, the return value is merely live-out and there aren't
9327 // any nodes in the DAG representing it. Return a special value to
9328 // indicate that a tail call has been emitted and no more Instructions
9329 // should be processed in the current block.
9330 if (CLI.IsTailCall) {
9331 CLI.DAG.setRoot(CLI.Chain);
9332 return std::make_pair(SDValue(), SDValue());
9336 for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
9337 assert(InVals[i].getNode() && "LowerCall emitted a null value!");
9338 assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
9339 "LowerCall emitted a value with the wrong type!");
9343 SmallVector<SDValue, 4> ReturnValues;
9344 if (!CanLowerReturn) {
9345 // The instruction result is the result of loading from the
9346 // hidden sret parameter.
9347 SmallVector<EVT, 1> PVTs;
9348 Type *PtrRetTy = OrigRetTy->getPointerTo(DL.getAllocaAddrSpace());
9350 ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
9351 assert(PVTs.size() == 1 && "Pointers should fit in one register");
9352 EVT PtrVT = PVTs[0];
9354 unsigned NumValues = RetTys.size();
9355 ReturnValues.resize(NumValues);
9356 SmallVector<SDValue, 4> Chains(NumValues);
9358 // An aggregate return value cannot wrap around the address space, so
9359 // offsets to its parts don't wrap either.
9360 SDNodeFlags Flags;
9361 Flags.setNoUnsignedWrap(true);
9363 for (unsigned i = 0; i < NumValues; ++i) {
9364 SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
9365 CLI.DAG.getConstant(Offsets[i], CLI.DL,
9366 PtrVT), Flags);
9367 SDValue L = CLI.DAG.getLoad(
9368 RetTys[i], CLI.DL, CLI.Chain, Add,
9369 MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
9370 DemoteStackIdx, Offsets[i]),
9371 /* Alignment = */ 1);
9372 ReturnValues[i] = L;
9373 Chains[i] = L.getValue(1);
9376 CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
9378 // Collect the legal value parts into potentially illegal values
9379 // that correspond to the original function's return values.
9380 Optional<ISD::NodeType> AssertOp;
9381 if (CLI.RetSExt)
9382 AssertOp = ISD::AssertSext;
9383 else if (CLI.RetZExt)
9384 AssertOp = ISD::AssertZext;
9385 unsigned CurReg = 0;
9386 for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
9387 EVT VT = RetTys[I];
9388 MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
9389 CLI.CallConv, VT);
9390 unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
9391 CLI.CallConv, VT);
9393 ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
9394 NumRegs, RegisterVT, VT, nullptr,
9395 CLI.CallConv, AssertOp));
9399 // For a function returning void, there is no return value. We can't create
9400 // such a node, so we just return a null return value in that case. In
9401 // that case, nothing will actually look at the value.
9402 if (ReturnValues.empty())
9403 return std::make_pair(SDValue(), CLI.Chain);
9406 SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
9407 CLI.DAG.getVTList(RetTys), ReturnValues);
9408 return std::make_pair(Res, CLI.Chain);
9411 void TargetLowering::LowerOperationWrapper(SDNode *N,
9412 SmallVectorImpl<SDValue> &Results,
9413 SelectionDAG &DAG) const {
9414 if (SDValue Res = LowerOperation(SDValue(N, 0), DAG))
9415 Results.push_back(Res);
9418 SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
9419 llvm_unreachable("LowerOperation not implemented for this target!");
9423 SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
9424 SDValue Op = getNonRegisterValue(V);
9425 assert((Op.getOpcode() != ISD::CopyFromReg ||
9426 cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
9427 "Copy from a reg to the same reg!");
9428 assert(!Register::isPhysicalRegister(Reg) && "Is a physreg");
9430 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9431 // If this is an InlineAsm we have to match the registers required, not the
9432 // notional registers required by the type.
9434 RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
9435 None); // This is not an ABI copy.
9436 SDValue Chain = DAG.getEntryNode();
9438 ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
9439 FuncInfo.PreferredExtendType.end())
9441 : FuncInfo.PreferredExtendType[V];
9442 RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
9443 PendingExports.push_back(Chain);
9446 #include "llvm/CodeGen/SelectionDAGISel.h"
9448 /// isOnlyUsedInEntryBlock - If the specified argument is only used in the
9449 /// entry block, return true. This includes arguments used by switches, since
9450 /// the switch may expand into multiple basic blocks.
9451 static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
9452 // With FastISel active, we may be splitting blocks, so force creation
9453 // of virtual registers for all non-dead arguments.
9454 if (FastISel)
9455 return A->use_empty();
9457 const BasicBlock &Entry = A->getParent()->front();
9458 for (const User *U : A->users())
9459 if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
9460 return false; // Use not in entry block.
9465 using ArgCopyElisionMapTy =
9466 DenseMap<const Argument *,
9467 std::pair<const AllocaInst *, const StoreInst *>>;
9469 /// Scan the entry block of the function in FuncInfo for arguments that look
9470 /// like copies into a local alloca. Record any copied arguments in
9471 /// ArgCopyElisionCandidates.
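/// A typical candidate, as emitted by clang at -O0 (hypothetical IR):
///   define void @f(i32 %x) {
///   entry:
///     %x.addr = alloca i32
///     store i32 %x, i32* %x.addr
/// If %x arrives on the stack, %x.addr can reuse that stack slot and the
/// store can be elided.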
9473 findArgumentCopyElisionCandidates(const DataLayout &DL,
9474 FunctionLoweringInfo *FuncInfo,
9475 ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
9476 // Record the state of every static alloca used in the entry block. Argument
9477 // allocas are all used in the entry block, so we need approximately as many
9478 // entries as we have arguments.
9479 enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
9480 SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
9481 unsigned NumArgs = FuncInfo->Fn->arg_size();
9482 StaticAllocas.reserve(NumArgs * 2);
9484 auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
9487 V = V->stripPointerCasts();
9488 const auto *AI = dyn_cast<AllocaInst>(V);
9489 if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
9491 auto Iter = StaticAllocas.insert({AI, Unknown});
9492 return &Iter.first->second;
9495 // Look for stores of arguments to static allocas. Look through bitcasts and
9496 // GEPs to handle type coercions, as long as the alloca is fully initialized
9497 // by the store. Any non-store use of an alloca escapes it and any subsequent
9498 // unanalyzed store might write it.
9499 // FIXME: Handle structs initialized with multiple stores.
9500 for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
9501 // Look for stores, and handle non-store uses conservatively.
9502 const auto *SI = dyn_cast<StoreInst>(&I);
9504 // We will look through cast uses, so ignore them completely.
9507 // Ignore debug info intrinsics, they don't escape or store to allocas.
9508 if (isa<DbgInfoIntrinsic>(I))
9510 // This is an unknown instruction. Assume it escapes or writes to all
9511 // static alloca operands.
9512 for (const Use &U : I.operands()) {
9513 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
9514 *Info = StaticAllocaInfo::Clobbered;
9519 // If the stored value is a static alloca, mark it as escaped.
9520 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
9521 *Info = StaticAllocaInfo::Clobbered;
9523 // Check if the destination is a static alloca.
9524 const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
9525 StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
9528 const AllocaInst *AI = cast<AllocaInst>(Dst);
9530 // Skip allocas that have been initialized or clobbered.
9531 if (*Info != StaticAllocaInfo::Unknown)
9534 // Check if the stored value is an argument, and that this store fully
9535 // initializes the alloca. Don't elide copies from the same argument twice.
9536 const Value *Val = SI->getValueOperand()->stripPointerCasts();
9537 const auto *Arg = dyn_cast<Argument>(Val);
9538 if (!Arg || Arg->hasInAllocaAttr() || Arg->hasByValAttr() ||
9539 Arg->getType()->isEmptyTy() ||
9540 DL.getTypeStoreSize(Arg->getType()) !=
9541 DL.getTypeAllocSize(AI->getAllocatedType()) ||
9542 ArgCopyElisionCandidates.count(Arg)) {
9543 *Info = StaticAllocaInfo::Clobbered;
9547 LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
9550 // Mark this alloca and store for argument copy elision.
9551 *Info = StaticAllocaInfo::Elidable;
9552 ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
9554 // Stop scanning if we've seen all arguments. This will happen early in -O0
9555 // builds, which is useful, because -O0 builds have large entry blocks and
9556 // many allocas.
9557 if (ArgCopyElisionCandidates.size() == NumArgs)
9562 /// Try to elide argument copies from memory into a local alloca. Succeeds if
9563 /// ArgVal is a load from a suitable fixed stack object.
9564 static void tryToElideArgumentCopy(
9565 FunctionLoweringInfo &FuncInfo, SmallVectorImpl<SDValue> &Chains,
9566 DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
9567 SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
9568 ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
9569 SDValue ArgVal, bool &ArgHasUses) {
9570 // Check if this is a load from a fixed stack object.
9571 auto *LNode = dyn_cast<LoadSDNode>(ArgVal);
9574 auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
9578 // Check that the fixed stack object is the right size and alignment.
9579 // Look at the alignment that the user wrote on the alloca instead of looking
9580 // at the stack object.
9581 auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
9582 assert(ArgCopyIter != ArgCopyElisionCandidates.end());
9583 const AllocaInst *AI = ArgCopyIter->second.first;
9584 int FixedIndex = FINode->getIndex();
9585 int &AllocaIndex = FuncInfo.StaticAllocaMap[AI];
9586 int OldIndex = AllocaIndex;
9587 MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
9588 if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
9590 dbgs() << " argument copy elision failed due to bad fixed stack "
9594 unsigned RequiredAlignment = AI->getAlignment();
9595 if (!RequiredAlignment) {
9596 RequiredAlignment = FuncInfo.MF->getDataLayout().getABITypeAlignment(
9597 AI->getAllocatedType());
9599 if (MFI.getObjectAlignment(FixedIndex) < RequiredAlignment) {
9600 LLVM_DEBUG(dbgs() << " argument copy elision failed: alignment of alloca "
9601 "greater than stack argument alignment ("
9602 << RequiredAlignment << " vs "
9603 << MFI.getObjectAlignment(FixedIndex) << ")\n");
9607 // Perform the elision. Delete the old stack object and replace its only use
9608 // in the variable info map. Mark the stack object as mutable.
9610 dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
9611 << " Replacing frame index " << OldIndex << " with " << FixedIndex
9614 MFI.RemoveStackObject(OldIndex);
9615 MFI.setIsImmutableObjectIndex(FixedIndex, false);
9616 AllocaIndex = FixedIndex;
9617 ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
9618 Chains.push_back(ArgVal.getValue(1));
9620 // Avoid emitting code for the store implementing the copy.
9621 const StoreInst *SI = ArgCopyIter->second.second;
9622 ElidedArgCopyInstrs.insert(SI);
9624 // Check for uses of the argument again so that we can avoid exporting ArgVal
9625 // if it isn't used by anything other than the store.
9626 for (const Value *U : Arg.users()) {
9634 void SelectionDAGISel::LowerArguments(const Function &F) {
9635 SelectionDAG &DAG = SDB->DAG;
9636 SDLoc dl = SDB->getCurSDLoc();
9637 const DataLayout &DL = DAG.getDataLayout();
9638 SmallVector<ISD::InputArg, 16> Ins;
9640 if (!FuncInfo->CanLowerReturn) {
9641 // Put in an sret pointer parameter before all the other parameters.
9642 SmallVector<EVT, 1> ValueVTs;
9643 ComputeValueVTs(*TLI, DAG.getDataLayout(),
9644 F.getReturnType()->getPointerTo(
9645 DAG.getDataLayout().getAllocaAddrSpace()),
9648 // NOTE: Assuming that a pointer will never break down to more than one VT
9649 // or one register.
9650 ISD::ArgFlagsTy Flags;
9652 MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
9653 ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
9654 ISD::InputArg::NoArgIndex, 0);
9655 Ins.push_back(RetArg);
9658 // Look for stores of arguments to static allocas. Mark such arguments with a
// flag to ask the target to give us the memory location of that argument if
// available.
9661 ArgCopyElisionMapTy ArgCopyElisionCandidates;
9662 findArgumentCopyElisionCandidates(DL, FuncInfo.get(),
9663 ArgCopyElisionCandidates);
9665 // Set up the incoming argument description vector.
9666 for (const Argument &Arg : F.args()) {
9667 unsigned ArgNo = Arg.getArgNo();
9668 SmallVector<EVT, 4> ValueVTs;
9669 ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
9670 bool isArgValueUsed = !Arg.use_empty();
9671 unsigned PartBase = 0;
9672 Type *FinalType = Arg.getType();
9673 if (Arg.hasAttribute(Attribute::ByVal))
9674 FinalType = Arg.getParamByValType();
9675 bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
9676 FinalType, F.getCallingConv(), F.isVarArg());
9677 for (unsigned Value = 0, NumValues = ValueVTs.size();
9678 Value != NumValues; ++Value) {
9679 EVT VT = ValueVTs[Value];
9680 Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
9681 ISD::ArgFlagsTy Flags;
// Certain targets (such as MIPS) may have a different ABI alignment
9684 // for a type depending on the context. Give the target a chance to
9685 // specify the alignment it wants.
9686 const Align OriginalAlignment(
9687 TLI->getABIAlignmentForCallingConv(ArgTy, DL));
9689 if (Arg.getType()->isPointerTy()) {
9691 Flags.setPointerAddrSpace(
9692 cast<PointerType>(Arg.getType())->getAddressSpace());
if (Arg.hasAttribute(Attribute::ZExt))
  Flags.setZExt();
if (Arg.hasAttribute(Attribute::SExt))
  Flags.setSExt();
9698 if (Arg.hasAttribute(Attribute::InReg)) {
// If we are using the vectorcall calling convention, a structure that is
// passed InReg is surely an HVA (homogeneous vector aggregate).
if (F.getCallingConv() == CallingConv::X86_VectorCall &&
    isa<StructType>(Arg.getType())) {
  // The first value of a structure is marked as the start of the HVA.
  if (Value == 0)
    Flags.setHvaStart();
  Flags.setHva();
}
Flags.setInReg();
}
if (Arg.hasAttribute(Attribute::StructRet))
  Flags.setSRet();
9713 if (Arg.hasAttribute(Attribute::SwiftSelf))
9714 Flags.setSwiftSelf();
9715 if (Arg.hasAttribute(Attribute::SwiftError))
9716 Flags.setSwiftError();
if (Arg.hasAttribute(Attribute::ByVal))
  Flags.setByVal();
9719 if (Arg.hasAttribute(Attribute::InAlloca)) {
9720 Flags.setInAlloca();
9721 // Set the byval flag for CCAssignFn callbacks that don't know about
9722 // inalloca. This way we can know how many bytes we should've allocated
9723 // and how many bytes a callee cleanup function will pop. If we port
9724 // inalloca to more targets, we'll have to add custom inalloca handling
// in the various CC lowering callbacks.
Flags.setByVal();
}
9728 if (F.getCallingConv() == CallingConv::X86_INTR) {
// An x86 interrupt handler receives its frame (the first parameter) by
// value on the stack.
if (ArgNo == 0)
  Flags.setByVal();
}
9733 if (Flags.isByVal() || Flags.isInAlloca()) {
9734 Type *ElementTy = Arg.getParamByValType();
// For ByVal, size and alignment should be passed from the frontend. The
// backend will guess if this info is not there, but there are cases it
// cannot get right.
9739 unsigned FrameSize = DL.getTypeAllocSize(Arg.getParamByValType());
9740 Flags.setByValSize(FrameSize);
9742 unsigned FrameAlign;
9743 if (Arg.getParamAlignment())
  FrameAlign = Arg.getParamAlignment();
else
  FrameAlign = TLI->getByValTypeAlignment(ElementTy, DL);
Flags.setByValAlign(Align(FrameAlign));
}
if (Arg.hasAttribute(Attribute::Nest))
  Flags.setNest();
if (NeedsRegBlock)
9752 Flags.setInConsecutiveRegs();
9753 Flags.setOrigAlign(OriginalAlignment);
9754 if (ArgCopyElisionCandidates.count(&Arg))
9755 Flags.setCopyElisionCandidate();
9756 if (Arg.hasAttribute(Attribute::Returned))
9757 Flags.setReturned();
9759 MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
9760 *CurDAG->getContext(), F.getCallingConv(), VT);
9761 unsigned NumRegs = TLI->getNumRegistersForCallingConv(
9762 *CurDAG->getContext(), F.getCallingConv(), VT);
9763 for (unsigned i = 0; i != NumRegs; ++i) {
9764 // For scalable vectors, use the minimum size; individual targets
// are responsible for handling scalable vector arguments and return
// values.
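// For example (illustrative): a <vscale x 4 x i32> part has a known
// minimum store size of 16 bytes, so PartBase advances by 16 below even
// though the actual size is a runtime multiple of that minimum.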
9767 ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed,
9768 ArgNo, PartBase+i*RegisterVT.getStoreSize().getKnownMinSize());
9769 if (NumRegs > 1 && i == 0)
9770 MyFlags.Flags.setSplit();
// If it isn't the first piece, the alignment must be 1.
else if (i > 0) {
  MyFlags.Flags.setOrigAlign(Align::None());
  if (i == NumRegs - 1)
    MyFlags.Flags.setSplitEnd();
}
9777 Ins.push_back(MyFlags);
9779 if (NeedsRegBlock && Value == NumValues - 1)
9780 Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
    PartBase += VT.getStoreSize().getKnownMinSize();
  }
}
9785 // Call the target to set up the argument values.
9786 SmallVector<SDValue, 8> InVals;
9787 SDValue NewRoot = TLI->LowerFormalArguments(
9788 DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
9790 // Verify that the target's LowerFormalArguments behaved as expected.
9791 assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
9792 "LowerFormalArguments didn't return a valid chain!");
9793 assert(InVals.size() == Ins.size() &&
9794 "LowerFormalArguments didn't emit the correct number of values!");
9796 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
9797 assert(InVals[i].getNode() &&
9798 "LowerFormalArguments emitted a null value!");
9799 assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
         "LowerFormalArguments emitted a value with the wrong type!");
}
9804 // Update the DAG with the new chain value resulting from argument lowering.
9805 DAG.setRoot(NewRoot);
// Set up the argument values.
unsigned i = 0;
9809 if (!FuncInfo->CanLowerReturn) {
9810 // Create a virtual register for the sret pointer, and put in a copy
9811 // from the sret argument into it.
9812 SmallVector<EVT, 1> ValueVTs;
9813 ComputeValueVTs(*TLI, DAG.getDataLayout(),
9814 F.getReturnType()->getPointerTo(
                    DAG.getDataLayout().getAllocaAddrSpace()),
                ValueVTs);
9817 MVT VT = ValueVTs[0].getSimpleVT();
9818 MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
9819 Optional<ISD::NodeType> AssertOp = None;
9820 SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT,
9821 nullptr, F.getCallingConv(), AssertOp);
9823 MachineFunction& MF = SDB->DAG.getMachineFunction();
9824 MachineRegisterInfo& RegInfo = MF.getRegInfo();
Register SRetReg =
    RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
9827 FuncInfo->DemoteRegister = SRetReg;
NewRoot =
    SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
9830 DAG.setRoot(NewRoot);
// i indexes lowered arguments. Bump it past the hidden sret argument.
++i;
}
9836 SmallVector<SDValue, 4> Chains;
9837 DenseMap<int, int> ArgCopyElisionFrameIndexMap;
9838 for (const Argument &Arg : F.args()) {
9839 SmallVector<SDValue, 4> ArgValues;
9840 SmallVector<EVT, 4> ValueVTs;
9841 ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
unsigned NumValues = ValueVTs.size();
if (NumValues == 0)
  continue;
9846 bool ArgHasUses = !Arg.use_empty();
9848 // Elide the copying store if the target loaded this argument from a
9849 // suitable fixed stack object.
9850 if (Ins[i].Flags.isCopyElisionCandidate()) {
9851 tryToElideArgumentCopy(*FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
9852 ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
                       InVals[i], ArgHasUses);
}
9856 // If this argument is unused then remember its value. It is used to generate
9857 // debugging information.
9858 bool isSwiftErrorArg =
9859 TLI->supportSwiftError() &&
9860 Arg.hasAttribute(Attribute::SwiftError);
9861 if (!ArgHasUses && !isSwiftErrorArg) {
9862 SDB->setUnusedArgValue(&Arg, InVals[i]);
9864 // Also remember any frame index for use in FastISel.
9865 if (FrameIndexSDNode *FI =
9866 dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
    FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
}
9870 for (unsigned Val = 0; Val != NumValues; ++Val) {
9871 EVT VT = ValueVTs[Val];
9872 MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
9873 F.getCallingConv(), VT);
9874 unsigned NumParts = TLI->getNumRegistersForCallingConv(
9875 *CurDAG->getContext(), F.getCallingConv(), VT);
9877 // Even an apparent 'unused' swifterror argument needs to be returned. So
// we do generate a copy for it that can be used on return from the
// function.
9880 if (ArgHasUses || isSwiftErrorArg) {
9881 Optional<ISD::NodeType> AssertOp;
9882 if (Arg.hasAttribute(Attribute::SExt))
9883 AssertOp = ISD::AssertSext;
9884 else if (Arg.hasAttribute(Attribute::ZExt))
9885 AssertOp = ISD::AssertZext;
9887 ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
9888 PartVT, VT, nullptr,
                                       F.getCallingConv(), AssertOp));
  }

  i += NumParts;
}
9895 // We don't need to do anything else for unused arguments.
if (ArgValues.empty())
  continue;
9899 // Note down frame index.
9900 if (FrameIndexSDNode *FI =
9901 dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
9902 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
9904 SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues),
9905 SDB->getCurSDLoc());
9907 SDB->setValue(&Arg, Res);
9908 if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
9909 // We want to associate the argument with the frame index, among
9910 // involved operands, that correspond to the lowest address. The
9911 // getCopyFromParts function, called earlier, is swapping the order of
9912 // the operands to BUILD_PAIR depending on endianness. The result of
9913 // that swapping is that the least significant bits of the argument will
9914 // be in the first operand of the BUILD_PAIR node, and the most
9915 // significant bits will be in the second operand.
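// For example (illustrative): an i64 argument split into two i32 loads on
// a big-endian target keeps its most significant half at the lower
// address, so operand 1 of the BUILD_PAIR (the MSBs) carries the frame
// index we want; on little-endian targets it is operand 0 (the LSBs).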
9916 unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
9917 if (LoadSDNode *LNode =
9918 dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
9919 if (FrameIndexSDNode *FI =
9920 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
      FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
}
9924 // Analyses past this point are naive and don't expect an assertion.
9925 if (Res.getOpcode() == ISD::AssertZext)
9926 Res = Res.getOperand(0);
9928 // Update the SwiftErrorVRegDefMap.
9929 if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
9930 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
9931 if (Register::isVirtualRegister(Reg))
    SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(),
                               Reg);
}
9936 // If this argument is live outside of the entry block, insert a copy from
9937 // wherever we got it to the vreg that other BB's will reference it as.
9938 if (Res.getOpcode() == ISD::CopyFromReg) {
9939 // If we can, though, try to skip creating an unnecessary vreg.
// FIXME: This isn't very clean... it would be nice to make this more
// general.
9942 unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
9943 if (Register::isVirtualRegister(Reg)) {
    FuncInfo->ValueMap[&Arg] = Reg;
    continue;
  }
}
9948 if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
9949 FuncInfo->InitializeRegForValue(&Arg);
    SDB->CopyToExportRegsIfNeeded(&Arg);
  }
}
9954 if (!Chains.empty()) {
9955 Chains.push_back(NewRoot);
  NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}
9959 DAG.setRoot(NewRoot);
9961 assert(i == InVals.size() && "Argument register count mismatch!");
9963 // If any argument copy elisions occurred and we have debug info, update the
9964 // stale frame indices used in the dbg.declare variable info table.
9965 MachineFunction::VariableDbgInfoMapTy &DbgDeclareInfo = MF->getVariableDbgInfo();
9966 if (!DbgDeclareInfo.empty() && !ArgCopyElisionFrameIndexMap.empty()) {
9967 for (MachineFunction::VariableDbgInfo &VI : DbgDeclareInfo) {
9968 auto I = ArgCopyElisionFrameIndexMap.find(VI.Slot);
9969 if (I != ArgCopyElisionFrameIndexMap.end())
      VI.Slot = I->second;
  }
}
9974 // Finally, if the target has anything special to do, allow it to do so.
EmitFunctionEntryCode();
}
9978 /// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
/// ensure constants are generated when needed. Remember the virtual registers
/// that need to be added to the Machine PHI nodes as input. We cannot just
/// directly add them, because expansion might result in multiple MBB's for one
/// BB. As such, the start of the BB might correspond to a different MBB than
/// the end.
void
9985 SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
9986 const Instruction *TI = LLVMBB->getTerminator();
9988 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
// Check PHI nodes in successors that expect a value to be available from this
// block.
9992 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
9993 const BasicBlock *SuccBB = TI->getSuccessor(succ);
9994 if (!isa<PHINode>(SuccBB->begin())) continue;
9995 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
9997 // If this terminator has multiple identical successors (common for
9998 // switches), only handle each succ once.
if (!SuccsHandled.insert(SuccMBB).second)
  continue;
10002 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
// At this point we know that there is a 1-1 correspondence between LLVM PHI
// nodes and Machine PHI nodes, but the incoming operands have not been
// emitted yet.
10007 for (const PHINode &PN : SuccBB->phis()) {
// Ignore dead PHI nodes.
if (PN.use_empty())
  continue;
10012 // Skip empty types
if (PN.getType()->isEmptyTy())
  continue;

unsigned Reg;
10017 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
10019 if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
  unsigned &RegOut = ConstantsOut[C];
  if (RegOut == 0) {
    RegOut = FuncInfo.CreateRegs(C);
    CopyValueToVirtualRegister(C, RegOut);
  }
  Reg = RegOut;
} else {
10027 DenseMap<const Value *, unsigned>::iterator I =
10028 FuncInfo.ValueMap.find(PHIOp);
  if (I != FuncInfo.ValueMap.end())
    Reg = I->second;
  else {
10032 assert(isa<AllocaInst>(PHIOp) &&
10033 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
10034 "Didn't codegen value into a register!??");
10035 Reg = FuncInfo.CreateRegs(PHIOp);
    CopyValueToVirtualRegister(PHIOp, Reg);
  }
}
// Remember that this register needs to be added to the machine PHI node as
10041 // the input for this MBB.
10042 SmallVector<EVT, 4> ValueVTs;
10043 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10044 ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
10045 for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
10046 EVT VT = ValueVTs[vti];
10047 unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
10048 for (unsigned i = 0, e = NumRegisters; i != e; ++i)
10049 FuncInfo.PHINodesToUpdate.push_back(
10050 std::make_pair(&*MBBI++, Reg + i));
      Reg += NumRegisters;
    }
  }
}
ConstantsOut.clear();
}
/// Add a successor MBB to ParentMBB, creating a new MachineBB for BB if
/// SuccMBB is nullptr.
10061 MachineBasicBlock *
10062 SelectionDAGBuilder::StackProtectorDescriptor::
10063 AddSuccessorMBB(const BasicBlock *BB,
                MachineBasicBlock *ParentMBB,
                bool IsLikely,
10066 MachineBasicBlock *SuccMBB) {
// If SuccMBB has not been created yet, create it.
if (!SuccMBB) {
10069 MachineFunction *MF = ParentMBB->getParent();
10070 MachineFunction::iterator BBI(ParentMBB);
10071 SuccMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(++BBI, SuccMBB);
}
10074 // Add it as a successor of ParentMBB.
10075 ParentMBB->addSuccessor(
    SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely));
return SuccMBB;
}
10080 MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
10081 MachineFunction::iterator I(MBB);
if (++I == FuncInfo.MF->end())
  return nullptr;
return &*I;
}
10087 /// During lowering new call nodes can be created (such as memset, etc.).
10088 /// Those will become new roots of the current DAG, but complications arise
10089 /// when they are tail calls. In such cases, the call lowering will update
10090 /// the root, but the builder still needs to know that a tail call has been
10091 /// lowered in order to avoid generating an additional return.
10092 void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
10093 // If the node is null, we do have a tail call.
10094 if (MaybeTC.getNode() != nullptr)
  DAG.setRoot(MaybeTC);
else
  HasTailCall = true;
}
10100 void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
10101 MachineBasicBlock *SwitchMBB,
10102 MachineBasicBlock *DefaultMBB) {
10103 MachineFunction *CurMF = FuncInfo.MF;
10104 MachineBasicBlock *NextMBB = nullptr;
10105 MachineFunction::iterator BBI(W.MBB);
if (++BBI != FuncInfo.MF->end())
  NextMBB = &*BBI;
10109 unsigned Size = W.LastCluster - W.FirstCluster + 1;
10111 BranchProbabilityInfo *BPI = FuncInfo.BPI;
10113 if (Size == 2 && W.MBB == SwitchMBB) {
// If any two of the cases have the same destination, and if one value
// is the same as the other but has one bit unset that the other has set,
10116 // use bit manipulation to do two compares at once. For example:
10117 // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
// TODO: This could be extended to merge any 2 cases in switches with 3
// cases.
10120 // TODO: Handle cases where W.CaseBB != SwitchBB.
10121 CaseCluster &Small = *W.FirstCluster;
10122 CaseCluster &Big = *W.LastCluster;
10124 if (Small.Low == Small.High && Big.Low == Big.High &&
10125 Small.MBB == Big.MBB) {
10126 const APInt &SmallValue = Small.Low->getValue();
10127 const APInt &BigValue = Big.Low->getValue();
10129 // Check that there is only one bit different.
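// For example (illustrative): SmallValue = 4 (0b100) and BigValue = 6
// (0b110) differ only in bit 1: 4 ^ 6 = 2 (0b010), a power of two, so
// (X | 2) == 6 holds exactly when X == 4 or X == 6.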
10130 APInt CommonBit = BigValue ^ SmallValue;
10131 if (CommonBit.isPowerOf2()) {
10132 SDValue CondLHS = getValue(Cond);
10133 EVT VT = CondLHS.getValueType();
10134 SDLoc DL = getCurSDLoc();
10136 SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
10137 DAG.getConstant(CommonBit, DL, VT));
10138 SDValue Cond = DAG.getSetCC(
    DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
    ISD::SETEQ);
10142 // Update successor info.
// Both Small and Big will jump to Small.BB, so we sum up the
// probabilities.
addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
if (BPI)
  addSuccessorWithProb(
      SwitchMBB, DefaultMBB,
      // The default destination is the first successor in IR.
      BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
else
  addSuccessorWithProb(SwitchMBB, DefaultMBB);
10154 // Insert the true branch.
SDValue BrCond =
    DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
                DAG.getBasicBlock(Small.MBB));
10158 // Insert the false branch.
10159 BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
10160 DAG.getBasicBlock(DefaultMBB));
        DAG.setRoot(BrCond);
        return;
      }
    }
  }
10168 if (TM.getOptLevel() != CodeGenOpt::None) {
10169 // Here, we order cases by probability so the most likely case will be
10170 // checked first. However, two clusters can have the same probability in
10171 // which case their relative ordering is non-deterministic. So we use Low
10172 // as a tie-breaker as clusters are guaranteed to never overlap.
10173 llvm::sort(W.FirstCluster, W.LastCluster + 1,
10174 [](const CaseCluster &a, const CaseCluster &b) {
             return a.Prob != b.Prob ?
                    a.Prob > b.Prob :
                    a.Low->getValue().slt(b.Low->getValue());
           });
10180 // Rearrange the case blocks so that the last one falls through if possible
10181 // without changing the order of probabilities.
  for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
    --I;
    if (I->Prob > W.LastCluster->Prob)
      break;
    if (I->Kind == CC_Range && I->MBB == NextMBB) {
      std::swap(*I, *W.LastCluster);
      break;
    }
  }
}
10193 // Compute total probability.
10194 BranchProbability DefaultProb = W.DefaultProb;
10195 BranchProbability UnhandledProbs = DefaultProb;
10196 for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
10197 UnhandledProbs += I->Prob;
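// For example (illustrative): three clusters with probabilities 0.5, 0.3,
// and 0.1 plus a default probability of 0.1 give UnhandledProbs = 1.0;
// after the first cluster is handled the remaining comparisons are reached
// with probability 0.5, then 0.2, and finally only the default's 0.1.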
10199 MachineBasicBlock *CurMBB = W.MBB;
10200 for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
10201 bool FallthroughUnreachable = false;
10202 MachineBasicBlock *Fallthrough;
10203 if (I == W.LastCluster) {
10204 // For the last cluster, fall through to the default destination.
10205 Fallthrough = DefaultMBB;
10206 FallthroughUnreachable = isa<UnreachableInst>(
      DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
} else {
10209 Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
10210 CurMF->insert(BBI, Fallthrough);
10211 // Put Cond in a virtual register to make it available from the new blocks.
  ExportFromCurrentBlock(Cond);
}
UnhandledProbs -= I->Prob;

switch (I->Kind) {
10217 case CC_JumpTable: {
10218 // FIXME: Optimize away range check based on pivot comparisons.
10219 JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
10220 SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
10222 // The jump block hasn't been inserted yet; insert it here.
10223 MachineBasicBlock *JumpMBB = JT->MBB;
10224 CurMF->insert(BBI, JumpMBB);
10226 auto JumpProb = I->Prob;
10227 auto FallthroughProb = UnhandledProbs;
10229 // If the default statement is a target of the jump table, we evenly
10230 // distribute the default probability to successors of CurMBB. Also
10231 // update the probability on the edge from JumpMBB to Fallthrough.
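// For example (illustrative): if DefaultProb is 1/4, half of it (1/8)
// shifts from the fallthrough edge onto the jump-table edge, and the
// JumpMBB -> Fallthrough edge is set to the remaining 1/8, preserving the
// total probability mass.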
10232 for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
                                      SE = JumpMBB->succ_end();
     SI != SE; ++SI) {
10235 if (*SI == DefaultMBB) {
10236 JumpProb += DefaultProb / 2;
10237 FallthroughProb -= DefaultProb / 2;
10238 JumpMBB->setSuccProbability(SI, DefaultProb / 2);
    JumpMBB->normalizeSuccProbs();
    break;
  }
}
10244 if (FallthroughUnreachable) {
10245 // Skip the range check if the fallthrough block is unreachable.
  JTH->OmitRangeCheck = true;
}
10249 if (!JTH->OmitRangeCheck)
10250 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
10251 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
10252 CurMBB->normalizeSuccProbs();
10254 // The jump table header will be inserted in our current block, do the
10255 // range check, and fall through to our fallthrough block.
10256 JTH->HeaderBB = CurMBB;
10257 JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
10259 // If we're in the right place, emit the jump table header right now.
10260 if (CurMBB == SwitchMBB) {
10261 visitJumpTableHeader(*JT, *JTH, SwitchMBB);
  JTH->Emitted = true;
}
break;
}
10266 case CC_BitTests: {
10267 // FIXME: Optimize away range check based on pivot comparisons.
10268 BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
10270 // The bit test blocks haven't been inserted yet; insert them here.
10271 for (BitTestCase &BTC : BTB->Cases)
10272 CurMF->insert(BBI, BTC.ThisBB);
10274 // Fill in fields of the BitTestBlock.
10275 BTB->Parent = CurMBB;
10276 BTB->Default = Fallthrough;
10278 BTB->DefaultProb = UnhandledProbs;
10279 // If the cases in bit test don't form a contiguous range, we evenly
10280 // distribute the probability on the edge to Fallthrough to two
10281 // successors of CurMBB.
10282 if (!BTB->ContiguousRange) {
10283 BTB->Prob += DefaultProb / 2;
  BTB->DefaultProb -= DefaultProb / 2;
}
10287 if (FallthroughUnreachable) {
10288 // Skip the range check if the fallthrough block is unreachable.
  BTB->OmitRangeCheck = true;
}
10292 // If we're in the right place, emit the bit test header right now.
10293 if (CurMBB == SwitchMBB) {
10294 visitBitTestHeader(*BTB, SwitchMBB);
  BTB->Emitted = true;
}
break;
}
case CC_Range: {
const Value *RHS, *LHS, *MHS;
ISD::CondCode CC;
if (I->Low == I->High) {
  // Check Cond == I->Low.
  CC = ISD::SETEQ;
  LHS = Cond;
  RHS = I->Low;
  MHS = nullptr;
} else {
  // Check I->Low <= Cond <= I->High.
  CC = ISD::SETLE;
  LHS = I->Low;
  MHS = Cond;
  RHS = I->High;
}
10316 // If Fallthrough is unreachable, fold away the comparison.
if (FallthroughUnreachable)
  CC = ISD::SETTRUE;
10320 // The false probability is the sum of all unhandled cases.
10321 CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
10322 getCurSDLoc(), I->Prob, UnhandledProbs);
10324 if (CurMBB == SwitchMBB)
  visitSwitchCase(CB, SwitchMBB);
else
  SL->SwitchCases.push_back(CB);
break;
}
}
    CurMBB = Fallthrough;
  }
}
10336 unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
10337 CaseClusterIt First,
10338 CaseClusterIt Last) {
10339 return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
10340 if (X.Prob != CC.Prob)
10341 return X.Prob > CC.Prob;
10343 // Ties are broken by comparing the case value.
    return X.Low->getValue().slt(CC.Low->getValue());
  });
}
10348 void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
                                        const SwitchWorkListItem &W,
                                        Value *Cond,
10351 MachineBasicBlock *SwitchMBB) {
10352 assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
10353 "Clusters not sorted?");
10355 assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
10357 // Balance the tree based on branch probabilities to create a near-optimal (in
10358 // terms of search time given key frequency) binary search tree. See e.g. Kurt
10359 // Mehlhorn "Nearly Optimal Binary Search Trees" (1975).
10360 CaseClusterIt LastLeft = W.FirstCluster;
10361 CaseClusterIt FirstRight = W.LastCluster;
10362 auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
10363 auto RightProb = FirstRight->Prob + W.DefaultProb / 2;
10365 // Move LastLeft and FirstRight towards each other from opposite directions to
10366 // find a partitioning of the clusters which balances the probability on both
10367 // sides. If LeftProb and RightProb are equal, alternate which side is
10368 // taken to ensure 0-probability nodes are distributed evenly.
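// For example (illustrative): clusters with probabilities {0.1, 0.2, 0.3,
// 0.4} and DefaultProb = 0 start with LeftProb = 0.1 and RightProb = 0.4;
// the loop grows the cheaper side, ending with {0.1, 0.2, 0.3} on the left
// (0.6) and {0.4} on the right.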
unsigned I = 0;
while (LastLeft + 1 < FirstRight) {
  if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
    LeftProb += (++LastLeft)->Prob;
  else
    RightProb += (--FirstRight)->Prob;
  I++;
}
10379 // Our binary search tree differs from a typical BST in that ours can have up
10380 // to three values in each leaf. The pivot selection above doesn't take that
10381 // into account, which means the tree might require more nodes and be less
10382 // efficient. We compensate for this here.
10384 unsigned NumLeft = LastLeft - W.FirstCluster + 1;
10385 unsigned NumRight = W.LastCluster - FirstRight + 1;
10387 if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
10388 // If one side has less than 3 clusters, and the other has more than 3,
// consider taking a cluster from the other side.
while (true) {
10391 if (NumLeft < NumRight) {
10392 // Consider moving the first cluster on the right to the left side.
10393 CaseCluster &CC = *FirstRight;
10394 unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
10395 unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
10396 if (LeftSideRank <= RightSideRank) {
      // Moving the cluster to the left does not demote it.
      ++LastLeft;
      ++FirstRight;
      continue;
    }
  } else {
10403 assert(NumRight < NumLeft);
10404 // Consider moving the last element on the left to the right side.
10405 CaseCluster &CC = *LastLeft;
10406 unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
10407 unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
10408 if (RightSideRank <= LeftSideRank) {
      // Moving the cluster to the right does not demote it.
      --LastLeft;
      --FirstRight;
      continue;
    }
  }
  break;
}
}
10419 assert(LastLeft + 1 == FirstRight);
10420 assert(LastLeft >= W.FirstCluster);
10421 assert(FirstRight <= W.LastCluster);
10423 // Use the first element on the right as pivot since we will make less-than
10424 // comparisons against it.
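// For example (illustrative): with left clusters {1, 3, 5} and right
// clusters {7, 9}, the pivot is 7; the left subtree is taken when
// Cond < 7 and the right subtree handles Cond >= 7.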
10425 CaseClusterIt PivotCluster = FirstRight;
10426 assert(PivotCluster > W.FirstCluster);
10427 assert(PivotCluster <= W.LastCluster);
10429 CaseClusterIt FirstLeft = W.FirstCluster;
10430 CaseClusterIt LastRight = W.LastCluster;
10432 const ConstantInt *Pivot = PivotCluster->Low;
10434 // New blocks will be inserted immediately after the current one.
MachineFunction::iterator BBI(W.MBB);
++BBI;
10438 // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
10439 // we can branch to its destination directly if it's squeezed exactly in
10440 // between the known lower bound and Pivot - 1.
10441 MachineBasicBlock *LeftMBB;
10442 if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
10443 FirstLeft->Low == W.GE &&
10444 (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
  LeftMBB = FirstLeft->MBB;
} else {
10447 LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
10448 FuncInfo.MF->insert(BBI, LeftMBB);
10449 WorkList.push_back(
10450 {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
10451 // Put Cond in a virtual register to make it available from the new blocks.
  ExportFromCurrentBlock(Cond);
}
10455 // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
10456 // single cluster, RHS.Low == Pivot, and we can branch to its destination
10457 // directly if RHS.High equals the current upper bound.
10458 MachineBasicBlock *RightMBB;
10459 if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
10460 W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
  RightMBB = FirstRight->MBB;
} else {
10463 RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
10464 FuncInfo.MF->insert(BBI, RightMBB);
10465 WorkList.push_back(
10466 {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
10467 // Put Cond in a virtual register to make it available from the new blocks.
  ExportFromCurrentBlock(Cond);
}
10471 // Create the CaseBlock record that will be used to lower the branch.
10472 CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
10473 getCurSDLoc(), LeftProb, RightProb);
10475 if (W.MBB == SwitchMBB)
  visitSwitchCase(CB, SwitchMBB);
else
  SL->SwitchCases.push_back(CB);
}
// Scale CaseProb after peeling a case with the probability of PeeledCaseProb
// from the switch statement.
static BranchProbability scaleCaseProbability(BranchProbability CaseProb,
                                              BranchProbability PeeledCaseProb) {
10484 BranchProbability PeeledCaseProb) {
10485 if (PeeledCaseProb == BranchProbability::getOne())
10486 return BranchProbability::getZero();
10487 BranchProbability SwitchProb = PeeledCaseProb.getCompl();
10489 uint32_t Numerator = CaseProb.getNumerator();
10490 uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
return BranchProbability(Numerator, std::max(Numerator, Denominator));
}
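// For example (illustrative numbers): if the peeled case had probability
// 3/4, SwitchProb is 1/4, and a remaining case with probability 1/8 is
// rescaled to (1/8) / (1/4) = 1/2 of the residual switch.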
10494 // Try to peel the top probability case if it exceeds the threshold.
// Return current MachineBasicBlock for the switch statement if the peeling
// does not occur.
10497 // If the peeling is performed, return the newly created MachineBasicBlock
10498 // for the peeled switch statement. Also update Clusters to remove the peeled
10499 // case. PeeledCaseProb is the BranchProbability for the peeled case.
10500 MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
10501 const SwitchInst &SI, CaseClusterVector &Clusters,
10502 BranchProbability &PeeledCaseProb) {
10503 MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
10504 // Don't perform if there is only one cluster or optimizing for size.
10505 if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
10506 TM.getOptLevel() == CodeGenOpt::None ||
    SwitchMBB->getParent()->getFunction().hasMinSize())
  return SwitchMBB;
10510 BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
10511 unsigned PeeledCaseIndex = 0;
10512 bool SwitchPeeled = false;
10513 for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
10514 CaseCluster &CC = Clusters[Index];
if (CC.Prob < TopCaseProb)
  continue;
10517 TopCaseProb = CC.Prob;
10518 PeeledCaseIndex = Index;
  SwitchPeeled = true;
}
if (!SwitchPeeled)
  return SwitchMBB;
10524 LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
10525 << TopCaseProb << "\n");
10527 // Record the MBB for the peeled switch statement.
10528 MachineFunction::iterator BBI(SwitchMBB);
10530 MachineBasicBlock *PeeledSwitchMBB =
10531 FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
10532 FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
10534 ExportFromCurrentBlock(SI.getCondition());
10535 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
10536 SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
10537 nullptr, nullptr, TopCaseProb.getCompl()};
10538 lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
10540 Clusters.erase(PeeledCaseIt);
for (CaseCluster &CC : Clusters) {
  LLVM_DEBUG(
      dbgs() << "Scale the probability for one cluster, before scaling: "
             << CC.Prob << "\n");
  CC.Prob = scaleCaseProbability(CC.Prob, TopCaseProb);
  LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
}
10548 PeeledCaseProb = TopCaseProb;
return PeeledSwitchMBB;
}
10552 void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
10553 // Extract cases from the switch.
10554 BranchProbabilityInfo *BPI = FuncInfo.BPI;
10555 CaseClusterVector Clusters;
10556 Clusters.reserve(SI.getNumCases());
10557 for (auto I : SI.cases()) {
10558 MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
10559 const ConstantInt *CaseVal = I.getCaseValue();
10560 BranchProbability Prob =
10561 BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
10562 : BranchProbability(1, SI.getNumCases() + 1);
  Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
}
10566 MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];
10568 // Cluster adjacent cases with the same destination. We do this at all
10569 // optimization levels because it's cheap to do and will make codegen faster
10570 // if there are many clusters.
10571 sortAndRangeify(Clusters);
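// For example (illustrative): adjacent cases 0, 1, 2 -> BB1 and a lone
// case 5 -> BB2 become two clusters, the range [0, 2] -> BB1 and the
// single value [5, 5] -> BB2.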
// The branch probability of the peeled case.
10574 BranchProbability PeeledCaseProb = BranchProbability::getZero();
10575 MachineBasicBlock *PeeledSwitchMBB =
10576 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
10578 // If there is only the default destination, jump there directly.
10579 MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
10580 if (Clusters.empty()) {
10581 assert(PeeledSwitchMBB == SwitchMBB);
10582 SwitchMBB->addSuccessor(DefaultMBB);
10583 if (DefaultMBB != NextBlock(SwitchMBB)) {
10584 DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
                            getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
  }
  return;
}
10590 SL->findJumpTables(Clusters, &SI, DefaultMBB, DAG.getPSI(), DAG.getBFI());
10591 SL->findBitTestClusters(Clusters, &SI);
LLVM_DEBUG({
  dbgs() << "Case clusters: ";
10595 for (const CaseCluster &C : Clusters) {
    if (C.Kind == CC_JumpTable)
      dbgs() << "JT:";
    if (C.Kind == CC_BitTests)
      dbgs() << "BT:";

    C.Low->getValue().print(dbgs(), true);
    if (C.Low != C.High) {
      dbgs() << '-';
      C.High->getValue().print(dbgs(), true);
    }
    dbgs() << ' ';
  }
  dbgs() << '\n';
});
10611 assert(!Clusters.empty());
10612 SwitchWorkList WorkList;
10613 CaseClusterIt First = Clusters.begin();
10614 CaseClusterIt Last = Clusters.end() - 1;
10615 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
// Scale the branch probability for DefaultMBB if the peel occurs and
10617 // DefaultMBB is not replaced.
10618 if (PeeledCaseProb != BranchProbability::getZero() &&
10619 DefaultMBB == FuncInfo.MBBMap[SI.getDefaultDest()])
  DefaultProb = scaleCaseProbability(DefaultProb, PeeledCaseProb);
10621 WorkList.push_back(
10622 {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
10624 while (!WorkList.empty()) {
10625 SwitchWorkListItem W = WorkList.back();
10626 WorkList.pop_back();
10627 unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
10629 if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None &&
10630 !DefaultMBB->getParent()->getFunction().hasMinSize()) {
10631 // For optimized builds, lower large range as a balanced binary tree.
    splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
    continue;
  }
  lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
}
}
10640 void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) {
10641 SDValue N = getValue(I.getOperand(0));