//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//
15 #include "llvm/IR/Instructions.h"
16 #include "LLVMContextImpl.h"
17 #include "llvm/ADT/None.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/ADT/Twine.h"
20 #include "llvm/IR/Attributes.h"
21 #include "llvm/IR/BasicBlock.h"
22 #include "llvm/IR/CallSite.h"
23 #include "llvm/IR/Constant.h"
24 #include "llvm/IR/Constants.h"
25 #include "llvm/IR/DataLayout.h"
26 #include "llvm/IR/DerivedTypes.h"
27 #include "llvm/IR/Function.h"
28 #include "llvm/IR/InstrTypes.h"
29 #include "llvm/IR/Instruction.h"
30 #include "llvm/IR/Intrinsics.h"
31 #include "llvm/IR/LLVMContext.h"
32 #include "llvm/IR/Metadata.h"
33 #include "llvm/IR/Module.h"
34 #include "llvm/IR/Operator.h"
35 #include "llvm/IR/Type.h"
36 #include "llvm/IR/Value.h"
37 #include "llvm/Support/AtomicOrdering.h"
38 #include "llvm/Support/Casting.h"
39 #include "llvm/Support/ErrorHandling.h"
40 #include "llvm/Support/MathExtras.h"
48 //===----------------------------------------------------------------------===//
50 //===----------------------------------------------------------------------===//
53 AllocaInst::getAllocationSizeInBits(const DataLayout &DL) const {
54 uint64_t Size = DL.getTypeAllocSizeInBits(getAllocatedType());
55 if (isArrayAllocation()) {
56 auto C = dyn_cast<ConstantInt>(getArraySize());
59 Size *= C->getZExtValue();
64 //===----------------------------------------------------------------------===//
66 //===----------------------------------------------------------------------===//
68 User::op_iterator CallSite::getCallee() const {
69 return cast<CallBase>(getInstruction())->op_end() - 1;
72 //===----------------------------------------------------------------------===//
74 //===----------------------------------------------------------------------===//
76 /// areInvalidOperands - Return a string if the specified operands are invalid
77 /// for a select operation, otherwise return null.
78 const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
79 if (Op1->getType() != Op2->getType())
80 return "both values to select must have same type";
82 if (Op1->getType()->isTokenTy())
83 return "select values cannot have token type";
85 if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
87 if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
88 return "vector select condition element type must be i1";
89 VectorType *ET = dyn_cast<VectorType>(Op1->getType());
91 return "selected values for vector select must be vectors";
92 if (ET->getNumElements() != VT->getNumElements())
93 return "vector select requires selected vectors to have "
94 "the same vector length as select condition";
95 } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
96 return "select condition must be i1 or <n x i1>";
101 //===----------------------------------------------------------------------===//
103 //===----------------------------------------------------------------------===//
105 PHINode::PHINode(const PHINode &PN)
106 : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
107 ReservedSpace(PN.getNumOperands()) {
108 allocHungoffUses(PN.getNumOperands());
109 std::copy(PN.op_begin(), PN.op_end(), op_begin());
110 std::copy(PN.block_begin(), PN.block_end(), block_begin());
111 SubclassOptionalData = PN.SubclassOptionalData;
114 // removeIncomingValue - Remove an incoming value. This is useful if a
115 // predecessor basic block is deleted.
116 Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
117 Value *Removed = getIncomingValue(Idx);
119 // Move everything after this operand down.
121 // FIXME: we could just swap with the end of the list, then erase. However,
122 // clients might not expect this to happen. The code as it is thrashes the
123 // use/def lists, which is kinda lame.
124 std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
125 std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx);
127 // Nuke the last value.
128 Op<-1>().set(nullptr);
129 setNumHungOffUseOperands(getNumOperands() - 1);
131 // If the PHI node is dead, because it has zero entries, nuke it now.
132 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
133 // If anyone is using this PHI, make them use a dummy value instead...
134 replaceAllUsesWith(UndefValue::get(getType()));
140 /// growOperands - grow operands - This grows the operand list in response
141 /// to a push_back style of operation. This grows the number of ops by 1.5
144 void PHINode::growOperands() {
145 unsigned e = getNumOperands();
146 unsigned NumOps = e + e / 2;
147 if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
149 ReservedSpace = NumOps;
150 growHungoffUses(ReservedSpace, /* IsPhi */ true);
153 /// hasConstantValue - If the specified PHI node always merges together the same
154 /// value, return the value, otherwise return null.
155 Value *PHINode::hasConstantValue() const {
156 // Exploit the fact that phi nodes always have at least one entry.
157 Value *ConstantValue = getIncomingValue(0);
158 for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
159 if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
160 if (ConstantValue != this)
161 return nullptr; // Incoming values not all the same.
162 // The case where the first value is this PHI.
163 ConstantValue = getIncomingValue(i);
165 if (ConstantValue == this)
166 return UndefValue::get(getType());
167 return ConstantValue;
170 /// hasConstantOrUndefValue - Whether the specified PHI node always merges
171 /// together the same value, assuming that undefs result in the same value as
173 /// Unlike \ref hasConstantValue, this does not return a value because the
174 /// unique non-undef incoming value need not dominate the PHI node.
175 bool PHINode::hasConstantOrUndefValue() const {
176 Value *ConstantValue = nullptr;
177 for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
178 Value *Incoming = getIncomingValue(i);
179 if (Incoming != this && !isa<UndefValue>(Incoming)) {
180 if (ConstantValue && ConstantValue != Incoming)
182 ConstantValue = Incoming;
188 //===----------------------------------------------------------------------===//
189 // LandingPadInst Implementation
190 //===----------------------------------------------------------------------===//
192 LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
193 const Twine &NameStr, Instruction *InsertBefore)
194 : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
195 init(NumReservedValues, NameStr);
198 LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
199 const Twine &NameStr, BasicBlock *InsertAtEnd)
200 : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
201 init(NumReservedValues, NameStr);
204 LandingPadInst::LandingPadInst(const LandingPadInst &LP)
205 : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
206 LP.getNumOperands()),
207 ReservedSpace(LP.getNumOperands()) {
208 allocHungoffUses(LP.getNumOperands());
209 Use *OL = getOperandList();
210 const Use *InOL = LP.getOperandList();
211 for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
214 setCleanup(LP.isCleanup());
217 LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
218 const Twine &NameStr,
219 Instruction *InsertBefore) {
220 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
223 LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
224 const Twine &NameStr,
225 BasicBlock *InsertAtEnd) {
226 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
229 void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
230 ReservedSpace = NumReservedValues;
231 setNumHungOffUseOperands(0);
232 allocHungoffUses(ReservedSpace);
237 /// growOperands - grow operands - This grows the operand list in response to a
238 /// push_back style of operation. This grows the number of ops by 2 times.
239 void LandingPadInst::growOperands(unsigned Size) {
240 unsigned e = getNumOperands();
241 if (ReservedSpace >= e + Size) return;
242 ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
243 growHungoffUses(ReservedSpace);
246 void LandingPadInst::addClause(Constant *Val) {
247 unsigned OpNo = getNumOperands();
249 assert(OpNo < ReservedSpace && "Growing didn't work!");
250 setNumHungOffUseOperands(getNumOperands() + 1);
251 getOperandList()[OpNo] = Val;
254 //===----------------------------------------------------------------------===//
255 // CallBase Implementation
256 //===----------------------------------------------------------------------===//
258 Function *CallBase::getCaller() { return getParent()->getParent(); }
260 bool CallBase::isIndirectCall() const {
261 const Value *V = getCalledValue();
262 if (isa<Function>(V) || isa<Constant>(V))
264 if (const CallInst *CI = dyn_cast<CallInst>(this))
265 if (CI->isInlineAsm())
270 Intrinsic::ID CallBase::getIntrinsicID() const {
271 if (auto *F = getCalledFunction())
272 return F->getIntrinsicID();
273 return Intrinsic::not_intrinsic;
276 bool CallBase::isReturnNonNull() const {
277 if (hasRetAttr(Attribute::NonNull))
280 if (getDereferenceableBytes(AttributeList::ReturnIndex) > 0 &&
281 !NullPointerIsDefined(getCaller(),
282 getType()->getPointerAddressSpace()))
288 Value *CallBase::getReturnedArgOperand() const {
291 if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
292 return getArgOperand(Index - AttributeList::FirstArgIndex);
293 if (const Function *F = getCalledFunction())
294 if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
296 return getArgOperand(Index - AttributeList::FirstArgIndex);
301 bool CallBase::hasRetAttr(Attribute::AttrKind Kind) const {
302 if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
305 // Look at the callee, if available.
306 if (const Function *F = getCalledFunction())
307 return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
311 /// Determine whether the argument or parameter has the given attribute.
312 bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
313 assert(ArgNo < getNumArgOperands() && "Param index out of bounds!");
315 if (Attrs.hasParamAttribute(ArgNo, Kind))
317 if (const Function *F = getCalledFunction())
318 return F->getAttributes().hasParamAttribute(ArgNo, Kind);
322 bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
323 if (const Function *F = getCalledFunction())
324 return F->getAttributes().hasAttribute(AttributeList::FunctionIndex, Kind);
328 bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
329 if (const Function *F = getCalledFunction())
330 return F->getAttributes().hasAttribute(AttributeList::FunctionIndex, Kind);
334 CallBase::op_iterator
335 CallBase::populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
336 const unsigned BeginIndex) {
337 auto It = op_begin() + BeginIndex;
338 for (auto &B : Bundles)
339 It = std::copy(B.input_begin(), B.input_end(), It);
341 auto *ContextImpl = getContext().pImpl;
342 auto BI = Bundles.begin();
343 unsigned CurrentIndex = BeginIndex;
345 for (auto &BOI : bundle_op_infos()) {
346 assert(BI != Bundles.end() && "Incorrect allocation?");
348 BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
349 BOI.Begin = CurrentIndex;
350 BOI.End = CurrentIndex + BI->input_size();
351 CurrentIndex = BOI.End;
355 assert(BI == Bundles.end() && "Incorrect allocation?");
360 //===----------------------------------------------------------------------===//
361 // CallInst Implementation
362 //===----------------------------------------------------------------------===//
364 void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
365 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
367 assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
368 "NumOperands not set up?");
369 setCalledOperand(Func);
372 assert((Args.size() == FTy->getNumParams() ||
373 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
374 "Calling a function with bad signature!");
376 for (unsigned i = 0; i != Args.size(); ++i)
377 assert((i >= FTy->getNumParams() ||
378 FTy->getParamType(i) == Args[i]->getType()) &&
379 "Calling a function with a bad signature!");
382 llvm::copy(Args, op_begin());
384 auto It = populateBundleOperandInfos(Bundles, Args.size());
386 assert(It + 1 == op_end() && "Should add up!");
391 void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
393 assert(getNumOperands() == 1 && "NumOperands not set up?");
394 setCalledOperand(Func);
396 assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
401 CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
402 Instruction *InsertBefore)
403 : CallBase(Ty->getReturnType(), Instruction::Call,
404 OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
405 init(Ty, Func, Name);
408 CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
409 BasicBlock *InsertAtEnd)
410 : CallBase(Ty->getReturnType(), Instruction::Call,
411 OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
412 init(Ty, Func, Name);
415 CallInst::CallInst(const CallInst &CI)
416 : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
417 OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
418 CI.getNumOperands()) {
419 setTailCallKind(CI.getTailCallKind());
420 setCallingConv(CI.getCallingConv());
422 std::copy(CI.op_begin(), CI.op_end(), op_begin());
423 std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
424 bundle_op_info_begin());
425 SubclassOptionalData = CI.SubclassOptionalData;
428 CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
429 Instruction *InsertPt) {
430 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
432 auto *NewCI = CallInst::Create(CI->getCalledValue(), Args, OpB, CI->getName(),
434 NewCI->setTailCallKind(CI->getTailCallKind());
435 NewCI->setCallingConv(CI->getCallingConv());
436 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
437 NewCI->setAttributes(CI->getAttributes());
438 NewCI->setDebugLoc(CI->getDebugLoc());
451 /// IsConstantOne - Return true only if val is constant int 1
452 static bool IsConstantOne(Value *val) {
453 assert(val && "IsConstantOne does not work with nullptr val");
454 const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
455 return CVal && CVal->isOne();
458 static Instruction *createMalloc(Instruction *InsertBefore,
459 BasicBlock *InsertAtEnd, Type *IntPtrTy,
460 Type *AllocTy, Value *AllocSize,
462 ArrayRef<OperandBundleDef> OpB,
463 Function *MallocF, const Twine &Name) {
464 assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
465 "createMalloc needs either InsertBefore or InsertAtEnd");
467 // malloc(type) becomes:
468 // bitcast (i8* malloc(typeSize)) to type*
469 // malloc(type, arraySize) becomes:
470 // bitcast (i8* malloc(typeSize*arraySize)) to type*
472 ArraySize = ConstantInt::get(IntPtrTy, 1);
473 else if (ArraySize->getType() != IntPtrTy) {
475 ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
478 ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
482 if (!IsConstantOne(ArraySize)) {
483 if (IsConstantOne(AllocSize)) {
484 AllocSize = ArraySize; // Operand * 1 = Operand
485 } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
486 Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
488 // Malloc arg is constant product of type size and array size
489 AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
491 // Multiply type size by the array size...
493 AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
494 "mallocsize", InsertBefore);
496 AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
497 "mallocsize", InsertAtEnd);
501 assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
502 // Create the call to Malloc.
503 BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
504 Module *M = BB->getParent()->getParent();
505 Type *BPTy = Type::getInt8PtrTy(BB->getContext());
506 Value *MallocFunc = MallocF;
508 // prototype malloc as "void *malloc(size_t)"
509 MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
510 PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
511 CallInst *MCall = nullptr;
512 Instruction *Result = nullptr;
514 MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall",
517 if (Result->getType() != AllocPtrType)
518 // Create a cast instruction to convert to the right type...
519 Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
521 MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall");
523 if (Result->getType() != AllocPtrType) {
524 InsertAtEnd->getInstList().push_back(MCall);
525 // Create a cast instruction to convert to the right type...
526 Result = new BitCastInst(MCall, AllocPtrType, Name);
529 MCall->setTailCall();
530 if (Function *F = dyn_cast<Function>(MallocFunc)) {
531 MCall->setCallingConv(F->getCallingConv());
532 if (!F->returnDoesNotAlias())
533 F->setReturnDoesNotAlias();
535 assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");
540 /// CreateMalloc - Generate the IR for a call to malloc:
541 /// 1. Compute the malloc call's argument as the specified type's size,
542 /// possibly multiplied by the array size if the array size is not
544 /// 2. Call malloc with that argument.
545 /// 3. Bitcast the result of the malloc call to the specified type.
546 Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
547 Type *IntPtrTy, Type *AllocTy,
548 Value *AllocSize, Value *ArraySize,
551 return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
552 ArraySize, None, MallocF, Name);
554 Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
555 Type *IntPtrTy, Type *AllocTy,
556 Value *AllocSize, Value *ArraySize,
557 ArrayRef<OperandBundleDef> OpB,
560 return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
561 ArraySize, OpB, MallocF, Name);
564 /// CreateMalloc - Generate the IR for a call to malloc:
565 /// 1. Compute the malloc call's argument as the specified type's size,
566 /// possibly multiplied by the array size if the array size is not
568 /// 2. Call malloc with that argument.
569 /// 3. Bitcast the result of the malloc call to the specified type.
570 /// Note: This function does not add the bitcast to the basic block, that is the
571 /// responsibility of the caller.
572 Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
573 Type *IntPtrTy, Type *AllocTy,
574 Value *AllocSize, Value *ArraySize,
575 Function *MallocF, const Twine &Name) {
576 return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
577 ArraySize, None, MallocF, Name);
579 Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
580 Type *IntPtrTy, Type *AllocTy,
581 Value *AllocSize, Value *ArraySize,
582 ArrayRef<OperandBundleDef> OpB,
583 Function *MallocF, const Twine &Name) {
584 return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
585 ArraySize, OpB, MallocF, Name);
588 static Instruction *createFree(Value *Source,
589 ArrayRef<OperandBundleDef> Bundles,
590 Instruction *InsertBefore,
591 BasicBlock *InsertAtEnd) {
592 assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
593 "createFree needs either InsertBefore or InsertAtEnd");
594 assert(Source->getType()->isPointerTy() &&
595 "Can not free something of nonpointer type!");
597 BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
598 Module *M = BB->getParent()->getParent();
600 Type *VoidTy = Type::getVoidTy(M->getContext());
601 Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
602 // prototype free as "void free(void*)"
603 Value *FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy);
604 CallInst *Result = nullptr;
605 Value *PtrCast = Source;
607 if (Source->getType() != IntPtrTy)
608 PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
609 Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore);
611 if (Source->getType() != IntPtrTy)
612 PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
613 Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "");
615 Result->setTailCall();
616 if (Function *F = dyn_cast<Function>(FreeFunc))
617 Result->setCallingConv(F->getCallingConv());
622 /// CreateFree - Generate the IR for a call to the builtin free function.
623 Instruction *CallInst::CreateFree(Value *Source, Instruction *InsertBefore) {
624 return createFree(Source, None, InsertBefore, nullptr);
626 Instruction *CallInst::CreateFree(Value *Source,
627 ArrayRef<OperandBundleDef> Bundles,
628 Instruction *InsertBefore) {
629 return createFree(Source, Bundles, InsertBefore, nullptr);
632 /// CreateFree - Generate the IR for a call to the builtin free function.
633 /// Note: This function does not add the call to the basic block, that is the
634 /// responsibility of the caller.
635 Instruction *CallInst::CreateFree(Value *Source, BasicBlock *InsertAtEnd) {
636 Instruction *FreeCall = createFree(Source, None, nullptr, InsertAtEnd);
637 assert(FreeCall && "CreateFree did not create a CallInst");
640 Instruction *CallInst::CreateFree(Value *Source,
641 ArrayRef<OperandBundleDef> Bundles,
642 BasicBlock *InsertAtEnd) {
643 Instruction *FreeCall = createFree(Source, Bundles, nullptr, InsertAtEnd);
644 assert(FreeCall && "CreateFree did not create a CallInst");
648 //===----------------------------------------------------------------------===//
649 // InvokeInst Implementation
650 //===----------------------------------------------------------------------===//
652 void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
653 BasicBlock *IfException, ArrayRef<Value *> Args,
654 ArrayRef<OperandBundleDef> Bundles,
655 const Twine &NameStr) {
658 assert((int)getNumOperands() ==
659 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
660 "NumOperands not set up?");
661 setNormalDest(IfNormal);
662 setUnwindDest(IfException);
663 setCalledOperand(Fn);
666 assert(((Args.size() == FTy->getNumParams()) ||
667 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
668 "Invoking a function with bad signature");
670 for (unsigned i = 0, e = Args.size(); i != e; i++)
671 assert((i >= FTy->getNumParams() ||
672 FTy->getParamType(i) == Args[i]->getType()) &&
673 "Invoking a function with a bad signature!");
676 llvm::copy(Args, op_begin());
678 auto It = populateBundleOperandInfos(Bundles, Args.size());
680 assert(It + 3 == op_end() && "Should add up!");
685 InvokeInst::InvokeInst(const InvokeInst &II)
686 : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
687 OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
688 II.getNumOperands()) {
689 setCallingConv(II.getCallingConv());
690 std::copy(II.op_begin(), II.op_end(), op_begin());
691 std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
692 bundle_op_info_begin());
693 SubclassOptionalData = II.SubclassOptionalData;
696 InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
697 Instruction *InsertPt) {
698 std::vector<Value *> Args(II->arg_begin(), II->arg_end());
700 auto *NewII = InvokeInst::Create(II->getCalledValue(), II->getNormalDest(),
701 II->getUnwindDest(), Args, OpB,
702 II->getName(), InsertPt);
703 NewII->setCallingConv(II->getCallingConv());
704 NewII->SubclassOptionalData = II->SubclassOptionalData;
705 NewII->setAttributes(II->getAttributes());
706 NewII->setDebugLoc(II->getDebugLoc());
711 LandingPadInst *InvokeInst::getLandingPadInst() const {
712 return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
715 //===----------------------------------------------------------------------===//
716 // ReturnInst Implementation
717 //===----------------------------------------------------------------------===//
719 ReturnInst::ReturnInst(const ReturnInst &RI)
720 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
721 OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
722 RI.getNumOperands()) {
723 if (RI.getNumOperands())
724 Op<0>() = RI.Op<0>();
725 SubclassOptionalData = RI.SubclassOptionalData;
728 ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
729 : Instruction(Type::getVoidTy(C), Instruction::Ret,
730 OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
736 ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
737 : Instruction(Type::getVoidTy(C), Instruction::Ret,
738 OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
744 ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
745 : Instruction(Type::getVoidTy(Context), Instruction::Ret,
746 OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}
748 //===----------------------------------------------------------------------===//
749 // ResumeInst Implementation
750 //===----------------------------------------------------------------------===//
752 ResumeInst::ResumeInst(const ResumeInst &RI)
753 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
754 OperandTraits<ResumeInst>::op_begin(this), 1) {
755 Op<0>() = RI.Op<0>();
758 ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
759 : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
760 OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
764 ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
765 : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
766 OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
770 //===----------------------------------------------------------------------===//
771 // CleanupReturnInst Implementation
772 //===----------------------------------------------------------------------===//
774 CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
775 : Instruction(CRI.getType(), Instruction::CleanupRet,
776 OperandTraits<CleanupReturnInst>::op_end(this) -
777 CRI.getNumOperands(),
778 CRI.getNumOperands()) {
779 setInstructionSubclassData(CRI.getSubclassDataFromInstruction());
780 Op<0>() = CRI.Op<0>();
781 if (CRI.hasUnwindDest())
782 Op<1>() = CRI.Op<1>();
785 void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
787 setInstructionSubclassData(getSubclassDataFromInstruction() | 1);
789 Op<0>() = CleanupPad;
794 CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
795 unsigned Values, Instruction *InsertBefore)
796 : Instruction(Type::getVoidTy(CleanupPad->getContext()),
797 Instruction::CleanupRet,
798 OperandTraits<CleanupReturnInst>::op_end(this) - Values,
799 Values, InsertBefore) {
800 init(CleanupPad, UnwindBB);
803 CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
804 unsigned Values, BasicBlock *InsertAtEnd)
805 : Instruction(Type::getVoidTy(CleanupPad->getContext()),
806 Instruction::CleanupRet,
807 OperandTraits<CleanupReturnInst>::op_end(this) - Values,
808 Values, InsertAtEnd) {
809 init(CleanupPad, UnwindBB);
812 //===----------------------------------------------------------------------===//
813 // CatchReturnInst Implementation
814 //===----------------------------------------------------------------------===//
815 void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
820 CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
821 : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
822 OperandTraits<CatchReturnInst>::op_begin(this), 2) {
823 Op<0>() = CRI.Op<0>();
824 Op<1>() = CRI.Op<1>();
827 CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
828 Instruction *InsertBefore)
829 : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
830 OperandTraits<CatchReturnInst>::op_begin(this), 2,
835 CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
836 BasicBlock *InsertAtEnd)
837 : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
838 OperandTraits<CatchReturnInst>::op_begin(this), 2,
843 //===----------------------------------------------------------------------===//
844 // CatchSwitchInst Implementation
845 //===----------------------------------------------------------------------===//
847 CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
848 unsigned NumReservedValues,
849 const Twine &NameStr,
850 Instruction *InsertBefore)
851 : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
855 init(ParentPad, UnwindDest, NumReservedValues + 1);
859 CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
860 unsigned NumReservedValues,
861 const Twine &NameStr, BasicBlock *InsertAtEnd)
862 : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
866 init(ParentPad, UnwindDest, NumReservedValues + 1);
870 CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
871 : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
872 CSI.getNumOperands()) {
873 init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
874 setNumHungOffUseOperands(ReservedSpace);
875 Use *OL = getOperandList();
876 const Use *InOL = CSI.getOperandList();
877 for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
881 void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
882 unsigned NumReservedValues) {
883 assert(ParentPad && NumReservedValues);
885 ReservedSpace = NumReservedValues;
886 setNumHungOffUseOperands(UnwindDest ? 2 : 1);
887 allocHungoffUses(ReservedSpace);
891 setInstructionSubclassData(getSubclassDataFromInstruction() | 1);
892 setUnwindDest(UnwindDest);
896 /// growOperands - grow operands - This grows the operand list in response to a
897 /// push_back style of operation. This grows the number of ops by 2 times.
898 void CatchSwitchInst::growOperands(unsigned Size) {
899 unsigned NumOperands = getNumOperands();
900 assert(NumOperands >= 1);
901 if (ReservedSpace >= NumOperands + Size)
903 ReservedSpace = (NumOperands + Size / 2) * 2;
904 growHungoffUses(ReservedSpace);
907 void CatchSwitchInst::addHandler(BasicBlock *Handler) {
908 unsigned OpNo = getNumOperands();
910 assert(OpNo < ReservedSpace && "Growing didn't work!");
911 setNumHungOffUseOperands(getNumOperands() + 1);
912 getOperandList()[OpNo] = Handler;
915 void CatchSwitchInst::removeHandler(handler_iterator HI) {
916 // Move all subsequent handlers up one.
917 Use *EndDst = op_end() - 1;
918 for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
919 *CurDst = *(CurDst + 1);
920 // Null out the last handler use.
923 setNumHungOffUseOperands(getNumOperands() - 1);
926 //===----------------------------------------------------------------------===//
927 // FuncletPadInst Implementation
928 //===----------------------------------------------------------------------===//
929 void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
930 const Twine &NameStr) {
931 assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
932 llvm::copy(Args, op_begin());
933 setParentPad(ParentPad);
937 FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
938 : Instruction(FPI.getType(), FPI.getOpcode(),
939 OperandTraits<FuncletPadInst>::op_end(this) -
940 FPI.getNumOperands(),
941 FPI.getNumOperands()) {
942 std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
943 setParentPad(FPI.getParentPad());
946 FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
947 ArrayRef<Value *> Args, unsigned Values,
948 const Twine &NameStr, Instruction *InsertBefore)
949 : Instruction(ParentPad->getType(), Op,
950 OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
952 init(ParentPad, Args, NameStr);
955 FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
956 ArrayRef<Value *> Args, unsigned Values,
957 const Twine &NameStr, BasicBlock *InsertAtEnd)
958 : Instruction(ParentPad->getType(), Op,
959 OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
961 init(ParentPad, Args, NameStr);
964 //===----------------------------------------------------------------------===//
965 // UnreachableInst Implementation
966 //===----------------------------------------------------------------------===//
968 UnreachableInst::UnreachableInst(LLVMContext &Context,
969 Instruction *InsertBefore)
970 : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
972 UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
973 : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
976 //===----------------------------------------------------------------------===//
977 // BranchInst Implementation
978 //===----------------------------------------------------------------------===//
980 void BranchInst::AssertOK() {
982 assert(getCondition()->getType()->isIntegerTy(1) &&
983 "May only branch on boolean predicates!");
986 BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
987 : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
988 OperandTraits<BranchInst>::op_end(this) - 1, 1,
990 assert(IfTrue && "Branch destination may not be null!");
994 BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
995 Instruction *InsertBefore)
996 : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
997 OperandTraits<BranchInst>::op_end(this) - 3, 3,
1007 BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
1008 : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1009 OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
1010 assert(IfTrue && "Branch destination may not be null!");
1014 BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
1015 BasicBlock *InsertAtEnd)
1016 : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1017 OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
1026 BranchInst::BranchInst(const BranchInst &BI)
1027 : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
1028 OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
1029 BI.getNumOperands()) {
1030 Op<-1>() = BI.Op<-1>();
1031 if (BI.getNumOperands() != 1) {
1032 assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
1033 Op<-3>() = BI.Op<-3>();
1034 Op<-2>() = BI.Op<-2>();
1036 SubclassOptionalData = BI.SubclassOptionalData;
1039 void BranchInst::swapSuccessors() {
1040 assert(isConditional() &&
1041 "Cannot swap successors of an unconditional branch");
1042 Op<-1>().swap(Op<-2>());
1044 // Update profile metadata if present and it matches our structural
1049 //===----------------------------------------------------------------------===//
1050 // AllocaInst Implementation
1051 //===----------------------------------------------------------------------===//
1053 static Value *getAISize(LLVMContext &Context, Value *Amt) {
1055 Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
1057 assert(!isa<BasicBlock>(Amt) &&
1058 "Passed basic block into allocation size parameter! Use other ctor");
1059 assert(Amt->getType()->isIntegerTy() &&
1060 "Allocation array size is not an integer!");
// Delegating constructors: default the array size (nullptr => one element)
// and then the alignment (0 => unspecified), funneling into the
// fully-specified constructor below.

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       Instruction *InsertBefore)
  : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock *InsertAtEnd)
  : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, Instruction *InsertBefore)
  : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/0, Name, InsertBefore) {}

AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock *InsertAtEnd)
  : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/0, Name, InsertAtEnd) {}
1081 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1082 unsigned Align, const Twine &Name,
1083 Instruction *InsertBefore)
1084 : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
1085 getAISize(Ty->getContext(), ArraySize), InsertBefore),
1087 setAlignment(Align);
1088 assert(!Ty->isVoidTy() && "Cannot allocate void!");
1092 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1093 unsigned Align, const Twine &Name,
1094 BasicBlock *InsertAtEnd)
1095 : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
1096 getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
1098 setAlignment(Align);
1099 assert(!Ty->isVoidTy() && "Cannot allocate void!");
1103 void AllocaInst::setAlignment(unsigned Align) {
1104 assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
1105 assert(Align <= MaximumAlignment &&
1106 "Alignment is greater than MaximumAlignment!");
1107 setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) |
1108 (Log2_32(Align) + 1));
1109 assert(getAlignment() == Align && "Alignment representation error!");
1112 bool AllocaInst::isArrayAllocation() const {
1113 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
1114 return !CI->isOne();
1118 /// isStaticAlloca - Return true if this alloca is in the entry block of the
1119 /// function and is a constant size. If so, the code generator will fold it
1120 /// into the prolog/epilog code, so it is basically free.
1121 bool AllocaInst::isStaticAlloca() const {
1122 // Must be constant size.
1123 if (!isa<ConstantInt>(getArraySize())) return false;
1125 // Must be in the entry block.
1126 const BasicBlock *Parent = getParent();
1127 return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca();
1130 //===----------------------------------------------------------------------===//
1131 // LoadInst Implementation
1132 //===----------------------------------------------------------------------===//
1134 void LoadInst::AssertOK() {
1135 assert(getOperand(0)->getType()->isPointerTy() &&
1136 "Ptr must have pointer type.");
1137 assert(!(isAtomic() && getAlignment() == 0) &&
1138 "Alignment required for atomic load");
// Delegating constructors: progressively default volatility (false),
// alignment (0 = unspecified) and ordering (non-atomic, system scope),
// funneling into the fully-specified constructors below.

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/0, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/0, InsertAE) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}

LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   unsigned Align, BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}
1167 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1168 unsigned Align, AtomicOrdering Order,
1169 SyncScope::ID SSID, Instruction *InsertBef)
1170 : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
1171 assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
1172 setVolatile(isVolatile);
1173 setAlignment(Align);
1174 setAtomic(Order, SSID);
1179 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1180 unsigned Align, AtomicOrdering Order, SyncScope::ID SSID,
1181 BasicBlock *InsertAE)
1182 : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
1183 assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
1184 setVolatile(isVolatile);
1185 setAlignment(Align);
1186 setAtomic(Order, SSID);
1191 void LoadInst::setAlignment(unsigned Align) {
1192 assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
1193 assert(Align <= MaximumAlignment &&
1194 "Alignment is greater than MaximumAlignment!");
1195 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
1196 ((Log2_32(Align)+1)<<1));
1197 assert(getAlignment() == Align && "Alignment representation error!");
1200 //===----------------------------------------------------------------------===//
1201 // StoreInst Implementation
1202 //===----------------------------------------------------------------------===//
1204 void StoreInst::AssertOK() {
1205 assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
1206 assert(getOperand(1)->getType()->isPointerTy() &&
1207 "Ptr must have pointer type!");
1208 assert(getOperand(0)->getType() ==
1209 cast<PointerType>(getOperand(1)->getType())->getElementType()
1210 && "Ptr must be a pointer to Val type!");
1211 assert(!(isAtomic() && getAlignment() == 0) &&
1212 "Alignment required for atomic store");
// Delegating constructors: progressively default volatility (false),
// alignment (0 = unspecified) and ordering (non-atomic, system scope),
// funneling into the fully-specified constructors below.

StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertAtEnd) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}

StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAtEnd) {}
1239 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1240 unsigned Align, AtomicOrdering Order,
1242 Instruction *InsertBefore)
1243 : Instruction(Type::getVoidTy(val->getContext()), Store,
1244 OperandTraits<StoreInst>::op_begin(this),
1245 OperandTraits<StoreInst>::operands(this),
1249 setVolatile(isVolatile);
1250 setAlignment(Align);
1251 setAtomic(Order, SSID);
1255 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1256 unsigned Align, AtomicOrdering Order,
1258 BasicBlock *InsertAtEnd)
1259 : Instruction(Type::getVoidTy(val->getContext()), Store,
1260 OperandTraits<StoreInst>::op_begin(this),
1261 OperandTraits<StoreInst>::operands(this),
1265 setVolatile(isVolatile);
1266 setAlignment(Align);
1267 setAtomic(Order, SSID);
1271 void StoreInst::setAlignment(unsigned Align) {
1272 assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
1273 assert(Align <= MaximumAlignment &&
1274 "Alignment is greater than MaximumAlignment!");
1275 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
1276 ((Log2_32(Align)+1) << 1));
1277 assert(getAlignment() == Align && "Alignment representation error!");
1280 //===----------------------------------------------------------------------===//
1281 // AtomicCmpXchgInst Implementation
1282 //===----------------------------------------------------------------------===//
1284 void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
1285 AtomicOrdering SuccessOrdering,
1286 AtomicOrdering FailureOrdering,
1287 SyncScope::ID SSID) {
1291 setSuccessOrdering(SuccessOrdering);
1292 setFailureOrdering(FailureOrdering);
1293 setSyncScopeID(SSID);
1295 assert(getOperand(0) && getOperand(1) && getOperand(2) &&
1296 "All operands must be non-null!");
1297 assert(getOperand(0)->getType()->isPointerTy() &&
1298 "Ptr must have pointer type!");
1299 assert(getOperand(1)->getType() ==
1300 cast<PointerType>(getOperand(0)->getType())->getElementType()
1301 && "Ptr must be a pointer to Cmp type!");
1302 assert(getOperand(2)->getType() ==
1303 cast<PointerType>(getOperand(0)->getType())->getElementType()
1304 && "Ptr must be a pointer to NewVal type!");
1305 assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
1306 "AtomicCmpXchg instructions must be atomic!");
1307 assert(FailureOrdering != AtomicOrdering::NotAtomic &&
1308 "AtomicCmpXchg instructions must be atomic!");
1309 assert(!isStrongerThan(FailureOrdering, SuccessOrdering) &&
1310 "AtomicCmpXchg failure argument shall be no stronger than the success "
1312 assert(FailureOrdering != AtomicOrdering::Release &&
1313 FailureOrdering != AtomicOrdering::AcquireRelease &&
1314 "AtomicCmpXchg failure ordering cannot include release semantics");
1317 AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
1318 AtomicOrdering SuccessOrdering,
1319 AtomicOrdering FailureOrdering,
1321 Instruction *InsertBefore)
1323 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1324 AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1325 OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
1326 Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
1329 AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
1330 AtomicOrdering SuccessOrdering,
1331 AtomicOrdering FailureOrdering,
1333 BasicBlock *InsertAtEnd)
1335 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1336 AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1337 OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
1338 Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
1341 //===----------------------------------------------------------------------===//
1342 // AtomicRMWInst Implementation
1343 //===----------------------------------------------------------------------===//
1345 void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1346 AtomicOrdering Ordering,
1347 SyncScope::ID SSID) {
1350 setOperation(Operation);
1351 setOrdering(Ordering);
1352 setSyncScopeID(SSID);
1354 assert(getOperand(0) && getOperand(1) &&
1355 "All operands must be non-null!");
1356 assert(getOperand(0)->getType()->isPointerTy() &&
1357 "Ptr must have pointer type!");
1358 assert(getOperand(1)->getType() ==
1359 cast<PointerType>(getOperand(0)->getType())->getElementType()
1360 && "Ptr must be a pointer to Val type!");
1361 assert(Ordering != AtomicOrdering::NotAtomic &&
1362 "AtomicRMW instructions must be atomic!");
1365 AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
1366 AtomicOrdering Ordering,
1368 Instruction *InsertBefore)
1369 : Instruction(Val->getType(), AtomicRMW,
1370 OperandTraits<AtomicRMWInst>::op_begin(this),
1371 OperandTraits<AtomicRMWInst>::operands(this),
1373 Init(Operation, Ptr, Val, Ordering, SSID);
1376 AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
1377 AtomicOrdering Ordering,
1379 BasicBlock *InsertAtEnd)
1380 : Instruction(Val->getType(), AtomicRMW,
1381 OperandTraits<AtomicRMWInst>::op_begin(this),
1382 OperandTraits<AtomicRMWInst>::operands(this),
1384 Init(Operation, Ptr, Val, Ordering, SSID);
1387 StringRef AtomicRMWInst::getOperationName(BinOp Op) {
1389 case AtomicRMWInst::Xchg:
1391 case AtomicRMWInst::Add:
1393 case AtomicRMWInst::Sub:
1395 case AtomicRMWInst::And:
1397 case AtomicRMWInst::Nand:
1399 case AtomicRMWInst::Or:
1401 case AtomicRMWInst::Xor:
1403 case AtomicRMWInst::Max:
1405 case AtomicRMWInst::Min:
1407 case AtomicRMWInst::UMax:
1409 case AtomicRMWInst::UMin:
1411 case AtomicRMWInst::BAD_BINOP:
1412 return "<invalid operation>";
1415 llvm_unreachable("invalid atomicrmw operation");
1418 //===----------------------------------------------------------------------===//
1419 // FenceInst Implementation
1420 //===----------------------------------------------------------------------===//
1422 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
1424 Instruction *InsertBefore)
1425 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
1426 setOrdering(Ordering);
1427 setSyncScopeID(SSID);
1430 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
1432 BasicBlock *InsertAtEnd)
1433 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
1434 setOrdering(Ordering);
1435 setSyncScopeID(SSID);
1438 //===----------------------------------------------------------------------===//
1439 // GetElementPtrInst Implementation
1440 //===----------------------------------------------------------------------===//
1442 void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1443 const Twine &Name) {
1444 assert(getNumOperands() == 1 + IdxList.size() &&
1445 "NumOperands not initialized?");
1447 llvm::copy(IdxList, op_begin() + 1);
1451 GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
1452 : Instruction(GEPI.getType(), GetElementPtr,
1453 OperandTraits<GetElementPtrInst>::op_end(this) -
1454 GEPI.getNumOperands(),
1455 GEPI.getNumOperands()),
1456 SourceElementType(GEPI.SourceElementType),
1457 ResultElementType(GEPI.ResultElementType) {
1458 std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
1459 SubclassOptionalData = GEPI.SubclassOptionalData;
1462 /// getIndexedType - Returns the type of the element that would be accessed with
1463 /// a gep instruction with the specified parameters.
1465 /// The Idxs pointer should point to a continuous piece of memory containing the
1466 /// indices, either as Value* or uint64_t.
1468 /// A null type is returned if the indices are invalid for the specified
1471 template <typename IndexTy>
1472 static Type *getIndexedTypeInternal(Type *Agg, ArrayRef<IndexTy> IdxList) {
1473 // Handle the special case of the empty set index set, which is always valid.
1474 if (IdxList.empty())
1477 // If there is at least one index, the top level type must be sized, otherwise
1478 // it cannot be 'stepped over'.
1479 if (!Agg->isSized())
1482 unsigned CurIdx = 1;
1483 for (; CurIdx != IdxList.size(); ++CurIdx) {
1484 CompositeType *CT = dyn_cast<CompositeType>(Agg);
1485 if (!CT || CT->isPointerTy()) return nullptr;
1486 IndexTy Index = IdxList[CurIdx];
1487 if (!CT->indexValid(Index)) return nullptr;
1488 Agg = CT->getTypeAtIndex(Index);
1490 return CurIdx == IdxList.size() ? Agg : nullptr;
1493 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
1494 return getIndexedTypeInternal(Ty, IdxList);
1497 Type *GetElementPtrInst::getIndexedType(Type *Ty,
1498 ArrayRef<Constant *> IdxList) {
1499 return getIndexedTypeInternal(Ty, IdxList);
1502 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
1503 return getIndexedTypeInternal(Ty, IdxList);
1506 /// hasAllZeroIndices - Return true if all of the indices of this GEP are
1507 /// zeros. If so, the result pointer and the first operand have the same
1508 /// value, just potentially different types.
1509 bool GetElementPtrInst::hasAllZeroIndices() const {
1510 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1511 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
1512 if (!CI->isZero()) return false;
1520 /// hasAllConstantIndices - Return true if all of the indices of this GEP are
1521 /// constant integers. If so, the result pointer and the first operand have
1522 /// a constant offset between them.
1523 bool GetElementPtrInst::hasAllConstantIndices() const {
1524 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1525 if (!isa<ConstantInt>(getOperand(i)))
1531 void GetElementPtrInst::setIsInBounds(bool B) {
1532 cast<GEPOperator>(this)->setIsInBounds(B);
1535 bool GetElementPtrInst::isInBounds() const {
1536 return cast<GEPOperator>(this)->isInBounds();
1539 bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
1540 APInt &Offset) const {
1541 // Delegate to the generic GEPOperator implementation.
1542 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
1545 //===----------------------------------------------------------------------===//
1546 // ExtractElementInst Implementation
1547 //===----------------------------------------------------------------------===//
1549 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
1551 Instruction *InsertBef)
1552 : Instruction(cast<VectorType>(Val->getType())->getElementType(),
1554 OperandTraits<ExtractElementInst>::op_begin(this),
1556 assert(isValidOperands(Val, Index) &&
1557 "Invalid extractelement instruction operands!");
1563 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
1565 BasicBlock *InsertAE)
1566 : Instruction(cast<VectorType>(Val->getType())->getElementType(),
1568 OperandTraits<ExtractElementInst>::op_begin(this),
1570 assert(isValidOperands(Val, Index) &&
1571 "Invalid extractelement instruction operands!");
1578 bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
1579 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
1584 //===----------------------------------------------------------------------===//
1585 // InsertElementInst Implementation
1586 //===----------------------------------------------------------------------===//
1588 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
1590 Instruction *InsertBef)
1591 : Instruction(Vec->getType(), InsertElement,
1592 OperandTraits<InsertElementInst>::op_begin(this),
1594 assert(isValidOperands(Vec, Elt, Index) &&
1595 "Invalid insertelement instruction operands!");
1602 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
1604 BasicBlock *InsertAE)
1605 : Instruction(Vec->getType(), InsertElement,
1606 OperandTraits<InsertElementInst>::op_begin(this),
1608 assert(isValidOperands(Vec, Elt, Index) &&
1609 "Invalid insertelement instruction operands!");
1617 bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
1618 const Value *Index) {
1619 if (!Vec->getType()->isVectorTy())
1620 return false; // First operand of insertelement must be vector type.
1622 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
1623 return false;// Second operand of insertelement must be vector element type.
1625 if (!Index->getType()->isIntegerTy())
1626 return false; // Third operand of insertelement must be i32.
1630 //===----------------------------------------------------------------------===//
1631 // ShuffleVectorInst Implementation
1632 //===----------------------------------------------------------------------===//
1634 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1636 Instruction *InsertBefore)
1637 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1638 cast<VectorType>(Mask->getType())->getNumElements()),
1640 OperandTraits<ShuffleVectorInst>::op_begin(this),
1641 OperandTraits<ShuffleVectorInst>::operands(this),
1643 assert(isValidOperands(V1, V2, Mask) &&
1644 "Invalid shuffle vector instruction operands!");
1651 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1653 BasicBlock *InsertAtEnd)
1654 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1655 cast<VectorType>(Mask->getType())->getNumElements()),
1657 OperandTraits<ShuffleVectorInst>::op_begin(this),
1658 OperandTraits<ShuffleVectorInst>::operands(this),
1660 assert(isValidOperands(V1, V2, Mask) &&
1661 "Invalid shuffle vector instruction operands!");
1669 bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
1670 const Value *Mask) {
1671 // V1 and V2 must be vectors of the same type.
1672 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
1675 // Mask must be vector of i32.
1676 auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
1677 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32))
1680 // Check to see if Mask is valid.
1681 if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
1684 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
1685 unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
1686 for (Value *Op : MV->operands()) {
1687 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
1688 if (CI->uge(V1Size*2))
1690 } else if (!isa<UndefValue>(Op)) {
1697 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1698 unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
1699 for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i)
1700 if (CDS->getElementAsInteger(i) >= V1Size*2)
1705 // The bitcode reader can create a place holder for a forward reference
1706 // used as the shuffle mask. When this occurs, the shuffle mask will
1707 // fall into this case and fail. To avoid this error, do this bit of
1708 // ugliness to allow such a mask pass.
1709 if (const auto *CE = dyn_cast<ConstantExpr>(Mask))
1710 if (CE->getOpcode() == Instruction::UserOp1)
1716 int ShuffleVectorInst::getMaskValue(const Constant *Mask, unsigned i) {
1717 assert(i < Mask->getType()->getVectorNumElements() && "Index out of range");
1718 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask))
1719 return CDS->getElementAsInteger(i);
1720 Constant *C = Mask->getAggregateElement(i);
1721 if (isa<UndefValue>(C))
1723 return cast<ConstantInt>(C)->getZExtValue();
1726 void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
1727 SmallVectorImpl<int> &Result) {
1728 unsigned NumElts = Mask->getType()->getVectorNumElements();
1730 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1731 for (unsigned i = 0; i != NumElts; ++i)
1732 Result.push_back(CDS->getElementAsInteger(i));
1735 for (unsigned i = 0; i != NumElts; ++i) {
1736 Constant *C = Mask->getAggregateElement(i);
1737 Result.push_back(isa<UndefValue>(C) ? -1 :
1738 cast<ConstantInt>(C)->getZExtValue());
1742 static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1743 assert(!Mask.empty() && "Shuffle mask must contain elements");
1744 bool UsesLHS = false;
1745 bool UsesRHS = false;
1746 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
1749 assert(Mask[i] >= 0 && Mask[i] < (NumOpElts * 2) &&
1750 "Out-of-bounds shuffle mask element");
1751 UsesLHS |= (Mask[i] < NumOpElts);
1752 UsesRHS |= (Mask[i] >= NumOpElts);
1753 if (UsesLHS && UsesRHS)
1756 assert((UsesLHS ^ UsesRHS) && "Should have selected from exactly 1 source");
1760 bool ShuffleVectorInst::isSingleSourceMask(ArrayRef<int> Mask) {
1761 // We don't have vector operand size information, so assume operands are the
1762 // same size as the mask.
1763 return isSingleSourceMaskImpl(Mask, Mask.size());
1766 static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
1767 if (!isSingleSourceMaskImpl(Mask, NumOpElts))
1769 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
1772 if (Mask[i] != i && Mask[i] != (NumOpElts + i))
1778 bool ShuffleVectorInst::isIdentityMask(ArrayRef<int> Mask) {
1779 // We don't have vector operand size information, so assume operands are the
1780 // same size as the mask.
1781 return isIdentityMaskImpl(Mask, Mask.size());
1784 bool ShuffleVectorInst::isReverseMask(ArrayRef<int> Mask) {
1785 if (!isSingleSourceMask(Mask))
1787 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
1790 if (Mask[i] != (NumElts - 1 - i) && Mask[i] != (NumElts + NumElts - 1 - i))
1796 bool ShuffleVectorInst::isZeroEltSplatMask(ArrayRef<int> Mask) {
1797 if (!isSingleSourceMask(Mask))
1799 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
1802 if (Mask[i] != 0 && Mask[i] != NumElts)
1808 bool ShuffleVectorInst::isSelectMask(ArrayRef<int> Mask) {
1809 // Select is differentiated from identity. It requires using both sources.
1810 if (isSingleSourceMask(Mask))
1812 for (int i = 0, NumElts = Mask.size(); i < NumElts; ++i) {
1815 if (Mask[i] != i && Mask[i] != (NumElts + i))
1821 bool ShuffleVectorInst::isTransposeMask(ArrayRef<int> Mask) {
1822 // Example masks that will return true:
1823 // v1 = <a, b, c, d>
1824 // v2 = <e, f, g, h>
1825 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
1826 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
1828 // 1. The number of elements in the mask must be a power-of-2 and at least 2.
1829 int NumElts = Mask.size();
1830 if (NumElts < 2 || !isPowerOf2_32(NumElts))
1833 // 2. The first element of the mask must be either a 0 or a 1.
1834 if (Mask[0] != 0 && Mask[0] != 1)
1837 // 3. The difference between the first 2 elements must be equal to the
1838 // number of elements in the mask.
1839 if ((Mask[1] - Mask[0]) != NumElts)
1842 // 4. The difference between consecutive even-numbered and odd-numbered
1843 // elements must be equal to 2.
1844 for (int i = 2; i < NumElts; ++i) {
1845 int MaskEltVal = Mask[i];
1846 if (MaskEltVal == -1)
1848 int MaskEltPrevVal = Mask[i - 2];
1849 if (MaskEltVal - MaskEltPrevVal != 2)
1855 bool ShuffleVectorInst::isExtractSubvectorMask(ArrayRef<int> Mask,
1856 int NumSrcElts, int &Index) {
1857 // Must extract from a single source.
1858 if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
1861 // Must be smaller (else this is an Identity shuffle).
1862 if (NumSrcElts <= (int)Mask.size())
1865 // Find start of extraction, accounting that we may start with an UNDEF.
1867 for (int i = 0, e = Mask.size(); i != e; ++i) {
1871 int Offset = (M % NumSrcElts) - i;
1872 if (0 <= SubIndex && SubIndex != Offset)
1877 if (0 <= SubIndex) {
1884 bool ShuffleVectorInst::isIdentityWithPadding() const {
1885 int NumOpElts = Op<0>()->getType()->getVectorNumElements();
1886 int NumMaskElts = getType()->getVectorNumElements();
1887 if (NumMaskElts <= NumOpElts)
1890 // The first part of the mask must choose elements from exactly 1 source op.
1891 SmallVector<int, 16> Mask = getShuffleMask();
1892 if (!isIdentityMaskImpl(Mask, NumOpElts))
1895 // All extending must be with undef elements.
1896 for (int i = NumOpElts; i < NumMaskElts; ++i)
1903 bool ShuffleVectorInst::isIdentityWithExtract() const {
1904 int NumOpElts = Op<0>()->getType()->getVectorNumElements();
1905 int NumMaskElts = getType()->getVectorNumElements();
1906 if (NumMaskElts >= NumOpElts)
1909 return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
1912 bool ShuffleVectorInst::isConcat() const {
1913 // Vector concatenation is differentiated from identity with padding.
1914 if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
1917 int NumOpElts = Op<0>()->getType()->getVectorNumElements();
1918 int NumMaskElts = getType()->getVectorNumElements();
1919 if (NumMaskElts != NumOpElts * 2)
1922 // Use the mask length rather than the operands' vector lengths here. We
1923 // already know that the shuffle returns a vector twice as long as the inputs,
1924 // and neither of the inputs are undef vectors. If the mask picks consecutive
1925 // elements from both inputs, then this is a concatenation of the inputs.
1926 return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
1929 //===----------------------------------------------------------------------===//
1930 // InsertValueInst Class
1931 //===----------------------------------------------------------------------===//
/// Shared constructor helper: validates the aggregate/value/index triple and
/// records the index list. The two operands are set by the callers.
void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index
  // (other than weirdness with &*IdxBegin being invalid; see
  // getelementptr's init routine for example). But there's no
  // present need to support it.
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");

  // The inserted value's type must match what indexing into Agg produces.
  assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
         Val->getType() && "Inserted value must match indexed type!");

  Indices.append(Idxs.begin(), Idxs.end());
// Copy constructor: duplicates the operand list, index list, and optional
// subclass flags of the source instruction.
InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
  : Instruction(IVI.getType(), InsertValue,
                OperandTraits<InsertValueInst>::op_begin(this), 2),
    Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);  // Aggregate operand.
  Op<1>() = IVI.getOperand(1);  // Inserted value operand.
  SubclassOptionalData = IVI.SubclassOptionalData;
1961 //===----------------------------------------------------------------------===//
1962 // ExtractValueInst Class
1963 //===----------------------------------------------------------------------===//
/// Shared constructor helper: validates and records the index list.
void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
  assert(getNumOperands() == 1 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index.
  // But there's no present need to support it.
  assert(!Idxs.empty() && "ExtractValueInst must have at least one index");

  Indices.append(Idxs.begin(), Idxs.end());
// Copy constructor: duplicates the aggregate operand, index list, and optional
// subclass flags of the source instruction.
ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
  : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
    Indices(EVI.Indices) {
  SubclassOptionalData = EVI.SubclassOptionalData;
// getIndexedType - Returns the type of the element that would be extracted
// with an extractvalue instruction with the specified parameters.
//
// A null type is returned if the indices are invalid for the specified
// aggregate type.
Type *ExtractValueInst::getIndexedType(Type *Agg,
                                       ArrayRef<unsigned> Idxs) {
  // Walk the index list, stepping Agg down one aggregate level per index.
  for (unsigned Index : Idxs) {
    // We can't use CompositeType::indexValid(Index) here.
    // indexValid() always returns true for arrays because getelementptr allows
    // out-of-bounds indices. Since we don't allow those for extractvalue and
    // insertvalue we need to check array indexing manually.
    // Since the only other types we can index into are struct types it's just
    // as easy to check those manually as well.
    if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
      if (Index >= AT->getNumElements())
    } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
      if (Index >= ST->getNumElements())
      // Not a valid type to index into.
    Agg = cast<CompositeType>(Agg)->getTypeAtIndex(Index);
  return const_cast<Type*>(Agg);
2013 //===----------------------------------------------------------------------===//
2014 // UnaryOperator Class
2015 //===----------------------------------------------------------------------===//
// Construct a unary operator of opcode iType on S, inserted before the given
// instruction.
UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
                             Type *Ty, const Twine &Name,
                             Instruction *InsertBefore)
  : UnaryInstruction(Ty, iType, S, InsertBefore) {
// Construct a unary operator of opcode iType on S, appended to the given
// basic block.
UnaryOperator::UnaryOperator(UnaryOps iType, Value *S,
                             Type *Ty, const Twine &Name,
                             BasicBlock *InsertAtEnd)
  : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
// Factory: the result type is always the operand's type for unary operators.
UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
                                     Instruction *InsertBefore) {
  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
// Factory: create the instruction unattached, then append it to the block.
UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S,
                                     BasicBlock *InsertAtEnd) {
  UnaryOperator *Res = Create(Op, S, Name);
  InsertAtEnd->getInstList().push_back(Res);
// Sanity-check the constructed unary operator: the result type must equal the
// operand type, and the opcode-specific constraints below must hold.
void UnaryOperator::AssertOK() {
  Value *LHS = getOperand(0);
  (void)LHS; // Silence warnings.
  switch (getOpcode()) {
    assert(getType() == LHS->getType() &&
           "Unary operation should return same type as operand!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
  default: llvm_unreachable("Invalid opcode provided");
2066 //===----------------------------------------------------------------------===//
2067 // BinaryOperator Class
2068 //===----------------------------------------------------------------------===//
// Construct a binary operator of opcode iType on S1, S2, inserted before the
// given instruction.
BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               Instruction *InsertBefore)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
// Construct a binary operator of opcode iType on S1, S2, appended to the
// given basic block.
BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
                               Type *Ty, const Twine &Name,
                               BasicBlock *InsertAtEnd)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
// Sanity-check the constructed binary operator: both operands must share a
// type, which must also be the result type; each opcode family additionally
// requires an integer or floating-point (scalar or vector) type, as spelled
// out in the assertion messages below.
void BinaryOperator::AssertOK() {
  Value *LHS = getOperand(0), *RHS = getOperand(1);
  (void)LHS; (void)RHS; // Silence warnings.
  assert(LHS->getType() == RHS->getType() &&
         "Binary operator operand types must match!");
  switch (getOpcode()) {
    // Integer arithmetic.
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
  case FAdd: case FSub:
    // Floating-point arithmetic.
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    // Integer division.
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UDIV");
    // Floating-point division.
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FDIV");
    // Integer remainder.
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    // Floating-point remainder.
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    // Shifts.
    assert(getType() == LHS->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    // Bitwise logical operations.
    assert(getType() == LHS->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
  default: llvm_unreachable("Invalid opcode provided");
// Factory: the result type is the (shared) operand type.
BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       Instruction *InsertBefore) {
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
// Factory: create the instruction unattached, then append it to the block.
BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
                                       BasicBlock *InsertAtEnd) {
  BinaryOperator *Res = Create(Op, S1, S2, Name);
  InsertAtEnd->getInstList().push_back(Res);
// Integer negation, modeled as a subtraction from zero.
BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          Instruction *InsertBefore) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return new BinaryOperator(Instruction::Sub,
                            Op->getType(), Name, InsertBefore);
// Integer negation (block-append variant), modeled as a subtraction from zero.
BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
                                          BasicBlock *InsertAtEnd) {
  Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
  return new BinaryOperator(Instruction::Sub,
                            Op->getType(), Name, InsertAtEnd);
2196 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
2197 Instruction *InsertBefore) {
2198 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2199 return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore);
2202 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
2203 BasicBlock *InsertAtEnd) {
2204 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2205 return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd);
2208 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
2209 Instruction *InsertBefore) {
2210 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2211 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore);
2214 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
2215 BasicBlock *InsertAtEnd) {
2216 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2217 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd);
2220 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name,
2221 Instruction *InsertBefore) {
2222 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2223 return new BinaryOperator(Instruction::FSub, zero, Op,
2224 Op->getType(), Name, InsertBefore);
2227 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name,
2228 BasicBlock *InsertAtEnd) {
2229 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2230 return new BinaryOperator(Instruction::FSub, zero, Op,
2231 Op->getType(), Name, InsertAtEnd);
2234 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
2235 Instruction *InsertBefore) {
2236 Constant *C = Constant::getAllOnesValue(Op->getType());
2237 return new BinaryOperator(Instruction::Xor, Op, C,
2238 Op->getType(), Name, InsertBefore);
2241 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
2242 BasicBlock *InsertAtEnd) {
2243 Constant *AllOnes = Constant::getAllOnesValue(Op->getType());
2244 return new BinaryOperator(Instruction::Xor, Op, AllOnes,
2245 Op->getType(), Name, InsertAtEnd);
2248 // Exchange the two operands to this instruction. This instruction is safe to
2249 // use on any binary instruction and does not modify the semantics of the
2250 // instruction. If the instruction is order-dependent (SetLT f.e.), the opcode
2252 bool BinaryOperator::swapOperands() {
2253 if (!isCommutative())
2254 return true; // Can't commute operands
2255 Op<0>().swap(Op<1>());
2259 //===----------------------------------------------------------------------===//
2260 // FPMathOperator Class
2261 //===----------------------------------------------------------------------===//
/// Return the requested accuracy recorded in this operation's !fpmath
/// metadata: operand 0 of the node, stored as a ConstantFP.
float FPMathOperator::getFPAccuracy() const {
    cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
  ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
  return Accuracy->getValueAPF().convertToFloat();
2272 //===----------------------------------------------------------------------===//
2274 //===----------------------------------------------------------------------===//
// Just determine if this cast only deals with integral->integral conversion.
// ZExt/SExt/Trunc are integer casts by definition; a BitCast counts only when
// both the operand and result types are integers.
bool CastInst::isIntegerCast() const {
  switch (getOpcode()) {
  default: return false;
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::Trunc:
  case Instruction::BitCast:
    return getOperand(0)->getType()->isIntegerTy() &&
           getType()->isIntegerTy();
/// Return true if this cast provably loses no information: only bitcasts
/// qualify, and then only for identity or pointer-to-pointer conversions.
bool CastInst::isLosslessCast() const {
  // Only BitCast can be lossless, exit fast if we're not BitCast
  if (getOpcode() != Instruction::BitCast)

  // Identity cast is always lossless
  Type *SrcTy = getOperand(0)->getType();
  Type *DstTy = getType();

  // Pointer to pointer is always lossless.
  if (SrcTy->isPointerTy())
    return DstTy->isPointerTy();
  return false;  // Other types have no identity values
/// This function determines if the CastInst does not require any bits to be
/// changed in order to effect the cast. Essentially, it identifies cases where
/// no code gen is necessary for the cast, hence the name no-op cast. For
/// example, the following are all no-op casts:
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32 ; on 32-bit plaforms only
/// Determine if the described cast is a no-op.
bool CastInst::isNoopCast(Instruction::CastOps Opcode,
                          const DataLayout &DL) {
  default: llvm_unreachable("Invalid CastOp");
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    // TODO: Target informations may give a more accurate answer here.
  case Instruction::BitCast:
    return true;  // BitCast never modifies bits.
  case Instruction::PtrToInt:
    // No-op only when the pointer's integer width equals the result width.
    return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
           DestTy->getScalarSizeInBits();
  case Instruction::IntToPtr:
    // No-op only when the source width equals the pointer's integer width.
    return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
           SrcTy->getScalarSizeInBits();
2344 bool CastInst::isNoopCast(const DataLayout &DL) const {
2345 return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
/// * %F = firstOpcode SrcTy %x to MidTy
/// * %S = secondOpcode MidTy %F to DstTy
/// The function returns a resultOpcode so these two casts can be replaced with:
/// * %Replacement = resultOpcode %SrcTy %x to DstTy
/// If no such cast is permitted, the function returns 0.
unsigned CastInst::isEliminableCastPair(
  Instruction::CastOps firstOp, Instruction::CastOps secondOp,
  Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
  Type *DstIntPtrTy) {
  // Define the 144 possibilities for these two cast instructions. The values
  // in this matrix determine what to do in a given situation and select the
  // case in the switch below.  The rows correspond to firstOp, the columns
  // correspond to secondOp.  In looking at the table below, keep in mind
  // the following cast properties:
  //
  //          Size Compare       Source               Destination
  // Operator  Src ? Size   Type       Sign         Type       Sign
  // -------- ------------ -------------------   ---------------------
  // TRUNC         >       Integer      Any        Integral     Any
  // ZEXT          <       Integral   Unsigned     Integer      Any
  // SEXT          <       Integral    Signed      Integer      Any
  // FPTOUI       n/a      FloatPt      n/a        Integral   Unsigned
  // FPTOSI       n/a      FloatPt      n/a        Integral    Signed
  // UITOFP       n/a      Integral   Unsigned     FloatPt      n/a
  // SITOFP       n/a      Integral    Signed      FloatPt      n/a
  // FPTRUNC       >       FloatPt      n/a        FloatPt      n/a
  // FPEXT         <       FloatPt      n/a        FloatPt      n/a
  // PTRTOINT     n/a      Pointer      n/a        Integral   Unsigned
  // INTTOPTR     n/a      Integral   Unsigned     Pointer      n/a
  // BITCAST       =       FirstClass   n/a        FirstClass   n/a
  // ADDRSPCST   n/a      Pointer      n/a        Pointer      n/a
  //
  // NOTE: some transforms are safe, but we consider them to be non-profitable.
  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
  // into "fptoui double to i64", but this loses information about the range
  // of the produced value (we no longer know the top-part is all zeros).
  // Further this conversion is often much more expensive for typical hardware,
  // and causes issues when building libgcc.  We disallow fptosi+sext for the
  // same reason.
  const unsigned numCastOps =
    Instruction::CastOpsEnd - Instruction::CastOpsBegin;
  static const uint8_t CastResults[numCastOps][numCastOps] = {
    // T        F  F  U  S  F  F  P  I  B  A  -+
    // R  Z  S  P  P  I  I  T  P  2  N  T  S   |
    // U  E  E  2  2  2  2  R  E  I  T  C  C   +- secondOp
    // N  X  X  U  S  F  F  N  X  N  2  V  V   |
    // C  T  T  I  I  P  P  C  T  T  P  T  T  -+
    {  1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc         -+
    {  8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt           |
    {  8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt           |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI         |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP         +- firstOp
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc        |
    { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt          |
    {  1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt       |
    { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr       |
    {  5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast        |
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+

  // TODO: This logic could be encoded into the table above and handled in the
  // If either of the casts are a bitcast from scalar to vector, disallow the
  // merging. However, any pair of bitcasts are allowed.
  bool IsFirstBitcast  = (firstOp == Instruction::BitCast);
  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;

  // Check if any of the casts convert scalars <-> vectors.
  if ((IsFirstBitcast  && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
      (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
    if (!AreBothBitcasts)

  // Look up the action for this (firstOp, secondOp) pair in the table.
  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
                            [secondOp-Instruction::CastOpsBegin];
      // Categorically disallowed.
      // Allowed, use first cast's opcode.
      // Allowed, use second cast's opcode.
      // No-op cast in second op implies firstOp as long as the DestTy
      // is integer and we are not converting between a vector and a
      // scalar type.
      if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
      // No-op cast in second op implies firstOp as long as the DestTy
      // is floating point.
      if (DstTy->isFloatingPointTy())
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is an integer.
      if (SrcTy->isIntegerTy())
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is a floating point.
      if (SrcTy->isFloatingPointTy())
      // Cannot simplify if address spaces are different!
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())

      unsigned MidSize = MidTy->getScalarSizeInBits();
      // We can still fold this without knowing the actual sizes as long we
      // know that the intermediate pointer is the largest possible
      // pointer size.
      // FIXME: Is this always true?
        return Instruction::BitCast;

      // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
      if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
      unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
      if (MidSize >= PtrSize)
        return Instruction::BitCast;

      // ext, trunc -> bitcast,    if the SrcTy and DstTy are same size
      // ext, trunc -> ext,        if sizeof(SrcTy) < sizeof(DstTy)
      // ext, trunc -> trunc,      if sizeof(SrcTy) > sizeof(DstTy)
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize == DstSize)
        return Instruction::BitCast;
      else if (SrcSize < DstSize)

      // zext, sext -> zext, because sext can't sign extend after zext
      return Instruction::ZExt;

      // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
      unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize <= PtrSize && SrcSize == DstSize)
        return Instruction::BitCast;

      // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
      // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return Instruction::AddrSpaceCast;
      return Instruction::BitCast;

      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correcteness of the sequence due to semantic
      // change of bitcast.
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal addrspacecast, bitcast sequence!");
      // Allowed, use first cast's opcode

      // bitcast, addrspacecast -> addrspacecast if the element type of
      // bitcast's source is the same as that of addrspacecast's destination.
      if (SrcTy->getScalarType()->getPointerElementType() ==
          DstTy->getScalarType()->getPointerElementType())
        return Instruction::AddrSpaceCast;

      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correcteness of the sequence due to semantic
      // change of bitcast.
        SrcTy->isIntOrIntVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal inttoptr, bitcast sequence!");
      // Allowed, use first cast's opcode

      // FIXME: this state can be merged with (2), but the following assert
      // is useful to check the correcteness of the sequence due to semantic
      // change of bitcast.
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isIntOrIntVectorTy() &&
        SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
        "Illegal bitcast, ptrtoint sequence!");
      // Allowed, use second cast's opcode

      // (sitofp (zext x)) -> (uitofp x)
      return Instruction::UIToFP;

      // Cast combination can't happen (error in input). This is for all cases
      // where the MidTy is not the same for the two cast instructions.
      llvm_unreachable("Invalid Cast Combination");

  llvm_unreachable("Error in CastResults table!!!");
/// Factory dispatching on the opcode to the concrete CastInst subclass,
/// inserted before the given instruction.
CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
  const Twine &Name, Instruction *InsertBefore) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertBefore);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertBefore);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertBefore);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertBefore);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertBefore);
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertBefore);
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertBefore);
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertBefore);
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertBefore);
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertBefore);
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertBefore);
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertBefore);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
  default: llvm_unreachable("Invalid opcode provided");
/// Factory dispatching on the opcode to the concrete CastInst subclass,
/// appended to the given basic block.
CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
  const Twine &Name, BasicBlock *InsertAtEnd) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertAtEnd);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertAtEnd);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertAtEnd);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertAtEnd);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertAtEnd);
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertAtEnd);
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertAtEnd);
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertAtEnd);
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertAtEnd);
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertAtEnd);
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertAtEnd);
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertAtEnd);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
  default: llvm_unreachable("Invalid opcode provided");
/// Create a BitCast when the scalar widths already match, otherwise a ZExt.
CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
                                        Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
/// Create a BitCast when the scalar widths already match, otherwise a ZExt
/// (block-append variant).
CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
                                        BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
/// Create a BitCast when the scalar widths already match, otherwise a SExt.
CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
                                        Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
/// Create a BitCast when the scalar widths already match, otherwise a SExt
/// (block-append variant).
CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
                                        BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
/// Create a BitCast when the scalar widths already match, otherwise a Trunc.
CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
                                         Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
/// Create a BitCast when the scalar widths already match, otherwise a Trunc
/// (block-append variant).
CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
                                         BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
/// Cast a pointer (or vector of pointers) to an integer type via PtrToInt, or
/// to another pointer type via bitcast/addrspacecast (block-append variant).
CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
                                      BasicBlock *InsertAtEnd) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
  // Vector-ness and element counts must agree between source and destination.
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  assert((!Ty->isVectorTy() ||
          Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) &&

  if (Ty->isIntOrIntVectorTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);

  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
/// Create a BitCast or a PtrToInt cast instruction: a pointer (or vector of
/// pointers) becomes an integer type via PtrToInt, or another pointer type
/// via bitcast/addrspacecast.
CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
                                      Instruction *InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
  // Vector-ness and element counts must agree between source and destination.
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  assert((!Ty->isVectorTy() ||
          Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) &&

  if (Ty->isIntOrIntVectorTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);

  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
/// Pointer-to-pointer cast: AddrSpaceCast when the address spaces differ,
/// otherwise a plain BitCast (block-append variant).
CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
  BasicBlock *InsertAtEnd) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");

  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);

  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
/// Pointer-to-pointer cast: AddrSpaceCast when the address spaces differ,
/// otherwise a plain BitCast.
CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
  Instruction *InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");

  if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);

  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
/// Create PtrToInt for pointer->integer, IntToPtr for integer->pointer, and
/// a BitCast for everything else.
CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
                                           Instruction *InsertBefore) {
  if (S->getType()->isPointerTy() && Ty->isIntegerTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
  if (S->getType()->isIntegerTy() && Ty->isPointerTy())
    return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);

  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
2737 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
2738 bool isSigned, const Twine &Name,
2739 Instruction *InsertBefore) {
2740 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
2741 "Invalid integer cast");
2742 unsigned SrcBits = C->getType()->getScalarSizeInBits();
2743 unsigned DstBits = Ty->getScalarSizeInBits();
2744 Instruction::CastOps opcode =
2745 (SrcBits == DstBits ? Instruction::BitCast :
2746 (SrcBits > DstBits ? Instruction::Trunc :
2747 (isSigned ? Instruction::SExt : Instruction::ZExt)));
2748 return Create(opcode, C, Ty, Name, InsertBefore);
2751 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
2752 bool isSigned, const Twine &Name,
2753 BasicBlock *InsertAtEnd) {
2754 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
2756 unsigned SrcBits = C->getType()->getScalarSizeInBits();
2757 unsigned DstBits = Ty->getScalarSizeInBits();
2758 Instruction::CastOps opcode =
2759 (SrcBits == DstBits ? Instruction::BitCast :
2760 (SrcBits > DstBits ? Instruction::Trunc :
2761 (isSigned ? Instruction::SExt : Instruction::ZExt)));
2762 return Create(opcode, C, Ty, Name, InsertAtEnd);
/// Create a floating-point cast: BitCast for equal widths, FPTrunc when
/// narrowing, FPExt when widening.
CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
                                 Instruction *InsertBefore) {
  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  Instruction::CastOps opcode =
    (SrcBits == DstBits ? Instruction::BitCast :
     (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
  return Create(opcode, C, Ty, Name, InsertBefore);
/// Create a floating-point cast (block-append variant): BitCast for equal
/// widths, FPTrunc when narrowing, FPExt when widening.
CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
                                 BasicBlock *InsertAtEnd) {
  assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
  unsigned SrcBits = C->getType()->getScalarSizeInBits();
  unsigned DstBits = Ty->getScalarSizeInBits();
  Instruction::CastOps opcode =
    (SrcBits == DstBits ? Instruction::BitCast :
     (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
  return Create(opcode, C, Ty, Name, InsertAtEnd);
// Check whether it is valid to call getCastOpcode for these types.
// This routine must be kept in sync with getCastOpcode.
bool CastInst::isCastable(Type *SrcTy, Type *DestTy) {
  // Only first-class (SSA-value) types can be cast at all.
  if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())

  if (SrcTy == DestTy)

  // Same-length vector casts reduce to casts of their element types.
  if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
    if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
      if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
        // An element by element cast.  Valid if casting the elements is valid.
        SrcTy = SrcVecTy->getElementType();
        DestTy = DestVecTy->getElementType();

  // Get the bit sizes, we'll need these
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();   // 0 for ptr
  unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr

  // Run through the possibilities ...
  if (DestTy->isIntegerTy()) {               // Casting to integral
    if (SrcTy->isIntegerTy())                // Casting from integral
    if (SrcTy->isFloatingPointTy())          // Casting from floating pt
    if (SrcTy->isVectorTy())                 // Casting from vector
      return DestBits == SrcBits;
                                             // Casting from something else
    return SrcTy->isPointerTy();
  if (DestTy->isFloatingPointTy()) {         // Casting to floating pt
    if (SrcTy->isIntegerTy())                // Casting from integral
    if (SrcTy->isFloatingPointTy())          // Casting from floating pt
    if (SrcTy->isVectorTy())                 // Casting from vector
      return DestBits == SrcBits;
                                             // Casting from something else
  if (DestTy->isVectorTy())                  // Casting to vector
    return DestBits == SrcBits;
  if (DestTy->isPointerTy()) {               // Casting to pointer
    if (SrcTy->isPointerTy())                // Casting from pointer
    return SrcTy->isIntegerTy();             // Casting from integral
  if (DestTy->isX86_MMXTy()) {
    if (SrcTy->isVectorTy())
      return DestBits == SrcBits;       // 64-bit vector to MMX
  }                                     // Casting to something else
2848 bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
2849 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
2852 if (SrcTy == DestTy)
2855 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
2856 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
2857 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
2858 // An element by element cast. Valid if casting the elements is valid.
2859 SrcTy = SrcVecTy->getElementType();
2860 DestTy = DestVecTy->getElementType();
2865 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
2866 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
2867 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
2871 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
2872 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
2874 // Could still have vectors of pointers if the number of elements doesn't
2876 if (SrcBits == 0 || DestBits == 0)
2879 if (SrcBits != DestBits)
2882 if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
2888 bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
2889 const DataLayout &DL) {
2890 // ptrtoint and inttoptr are not allowed on non-integral pointers
2891 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
2892 if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
2893 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
2894 !DL.isNonIntegralPointerType(PtrTy));
2895 if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
2896 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
2897 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
2898 !DL.isNonIntegralPointerType(PtrTy));
2900 return isBitCastable(SrcTy, DestTy);
2903 // Provide a way to get a "cast" where the cast opcode is inferred from the
2904 // types and size of the operand. This, basically, is a parallel of the
2905 // logic in the castIsValid function below. This axiom should hold:
2906 // castIsValid( getCastOpcode(Val, Ty), Val, Ty)
2907 // should not assert in castIsValid. In other words, this produces a "correct"
2908 // casting opcode for the arguments passed to it.
2909 // This routine must be kept in sync with isCastable.
2910 Instruction::CastOps
2911 CastInst::getCastOpcode(
2912 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
2913 Type *SrcTy = Src->getType();
2915 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
2916 "Only first class types are castable!");
2918 if (SrcTy == DestTy)
2921 // FIXME: Check address space sizes here
2922 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
2923 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
2924 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
2925 // An element by element cast. Find the appropriate opcode based on the
2927 SrcTy = SrcVecTy->getElementType();
2928 DestTy = DestVecTy->getElementType();
2931 // Get the bit sizes, we'll need these
2932 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
2933 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
2935 // Run through the possibilities ...
2936 if (DestTy->isIntegerTy()) { // Casting to integral
2937 if (SrcTy->isIntegerTy()) { // Casting from integral
2938 if (DestBits < SrcBits)
2939 return Trunc; // int -> smaller int
2940 else if (DestBits > SrcBits) { // its an extension
2942 return SExt; // signed -> SEXT
2944 return ZExt; // unsigned -> ZEXT
2946 return BitCast; // Same size, No-op cast
2948 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
2950 return FPToSI; // FP -> sint
2952 return FPToUI; // FP -> uint
2953 } else if (SrcTy->isVectorTy()) {
2954 assert(DestBits == SrcBits &&
2955 "Casting vector to integer of different width");
2956 return BitCast; // Same size, no-op cast
2958 assert(SrcTy->isPointerTy() &&
2959 "Casting from a value that is not first-class type");
2960 return PtrToInt; // ptr -> int
2962 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
2963 if (SrcTy->isIntegerTy()) { // Casting from integral
2965 return SIToFP; // sint -> FP
2967 return UIToFP; // uint -> FP
2968 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
2969 if (DestBits < SrcBits) {
2970 return FPTrunc; // FP -> smaller FP
2971 } else if (DestBits > SrcBits) {
2972 return FPExt; // FP -> larger FP
2974 return BitCast; // same size, no-op cast
2976 } else if (SrcTy->isVectorTy()) {
2977 assert(DestBits == SrcBits &&
2978 "Casting vector to floating point of different width");
2979 return BitCast; // same size, no-op cast
2981 llvm_unreachable("Casting pointer or non-first class to float");
2982 } else if (DestTy->isVectorTy()) {
2983 assert(DestBits == SrcBits &&
2984 "Illegal cast to vector (wrong type or size)");
2986 } else if (DestTy->isPointerTy()) {
2987 if (SrcTy->isPointerTy()) {
2988 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
2989 return AddrSpaceCast;
2990 return BitCast; // ptr -> ptr
2991 } else if (SrcTy->isIntegerTy()) {
2992 return IntToPtr; // int -> ptr
2994 llvm_unreachable("Casting pointer to other than pointer or int");
2995 } else if (DestTy->isX86_MMXTy()) {
2996 if (SrcTy->isVectorTy()) {
2997 assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
2998 return BitCast; // 64-bit vector to MMX
3000 llvm_unreachable("Illegal cast to X86_MMX");
3002 llvm_unreachable("Casting to type that is not first-class");
3005 //===----------------------------------------------------------------------===//
3006 // CastInst SubClass Constructors
3007 //===----------------------------------------------------------------------===//
3009 /// Check that the construction parameters for a CastInst are correct. This
3010 /// could be broken out into the separate constructors but it is useful to have
3011 /// it in one place and to eliminate the redundant code for getting the sizes
3012 /// of the types involved.
3014 CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
3015 // Check for type sanity on the arguments
3016 Type *SrcTy = S->getType();
3018 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3019 SrcTy->isAggregateType() || DstTy->isAggregateType())
3022 // Get the size of the types in bits, we'll need this later
3023 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3024 unsigned DstBitSize = DstTy->getScalarSizeInBits();
3026 // If these are vector types, get the lengths of the vectors (using zero for
3027 // scalar types means that checking that vector lengths match also checks that
3028 // scalars are not being converted to vectors or vectors to scalars).
3029 unsigned SrcLength = SrcTy->isVectorTy() ?
3030 cast<VectorType>(SrcTy)->getNumElements() : 0;
3031 unsigned DstLength = DstTy->isVectorTy() ?
3032 cast<VectorType>(DstTy)->getNumElements() : 0;
3034 // Switch on the opcode provided
3036 default: return false; // This is an input error
3037 case Instruction::Trunc:
3038 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3039 SrcLength == DstLength && SrcBitSize > DstBitSize;
3040 case Instruction::ZExt:
3041 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3042 SrcLength == DstLength && SrcBitSize < DstBitSize;
3043 case Instruction::SExt:
3044 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3045 SrcLength == DstLength && SrcBitSize < DstBitSize;
3046 case Instruction::FPTrunc:
3047 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3048 SrcLength == DstLength && SrcBitSize > DstBitSize;
3049 case Instruction::FPExt:
3050 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3051 SrcLength == DstLength && SrcBitSize < DstBitSize;
3052 case Instruction::UIToFP:
3053 case Instruction::SIToFP:
3054 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3055 SrcLength == DstLength;
3056 case Instruction::FPToUI:
3057 case Instruction::FPToSI:
3058 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3059 SrcLength == DstLength;
3060 case Instruction::PtrToInt:
3061 if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy))
3063 if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
3064 if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
3066 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
3067 case Instruction::IntToPtr:
3068 if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy))
3070 if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
3071 if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
3073 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
3074 case Instruction::BitCast: {
3075 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3076 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3078 // BitCast implies a no-op cast of type only. No bits change.
3079 // However, you can't cast pointers to anything but pointers.
3080 if (!SrcPtrTy != !DstPtrTy)
3083 // For non-pointer cases, the cast is okay if the source and destination bit
3084 // widths are identical.
3086 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3088 // If both are pointers then the address spaces must match.
3089 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3092 // A vector of pointers must have the same number of elements.
3093 VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy);
3094 VectorType *DstVecTy = dyn_cast<VectorType>(DstTy);
3095 if (SrcVecTy && DstVecTy)
3096 return (SrcVecTy->getNumElements() == DstVecTy->getNumElements());
3098 return SrcVecTy->getNumElements() == 1;
3100 return DstVecTy->getNumElements() == 1;
3104 case Instruction::AddrSpaceCast: {
3105 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3109 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3113 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3116 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3117 if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy))
3118 return (SrcVecTy->getNumElements() == DstVecTy->getNumElements());
3128 TruncInst::TruncInst(
3129 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3130 ) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3131 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3134 TruncInst::TruncInst(
3135 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3136 ) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
3137 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3141 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3142 ) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
3143 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3147 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3148 ) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
3149 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3152 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3153 ) : CastInst(Ty, SExt, S, Name, InsertBefore) {
3154 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3158 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3159 ) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
3160 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3163 FPTruncInst::FPTruncInst(
3164 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3165 ) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3166 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3169 FPTruncInst::FPTruncInst(
3170 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3171 ) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
3172 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3175 FPExtInst::FPExtInst(
3176 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3177 ) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3178 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3181 FPExtInst::FPExtInst(
3182 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3183 ) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
3184 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3187 UIToFPInst::UIToFPInst(
3188 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3189 ) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3190 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3193 UIToFPInst::UIToFPInst(
3194 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3195 ) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
3196 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3199 SIToFPInst::SIToFPInst(
3200 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3201 ) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
3202 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3205 SIToFPInst::SIToFPInst(
3206 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3207 ) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
3208 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3211 FPToUIInst::FPToUIInst(
3212 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3213 ) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
3214 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3217 FPToUIInst::FPToUIInst(
3218 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3219 ) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
3220 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3223 FPToSIInst::FPToSIInst(
3224 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3225 ) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
3226 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3229 FPToSIInst::FPToSIInst(
3230 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3231 ) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
3232 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3235 PtrToIntInst::PtrToIntInst(
3236 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3237 ) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
3238 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3241 PtrToIntInst::PtrToIntInst(
3242 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3243 ) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
3244 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3247 IntToPtrInst::IntToPtrInst(
3248 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3249 ) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
3250 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3253 IntToPtrInst::IntToPtrInst(
3254 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3255 ) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
3256 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3259 BitCastInst::BitCastInst(
3260 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3261 ) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
3262 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3265 BitCastInst::BitCastInst(
3266 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3267 ) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
3268 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3271 AddrSpaceCastInst::AddrSpaceCastInst(
3272 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3273 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
3274 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3277 AddrSpaceCastInst::AddrSpaceCastInst(
3278 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3279 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
3280 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3283 //===----------------------------------------------------------------------===//
3285 //===----------------------------------------------------------------------===//
3287 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
3288 Value *RHS, const Twine &Name, Instruction *InsertBefore,
3289 Instruction *FlagsSource)
3290 : Instruction(ty, op,
3291 OperandTraits<CmpInst>::op_begin(this),
3292 OperandTraits<CmpInst>::operands(this),
3296 setPredicate((Predicate)predicate);
3299 copyIRFlags(FlagsSource);
3302 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
3303 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
3304 : Instruction(ty, op,
3305 OperandTraits<CmpInst>::op_begin(this),
3306 OperandTraits<CmpInst>::operands(this),
3310 setPredicate((Predicate)predicate);
3315 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
3316 const Twine &Name, Instruction *InsertBefore) {
3317 if (Op == Instruction::ICmp) {
3319 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
3322 return new ICmpInst(CmpInst::Predicate(predicate),
3327 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
3330 return new FCmpInst(CmpInst::Predicate(predicate),
3335 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
3336 const Twine &Name, BasicBlock *InsertAtEnd) {
3337 if (Op == Instruction::ICmp) {
3338 return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
3341 return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
3345 void CmpInst::swapOperands() {
3346 if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
3349 cast<FCmpInst>(this)->swapOperands();
3352 bool CmpInst::isCommutative() const {
3353 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
3354 return IC->isCommutative();
3355 return cast<FCmpInst>(this)->isCommutative();
3358 bool CmpInst::isEquality() const {
3359 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
3360 return IC->isEquality();
3361 return cast<FCmpInst>(this)->isEquality();
3364 CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
3366 default: llvm_unreachable("Unknown cmp predicate!");
3367 case ICMP_EQ: return ICMP_NE;
3368 case ICMP_NE: return ICMP_EQ;
3369 case ICMP_UGT: return ICMP_ULE;
3370 case ICMP_ULT: return ICMP_UGE;
3371 case ICMP_UGE: return ICMP_ULT;
3372 case ICMP_ULE: return ICMP_UGT;
3373 case ICMP_SGT: return ICMP_SLE;
3374 case ICMP_SLT: return ICMP_SGE;
3375 case ICMP_SGE: return ICMP_SLT;
3376 case ICMP_SLE: return ICMP_SGT;
3378 case FCMP_OEQ: return FCMP_UNE;
3379 case FCMP_ONE: return FCMP_UEQ;
3380 case FCMP_OGT: return FCMP_ULE;
3381 case FCMP_OLT: return FCMP_UGE;
3382 case FCMP_OGE: return FCMP_ULT;
3383 case FCMP_OLE: return FCMP_UGT;
3384 case FCMP_UEQ: return FCMP_ONE;
3385 case FCMP_UNE: return FCMP_OEQ;
3386 case FCMP_UGT: return FCMP_OLE;
3387 case FCMP_ULT: return FCMP_OGE;
3388 case FCMP_UGE: return FCMP_OLT;
3389 case FCMP_ULE: return FCMP_OGT;
3390 case FCMP_ORD: return FCMP_UNO;
3391 case FCMP_UNO: return FCMP_ORD;
3392 case FCMP_TRUE: return FCMP_FALSE;
3393 case FCMP_FALSE: return FCMP_TRUE;
3397 StringRef CmpInst::getPredicateName(Predicate Pred) {
3399 default: return "unknown";
3400 case FCmpInst::FCMP_FALSE: return "false";
3401 case FCmpInst::FCMP_OEQ: return "oeq";
3402 case FCmpInst::FCMP_OGT: return "ogt";
3403 case FCmpInst::FCMP_OGE: return "oge";
3404 case FCmpInst::FCMP_OLT: return "olt";
3405 case FCmpInst::FCMP_OLE: return "ole";
3406 case FCmpInst::FCMP_ONE: return "one";
3407 case FCmpInst::FCMP_ORD: return "ord";
3408 case FCmpInst::FCMP_UNO: return "uno";
3409 case FCmpInst::FCMP_UEQ: return "ueq";
3410 case FCmpInst::FCMP_UGT: return "ugt";
3411 case FCmpInst::FCMP_UGE: return "uge";
3412 case FCmpInst::FCMP_ULT: return "ult";
3413 case FCmpInst::FCMP_ULE: return "ule";
3414 case FCmpInst::FCMP_UNE: return "une";
3415 case FCmpInst::FCMP_TRUE: return "true";
3416 case ICmpInst::ICMP_EQ: return "eq";
3417 case ICmpInst::ICMP_NE: return "ne";
3418 case ICmpInst::ICMP_SGT: return "sgt";
3419 case ICmpInst::ICMP_SGE: return "sge";
3420 case ICmpInst::ICMP_SLT: return "slt";
3421 case ICmpInst::ICMP_SLE: return "sle";
3422 case ICmpInst::ICMP_UGT: return "ugt";
3423 case ICmpInst::ICMP_UGE: return "uge";
3424 case ICmpInst::ICMP_ULT: return "ult";
3425 case ICmpInst::ICMP_ULE: return "ule";
3429 ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
3431 default: llvm_unreachable("Unknown icmp predicate!");
3432 case ICMP_EQ: case ICMP_NE:
3433 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
3435 case ICMP_UGT: return ICMP_SGT;
3436 case ICMP_ULT: return ICMP_SLT;
3437 case ICMP_UGE: return ICMP_SGE;
3438 case ICMP_ULE: return ICMP_SLE;
3442 ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
3444 default: llvm_unreachable("Unknown icmp predicate!");
3445 case ICMP_EQ: case ICMP_NE:
3446 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
3448 case ICMP_SGT: return ICMP_UGT;
3449 case ICMP_SLT: return ICMP_ULT;
3450 case ICMP_SGE: return ICMP_UGE;
3451 case ICMP_SLE: return ICMP_ULE;
3455 CmpInst::Predicate CmpInst::getFlippedStrictnessPredicate(Predicate pred) {
3457 default: llvm_unreachable("Unknown or unsupported cmp predicate!");
3458 case ICMP_SGT: return ICMP_SGE;
3459 case ICMP_SLT: return ICMP_SLE;
3460 case ICMP_SGE: return ICMP_SGT;
3461 case ICMP_SLE: return ICMP_SLT;
3462 case ICMP_UGT: return ICMP_UGE;
3463 case ICMP_ULT: return ICMP_ULE;
3464 case ICMP_UGE: return ICMP_UGT;
3465 case ICMP_ULE: return ICMP_ULT;
3467 case FCMP_OGT: return FCMP_OGE;
3468 case FCMP_OLT: return FCMP_OLE;
3469 case FCMP_OGE: return FCMP_OGT;
3470 case FCMP_OLE: return FCMP_OLT;
3471 case FCMP_UGT: return FCMP_UGE;
3472 case FCMP_ULT: return FCMP_ULE;
3473 case FCMP_UGE: return FCMP_UGT;
3474 case FCMP_ULE: return FCMP_ULT;
3478 CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
3480 default: llvm_unreachable("Unknown cmp predicate!");
3481 case ICMP_EQ: case ICMP_NE:
3483 case ICMP_SGT: return ICMP_SLT;
3484 case ICMP_SLT: return ICMP_SGT;
3485 case ICMP_SGE: return ICMP_SLE;
3486 case ICMP_SLE: return ICMP_SGE;
3487 case ICMP_UGT: return ICMP_ULT;
3488 case ICMP_ULT: return ICMP_UGT;
3489 case ICMP_UGE: return ICMP_ULE;
3490 case ICMP_ULE: return ICMP_UGE;
3492 case FCMP_FALSE: case FCMP_TRUE:
3493 case FCMP_OEQ: case FCMP_ONE:
3494 case FCMP_UEQ: case FCMP_UNE:
3495 case FCMP_ORD: case FCMP_UNO:
3497 case FCMP_OGT: return FCMP_OLT;
3498 case FCMP_OLT: return FCMP_OGT;
3499 case FCMP_OGE: return FCMP_OLE;
3500 case FCMP_OLE: return FCMP_OGE;
3501 case FCMP_UGT: return FCMP_ULT;
3502 case FCMP_ULT: return FCMP_UGT;
3503 case FCMP_UGE: return FCMP_ULE;
3504 case FCMP_ULE: return FCMP_UGE;
3508 CmpInst::Predicate CmpInst::getNonStrictPredicate(Predicate pred) {
3510 case ICMP_SGT: return ICMP_SGE;
3511 case ICMP_SLT: return ICMP_SLE;
3512 case ICMP_UGT: return ICMP_UGE;
3513 case ICMP_ULT: return ICMP_ULE;
3514 case FCMP_OGT: return FCMP_OGE;
3515 case FCMP_OLT: return FCMP_OLE;
3516 case FCMP_UGT: return FCMP_UGE;
3517 case FCMP_ULT: return FCMP_ULE;
3518 default: return pred;
3522 CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
3523 assert(CmpInst::isUnsigned(pred) && "Call only with signed predicates!");
3527 llvm_unreachable("Unknown predicate!");
3528 case CmpInst::ICMP_ULT:
3529 return CmpInst::ICMP_SLT;
3530 case CmpInst::ICMP_ULE:
3531 return CmpInst::ICMP_SLE;
3532 case CmpInst::ICMP_UGT:
3533 return CmpInst::ICMP_SGT;
3534 case CmpInst::ICMP_UGE:
3535 return CmpInst::ICMP_SGE;
3539 bool CmpInst::isUnsigned(Predicate predicate) {
3540 switch (predicate) {
3541 default: return false;
3542 case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
3543 case ICmpInst::ICMP_UGE: return true;
3547 bool CmpInst::isSigned(Predicate predicate) {
3548 switch (predicate) {
3549 default: return false;
3550 case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
3551 case ICmpInst::ICMP_SGE: return true;
3555 bool CmpInst::isOrdered(Predicate predicate) {
3556 switch (predicate) {
3557 default: return false;
3558 case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
3559 case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
3560 case FCmpInst::FCMP_ORD: return true;
3564 bool CmpInst::isUnordered(Predicate predicate) {
3565 switch (predicate) {
3566 default: return false;
3567 case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
3568 case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
3569 case FCmpInst::FCMP_UNO: return true;
3573 bool CmpInst::isTrueWhenEqual(Predicate predicate) {
3575 default: return false;
3576 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
3577 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
3581 bool CmpInst::isFalseWhenEqual(Predicate predicate) {
3583 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
3584 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
3585 default: return false;
3589 bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {
3590 // If the predicates match, then we know the first condition implies the
3599 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
3600 return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
3602 case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
3603 return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
3604 case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
3605 return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
3606 case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
3607 return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
3608 case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
3609 return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
3614 bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) {
3615 return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
3618 //===----------------------------------------------------------------------===//
3619 // SwitchInst Implementation
3620 //===----------------------------------------------------------------------===//
3622 void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
3623 assert(Value && Default && NumReserved);
3624 ReservedSpace = NumReserved;
3625 setNumHungOffUseOperands(2);
3626 allocHungoffUses(ReservedSpace);
3632 /// SwitchInst ctor - Create a new switch instruction, specifying a value to
3633 /// switch on and a default destination. The number of additional cases can
3634 /// be specified here to make memory allocation more efficient. This
3635 /// constructor can also autoinsert before another instruction.
3636 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3637 Instruction *InsertBefore)
3638 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
3639 nullptr, 0, InsertBefore) {
3640 init(Value, Default, 2+NumCases*2);
3643 /// SwitchInst ctor - Create a new switch instruction, specifying a value to
3644 /// switch on and a default destination. The number of additional cases can
3645 /// be specified here to make memory allocation more efficient. This
3646 /// constructor also autoinserts at the end of the specified BasicBlock.
3647 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3648 BasicBlock *InsertAtEnd)
3649 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
3650 nullptr, 0, InsertAtEnd) {
3651 init(Value, Default, 2+NumCases*2);
3654 SwitchInst::SwitchInst(const SwitchInst &SI)
3655 : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) {
3656 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
3657 setNumHungOffUseOperands(SI.getNumOperands());
3658 Use *OL = getOperandList();
3659 const Use *InOL = SI.getOperandList();
3660 for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
3662 OL[i+1] = InOL[i+1];
3664 SubclassOptionalData = SI.SubclassOptionalData;
3667 /// addCase - Add an entry to the switch instruction...
3669 void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
3670 unsigned NewCaseIdx = getNumCases();
3671 unsigned OpNo = getNumOperands();
3672 if (OpNo+2 > ReservedSpace)
3673 growOperands(); // Get more space!
3674 // Initialize some new operands.
3675 assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
3676 setNumHungOffUseOperands(OpNo+2);
3677 CaseHandle Case(this, NewCaseIdx);
3678 Case.setValue(OnVal);
3679 Case.setSuccessor(Dest);
3682 /// removeCase - This method removes the specified case and its successor
3683 /// from the switch instruction.
3684 SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
3685 unsigned idx = I->getCaseIndex();
3687 assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
3689 unsigned NumOps = getNumOperands();
3690 Use *OL = getOperandList();
3692 // Overwrite this case with the end of the list.
3693 if (2 + (idx + 1) * 2 != NumOps) {
3694 OL[2 + idx * 2] = OL[NumOps - 2];
3695 OL[2 + idx * 2 + 1] = OL[NumOps - 1];
3698 // Nuke the last value.
3699 OL[NumOps-2].set(nullptr);
3700 OL[NumOps-2+1].set(nullptr);
3701 setNumHungOffUseOperands(NumOps-2);
3703 return CaseIt(this, idx);
3706 /// growOperands - grow operands - This grows the operand list in response
3707 /// to a push_back style of operation. This grows the number of ops by 3 times.
3709 void SwitchInst::growOperands() {
3710 unsigned e = getNumOperands();
3711 unsigned NumOps = e*3;
3713 ReservedSpace = NumOps;
3714 growHungoffUses(ReservedSpace);
3717 //===----------------------------------------------------------------------===//
3718 // IndirectBrInst Implementation
3719 //===----------------------------------------------------------------------===//
3721 void IndirectBrInst::init(Value *Address, unsigned NumDests) {
3722 assert(Address && Address->getType()->isPointerTy() &&
3723 "Address of indirectbr must be a pointer");
3724 ReservedSpace = 1+NumDests;
3725 setNumHungOffUseOperands(1);
3726 allocHungoffUses(ReservedSpace);
3732 /// growOperands - grow operands - This grows the operand list in response
3733 /// to a push_back style of operation. This grows the number of ops by 2 times.
3735 void IndirectBrInst::growOperands() {
3736 unsigned e = getNumOperands();
3737 unsigned NumOps = e*2;
3739 ReservedSpace = NumOps;
3740 growHungoffUses(ReservedSpace);
3743 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
3744 Instruction *InsertBefore)
3745 : Instruction(Type::getVoidTy(Address->getContext()),
3746 Instruction::IndirectBr, nullptr, 0, InsertBefore) {
3747 init(Address, NumCases);
3750 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
3751 BasicBlock *InsertAtEnd)
3752 : Instruction(Type::getVoidTy(Address->getContext()),
3753 Instruction::IndirectBr, nullptr, 0, InsertAtEnd) {
3754 init(Address, NumCases);
3757 IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
3758 : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
3759 nullptr, IBI.getNumOperands()) {
3760 allocHungoffUses(IBI.getNumOperands());
3761 Use *OL = getOperandList();
3762 const Use *InOL = IBI.getOperandList();
3763 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
3765 SubclassOptionalData = IBI.SubclassOptionalData;
3768 /// addDestination - Add a destination.
3770 void IndirectBrInst::addDestination(BasicBlock *DestBB) {
3771 unsigned OpNo = getNumOperands();
3772 if (OpNo+1 > ReservedSpace)
3773 growOperands(); // Get more space!
3774 // Initialize some new operands.
3775 assert(OpNo < ReservedSpace && "Growing didn't work!");
3776 setNumHungOffUseOperands(OpNo+1);
3777 getOperandList()[OpNo] = DestBB;
3780 /// removeDestination - This method removes the specified successor from the
3781 /// indirectbr instruction.
3782 void IndirectBrInst::removeDestination(unsigned idx) {
3783 assert(idx < getNumOperands()-1 && "Successor index out of range!");
3785 unsigned NumOps = getNumOperands();
3786 Use *OL = getOperandList();
3788 // Replace this value with the last one.
3789 OL[idx+1] = OL[NumOps-1];
3791 // Nuke the last value.
3792 OL[NumOps-1].set(nullptr);
3793 setNumHungOffUseOperands(NumOps-1);
3796 //===----------------------------------------------------------------------===//
3797 // cloneImpl() implementations
3798 //===----------------------------------------------------------------------===//
3800 // Define these methods here so vtables don't get emitted into every translation
3801 // unit that uses these classes.
3803 GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
3804 return new (getNumOperands()) GetElementPtrInst(*this);
3807 UnaryOperator *UnaryOperator::cloneImpl() const {
3808 return Create(getOpcode(), Op<0>());
3811 BinaryOperator *BinaryOperator::cloneImpl() const {
3812 return Create(getOpcode(), Op<0>(), Op<1>());
3815 FCmpInst *FCmpInst::cloneImpl() const {
3816 return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
3819 ICmpInst *ICmpInst::cloneImpl() const {
3820 return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
3823 ExtractValueInst *ExtractValueInst::cloneImpl() const {
3824 return new ExtractValueInst(*this);
3827 InsertValueInst *InsertValueInst::cloneImpl() const {
3828 return new InsertValueInst(*this);
3831 AllocaInst *AllocaInst::cloneImpl() const {
3832 AllocaInst *Result = new AllocaInst(getAllocatedType(),
3833 getType()->getAddressSpace(),
3834 (Value *)getOperand(0), getAlignment());
3835 Result->setUsedWithInAlloca(isUsedWithInAlloca());
3836 Result->setSwiftError(isSwiftError());
3840 LoadInst *LoadInst::cloneImpl() const {
3841 return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
3842 getAlignment(), getOrdering(), getSyncScopeID());
3845 StoreInst *StoreInst::cloneImpl() const {
3846 return new StoreInst(getOperand(0), getOperand(1), isVolatile(),
3847 getAlignment(), getOrdering(), getSyncScopeID());
3851 AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
3852 AtomicCmpXchgInst *Result =
3853 new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
3854 getSuccessOrdering(), getFailureOrdering(),
3856 Result->setVolatile(isVolatile());
3857 Result->setWeak(isWeak());
3861 AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
3862 AtomicRMWInst *Result =
3863 new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
3864 getOrdering(), getSyncScopeID());
3865 Result->setVolatile(isVolatile());
3869 FenceInst *FenceInst::cloneImpl() const {
3870 return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
3873 TruncInst *TruncInst::cloneImpl() const {
3874 return new TruncInst(getOperand(0), getType());
3877 ZExtInst *ZExtInst::cloneImpl() const {
3878 return new ZExtInst(getOperand(0), getType());
3881 SExtInst *SExtInst::cloneImpl() const {
3882 return new SExtInst(getOperand(0), getType());
3885 FPTruncInst *FPTruncInst::cloneImpl() const {
3886 return new FPTruncInst(getOperand(0), getType());
3889 FPExtInst *FPExtInst::cloneImpl() const {
3890 return new FPExtInst(getOperand(0), getType());
3893 UIToFPInst *UIToFPInst::cloneImpl() const {
3894 return new UIToFPInst(getOperand(0), getType());
3897 SIToFPInst *SIToFPInst::cloneImpl() const {
3898 return new SIToFPInst(getOperand(0), getType());
3901 FPToUIInst *FPToUIInst::cloneImpl() const {
3902 return new FPToUIInst(getOperand(0), getType());
3905 FPToSIInst *FPToSIInst::cloneImpl() const {
3906 return new FPToSIInst(getOperand(0), getType());
3909 PtrToIntInst *PtrToIntInst::cloneImpl() const {
3910 return new PtrToIntInst(getOperand(0), getType());
3913 IntToPtrInst *IntToPtrInst::cloneImpl() const {
3914 return new IntToPtrInst(getOperand(0), getType());
3917 BitCastInst *BitCastInst::cloneImpl() const {
3918 return new BitCastInst(getOperand(0), getType());
3921 AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
3922 return new AddrSpaceCastInst(getOperand(0), getType());
3925 CallInst *CallInst::cloneImpl() const {
3926 if (hasOperandBundles()) {
3927 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
3928 return new(getNumOperands(), DescriptorBytes) CallInst(*this);
3930 return new(getNumOperands()) CallInst(*this);
3933 SelectInst *SelectInst::cloneImpl() const {
3934 return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));
3937 VAArgInst *VAArgInst::cloneImpl() const {
3938 return new VAArgInst(getOperand(0), getType());
3941 ExtractElementInst *ExtractElementInst::cloneImpl() const {
3942 return ExtractElementInst::Create(getOperand(0), getOperand(1));
3945 InsertElementInst *InsertElementInst::cloneImpl() const {
3946 return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
3949 ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
3950 return new ShuffleVectorInst(getOperand(0), getOperand(1), getOperand(2));
3953 PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }
3955 LandingPadInst *LandingPadInst::cloneImpl() const {
3956 return new LandingPadInst(*this);
3959 ReturnInst *ReturnInst::cloneImpl() const {
3960 return new(getNumOperands()) ReturnInst(*this);
3963 BranchInst *BranchInst::cloneImpl() const {
3964 return new(getNumOperands()) BranchInst(*this);
3967 SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
3969 IndirectBrInst *IndirectBrInst::cloneImpl() const {
3970 return new IndirectBrInst(*this);
3973 InvokeInst *InvokeInst::cloneImpl() const {
3974 if (hasOperandBundles()) {
3975 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
3976 return new(getNumOperands(), DescriptorBytes) InvokeInst(*this);
3978 return new(getNumOperands()) InvokeInst(*this);
3981 ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }
3983 CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
3984 return new (getNumOperands()) CleanupReturnInst(*this);
3987 CatchReturnInst *CatchReturnInst::cloneImpl() const {
3988 return new (getNumOperands()) CatchReturnInst(*this);
3991 CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
3992 return new CatchSwitchInst(*this);
3995 FuncletPadInst *FuncletPadInst::cloneImpl() const {
3996 return new (getNumOperands()) FuncletPadInst(*this);
3999 UnreachableInst *UnreachableInst::cloneImpl() const {
4000 LLVMContext &Context = getContext();
4001 return new UnreachableInst(Context);