//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements all of the non-inline methods for the LLVM instruction
// classes.
//
//===----------------------------------------------------------------------===//
15 #include "LLVMContextImpl.h"
16 #include "llvm/ADT/None.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/Twine.h"
19 #include "llvm/IR/Attributes.h"
20 #include "llvm/IR/BasicBlock.h"
21 #include "llvm/IR/CallSite.h"
22 #include "llvm/IR/Constant.h"
23 #include "llvm/IR/Constants.h"
24 #include "llvm/IR/DataLayout.h"
25 #include "llvm/IR/DerivedTypes.h"
26 #include "llvm/IR/Function.h"
27 #include "llvm/IR/InstrTypes.h"
28 #include "llvm/IR/Instruction.h"
29 #include "llvm/IR/Instructions.h"
30 #include "llvm/IR/LLVMContext.h"
31 #include "llvm/IR/Metadata.h"
32 #include "llvm/IR/Module.h"
33 #include "llvm/IR/Operator.h"
34 #include "llvm/IR/Type.h"
35 #include "llvm/IR/Value.h"
36 #include "llvm/Support/AtomicOrdering.h"
37 #include "llvm/Support/Casting.h"
38 #include "llvm/Support/ErrorHandling.h"
39 #include "llvm/Support/MathExtras.h"
//===----------------------------------------------------------------------===//
//                            CallSite Class
//===----------------------------------------------------------------------===//
51 User::op_iterator CallSite::getCallee() const {
52 Instruction *II(getInstruction());
54 ? cast<CallInst>(II)->op_end() - 1 // Skip Callee
55 : cast<InvokeInst>(II)->op_end() - 3; // Skip BB, BB, Callee
58 //===----------------------------------------------------------------------===//
59 // TerminatorInst Class
60 //===----------------------------------------------------------------------===//
// Out of line virtual method, so the vtable, etc has a home.
// (An out-of-line key function anchors the class's vtable and type info to
// this translation unit instead of emitting weak copies everywhere.)
TerminatorInst::~TerminatorInst() = default;
65 unsigned TerminatorInst::getNumSuccessors() const {
66 switch (getOpcode()) {
67 #define HANDLE_TERM_INST(N, OPC, CLASS) \
68 case Instruction::OPC: \
69 return static_cast<const CLASS *>(this)->getNumSuccessorsV();
70 #include "llvm/IR/Instruction.def"
74 llvm_unreachable("not a terminator");
77 BasicBlock *TerminatorInst::getSuccessor(unsigned idx) const {
78 switch (getOpcode()) {
79 #define HANDLE_TERM_INST(N, OPC, CLASS) \
80 case Instruction::OPC: \
81 return static_cast<const CLASS *>(this)->getSuccessorV(idx);
82 #include "llvm/IR/Instruction.def"
86 llvm_unreachable("not a terminator");
89 void TerminatorInst::setSuccessor(unsigned idx, BasicBlock *B) {
90 switch (getOpcode()) {
91 #define HANDLE_TERM_INST(N, OPC, CLASS) \
92 case Instruction::OPC: \
93 return static_cast<CLASS *>(this)->setSuccessorV(idx, B);
94 #include "llvm/IR/Instruction.def"
98 llvm_unreachable("not a terminator");
101 //===----------------------------------------------------------------------===//
102 // UnaryInstruction Class
103 //===----------------------------------------------------------------------===//
// Out of line virtual method, so the vtable, etc has a home.
// (Same key-function idiom as TerminatorInst above.)
UnaryInstruction::~UnaryInstruction() = default;
//===----------------------------------------------------------------------===//
//                           SelectInst Class
//===----------------------------------------------------------------------===//
112 /// areInvalidOperands - Return a string if the specified operands are invalid
113 /// for a select operation, otherwise return null.
114 const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
115 if (Op1->getType() != Op2->getType())
116 return "both values to select must have same type";
118 if (Op1->getType()->isTokenTy())
119 return "select values cannot have token type";
121 if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
123 if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
124 return "vector select condition element type must be i1";
125 VectorType *ET = dyn_cast<VectorType>(Op1->getType());
127 return "selected values for vector select must be vectors";
128 if (ET->getNumElements() != VT->getNumElements())
129 return "vector select requires selected vectors to have "
130 "the same vector length as select condition";
131 } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
132 return "select condition must be i1 or <n x i1>";
//===----------------------------------------------------------------------===//
//                            PHINode Class
//===----------------------------------------------------------------------===//
141 void PHINode::anchor() {}
143 PHINode::PHINode(const PHINode &PN)
144 : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
145 ReservedSpace(PN.getNumOperands()) {
146 allocHungoffUses(PN.getNumOperands());
147 std::copy(PN.op_begin(), PN.op_end(), op_begin());
148 std::copy(PN.block_begin(), PN.block_end(), block_begin());
149 SubclassOptionalData = PN.SubclassOptionalData;
152 // removeIncomingValue - Remove an incoming value. This is useful if a
153 // predecessor basic block is deleted.
154 Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
155 Value *Removed = getIncomingValue(Idx);
157 // Move everything after this operand down.
159 // FIXME: we could just swap with the end of the list, then erase. However,
160 // clients might not expect this to happen. The code as it is thrashes the
161 // use/def lists, which is kinda lame.
162 std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
163 std::copy(block_begin() + Idx + 1, block_end(), block_begin() + Idx);
165 // Nuke the last value.
166 Op<-1>().set(nullptr);
167 setNumHungOffUseOperands(getNumOperands() - 1);
169 // If the PHI node is dead, because it has zero entries, nuke it now.
170 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
171 // If anyone is using this PHI, make them use a dummy value instead...
172 replaceAllUsesWith(UndefValue::get(getType()));
178 /// growOperands - grow operands - This grows the operand list in response
179 /// to a push_back style of operation. This grows the number of ops by 1.5
182 void PHINode::growOperands() {
183 unsigned e = getNumOperands();
184 unsigned NumOps = e + e / 2;
185 if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
187 ReservedSpace = NumOps;
188 growHungoffUses(ReservedSpace, /* IsPhi */ true);
191 /// hasConstantValue - If the specified PHI node always merges together the same
192 /// value, return the value, otherwise return null.
193 Value *PHINode::hasConstantValue() const {
194 // Exploit the fact that phi nodes always have at least one entry.
195 Value *ConstantValue = getIncomingValue(0);
196 for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
197 if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
198 if (ConstantValue != this)
199 return nullptr; // Incoming values not all the same.
200 // The case where the first value is this PHI.
201 ConstantValue = getIncomingValue(i);
203 if (ConstantValue == this)
204 return UndefValue::get(getType());
205 return ConstantValue;
208 /// hasConstantOrUndefValue - Whether the specified PHI node always merges
209 /// together the same value, assuming that undefs result in the same value as
211 /// Unlike \ref hasConstantValue, this does not return a value because the
212 /// unique non-undef incoming value need not dominate the PHI node.
213 bool PHINode::hasConstantOrUndefValue() const {
214 Value *ConstantValue = nullptr;
215 for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
216 Value *Incoming = getIncomingValue(i);
217 if (Incoming != this && !isa<UndefValue>(Incoming)) {
218 if (ConstantValue && ConstantValue != Incoming)
220 ConstantValue = Incoming;
226 //===----------------------------------------------------------------------===//
227 // LandingPadInst Implementation
228 //===----------------------------------------------------------------------===//
230 LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
231 const Twine &NameStr, Instruction *InsertBefore)
232 : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
233 init(NumReservedValues, NameStr);
236 LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
237 const Twine &NameStr, BasicBlock *InsertAtEnd)
238 : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
239 init(NumReservedValues, NameStr);
242 LandingPadInst::LandingPadInst(const LandingPadInst &LP)
243 : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
244 LP.getNumOperands()),
245 ReservedSpace(LP.getNumOperands()) {
246 allocHungoffUses(LP.getNumOperands());
247 Use *OL = getOperandList();
248 const Use *InOL = LP.getOperandList();
249 for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
252 setCleanup(LP.isCleanup());
255 LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
256 const Twine &NameStr,
257 Instruction *InsertBefore) {
258 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
261 LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
262 const Twine &NameStr,
263 BasicBlock *InsertAtEnd) {
264 return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
267 void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
268 ReservedSpace = NumReservedValues;
269 setNumHungOffUseOperands(0);
270 allocHungoffUses(ReservedSpace);
275 /// growOperands - grow operands - This grows the operand list in response to a
276 /// push_back style of operation. This grows the number of ops by 2 times.
277 void LandingPadInst::growOperands(unsigned Size) {
278 unsigned e = getNumOperands();
279 if (ReservedSpace >= e + Size) return;
280 ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
281 growHungoffUses(ReservedSpace);
284 void LandingPadInst::addClause(Constant *Val) {
285 unsigned OpNo = getNumOperands();
287 assert(OpNo < ReservedSpace && "Growing didn't work!");
288 setNumHungOffUseOperands(getNumOperands() + 1);
289 getOperandList()[OpNo] = Val;
292 //===----------------------------------------------------------------------===//
293 // CallInst Implementation
294 //===----------------------------------------------------------------------===//
296 CallInst::~CallInst() = default;
298 void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
299 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
301 assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
302 "NumOperands not set up?");
306 assert((Args.size() == FTy->getNumParams() ||
307 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
308 "Calling a function with bad signature!");
310 for (unsigned i = 0; i != Args.size(); ++i)
311 assert((i >= FTy->getNumParams() ||
312 FTy->getParamType(i) == Args[i]->getType()) &&
313 "Calling a function with a bad signature!");
316 std::copy(Args.begin(), Args.end(), op_begin());
318 auto It = populateBundleOperandInfos(Bundles, Args.size());
320 assert(It + 1 == op_end() && "Should add up!");
325 void CallInst::init(Value *Func, const Twine &NameStr) {
327 cast<FunctionType>(cast<PointerType>(Func->getType())->getElementType());
328 assert(getNumOperands() == 1 && "NumOperands not set up?");
331 assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
336 CallInst::CallInst(Value *Func, const Twine &Name,
337 Instruction *InsertBefore)
338 : Instruction(cast<FunctionType>(cast<PointerType>(Func->getType())
339 ->getElementType())->getReturnType(),
341 OperandTraits<CallInst>::op_end(this) - 1,
346 CallInst::CallInst(Value *Func, const Twine &Name,
347 BasicBlock *InsertAtEnd)
348 : Instruction(cast<FunctionType>(cast<PointerType>(Func->getType())
349 ->getElementType())->getReturnType(),
351 OperandTraits<CallInst>::op_end(this) - 1,
356 CallInst::CallInst(const CallInst &CI)
357 : Instruction(CI.getType(), Instruction::Call,
358 OperandTraits<CallInst>::op_end(this) - CI.getNumOperands(),
359 CI.getNumOperands()),
360 Attrs(CI.Attrs), FTy(CI.FTy) {
361 setTailCallKind(CI.getTailCallKind());
362 setCallingConv(CI.getCallingConv());
364 std::copy(CI.op_begin(), CI.op_end(), op_begin());
365 std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
366 bundle_op_info_begin());
367 SubclassOptionalData = CI.SubclassOptionalData;
370 CallInst *CallInst::Create(CallInst *CI, ArrayRef<OperandBundleDef> OpB,
371 Instruction *InsertPt) {
372 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
374 auto *NewCI = CallInst::Create(CI->getCalledValue(), Args, OpB, CI->getName(),
376 NewCI->setTailCallKind(CI->getTailCallKind());
377 NewCI->setCallingConv(CI->getCallingConv());
378 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
379 NewCI->setAttributes(CI->getAttributes());
380 NewCI->setDebugLoc(CI->getDebugLoc());
384 Value *CallInst::getReturnedArgOperand() const {
387 if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
388 return getArgOperand(Index - AttributeList::FirstArgIndex);
389 if (const Function *F = getCalledFunction())
390 if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
392 return getArgOperand(Index - AttributeList::FirstArgIndex);
397 void CallInst::addAttribute(unsigned i, Attribute::AttrKind Kind) {
398 AttributeList PAL = getAttributes();
399 PAL = PAL.addAttribute(getContext(), i, Kind);
403 void CallInst::addAttribute(unsigned i, Attribute Attr) {
404 AttributeList PAL = getAttributes();
405 PAL = PAL.addAttribute(getContext(), i, Attr);
409 void CallInst::addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
410 addAttribute(ArgNo + AttributeList::FirstArgIndex, Kind);
413 void CallInst::removeAttribute(unsigned i, Attribute::AttrKind Kind) {
414 AttributeList PAL = getAttributes();
415 PAL = PAL.removeAttribute(getContext(), i, Kind);
419 void CallInst::removeAttribute(unsigned i, StringRef Kind) {
420 AttributeList PAL = getAttributes();
421 PAL = PAL.removeAttribute(getContext(), i, Kind);
425 void CallInst::removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
426 removeAttribute(ArgNo + AttributeList::FirstArgIndex, Kind);
429 void CallInst::addDereferenceableAttr(unsigned i, uint64_t Bytes) {
430 AttributeList PAL = getAttributes();
431 PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
435 void CallInst::addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
436 AttributeList PAL = getAttributes();
437 PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
441 bool CallInst::hasRetAttr(Attribute::AttrKind Kind) const {
442 if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
445 // Look at the callee, if available.
446 if (const Function *F = getCalledFunction())
447 return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
451 bool CallInst::paramHasAttr(unsigned i, Attribute::AttrKind Kind) const {
452 assert(i < getNumArgOperands() && "Param index out of bounds!");
454 if (Attrs.hasParamAttribute(i, Kind))
456 if (const Function *F = getCalledFunction())
457 return F->getAttributes().hasParamAttribute(i, Kind);
461 bool CallInst::dataOperandHasImpliedAttr(unsigned i,
462 Attribute::AttrKind Kind) const {
463 // There are getNumOperands() - 1 data operands. The last operand is the
465 assert(i < getNumOperands() && "Data operand index out of bounds!");
467 // The attribute A can either be directly specified, if the operand in
468 // question is a call argument; or be indirectly implied by the kind of its
469 // containing operand bundle, if the operand is a bundle operand.
471 // FIXME: Avoid these i - 1 calculations and update the API to use zero-based
473 if (i < (getNumArgOperands() + 1))
474 return paramHasAttr(i - 1, Kind);
476 assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&
477 "Must be either a call argument or an operand bundle!");
478 return bundleOperandHasAttr(i - 1, Kind);
481 /// IsConstantOne - Return true only if val is constant int 1
482 static bool IsConstantOne(Value *val) {
483 assert(val && "IsConstantOne does not work with nullptr val");
484 const ConstantInt *CVal = dyn_cast<ConstantInt>(val);
485 return CVal && CVal->isOne();
488 static Instruction *createMalloc(Instruction *InsertBefore,
489 BasicBlock *InsertAtEnd, Type *IntPtrTy,
490 Type *AllocTy, Value *AllocSize,
492 ArrayRef<OperandBundleDef> OpB,
493 Function *MallocF, const Twine &Name) {
494 assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
495 "createMalloc needs either InsertBefore or InsertAtEnd");
497 // malloc(type) becomes:
498 // bitcast (i8* malloc(typeSize)) to type*
499 // malloc(type, arraySize) becomes:
500 // bitcast (i8* malloc(typeSize*arraySize)) to type*
502 ArraySize = ConstantInt::get(IntPtrTy, 1);
503 else if (ArraySize->getType() != IntPtrTy) {
505 ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
508 ArraySize = CastInst::CreateIntegerCast(ArraySize, IntPtrTy, false,
512 if (!IsConstantOne(ArraySize)) {
513 if (IsConstantOne(AllocSize)) {
514 AllocSize = ArraySize; // Operand * 1 = Operand
515 } else if (Constant *CO = dyn_cast<Constant>(ArraySize)) {
516 Constant *Scale = ConstantExpr::getIntegerCast(CO, IntPtrTy,
518 // Malloc arg is constant product of type size and array size
519 AllocSize = ConstantExpr::getMul(Scale, cast<Constant>(AllocSize));
521 // Multiply type size by the array size...
523 AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
524 "mallocsize", InsertBefore);
526 AllocSize = BinaryOperator::CreateMul(ArraySize, AllocSize,
527 "mallocsize", InsertAtEnd);
531 assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
532 // Create the call to Malloc.
533 BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
534 Module *M = BB->getParent()->getParent();
535 Type *BPTy = Type::getInt8PtrTy(BB->getContext());
536 Value *MallocFunc = MallocF;
538 // prototype malloc as "void *malloc(size_t)"
539 MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
540 PointerType *AllocPtrType = PointerType::getUnqual(AllocTy);
541 CallInst *MCall = nullptr;
542 Instruction *Result = nullptr;
544 MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall",
547 if (Result->getType() != AllocPtrType)
548 // Create a cast instruction to convert to the right type...
549 Result = new BitCastInst(MCall, AllocPtrType, Name, InsertBefore);
551 MCall = CallInst::Create(MallocFunc, AllocSize, OpB, "malloccall");
553 if (Result->getType() != AllocPtrType) {
554 InsertAtEnd->getInstList().push_back(MCall);
555 // Create a cast instruction to convert to the right type...
556 Result = new BitCastInst(MCall, AllocPtrType, Name);
559 MCall->setTailCall();
560 if (Function *F = dyn_cast<Function>(MallocFunc)) {
561 MCall->setCallingConv(F->getCallingConv());
562 if (!F->returnDoesNotAlias())
563 F->setReturnDoesNotAlias();
565 assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");
570 /// CreateMalloc - Generate the IR for a call to malloc:
571 /// 1. Compute the malloc call's argument as the specified type's size,
572 /// possibly multiplied by the array size if the array size is not
574 /// 2. Call malloc with that argument.
575 /// 3. Bitcast the result of the malloc call to the specified type.
576 Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
577 Type *IntPtrTy, Type *AllocTy,
578 Value *AllocSize, Value *ArraySize,
581 return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
582 ArraySize, None, MallocF, Name);
584 Instruction *CallInst::CreateMalloc(Instruction *InsertBefore,
585 Type *IntPtrTy, Type *AllocTy,
586 Value *AllocSize, Value *ArraySize,
587 ArrayRef<OperandBundleDef> OpB,
590 return createMalloc(InsertBefore, nullptr, IntPtrTy, AllocTy, AllocSize,
591 ArraySize, OpB, MallocF, Name);
594 /// CreateMalloc - Generate the IR for a call to malloc:
595 /// 1. Compute the malloc call's argument as the specified type's size,
596 /// possibly multiplied by the array size if the array size is not
598 /// 2. Call malloc with that argument.
599 /// 3. Bitcast the result of the malloc call to the specified type.
600 /// Note: This function does not add the bitcast to the basic block, that is the
601 /// responsibility of the caller.
602 Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
603 Type *IntPtrTy, Type *AllocTy,
604 Value *AllocSize, Value *ArraySize,
605 Function *MallocF, const Twine &Name) {
606 return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
607 ArraySize, None, MallocF, Name);
609 Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
610 Type *IntPtrTy, Type *AllocTy,
611 Value *AllocSize, Value *ArraySize,
612 ArrayRef<OperandBundleDef> OpB,
613 Function *MallocF, const Twine &Name) {
614 return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
615 ArraySize, OpB, MallocF, Name);
618 static Instruction *createFree(Value *Source,
619 ArrayRef<OperandBundleDef> Bundles,
620 Instruction *InsertBefore,
621 BasicBlock *InsertAtEnd) {
622 assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
623 "createFree needs either InsertBefore or InsertAtEnd");
624 assert(Source->getType()->isPointerTy() &&
625 "Can not free something of nonpointer type!");
627 BasicBlock *BB = InsertBefore ? InsertBefore->getParent() : InsertAtEnd;
628 Module *M = BB->getParent()->getParent();
630 Type *VoidTy = Type::getVoidTy(M->getContext());
631 Type *IntPtrTy = Type::getInt8PtrTy(M->getContext());
632 // prototype free as "void free(void*)"
633 Value *FreeFunc = M->getOrInsertFunction("free", VoidTy, IntPtrTy);
634 CallInst *Result = nullptr;
635 Value *PtrCast = Source;
637 if (Source->getType() != IntPtrTy)
638 PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertBefore);
639 Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "", InsertBefore);
641 if (Source->getType() != IntPtrTy)
642 PtrCast = new BitCastInst(Source, IntPtrTy, "", InsertAtEnd);
643 Result = CallInst::Create(FreeFunc, PtrCast, Bundles, "");
645 Result->setTailCall();
646 if (Function *F = dyn_cast<Function>(FreeFunc))
647 Result->setCallingConv(F->getCallingConv());
652 /// CreateFree - Generate the IR for a call to the builtin free function.
653 Instruction *CallInst::CreateFree(Value *Source, Instruction *InsertBefore) {
654 return createFree(Source, None, InsertBefore, nullptr);
656 Instruction *CallInst::CreateFree(Value *Source,
657 ArrayRef<OperandBundleDef> Bundles,
658 Instruction *InsertBefore) {
659 return createFree(Source, Bundles, InsertBefore, nullptr);
662 /// CreateFree - Generate the IR for a call to the builtin free function.
663 /// Note: This function does not add the call to the basic block, that is the
664 /// responsibility of the caller.
665 Instruction *CallInst::CreateFree(Value *Source, BasicBlock *InsertAtEnd) {
666 Instruction *FreeCall = createFree(Source, None, nullptr, InsertAtEnd);
667 assert(FreeCall && "CreateFree did not create a CallInst");
670 Instruction *CallInst::CreateFree(Value *Source,
671 ArrayRef<OperandBundleDef> Bundles,
672 BasicBlock *InsertAtEnd) {
673 Instruction *FreeCall = createFree(Source, Bundles, nullptr, InsertAtEnd);
674 assert(FreeCall && "CreateFree did not create a CallInst");
678 //===----------------------------------------------------------------------===//
679 // InvokeInst Implementation
680 //===----------------------------------------------------------------------===//
682 void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
683 BasicBlock *IfException, ArrayRef<Value *> Args,
684 ArrayRef<OperandBundleDef> Bundles,
685 const Twine &NameStr) {
688 assert(getNumOperands() == 3 + Args.size() + CountBundleInputs(Bundles) &&
689 "NumOperands not set up?");
692 Op<-1>() = IfException;
695 assert(((Args.size() == FTy->getNumParams()) ||
696 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
697 "Invoking a function with bad signature");
699 for (unsigned i = 0, e = Args.size(); i != e; i++)
700 assert((i >= FTy->getNumParams() ||
701 FTy->getParamType(i) == Args[i]->getType()) &&
702 "Invoking a function with a bad signature!");
705 std::copy(Args.begin(), Args.end(), op_begin());
707 auto It = populateBundleOperandInfos(Bundles, Args.size());
709 assert(It + 3 == op_end() && "Should add up!");
714 InvokeInst::InvokeInst(const InvokeInst &II)
715 : TerminatorInst(II.getType(), Instruction::Invoke,
716 OperandTraits<InvokeInst>::op_end(this) -
718 II.getNumOperands()),
719 Attrs(II.Attrs), FTy(II.FTy) {
720 setCallingConv(II.getCallingConv());
721 std::copy(II.op_begin(), II.op_end(), op_begin());
722 std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
723 bundle_op_info_begin());
724 SubclassOptionalData = II.SubclassOptionalData;
727 InvokeInst *InvokeInst::Create(InvokeInst *II, ArrayRef<OperandBundleDef> OpB,
728 Instruction *InsertPt) {
729 std::vector<Value *> Args(II->arg_begin(), II->arg_end());
731 auto *NewII = InvokeInst::Create(II->getCalledValue(), II->getNormalDest(),
732 II->getUnwindDest(), Args, OpB,
733 II->getName(), InsertPt);
734 NewII->setCallingConv(II->getCallingConv());
735 NewII->SubclassOptionalData = II->SubclassOptionalData;
736 NewII->setAttributes(II->getAttributes());
737 NewII->setDebugLoc(II->getDebugLoc());
741 BasicBlock *InvokeInst::getSuccessorV(unsigned idx) const {
742 return getSuccessor(idx);
745 unsigned InvokeInst::getNumSuccessorsV() const {
746 return getNumSuccessors();
749 void InvokeInst::setSuccessorV(unsigned idx, BasicBlock *B) {
750 return setSuccessor(idx, B);
753 Value *InvokeInst::getReturnedArgOperand() const {
756 if (Attrs.hasAttrSomewhere(Attribute::Returned, &Index) && Index)
757 return getArgOperand(Index - AttributeList::FirstArgIndex);
758 if (const Function *F = getCalledFunction())
759 if (F->getAttributes().hasAttrSomewhere(Attribute::Returned, &Index) &&
761 return getArgOperand(Index - AttributeList::FirstArgIndex);
766 bool InvokeInst::hasRetAttr(Attribute::AttrKind Kind) const {
767 if (Attrs.hasAttribute(AttributeList::ReturnIndex, Kind))
770 // Look at the callee, if available.
771 if (const Function *F = getCalledFunction())
772 return F->getAttributes().hasAttribute(AttributeList::ReturnIndex, Kind);
776 bool InvokeInst::paramHasAttr(unsigned i, Attribute::AttrKind Kind) const {
777 assert(i < getNumArgOperands() && "Param index out of bounds!");
779 if (Attrs.hasParamAttribute(i, Kind))
781 if (const Function *F = getCalledFunction())
782 return F->getAttributes().hasParamAttribute(i, Kind);
786 bool InvokeInst::dataOperandHasImpliedAttr(unsigned i,
787 Attribute::AttrKind Kind) const {
788 // There are getNumOperands() - 3 data operands. The last three operands are
789 // the callee and the two successor basic blocks.
790 assert(i < (getNumOperands() - 2) && "Data operand index out of bounds!");
792 // The attribute A can either be directly specified, if the operand in
793 // question is an invoke argument; or be indirectly implied by the kind of its
794 // containing operand bundle, if the operand is a bundle operand.
796 // FIXME: Avoid these i - 1 calculations and update the API to use zero-based
798 if (i < (getNumArgOperands() + 1))
799 return paramHasAttr(i - 1, Kind);
801 assert(hasOperandBundles() && i >= (getBundleOperandsStartIndex() + 1) &&
802 "Must be either an invoke argument or an operand bundle!");
803 return bundleOperandHasAttr(i - 1, Kind);
806 void InvokeInst::addAttribute(unsigned i, Attribute::AttrKind Kind) {
807 AttributeList PAL = getAttributes();
808 PAL = PAL.addAttribute(getContext(), i, Kind);
812 void InvokeInst::addAttribute(unsigned i, Attribute Attr) {
813 AttributeList PAL = getAttributes();
814 PAL = PAL.addAttribute(getContext(), i, Attr);
818 void InvokeInst::addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
819 addAttribute(ArgNo + AttributeList::FirstArgIndex, Kind);
822 void InvokeInst::removeAttribute(unsigned i, Attribute::AttrKind Kind) {
823 AttributeList PAL = getAttributes();
824 PAL = PAL.removeAttribute(getContext(), i, Kind);
828 void InvokeInst::removeAttribute(unsigned i, StringRef Kind) {
829 AttributeList PAL = getAttributes();
830 PAL = PAL.removeAttribute(getContext(), i, Kind);
834 void InvokeInst::removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) {
835 removeAttribute(ArgNo + AttributeList::FirstArgIndex, Kind);
838 void InvokeInst::addDereferenceableAttr(unsigned i, uint64_t Bytes) {
839 AttributeList PAL = getAttributes();
840 PAL = PAL.addDereferenceableAttr(getContext(), i, Bytes);
844 void InvokeInst::addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes) {
845 AttributeList PAL = getAttributes();
846 PAL = PAL.addDereferenceableOrNullAttr(getContext(), i, Bytes);
850 LandingPadInst *InvokeInst::getLandingPadInst() const {
851 return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
854 //===----------------------------------------------------------------------===//
855 // ReturnInst Implementation
856 //===----------------------------------------------------------------------===//
858 ReturnInst::ReturnInst(const ReturnInst &RI)
859 : TerminatorInst(Type::getVoidTy(RI.getContext()), Instruction::Ret,
860 OperandTraits<ReturnInst>::op_end(this) -
862 RI.getNumOperands()) {
863 if (RI.getNumOperands())
864 Op<0>() = RI.Op<0>();
865 SubclassOptionalData = RI.SubclassOptionalData;
868 ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, Instruction *InsertBefore)
869 : TerminatorInst(Type::getVoidTy(C), Instruction::Ret,
870 OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
876 ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
877 : TerminatorInst(Type::getVoidTy(C), Instruction::Ret,
878 OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
884 ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
885 : TerminatorInst(Type::getVoidTy(Context), Instruction::Ret,
886 OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {
889 unsigned ReturnInst::getNumSuccessorsV() const {
890 return getNumSuccessors();
893 /// Out-of-line ReturnInst method, put here so the C++ compiler can choose to
894 /// emit the vtable for the class in this translation unit.
895 void ReturnInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) {
896 llvm_unreachable("ReturnInst has no successors!");
899 BasicBlock *ReturnInst::getSuccessorV(unsigned idx) const {
900 llvm_unreachable("ReturnInst has no successors!");
903 ReturnInst::~ReturnInst() = default;
905 //===----------------------------------------------------------------------===//
906 // ResumeInst Implementation
907 //===----------------------------------------------------------------------===//
909 ResumeInst::ResumeInst(const ResumeInst &RI)
910 : TerminatorInst(Type::getVoidTy(RI.getContext()), Instruction::Resume,
911 OperandTraits<ResumeInst>::op_begin(this), 1) {
912 Op<0>() = RI.Op<0>();
915 ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
916 : TerminatorInst(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
917 OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
921 ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
922 : TerminatorInst(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
923 OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
927 unsigned ResumeInst::getNumSuccessorsV() const {
928 return getNumSuccessors();
931 void ResumeInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) {
932 llvm_unreachable("ResumeInst has no successors!");
935 BasicBlock *ResumeInst::getSuccessorV(unsigned idx) const {
936 llvm_unreachable("ResumeInst has no successors!");
939 //===----------------------------------------------------------------------===//
940 // CleanupReturnInst Implementation
941 //===----------------------------------------------------------------------===//
// Operands are laid out at the *end* of the use list (op_end(this) - N), so
// Op<0> is the cleanuppad and, when present, Op<1> is the unwind destination
// (guarded by hasUnwindDest() in the copy ctor below).
943 CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
944 : TerminatorInst(CRI.getType(), Instruction::CleanupRet,
945 OperandTraits<CleanupReturnInst>::op_end(this) -
946 CRI.getNumOperands(),
947 CRI.getNumOperands()) {
// Copies the subclass bits too; bit 0 records unwind-dest presence (see init).
948 setInstructionSubclassData(CRI.getSubclassDataFromInstruction());
949 Op<0>() = CRI.Op<0>();
950 if (CRI.hasUnwindDest())
951 Op<1>() = CRI.Op<1>();
// NOTE(review): extraction dropped the guards/statements at orig. 955, 957,
// 959-961 (presumably `if (UnwindBB)` around the subclass-bit set and the
// `Op<1>() = UnwindBB;` store) — confirm against upstream before compiling.
954 void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
956 setInstructionSubclassData(getSubclassDataFromInstruction() | 1);
958 Op<0>() = CleanupPad;
963 CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
964 unsigned Values, Instruction *InsertBefore)
965 : TerminatorInst(Type::getVoidTy(CleanupPad->getContext()),
966 Instruction::CleanupRet,
967 OperandTraits<CleanupReturnInst>::op_end(this) - Values,
968 Values, InsertBefore) {
969 init(CleanupPad, UnwindBB);
972 CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
973 unsigned Values, BasicBlock *InsertAtEnd)
974 : TerminatorInst(Type::getVoidTy(CleanupPad->getContext()),
975 Instruction::CleanupRet,
976 OperandTraits<CleanupReturnInst>::op_end(this) - Values,
977 Values, InsertAtEnd) {
978 init(CleanupPad, UnwindBB);
// The only possible successor is the unwind destination.
981 BasicBlock *CleanupReturnInst::getSuccessorV(unsigned Idx) const {
983 return getUnwindDest();
986 unsigned CleanupReturnInst::getNumSuccessorsV() const {
987 return getNumSuccessors();
990 void CleanupReturnInst::setSuccessorV(unsigned Idx, BasicBlock *B) {
995 //===----------------------------------------------------------------------===//
996 // CatchReturnInst Implementation
997 //===----------------------------------------------------------------------===//
// A catchret always has exactly two operands (the `2` passed to the base):
// the catchpad and the successor block.
// NOTE(review): init()'s body (orig. 999-1000) and several ctor tails
// (orig. 1014-1016, 1022-1024) are elided in this extract.
998 void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
1003 CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
1004 : TerminatorInst(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
1005 OperandTraits<CatchReturnInst>::op_begin(this), 2) {
1006 Op<0>() = CRI.Op<0>();
1007 Op<1>() = CRI.Op<1>();
1010 CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
1011 Instruction *InsertBefore)
1012 : TerminatorInst(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
1013 OperandTraits<CatchReturnInst>::op_begin(this), 2,
1018 CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
1019 BasicBlock *InsertAtEnd)
1020 : TerminatorInst(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
1021 OperandTraits<CatchReturnInst>::op_begin(this), 2,
// Single successor: the block the catchret transfers control to.
1026 BasicBlock *CatchReturnInst::getSuccessorV(unsigned Idx) const {
1027 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
1028 return getSuccessor();
1031 unsigned CatchReturnInst::getNumSuccessorsV() const {
1032 return getNumSuccessors();
1035 void CatchReturnInst::setSuccessorV(unsigned Idx, BasicBlock *B) {
1036 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
1040 //===----------------------------------------------------------------------===//
1041 // CatchSwitchInst Implementation
1042 //===----------------------------------------------------------------------===//
// CatchSwitchInst has a growable handler list, so it uses "hung off" uses
// (allocHungoffUses / growHungoffUses below) instead of co-allocated operands
// — note the `nullptr, 0` operand arguments passed to the base class.
// NOTE(review): extraction dropped several lines (e.g. the guard at orig.
// 1050, likely `if (UnwindDest)`, before `++NumReservedValues;`, plus ctor
// tails and closing braces) — verify against upstream.
1044 CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
1045 unsigned NumReservedValues,
1046 const Twine &NameStr,
1047 Instruction *InsertBefore)
1048 : TerminatorInst(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
1051 ++NumReservedValues;
// The +1 reserves the slot for the parent pad operand (Op<0>, set in init).
1052 init(ParentPad, UnwindDest, NumReservedValues + 1);
1056 CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
1057 unsigned NumReservedValues,
1058 const Twine &NameStr, BasicBlock *InsertAtEnd)
1059 : TerminatorInst(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
1062 ++NumReservedValues;
1063 init(ParentPad, UnwindDest, NumReservedValues + 1);
1067 CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
1068 : TerminatorInst(CSI.getType(), Instruction::CatchSwitch, nullptr,
1069 CSI.getNumOperands()) {
1070 init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
1071 setNumHungOffUseOperands(ReservedSpace)
1072 Use *OL = getOperandList();
1073 const Use *InOL = CSI.getOperandList();
// Copy the handler operands; slot 0 (parent pad) was already set by init().
1074 for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
1078 void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
1079 unsigned NumReservedValues) {
1080 assert(ParentPad && NumReservedValues);
1082 ReservedSpace = NumReservedValues;
// 2 initial operands with an unwind dest, 1 without.
1083 setNumHungOffUseOperands(UnwindDest ? 2 : 1);
1084 allocHungoffUses(ReservedSpace);
1086 Op<0>() = ParentPad;
// Subclass-data bit 0 records that an unwind destination is present.
// NOTE(review): the `if (UnwindDest)` guard (orig. 1087) is elided here.
1088 setInstructionSubclassData(getSubclassDataFromInstruction() | 1);
1089 setUnwindDest(UnwindDest);
1093 /// growOperands - grow operands - This grows the operand list in response to a
1094 /// push_back style of operation. This grows the number of ops by 2 times.
1095 void CatchSwitchInst::growOperands(unsigned Size) {
1096 unsigned NumOperands = getNumOperands();
1097 assert(NumOperands >= 1);
1098 if (ReservedSpace >= NumOperands + Size)
// NOTE(review): `Size / 2` binds tighter than `+`, so this is
// (NumOperands + Size/2) * 2 — matches the historical upstream expression,
// but confirm the intended growth factor before "fixing" it.
1100 ReservedSpace = (NumOperands + Size / 2) * 2;
1101 growHungoffUses(ReservedSpace);
1104 void CatchSwitchInst::addHandler(BasicBlock *Handler) {
1105 unsigned OpNo = getNumOperands();
1107 assert(OpNo < ReservedSpace && "Growing didn't work!");
1108 setNumHungOffUseOperands(getNumOperands() + 1);
1109 getOperandList()[OpNo] = Handler;
1112 void CatchSwitchInst::removeHandler(handler_iterator HI) {
1113 // Move all subsequent handlers up one.
1114 Use *EndDst = op_end() - 1;
1115 for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
1116 *CurDst = *(CurDst + 1);
1117 // Null out the last handler use.
1120 setNumHungOffUseOperands(getNumOperands() - 1);
1123 BasicBlock *CatchSwitchInst::getSuccessorV(unsigned idx) const {
1124 return getSuccessor(idx);
1127 unsigned CatchSwitchInst::getNumSuccessorsV() const {
1128 return getNumSuccessors();
1131 void CatchSwitchInst::setSuccessorV(unsigned idx, BasicBlock *B) {
1132 setSuccessor(idx, B);
1135 //===----------------------------------------------------------------------===//
1136 // FuncletPadInst Implementation
1137 //===----------------------------------------------------------------------===//
// Common base for catchpad/cleanuppad. Operand layout: the Args first (copied
// into op_begin()), with the parent pad stored via setParentPad — one extra
// operand beyond Args (see the `1 + Args.size()` assertion).
1138 void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
1139 const Twine &NameStr) {
1140 assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
1141 std::copy(Args.begin(), Args.end(), op_begin());
1142 setParentPad(ParentPad);
// NOTE(review): setName(NameStr) (orig. 1143) and closing braces are elided
// from this extract.
1146 FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
1147 : Instruction(FPI.getType(), FPI.getOpcode(),
1148 OperandTraits<FuncletPadInst>::op_end(this) -
1149 FPI.getNumOperands(),
1150 FPI.getNumOperands()) {
1151 std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
1152 setParentPad(FPI.getParentPad());
1155 FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
1156 ArrayRef<Value *> Args, unsigned Values,
1157 const Twine &NameStr, Instruction *InsertBefore)
1158 : Instruction(ParentPad->getType(), Op,
1159 OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
1161 init(ParentPad, Args, NameStr);
1164 FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
1165 ArrayRef<Value *> Args, unsigned Values,
1166 const Twine &NameStr, BasicBlock *InsertAtEnd)
1167 : Instruction(ParentPad->getType(), Op,
1168 OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
1170 init(ParentPad, Args, NameStr);
1173 //===----------------------------------------------------------------------===//
1174 // UnreachableInst Implementation
1175 //===----------------------------------------------------------------------===//
// `unreachable` takes no operands (`nullptr, 0`) and, like ret/resume, has no
// successor blocks, so the successor accessors are unreachable.
1177 UnreachableInst::UnreachableInst(LLVMContext &Context,
1178 Instruction *InsertBefore)
1179 : TerminatorInst(Type::getVoidTy(Context), Instruction::Unreachable,
1180 nullptr, 0, InsertBefore) {
1182 UnreachableInst::UnreachableInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
1183 : TerminatorInst(Type::getVoidTy(Context), Instruction::Unreachable,
1184 nullptr, 0, InsertAtEnd) {
1187 unsigned UnreachableInst::getNumSuccessorsV() const {
1188 return getNumSuccessors();
1191 void UnreachableInst::setSuccessorV(unsigned idx, BasicBlock *NewSucc) {
1192 llvm_unreachable("UnreachableInst has no successors!");
1195 BasicBlock *UnreachableInst::getSuccessorV(unsigned idx) const {
1196 llvm_unreachable("UnreachableInst has no successors!");
1199 //===----------------------------------------------------------------------===//
1200 // BranchInst Implementation
1201 //===----------------------------------------------------------------------===//
// Branch operands are allocated from the *end* of the use list (op_end - N):
// 1 operand when unconditional (Op<-1> = target), 3 when conditional
// (Op<-1>/Op<-2> = successors, Op<-3> = condition — see the copy ctor).
1203 void BranchInst::AssertOK() {
1204 if (isConditional())
// The condition of a conditional branch must be i1.
1205 assert(getCondition()->getType()->isIntegerTy(1) &&
1206 "May only branch on boolean predicates!");
1209 BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
1210 : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1211 OperandTraits<BranchInst>::op_end(this) - 1,
1213 assert(IfTrue && "Branch destination may not be null!");
1217 BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
1218 Instruction *InsertBefore)
1219 : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1220 OperandTraits<BranchInst>::op_end(this) - 3,
1230 BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
1231 : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1232 OperandTraits<BranchInst>::op_end(this) - 1,
1234 assert(IfTrue && "Branch destination may not be null!");
1238 BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
1239 BasicBlock *InsertAtEnd)
1240 : TerminatorInst(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
1241 OperandTraits<BranchInst>::op_end(this) - 3,
1251 BranchInst::BranchInst(const BranchInst &BI) :
1252 TerminatorInst(Type::getVoidTy(BI.getContext()), Instruction::Br,
1253 OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
1254 BI.getNumOperands()) {
1255 Op<-1>() = BI.Op<-1>();
1256 if (BI.getNumOperands() != 1) {
1257 assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
1258 Op<-3>() = BI.Op<-3>();
1259 Op<-2>() = BI.Op<-2>();
1261 SubclassOptionalData = BI.SubclassOptionalData;
1264 void BranchInst::swapSuccessors() {
1265 assert(isConditional() &&
1266 "Cannot swap successors of an unconditional branch");
// Swapping the two successor uses flips taken/not-taken without touching the
// condition operand.
1267 Op<-1>().swap(Op<-2>());
1269 // Update profile metadata if present and it matches our structural
// NOTE(review): the profile-metadata swap code (orig. 1270-1272) is elided
// from this extract.
1274 BasicBlock *BranchInst::getSuccessorV(unsigned idx) const {
1275 return getSuccessor(idx);
1278 unsigned BranchInst::getNumSuccessorsV() const {
1279 return getNumSuccessors();
1282 void BranchInst::setSuccessorV(unsigned idx, BasicBlock *B) {
1283 setSuccessor(idx, B);
1286 //===----------------------------------------------------------------------===//
1287 // AllocaInst Implementation
1288 //===----------------------------------------------------------------------===//
// Canonicalize the array-size operand: a null Amt becomes the i32 constant 1.
// NOTE(review): the `if (!Amt)` guard (orig. 1291) and the trailing
// `return Amt;` are elided from this extract.
1290 static Value *getAISize(LLVMContext &Context, Value *Amt) {
1292 Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
1294 assert(!isa<BasicBlock>(Amt) &&
1295 "Passed basic block into allocation size parameter! Use other ctor");
1296 assert(Amt->getType()->isIntegerTy() &&
1297 "Allocation array size is not an integer!");
// Delegating ctors: fill in default ArraySize (nullptr => 1) and Align (0).
1302 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
1303 Instruction *InsertBefore)
1304 : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}
1306 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
1307 BasicBlock *InsertAtEnd)
1308 : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}
1310 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1311 const Twine &Name, Instruction *InsertBefore)
1312 : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/0, Name, InsertBefore) {}
1314 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1315 const Twine &Name, BasicBlock *InsertAtEnd)
1316 : AllocaInst(Ty, AddrSpace, ArraySize, /*Align=*/0, Name, InsertAtEnd) {}
// Full ctors: result type is a pointer to Ty in the requested address space.
1318 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1319 unsigned Align, const Twine &Name,
1320 Instruction *InsertBefore)
1321 : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
1322 getAISize(Ty->getContext(), ArraySize), InsertBefore),
1324 setAlignment(Align);
1325 assert(!Ty->isVoidTy() && "Cannot allocate void!");
1329 AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1330 unsigned Align, const Twine &Name,
1331 BasicBlock *InsertAtEnd)
1332 : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
1333 getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
1335 setAlignment(Align);
1336 assert(!Ty->isVoidTy() && "Cannot allocate void!");
1340 // Out of line virtual method, so the vtable, etc has a home.
1341 AllocaInst::~AllocaInst() = default;
// Alignment is stored as Log2(Align)+1 in the low 5 subclass-data bits
// (mask ~31), so 0 encodes "no alignment specified".
1343 void AllocaInst::setAlignment(unsigned Align) {
1344 assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
1345 assert(Align <= MaximumAlignment &&
1346 "Alignment is greater than MaximumAlignment!");
1347 setInstructionSubclassData((getSubclassDataFromInstruction() & ~31) |
1348 (Log2_32(Align) + 1));
1349 assert(getAlignment() == Align && "Alignment representation error!");
// An alloca is an "array allocation" unless its size operand is constant 1.
// NOTE(review): the non-constant fallthrough (orig. 1355, `return true;`)
// is elided here.
1352 bool AllocaInst::isArrayAllocation() const {
1353 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
1354 return !CI->isOne();
1358 /// isStaticAlloca - Return true if this alloca is in the entry block of the
1359 /// function and is a constant size. If so, the code generator will fold it
1360 /// into the prolog/epilog code, so it is basically free.
1361 bool AllocaInst::isStaticAlloca() const {
1362 // Must be constant size.
1363 if (!isa<ConstantInt>(getArraySize())) return false;
1365 // Must be in the entry block.
1366 const BasicBlock *Parent = getParent();
1367 return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca();
1370 //===----------------------------------------------------------------------===//
1371 // LoadInst Implementation
1372 //===----------------------------------------------------------------------===//
// Pre-LLVM-5 API: atomic ctors still take SynchronizationScope (CrossThread),
// not SyncScope::ID. Most ctors delegate down to the full atomic form with
// isVolatile=false, Align=0 (natural), NotAtomic, CrossThread defaults.
1374 void LoadInst::AssertOK() {
1375 assert(getOperand(0)->getType()->isPointerTy() &&
1376 "Ptr must have pointer type.");
1377 assert(!(isAtomic() && getAlignment() == 0) &&
1378 "Alignment required for atomic load");
1381 LoadInst::LoadInst(Value *Ptr, const Twine &Name, Instruction *InsertBef)
1382 : LoadInst(Ptr, Name, /*isVolatile=*/false, InsertBef) {}
1384 LoadInst::LoadInst(Value *Ptr, const Twine &Name, BasicBlock *InsertAE)
1385 : LoadInst(Ptr, Name, /*isVolatile=*/false, InsertAE) {}
1387 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1388 Instruction *InsertBef)
1389 : LoadInst(Ty, Ptr, Name, isVolatile, /*Align=*/0, InsertBef) {}
1391 LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
1392 BasicBlock *InsertAE)
1393 : LoadInst(Ptr, Name, isVolatile, /*Align=*/0, InsertAE) {}
1395 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1396 unsigned Align, Instruction *InsertBef)
1397 : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1398 CrossThread, InsertBef) {}
1400 LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
1401 unsigned Align, BasicBlock *InsertAE)
1402 : LoadInst(Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1403 CrossThread, InsertAE) {}
// Full ctor: the explicit result type Ty must match the pointee type.
1405 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1406 unsigned Align, AtomicOrdering Order,
1407 SynchronizationScope SynchScope, Instruction *InsertBef)
1408 : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
1409 assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
1410 setVolatile(isVolatile);
1411 setAlignment(Align);
1412 setAtomic(Order, SynchScope);
1417 LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
1418 unsigned Align, AtomicOrdering Order,
1419 SynchronizationScope SynchScope,
1420 BasicBlock *InsertAE)
// Result type deduced from the pointer operand's pointee type.
1421 : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
1422 Load, Ptr, InsertAE) {
1423 setVolatile(isVolatile);
1424 setAlignment(Align);
1425 setAtomic(Order, SynchScope);
// const char* name variants (used by the C API); name only set if non-empty.
1430 LoadInst::LoadInst(Value *Ptr, const char *Name, Instruction *InsertBef)
1431 : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
1432 Load, Ptr, InsertBef) {
1435 setAtomic(AtomicOrdering::NotAtomic);
1437 if (Name && Name[0]) setName(Name);
1440 LoadInst::LoadInst(Value *Ptr, const char *Name, BasicBlock *InsertAE)
1441 : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
1442 Load, Ptr, InsertAE) {
1445 setAtomic(AtomicOrdering::NotAtomic);
1447 if (Name && Name[0]) setName(Name);
1450 LoadInst::LoadInst(Type *Ty, Value *Ptr, const char *Name, bool isVolatile,
1451 Instruction *InsertBef)
1452 : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
1453 assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
1454 setVolatile(isVolatile);
1456 setAtomic(AtomicOrdering::NotAtomic);
1458 if (Name && Name[0]) setName(Name);
1461 LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile,
1462 BasicBlock *InsertAE)
1463 : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
1464 Load, Ptr, InsertAE) {
1465 setVolatile(isVolatile);
1467 setAtomic(AtomicOrdering::NotAtomic);
1469 if (Name && Name[0]) setName(Name);
// Alignment encoded as Log2(Align)+1 in subclass-data bits 1-5 (bit 0 is
// the volatile flag, hence the << 1 / ~(31 << 1) mask).
1472 void LoadInst::setAlignment(unsigned Align) {
1473 assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
1474 assert(Align <= MaximumAlignment &&
1475 "Alignment is greater than MaximumAlignment!");
1476 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
1477 ((Log2_32(Align)+1)<<1));
1478 assert(getAlignment() == Align && "Alignment representation error!");
1481 //===----------------------------------------------------------------------===//
1482 // StoreInst Implementation
1483 //===----------------------------------------------------------------------===//
// Operand layout: operand 0 is the stored value, operand 1 the pointer;
// the pointee type must equal the value type (typed-pointer era).
1485 void StoreInst::AssertOK() {
1486 assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
1487 assert(getOperand(1)->getType()->isPointerTy() &&
1488 "Ptr must have pointer type!");
1489 assert(getOperand(0)->getType() ==
1490 cast<PointerType>(getOperand(1)->getType())->getElementType()
1491 && "Ptr must be a pointer to Val type!");
1492 assert(!(isAtomic() && getAlignment() == 0) &&
1493 "Alignment required for atomic store");
// Delegating ctors mirror LoadInst's defaults: non-volatile, Align=0,
// NotAtomic, CrossThread.
1496 StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
1497 : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1499 StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
1500 : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}
1502 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1503 Instruction *InsertBefore)
1504 : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertBefore) {}
1506 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1507 BasicBlock *InsertAtEnd)
1508 : StoreInst(val, addr, isVolatile, /*Align=*/0, InsertAtEnd) {}
1510 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
1511 Instruction *InsertBefore)
1512 : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
1513 CrossThread, InsertBefore) {}
1515 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
1516 BasicBlock *InsertAtEnd)
1517 : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
1518 CrossThread, InsertAtEnd) {}
// Full ctors: a store produces no value, hence the void result type.
1520 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1521 unsigned Align, AtomicOrdering Order,
1522 SynchronizationScope SynchScope,
1523 Instruction *InsertBefore)
1524 : Instruction(Type::getVoidTy(val->getContext()), Store,
1525 OperandTraits<StoreInst>::op_begin(this),
1526 OperandTraits<StoreInst>::operands(this),
1530 setVolatile(isVolatile);
1531 setAlignment(Align);
1532 setAtomic(Order, SynchScope);
1536 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1537 unsigned Align, AtomicOrdering Order,
1538 SynchronizationScope SynchScope,
1539 BasicBlock *InsertAtEnd)
1540 : Instruction(Type::getVoidTy(val->getContext()), Store,
1541 OperandTraits<StoreInst>::op_begin(this),
1542 OperandTraits<StoreInst>::operands(this),
1546 setVolatile(isVolatile);
1547 setAlignment(Align);
1548 setAtomic(Order, SynchScope);
// Same Log2+1-in-bits-1..5 encoding as LoadInst::setAlignment.
1552 void StoreInst::setAlignment(unsigned Align) {
1553 assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
1554 assert(Align <= MaximumAlignment &&
1555 "Alignment is greater than MaximumAlignment!");
1556 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
1557 ((Log2_32(Align)+1) << 1));
1558 assert(getAlignment() == Align && "Alignment representation error!");
1561 //===----------------------------------------------------------------------===//
1562 // AtomicCmpXchgInst Implementation
1563 //===----------------------------------------------------------------------===//
// Operands: 0 = Ptr, 1 = Cmp (expected), 2 = NewVal; both value operands must
// match the pointee type. Orderings are validated per the cmpxchg rules:
// both atomic, failure no stronger than success, failure without release.
1565 void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
1566 AtomicOrdering SuccessOrdering,
1567 AtomicOrdering FailureOrdering,
1568 SynchronizationScope SynchScope) {
// NOTE(review): operand assignments (orig. 1569-1571) are elided here.
1572 setSuccessOrdering(SuccessOrdering);
1573 setFailureOrdering(FailureOrdering);
1574 setSynchScope(SynchScope);
1576 assert(getOperand(0) && getOperand(1) && getOperand(2) &&
1577 "All operands must be non-null!");
1578 assert(getOperand(0)->getType()->isPointerTy() &&
1579 "Ptr must have pointer type!");
1580 assert(getOperand(1)->getType() ==
1581 cast<PointerType>(getOperand(0)->getType())->getElementType()
1582 && "Ptr must be a pointer to Cmp type!");
1583 assert(getOperand(2)->getType() ==
1584 cast<PointerType>(getOperand(0)->getType())->getElementType()
1585 && "Ptr must be a pointer to NewVal type!");
1586 assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
1587 "AtomicCmpXchg instructions must be atomic!");
1588 assert(FailureOrdering != AtomicOrdering::NotAtomic &&
1589 "AtomicCmpXchg instructions must be atomic!");
1590 assert(!isStrongerThan(FailureOrdering, SuccessOrdering) &&
1591 "AtomicCmpXchg failure argument shall be no stronger than the success "
1593 assert(FailureOrdering != AtomicOrdering::Release &&
1594 FailureOrdering != AtomicOrdering::AcquireRelease &&
1595 "AtomicCmpXchg failure ordering cannot include release semantics");
// Result is { ValueType, i1 }: the loaded value plus a success flag.
1598 AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
1599 AtomicOrdering SuccessOrdering,
1600 AtomicOrdering FailureOrdering,
1601 SynchronizationScope SynchScope,
1602 Instruction *InsertBefore)
1604 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1605 AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1606 OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
1607 Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SynchScope);
1610 AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
1611 AtomicOrdering SuccessOrdering,
1612 AtomicOrdering FailureOrdering,
1613 SynchronizationScope SynchScope,
1614 BasicBlock *InsertAtEnd)
1616 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1617 AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1618 OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
1619 Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SynchScope);
1622 //===----------------------------------------------------------------------===//
1623 // AtomicRMWInst Implementation
1624 //===----------------------------------------------------------------------===//
// Operands: 0 = Ptr, 1 = Val; result type is Val's type (the old value is
// returned). Must be atomic, and Val must match the pointee type.
1626 void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1627 AtomicOrdering Ordering,
1628 SynchronizationScope SynchScope) {
// NOTE(review): operand assignments (orig. 1629-1630) are elided here.
1631 setOperation(Operation);
1632 setOrdering(Ordering);
1633 setSynchScope(SynchScope);
1635 assert(getOperand(0) && getOperand(1) &&
1636 "All operands must be non-null!");
1637 assert(getOperand(0)->getType()->isPointerTy() &&
1638 "Ptr must have pointer type!");
1639 assert(getOperand(1)->getType() ==
1640 cast<PointerType>(getOperand(0)->getType())->getElementType()
1641 && "Ptr must be a pointer to Val type!");
1642 assert(Ordering != AtomicOrdering::NotAtomic &&
1643 "AtomicRMW instructions must be atomic!");
1646 AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
1647 AtomicOrdering Ordering,
1648 SynchronizationScope SynchScope,
1649 Instruction *InsertBefore)
1650 : Instruction(Val->getType(), AtomicRMW,
1651 OperandTraits<AtomicRMWInst>::op_begin(this),
1652 OperandTraits<AtomicRMWInst>::operands(this),
1654 Init(Operation, Ptr, Val, Ordering, SynchScope);
1657 AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
1658 AtomicOrdering Ordering,
1659 SynchronizationScope SynchScope,
1660 BasicBlock *InsertAtEnd)
1661 : Instruction(Val->getType(), AtomicRMW,
1662 OperandTraits<AtomicRMWInst>::op_begin(this),
1663 OperandTraits<AtomicRMWInst>::operands(this),
1665 Init(Operation, Ptr, Val, Ordering, SynchScope);
1668 //===----------------------------------------------------------------------===//
1669 // FenceInst Implementation
1670 //===----------------------------------------------------------------------===//
// A fence has no operands (`nullptr, 0`) and void type; only the ordering
// and synchronization scope are recorded.
1672 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
1673 SynchronizationScope SynchScope,
1674 Instruction *InsertBefore)
1675 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
1676 setOrdering(Ordering);
1677 setSynchScope(SynchScope);
1680 FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
1681 SynchronizationScope SynchScope,
1682 BasicBlock *InsertAtEnd)
1683 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
1684 setOrdering(Ordering);
1685 setSynchScope(SynchScope);
1688 //===----------------------------------------------------------------------===//
1689 // GetElementPtrInst Implementation
1690 //===----------------------------------------------------------------------===//
1692 void GetElementPtrInst::anchor() {}
// Operand layout: operand 0 is the base pointer, operands 1..N the indices
// (copied to op_begin() + 1 below).
1694 void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1695 const Twine &Name) {
1696 assert(getNumOperands() == 1 + IdxList.size() &&
1697 "NumOperands not initialized?")
1699 std::copy(IdxList.begin(), IdxList.end(), op_begin() + 1);
1703 GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
1704 : Instruction(GEPI.getType(), GetElementPtr,
1705 OperandTraits<GetElementPtrInst>::op_end(this) -
1706 GEPI.getNumOperands(),
1707 GEPI.getNumOperands()),
1708 SourceElementType(GEPI.SourceElementType),
1709 ResultElementType(GEPI.ResultElementType) {
1710 std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
// Preserves inbounds and similar flags carried in SubclassOptionalData.
1711 SubclassOptionalData = GEPI.SubclassOptionalData;
1714 /// getIndexedType - Returns the type of the element that would be accessed with
1715 /// a gep instruction with the specified parameters.
1717 /// The Idxs pointer should point to a continuous piece of memory containing the
1718 /// indices, either as Value* or uint64_t.
1720 /// A null type is returned if the indices are invalid for the specified
1723 template <typename IndexTy>
1724 static Type *getIndexedTypeInternal(Type *Agg, ArrayRef<IndexTy> IdxList) {
1725 // Handle the special case of the empty set index set, which is always valid.
1726 if (IdxList.empty())
1729 // If there is at least one index, the top level type must be sized, otherwise
1730 // it cannot be 'stepped over'.
1731 if (!Agg->isSized())
// NOTE(review): the `return` statements at orig. 1727 and 1732 are elided
// from this extract.
// CurIdx starts at 1: the first index steps "through" the pointer itself and
// needs no composite-type check.
1734 unsigned CurIdx = 1;
1735 for (; CurIdx != IdxList.size(); ++CurIdx) {
1736 CompositeType *CT = dyn_cast<CompositeType>(Agg);
1737 if (!CT || CT->isPointerTy()) return nullptr;
1738 IndexTy Index = IdxList[CurIdx];
1739 if (!CT->indexValid(Index)) return nullptr;
1740 Agg = CT->getTypeAtIndex(Index);
1742 return CurIdx == IdxList.size() ? Agg : nullptr;
// Public wrappers instantiating the template for each index representation.
1745 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<Value *> IdxList) {
1746 return getIndexedTypeInternal(Ty, IdxList);
1749 Type *GetElementPtrInst::getIndexedType(Type *Ty,
1750 ArrayRef<Constant *> IdxList) {
1751 return getIndexedTypeInternal(Ty, IdxList);
1754 Type *GetElementPtrInst::getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList) {
1755 return getIndexedTypeInternal(Ty, IdxList);
1758 /// hasAllZeroIndices - Return true if all of the indices of this GEP are
1759 /// zeros. If so, the result pointer and the first operand have the same
1760 /// value, just potentially different types.
1761 bool GetElementPtrInst::hasAllZeroIndices() const {
1762 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1763 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
1764 if (!CI->isZero()) return false;
// NOTE(review): the non-constant else branch and final `return true;`
// (orig. 1765-1770) are elided from this extract.
1772 /// hasAllConstantIndices - Return true if all of the indices of this GEP are
1773 /// constant integers. If so, the result pointer and the first operand have
1774 /// a constant offset between them.
1775 bool GetElementPtrInst::hasAllConstantIndices() const {
1776 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
1777 if (!isa<ConstantInt>(getOperand(i)))
// The inbounds flag lives on the GEPOperator view of this instruction.
1783 void GetElementPtrInst::setIsInBounds(bool B) {
1784 cast<GEPOperator>(this)->setIsInBounds(B);
1787 bool GetElementPtrInst::isInBounds() const {
1788 return cast<GEPOperator>(this)->isInBounds();
1791 bool GetElementPtrInst::accumulateConstantOffset(const DataLayout &DL,
1792 APInt &Offset) const {
1793 // Delegate to the generic GEPOperator implementation.
1794 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
1797 //===----------------------------------------------------------------------===//
1798 // ExtractElementInst Implementation
1799 //===----------------------------------------------------------------------===//
// Result type is the element type of the vector operand.
1801 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
1803 Instruction *InsertBef)
1804 : Instruction(cast<VectorType>(Val->getType())->getElementType(),
1806 OperandTraits<ExtractElementInst>::op_begin(this),
1808 assert(isValidOperands(Val, Index) &&
1809 "Invalid extractelement instruction operands!");
1815 ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
1817 BasicBlock *InsertAE)
1818 : Instruction(cast<VectorType>(Val->getType())->getElementType(),
1820 OperandTraits<ExtractElementInst>::op_begin(this),
1822 assert(isValidOperands(Val, Index) &&
1823 "Invalid extractelement instruction operands!");
// Valid iff Val is a vector and Index is any integer type.
// NOTE(review): the `return false;` / `return true;` lines (orig. 1832-1834)
// are elided from this extract.
1830 bool ExtractElementInst::isValidOperands(const Value *Val, const Value *Index) {
1831 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
1836 //===----------------------------------------------------------------------===//
1837 // InsertElementInst Implementation
1838 //===----------------------------------------------------------------------===//
// Result type is the same vector type as the first operand.
1840 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
1842 Instruction *InsertBef)
1843 : Instruction(Vec->getType(), InsertElement,
1844 OperandTraits<InsertElementInst>::op_begin(this),
1846 assert(isValidOperands(Vec, Elt, Index) &&
1847 "Invalid insertelement instruction operands!");
1854 InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
1856 BasicBlock *InsertAE)
1857 : Instruction(Vec->getType(), InsertElement,
1858 OperandTraits<InsertElementInst>::op_begin(this),
1860 assert(isValidOperands(Vec, Elt, Index) &&
1861 "Invalid insertelement instruction operands!");
// Valid iff Vec is a vector, Elt matches its element type, and Index is an
// integer. NOTE(review): the final `return true;` (orig. ~1880) is elided.
1869 bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
1870 const Value *Index) {
1871 if (!Vec->getType()->isVectorTy())
1872 return false; // First operand of insertelement must be vector type.
1874 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
1875 return false;// Second operand of insertelement must be vector element type.
1877 if (!Index->getType()->isIntegerTy())
1878 return false; // Third operand of insertelement must be i32.
1882 //===----------------------------------------------------------------------===//
1883 // ShuffleVectorInst Implementation
1884 //===----------------------------------------------------------------------===//
// Result type: a vector with V1's element type but the *mask's* element
// count — shuffles may change vector width.
1886 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1888 Instruction *InsertBefore)
1889 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1890 cast<VectorType>(Mask->getType())->getNumElements()),
1892 OperandTraits<ShuffleVectorInst>::op_begin(this),
1893 OperandTraits<ShuffleVectorInst>::operands(this),
1895 assert(isValidOperands(V1, V2, Mask) &&
1896 "Invalid shuffle vector instruction operands!");
1903 ShuffleVectorInst::ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
1905 BasicBlock *InsertAtEnd)
1906 : Instruction(VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
1907 cast<VectorType>(Mask->getType())->getNumElements()),
1909 OperandTraits<ShuffleVectorInst>::op_begin(this),
1910 OperandTraits<ShuffleVectorInst>::operands(this),
1912 assert(isValidOperands(V1, V2, Mask) &&
1913 "Invalid shuffle vector instruction operands!");
// Valid iff V1/V2 are vectors of the same type and Mask is a vector of i32
// whose constant entries are undef or index into the 2*V1Size concatenation
// of V1 and V2. NOTE(review): several `return` lines (orig. 1925, 1930,
// 1934, 1941, 1943-1947, 1953-1955) are elided from this extract.
1921 bool ShuffleVectorInst::isValidOperands(const Value *V1, const Value *V2,
1922 const Value *Mask) {
1923 // V1 and V2 must be vectors of the same type.
1924 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
1927 // Mask must be vector of i32.
1928 auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
1929 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32))
1932 // Check to see if Mask is valid.
1933 if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
1936 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
1937 unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
1938 for (Value *Op : MV->operands()) {
1939 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
// An index >= 2*V1Size is out of range for the concatenated inputs.
1940 if (CI->uge(V1Size*2))
1942 } else if (!isa<UndefValue>(Op)) {
1949 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1950 unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
1951 for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i)
1952 if (CDS->getElementAsInteger(i) >= V1Size*2)
1957 // The bitcode reader can create a place holder for a forward reference
1958 // used as the shuffle mask. When this occurs, the shuffle mask will
1959 // fall into this case and fail. To avoid this error, do this bit of
1960 // ugliness to allow such a mask pass.
1961 if (const auto *CE = dyn_cast<ConstantExpr>(Mask))
1962 if (CE->getOpcode() == Instruction::UserOp1)
1968 int ShuffleVectorInst::getMaskValue(Constant *Mask, unsigned i) {
1969 assert(i < Mask->getType()->getVectorNumElements() && "Index out of range");
1970 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask))
1971 return CDS->getElementAsInteger(i);
1972 Constant *C = Mask->getAggregateElement(i);
1973 if (isa<UndefValue>(C))
1975 return cast<ConstantInt>(C)->getZExtValue();
1978 void ShuffleVectorInst::getShuffleMask(Constant *Mask,
1979 SmallVectorImpl<int> &Result) {
1980 unsigned NumElts = Mask->getType()->getVectorNumElements();
1982 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
1983 for (unsigned i = 0; i != NumElts; ++i)
1984 Result.push_back(CDS->getElementAsInteger(i));
1987 for (unsigned i = 0; i != NumElts; ++i) {
1988 Constant *C = Mask->getAggregateElement(i);
1989 Result.push_back(isa<UndefValue>(C) ? -1 :
1990 cast<ConstantInt>(C)->getZExtValue());
1994 //===----------------------------------------------------------------------===//
1995 // InsertValueInst Class
1996 //===----------------------------------------------------------------------===//
1998 void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
1999 const Twine &Name) {
2000 assert(getNumOperands() == 2 && "NumOperands not initialized?");
2002 // There's no fundamental reason why we require at least one index
2003 // (other than weirdness with &*IdxBegin being invalid; see
2004 // getelementptr's init routine for example). But there's no
2005 // present need to support it.
2006 assert(!Idxs.empty() && "InsertValueInst must have at least one index");
2008 assert(ExtractValueInst::getIndexedType(Agg->getType(), Idxs) ==
2009 Val->getType() && "Inserted value must match indexed type!");
2013 Indices.append(Idxs.begin(), Idxs.end());
2017 InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
2018 : Instruction(IVI.getType(), InsertValue,
2019 OperandTraits<InsertValueInst>::op_begin(this), 2),
2020 Indices(IVI.Indices) {
2021 Op<0>() = IVI.getOperand(0);
2022 Op<1>() = IVI.getOperand(1);
2023 SubclassOptionalData = IVI.SubclassOptionalData;
2026 //===----------------------------------------------------------------------===//
2027 // ExtractValueInst Class
2028 //===----------------------------------------------------------------------===//
2030 void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
2031 assert(getNumOperands() == 1 && "NumOperands not initialized?");
2033 // There's no fundamental reason why we require at least one index.
2034 // But there's no present need to support it.
2035 assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
2037 Indices.append(Idxs.begin(), Idxs.end());
2041 ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
2042 : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
2043 Indices(EVI.Indices) {
2044 SubclassOptionalData = EVI.SubclassOptionalData;
2047 // getIndexedType - Returns the type of the element that would be extracted
2048 // with an extractvalue instruction with the specified parameters.
2050 // A null type is returned if the indices are invalid for the specified
2053 Type *ExtractValueInst::getIndexedType(Type *Agg,
2054 ArrayRef<unsigned> Idxs) {
2055 for (unsigned Index : Idxs) {
2056 // We can't use CompositeType::indexValid(Index) here.
2057 // indexValid() always returns true for arrays because getelementptr allows
2058 // out-of-bounds indices. Since we don't allow those for extractvalue and
2059 // insertvalue we need to check array indexing manually.
2060 // Since the only other types we can index into are struct types it's just
2061 // as easy to check those manually as well.
2062 if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
2063 if (Index >= AT->getNumElements())
2065 } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
2066 if (Index >= ST->getNumElements())
2069 // Not a valid type to index into.
2073 Agg = cast<CompositeType>(Agg)->getTypeAtIndex(Index);
2075 return const_cast<Type*>(Agg);
2078 //===----------------------------------------------------------------------===//
2079 // BinaryOperator Class
2080 //===----------------------------------------------------------------------===//
2082 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
2083 Type *Ty, const Twine &Name,
2084 Instruction *InsertBefore)
2085 : Instruction(Ty, iType,
2086 OperandTraits<BinaryOperator>::op_begin(this),
2087 OperandTraits<BinaryOperator>::operands(this),
2095 BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
2096 Type *Ty, const Twine &Name,
2097 BasicBlock *InsertAtEnd)
2098 : Instruction(Ty, iType,
2099 OperandTraits<BinaryOperator>::op_begin(this),
2100 OperandTraits<BinaryOperator>::operands(this),
2108 void BinaryOperator::init(BinaryOps iType) {
2109 Value *LHS = getOperand(0), *RHS = getOperand(1);
2110 (void)LHS; (void)RHS; // Silence warnings.
2111 assert(LHS->getType() == RHS->getType() &&
2112 "Binary operator operand types must match!");
2117 assert(getType() == LHS->getType() &&
2118 "Arithmetic operation should return same type as operands!");
2119 assert(getType()->isIntOrIntVectorTy() &&
2120 "Tried to create an integer operation on a non-integer type!");
2122 case FAdd: case FSub:
2124 assert(getType() == LHS->getType() &&
2125 "Arithmetic operation should return same type as operands!");
2126 assert(getType()->isFPOrFPVectorTy() &&
2127 "Tried to create a floating-point operation on a "
2128 "non-floating-point type!");
2132 assert(getType() == LHS->getType() &&
2133 "Arithmetic operation should return same type as operands!");
2134 assert((getType()->isIntegerTy() || (getType()->isVectorTy() &&
2135 cast<VectorType>(getType())->getElementType()->isIntegerTy())) &&
2136 "Incorrect operand type (not integer) for S/UDIV");
2139 assert(getType() == LHS->getType() &&
2140 "Arithmetic operation should return same type as operands!");
2141 assert(getType()->isFPOrFPVectorTy() &&
2142 "Incorrect operand type (not floating point) for FDIV");
2146 assert(getType() == LHS->getType() &&
2147 "Arithmetic operation should return same type as operands!");
2148 assert((getType()->isIntegerTy() || (getType()->isVectorTy() &&
2149 cast<VectorType>(getType())->getElementType()->isIntegerTy())) &&
2150 "Incorrect operand type (not integer) for S/UREM");
2153 assert(getType() == LHS->getType() &&
2154 "Arithmetic operation should return same type as operands!");
2155 assert(getType()->isFPOrFPVectorTy() &&
2156 "Incorrect operand type (not floating point) for FREM");
2161 assert(getType() == LHS->getType() &&
2162 "Shift operation should return same type as operands!");
2163 assert((getType()->isIntegerTy() ||
2164 (getType()->isVectorTy() &&
2165 cast<VectorType>(getType())->getElementType()->isIntegerTy())) &&
2166 "Tried to create a shift operation on a non-integral type!");
2170 assert(getType() == LHS->getType() &&
2171 "Logical operation should return same type as operands!");
2172 assert((getType()->isIntegerTy() ||
2173 (getType()->isVectorTy() &&
2174 cast<VectorType>(getType())->getElementType()->isIntegerTy())) &&
2175 "Tried to create a logical operation on a non-integral type!");
2183 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
2185 Instruction *InsertBefore) {
2186 assert(S1->getType() == S2->getType() &&
2187 "Cannot create binary operator with two operands of differing type!");
2188 return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
2191 BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,
2193 BasicBlock *InsertAtEnd) {
2194 BinaryOperator *Res = Create(Op, S1, S2, Name);
2195 InsertAtEnd->getInstList().push_back(Res);
2199 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
2200 Instruction *InsertBefore) {
2201 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2202 return new BinaryOperator(Instruction::Sub,
2204 Op->getType(), Name, InsertBefore);
2207 BinaryOperator *BinaryOperator::CreateNeg(Value *Op, const Twine &Name,
2208 BasicBlock *InsertAtEnd) {
2209 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2210 return new BinaryOperator(Instruction::Sub,
2212 Op->getType(), Name, InsertAtEnd);
2215 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
2216 Instruction *InsertBefore) {
2217 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2218 return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertBefore);
2221 BinaryOperator *BinaryOperator::CreateNSWNeg(Value *Op, const Twine &Name,
2222 BasicBlock *InsertAtEnd) {
2223 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2224 return BinaryOperator::CreateNSWSub(zero, Op, Name, InsertAtEnd);
2227 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
2228 Instruction *InsertBefore) {
2229 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2230 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertBefore);
2233 BinaryOperator *BinaryOperator::CreateNUWNeg(Value *Op, const Twine &Name,
2234 BasicBlock *InsertAtEnd) {
2235 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2236 return BinaryOperator::CreateNUWSub(zero, Op, Name, InsertAtEnd);
2239 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name,
2240 Instruction *InsertBefore) {
2241 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2242 return new BinaryOperator(Instruction::FSub, zero, Op,
2243 Op->getType(), Name, InsertBefore);
2246 BinaryOperator *BinaryOperator::CreateFNeg(Value *Op, const Twine &Name,
2247 BasicBlock *InsertAtEnd) {
2248 Value *zero = ConstantFP::getZeroValueForNegation(Op->getType());
2249 return new BinaryOperator(Instruction::FSub, zero, Op,
2250 Op->getType(), Name, InsertAtEnd);
2253 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
2254 Instruction *InsertBefore) {
2255 Constant *C = Constant::getAllOnesValue(Op->getType());
2256 return new BinaryOperator(Instruction::Xor, Op, C,
2257 Op->getType(), Name, InsertBefore);
2260 BinaryOperator *BinaryOperator::CreateNot(Value *Op, const Twine &Name,
2261 BasicBlock *InsertAtEnd) {
2262 Constant *AllOnes = Constant::getAllOnesValue(Op->getType());
2263 return new BinaryOperator(Instruction::Xor, Op, AllOnes,
2264 Op->getType(), Name, InsertAtEnd);
2267 // isConstantAllOnes - Helper function for several functions below
2268 static inline bool isConstantAllOnes(const Value *V) {
2269 if (const Constant *C = dyn_cast<Constant>(V))
2270 return C->isAllOnesValue();
2274 bool BinaryOperator::isNeg(const Value *V) {
2275 if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V))
2276 if (Bop->getOpcode() == Instruction::Sub)
2277 if (Constant *C = dyn_cast<Constant>(Bop->getOperand(0)))
2278 return C->isNegativeZeroValue();
2282 bool BinaryOperator::isFNeg(const Value *V, bool IgnoreZeroSign) {
2283 if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V))
2284 if (Bop->getOpcode() == Instruction::FSub)
2285 if (Constant *C = dyn_cast<Constant>(Bop->getOperand(0))) {
2286 if (!IgnoreZeroSign)
2287 IgnoreZeroSign = cast<Instruction>(V)->hasNoSignedZeros();
2288 return !IgnoreZeroSign ? C->isNegativeZeroValue() : C->isZeroValue();
2293 bool BinaryOperator::isNot(const Value *V) {
2294 if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(V))
2295 return (Bop->getOpcode() == Instruction::Xor &&
2296 (isConstantAllOnes(Bop->getOperand(1)) ||
2297 isConstantAllOnes(Bop->getOperand(0))));
2301 Value *BinaryOperator::getNegArgument(Value *BinOp) {
2302 return cast<BinaryOperator>(BinOp)->getOperand(1);
2305 const Value *BinaryOperator::getNegArgument(const Value *BinOp) {
2306 return getNegArgument(const_cast<Value*>(BinOp));
2309 Value *BinaryOperator::getFNegArgument(Value *BinOp) {
2310 return cast<BinaryOperator>(BinOp)->getOperand(1);
2313 const Value *BinaryOperator::getFNegArgument(const Value *BinOp) {
2314 return getFNegArgument(const_cast<Value*>(BinOp));
2317 Value *BinaryOperator::getNotArgument(Value *BinOp) {
2318 assert(isNot(BinOp) && "getNotArgument on non-'not' instruction!");
2319 BinaryOperator *BO = cast<BinaryOperator>(BinOp);
2320 Value *Op0 = BO->getOperand(0);
2321 Value *Op1 = BO->getOperand(1);
2322 if (isConstantAllOnes(Op0)) return Op1;
2324 assert(isConstantAllOnes(Op1));
2328 const Value *BinaryOperator::getNotArgument(const Value *BinOp) {
2329 return getNotArgument(const_cast<Value*>(BinOp));
2332 // Exchange the two operands to this instruction. This instruction is safe to
2333 // use on any binary instruction and does not modify the semantics of the
2334 // instruction. If the instruction is order-dependent (SetLT f.e.), the opcode
2336 bool BinaryOperator::swapOperands() {
2337 if (!isCommutative())
2338 return true; // Can't commute operands
2339 Op<0>().swap(Op<1>());
2343 //===----------------------------------------------------------------------===//
2344 // FPMathOperator Class
2345 //===----------------------------------------------------------------------===//
2347 float FPMathOperator::getFPAccuracy() const {
2349 cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
2352 ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
2353 return Accuracy->getValueAPF().convertToFloat();
//===----------------------------------------------------------------------===//
//                                CastInst Class
//===----------------------------------------------------------------------===//
2360 void CastInst::anchor() {}
2362 // Just determine if this cast only deals with integral->integral conversion.
2363 bool CastInst::isIntegerCast() const {
2364 switch (getOpcode()) {
2365 default: return false;
2366 case Instruction::ZExt:
2367 case Instruction::SExt:
2368 case Instruction::Trunc:
2370 case Instruction::BitCast:
2371 return getOperand(0)->getType()->isIntegerTy() &&
2372 getType()->isIntegerTy();
2376 bool CastInst::isLosslessCast() const {
2377 // Only BitCast can be lossless, exit fast if we're not BitCast
2378 if (getOpcode() != Instruction::BitCast)
2381 // Identity cast is always lossless
2382 Type *SrcTy = getOperand(0)->getType();
2383 Type *DstTy = getType();
2387 // Pointer to pointer is always lossless.
2388 if (SrcTy->isPointerTy())
2389 return DstTy->isPointerTy();
2390 return false; // Other types have no identity values
2393 /// This function determines if the CastInst does not require any bits to be
2394 /// changed in order to effect the cast. Essentially, it identifies cases where
2395 /// no code gen is necessary for the cast, hence the name no-op cast. For
2396 /// example, the following are all no-op casts:
2397 /// # bitcast i32* %x to i8*
2398 /// # bitcast <2 x i32> %x to <4 x i16>
2399 /// # ptrtoint i32* %x to i32 ; on 32-bit plaforms only
2400 /// @brief Determine if the described cast is a no-op.
2401 bool CastInst::isNoopCast(Instruction::CastOps Opcode,
2406 default: llvm_unreachable("Invalid CastOp");
2407 case Instruction::Trunc:
2408 case Instruction::ZExt:
2409 case Instruction::SExt:
2410 case Instruction::FPTrunc:
2411 case Instruction::FPExt:
2412 case Instruction::UIToFP:
2413 case Instruction::SIToFP:
2414 case Instruction::FPToUI:
2415 case Instruction::FPToSI:
2416 case Instruction::AddrSpaceCast:
2417 // TODO: Target informations may give a more accurate answer here.
2419 case Instruction::BitCast:
2420 return true; // BitCast never modifies bits.
2421 case Instruction::PtrToInt:
2422 return IntPtrTy->getScalarSizeInBits() ==
2423 DestTy->getScalarSizeInBits();
2424 case Instruction::IntToPtr:
2425 return IntPtrTy->getScalarSizeInBits() ==
2426 SrcTy->getScalarSizeInBits();
2430 /// @brief Determine if a cast is a no-op.
2431 bool CastInst::isNoopCast(Type *IntPtrTy) const {
2432 return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy);
2435 bool CastInst::isNoopCast(const DataLayout &DL) const {
2436 Type *PtrOpTy = nullptr;
2437 if (getOpcode() == Instruction::PtrToInt)
2438 PtrOpTy = getOperand(0)->getType();
2439 else if (getOpcode() == Instruction::IntToPtr)
2440 PtrOpTy = getType();
2443 PtrOpTy ? DL.getIntPtrType(PtrOpTy) : DL.getIntPtrType(getContext(), 0);
2445 return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), IntPtrTy);
2448 /// This function determines if a pair of casts can be eliminated and what
2449 /// opcode should be used in the elimination. This assumes that there are two
2450 /// instructions like this:
2451 /// * %F = firstOpcode SrcTy %x to MidTy
2452 /// * %S = secondOpcode MidTy %F to DstTy
2453 /// The function returns a resultOpcode so these two casts can be replaced with:
2454 /// * %Replacement = resultOpcode %SrcTy %x to DstTy
2455 /// If no such cast is permitted, the function returns 0.
2456 unsigned CastInst::isEliminableCastPair(
2457 Instruction::CastOps firstOp, Instruction::CastOps secondOp,
2458 Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
2459 Type *DstIntPtrTy) {
2460 // Define the 144 possibilities for these two cast instructions. The values
2461 // in this matrix determine what to do in a given situation and select the
2462 // case in the switch below. The rows correspond to firstOp, the columns
2463 // correspond to secondOp. In looking at the table below, keep in mind
2464 // the following cast properties:
2466 // Size Compare Source Destination
2467 // Operator Src ? Size Type Sign Type Sign
2468 // -------- ------------ ------------------- ---------------------
2469 // TRUNC > Integer Any Integral Any
2470 // ZEXT < Integral Unsigned Integer Any
2471 // SEXT < Integral Signed Integer Any
2472 // FPTOUI n/a FloatPt n/a Integral Unsigned
2473 // FPTOSI n/a FloatPt n/a Integral Signed
2474 // UITOFP n/a Integral Unsigned FloatPt n/a
2475 // SITOFP n/a Integral Signed FloatPt n/a
2476 // FPTRUNC > FloatPt n/a FloatPt n/a
2477 // FPEXT < FloatPt n/a FloatPt n/a
2478 // PTRTOINT n/a Pointer n/a Integral Unsigned
2479 // INTTOPTR n/a Integral Unsigned Pointer n/a
2480 // BITCAST = FirstClass n/a FirstClass n/a
2481 // ADDRSPCST n/a Pointer n/a Pointer n/a
2483 // NOTE: some transforms are safe, but we consider them to be non-profitable.
2484 // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
2485 // into "fptoui double to i64", but this loses information about the range
2486 // of the produced value (we no longer know the top-part is all zeros).
2487 // Further this conversion is often much more expensive for typical hardware,
2488 // and causes issues when building libgcc. We disallow fptosi+sext for the
2490 const unsigned numCastOps =
2491 Instruction::CastOpsEnd - Instruction::CastOpsBegin;
2492 static const uint8_t CastResults[numCastOps][numCastOps] = {
2493 // T F F U S F F P I B A -+
2494 // R Z S P P I I T P 2 N T S |
2495 // U E E 2 2 2 2 R E I T C C +- secondOp
2496 // N X X U S F F N X N 2 V V |
2497 // C T T I I P P C T T P T T -+
2498 { 1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc -+
2499 { 8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt |
2500 { 8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt |
2501 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI |
2502 { 0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI |
2503 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP +- firstOp
2504 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP |
2505 { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc |
2506 { 99,99,99, 2, 2,99,99,10, 2,99,99, 4, 0}, // FPExt |
2507 { 1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt |
2508 { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr |
2509 { 5, 5, 5, 6, 6, 5, 5, 6, 6,16, 5, 1,14}, // BitCast |
2510 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
2513 // TODO: This logic could be encoded into the table above and handled in the
2515 // If either of the casts are a bitcast from scalar to vector, disallow the
2516 // merging. However, any pair of bitcasts are allowed.
2517 bool IsFirstBitcast = (firstOp == Instruction::BitCast);
2518 bool IsSecondBitcast = (secondOp == Instruction::BitCast);
2519 bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;
2521 // Check if any of the casts convert scalars <-> vectors.
2522 if ((IsFirstBitcast && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
2523 (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
2524 if (!AreBothBitcasts)
2527 int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
2528 [secondOp-Instruction::CastOpsBegin];
2531 // Categorically disallowed.
2534 // Allowed, use first cast's opcode.
2537 // Allowed, use second cast's opcode.
2540 // No-op cast in second op implies firstOp as long as the DestTy
2541 // is integer and we are not converting between a vector and a
2543 if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
2547 // No-op cast in second op implies firstOp as long as the DestTy
2548 // is floating point.
2549 if (DstTy->isFloatingPointTy())
2553 // No-op cast in first op implies secondOp as long as the SrcTy
2555 if (SrcTy->isIntegerTy())
2559 // No-op cast in first op implies secondOp as long as the SrcTy
2560 // is a floating point.
2561 if (SrcTy->isFloatingPointTy())
2565 // Cannot simplify if address spaces are different!
2566 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
2569 unsigned MidSize = MidTy->getScalarSizeInBits();
2570 // We can still fold this without knowing the actual sizes as long we
2571 // know that the intermediate pointer is the largest possible
2573 // FIXME: Is this always true?
2575 return Instruction::BitCast;
2577 // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
2578 if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
2580 unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
2581 if (MidSize >= PtrSize)
2582 return Instruction::BitCast;
2586 // ext, trunc -> bitcast, if the SrcTy and DstTy are same size
2587 // ext, trunc -> ext, if sizeof(SrcTy) < sizeof(DstTy)
2588 // ext, trunc -> trunc, if sizeof(SrcTy) > sizeof(DstTy)
2589 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2590 unsigned DstSize = DstTy->getScalarSizeInBits();
2591 if (SrcSize == DstSize)
2592 return Instruction::BitCast;
2593 else if (SrcSize < DstSize)
2598 // zext, sext -> zext, because sext can't sign extend after zext
2599 return Instruction::ZExt;
2601 // fpext followed by ftrunc is allowed if the bit size returned to is
2602 // the same as the original, in which case its just a bitcast
2604 return Instruction::BitCast;
2605 return 0; // If the types are not the same we can't eliminate it.
2607 // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
2610 unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
2611 unsigned SrcSize = SrcTy->getScalarSizeInBits();
2612 unsigned DstSize = DstTy->getScalarSizeInBits();
2613 if (SrcSize <= PtrSize && SrcSize == DstSize)
2614 return Instruction::BitCast;
2618 // addrspacecast, addrspacecast -> bitcast, if SrcAS == DstAS
2619 // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
2620 if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
2621 return Instruction::AddrSpaceCast;
2622 return Instruction::BitCast;
2624 // FIXME: this state can be merged with (1), but the following assert
2625 // is useful to check the correcteness of the sequence due to semantic
2626 // change of bitcast.
2628 SrcTy->isPtrOrPtrVectorTy() &&
2629 MidTy->isPtrOrPtrVectorTy() &&
2630 DstTy->isPtrOrPtrVectorTy() &&
2631 SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
2632 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
2633 "Illegal addrspacecast, bitcast sequence!");
2634 // Allowed, use first cast's opcode
2637 // bitcast, addrspacecast -> addrspacecast if the element type of
2638 // bitcast's source is the same as that of addrspacecast's destination.
2639 if (SrcTy->getScalarType()->getPointerElementType() ==
2640 DstTy->getScalarType()->getPointerElementType())
2641 return Instruction::AddrSpaceCast;
2644 // FIXME: this state can be merged with (1), but the following assert
2645 // is useful to check the correcteness of the sequence due to semantic
2646 // change of bitcast.
2648 SrcTy->isIntOrIntVectorTy() &&
2649 MidTy->isPtrOrPtrVectorTy() &&
2650 DstTy->isPtrOrPtrVectorTy() &&
2651 MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
2652 "Illegal inttoptr, bitcast sequence!");
2653 // Allowed, use first cast's opcode
2656 // FIXME: this state can be merged with (2), but the following assert
2657 // is useful to check the correcteness of the sequence due to semantic
2658 // change of bitcast.
2660 SrcTy->isPtrOrPtrVectorTy() &&
2661 MidTy->isPtrOrPtrVectorTy() &&
2662 DstTy->isIntOrIntVectorTy() &&
2663 SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
2664 "Illegal bitcast, ptrtoint sequence!");
2665 // Allowed, use second cast's opcode
2668 // (sitofp (zext x)) -> (uitofp x)
2669 return Instruction::UIToFP;
2671 // Cast combination can't happen (error in input). This is for all cases
2672 // where the MidTy is not the same for the two cast instructions.
2673 llvm_unreachable("Invalid Cast Combination");
2675 llvm_unreachable("Error in CastResults table!!!");
2679 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
2680 const Twine &Name, Instruction *InsertBefore) {
2681 assert(castIsValid(op, S, Ty) && "Invalid cast!");
2682 // Construct and return the appropriate CastInst subclass
2684 case Trunc: return new TruncInst (S, Ty, Name, InsertBefore);
2685 case ZExt: return new ZExtInst (S, Ty, Name, InsertBefore);
2686 case SExt: return new SExtInst (S, Ty, Name, InsertBefore);
2687 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertBefore);
2688 case FPExt: return new FPExtInst (S, Ty, Name, InsertBefore);
2689 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertBefore);
2690 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertBefore);
2691 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertBefore);
2692 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertBefore);
2693 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertBefore);
2694 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertBefore);
2695 case BitCast: return new BitCastInst (S, Ty, Name, InsertBefore);
2696 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
2697 default: llvm_unreachable("Invalid opcode provided");
2701 CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
2702 const Twine &Name, BasicBlock *InsertAtEnd) {
2703 assert(castIsValid(op, S, Ty) && "Invalid cast!");
2704 // Construct and return the appropriate CastInst subclass
2706 case Trunc: return new TruncInst (S, Ty, Name, InsertAtEnd);
2707 case ZExt: return new ZExtInst (S, Ty, Name, InsertAtEnd);
2708 case SExt: return new SExtInst (S, Ty, Name, InsertAtEnd);
2709 case FPTrunc: return new FPTruncInst (S, Ty, Name, InsertAtEnd);
2710 case FPExt: return new FPExtInst (S, Ty, Name, InsertAtEnd);
2711 case UIToFP: return new UIToFPInst (S, Ty, Name, InsertAtEnd);
2712 case SIToFP: return new SIToFPInst (S, Ty, Name, InsertAtEnd);
2713 case FPToUI: return new FPToUIInst (S, Ty, Name, InsertAtEnd);
2714 case FPToSI: return new FPToSIInst (S, Ty, Name, InsertAtEnd);
2715 case PtrToInt: return new PtrToIntInst (S, Ty, Name, InsertAtEnd);
2716 case IntToPtr: return new IntToPtrInst (S, Ty, Name, InsertAtEnd);
2717 case BitCast: return new BitCastInst (S, Ty, Name, InsertAtEnd);
2718 case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
2719 default: llvm_unreachable("Invalid opcode provided");
2723 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
2725 Instruction *InsertBefore) {
2726 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
2727 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
2728 return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
2731 CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
2733 BasicBlock *InsertAtEnd) {
2734 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
2735 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
2736 return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
2739 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
2741 Instruction *InsertBefore) {
2742 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
2743 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
2744 return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
2747 CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
2749 BasicBlock *InsertAtEnd) {
2750 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
2751 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
2752 return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
2755 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
2757 Instruction *InsertBefore) {
2758 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
2759 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
2760 return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
2763 CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
2765 BasicBlock *InsertAtEnd) {
2766 if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
2767 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
2768 return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
2771 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
2773 BasicBlock *InsertAtEnd) {
2774 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
2775 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
2777 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
2778 assert((!Ty->isVectorTy() ||
2779 Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) &&
2782 if (Ty->isIntOrIntVectorTy())
2783 return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);
2785 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
2788 /// @brief Create a BitCast or a PtrToInt cast instruction
2789 CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,
2791 Instruction *InsertBefore) {
2792 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
2793 assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
2795 assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
2796 assert((!Ty->isVectorTy() ||
2797 Ty->getVectorNumElements() == S->getType()->getVectorNumElements()) &&
2800 if (Ty->isIntOrIntVectorTy())
2801 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
2803 return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
2806 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
2809 BasicBlock *InsertAtEnd) {
2810 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
2811 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
2813 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
2814 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);
2816 return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
2819 CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast(
2822 Instruction *InsertBefore) {
2823 assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
2824 assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");
2826 if (S->getType()->getPointerAddressSpace() != Ty->getPointerAddressSpace())
2827 return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);
2829 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
2832 CastInst *CastInst::CreateBitOrPointerCast(Value *S, Type *Ty,
2834 Instruction *InsertBefore) {
2835 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
2836 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
2837 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
2838 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
2840 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
2843 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
2844 bool isSigned, const Twine &Name,
2845 Instruction *InsertBefore) {
2846 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
2847 "Invalid integer cast");
2848 unsigned SrcBits = C->getType()->getScalarSizeInBits();
2849 unsigned DstBits = Ty->getScalarSizeInBits();
2850 Instruction::CastOps opcode =
2851 (SrcBits == DstBits ? Instruction::BitCast :
2852 (SrcBits > DstBits ? Instruction::Trunc :
2853 (isSigned ? Instruction::SExt : Instruction::ZExt)));
2854 return Create(opcode, C, Ty, Name, InsertBefore);
2857 CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
2858 bool isSigned, const Twine &Name,
2859 BasicBlock *InsertAtEnd) {
2860 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
2862 unsigned SrcBits = C->getType()->getScalarSizeInBits();
2863 unsigned DstBits = Ty->getScalarSizeInBits();
2864 Instruction::CastOps opcode =
2865 (SrcBits == DstBits ? Instruction::BitCast :
2866 (SrcBits > DstBits ? Instruction::Trunc :
2867 (isSigned ? Instruction::SExt : Instruction::ZExt)));
2868 return Create(opcode, C, Ty, Name, InsertAtEnd);
2871 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
2873 Instruction *InsertBefore) {
2874 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
2876 unsigned SrcBits = C->getType()->getScalarSizeInBits();
2877 unsigned DstBits = Ty->getScalarSizeInBits();
2878 Instruction::CastOps opcode =
2879 (SrcBits == DstBits ? Instruction::BitCast :
2880 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
2881 return Create(opcode, C, Ty, Name, InsertBefore);
2884 CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
2886 BasicBlock *InsertAtEnd) {
2887 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
2889 unsigned SrcBits = C->getType()->getScalarSizeInBits();
2890 unsigned DstBits = Ty->getScalarSizeInBits();
2891 Instruction::CastOps opcode =
2892 (SrcBits == DstBits ? Instruction::BitCast :
2893 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
2894 return Create(opcode, C, Ty, Name, InsertAtEnd);
2897 // Check whether it is valid to call getCastOpcode for these types.
2898 // This routine must be kept in sync with getCastOpcode.
2899 bool CastInst::isCastable(Type *SrcTy, Type *DestTy) {
2900 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
2903 if (SrcTy == DestTy)
2906 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
2907 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
2908 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
2909 // An element by element cast. Valid if casting the elements is valid.
2910 SrcTy = SrcVecTy->getElementType();
2911 DestTy = DestVecTy->getElementType();
2914 // Get the bit sizes, we'll need these
2915 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
2916 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
2918 // Run through the possibilities ...
2919 if (DestTy->isIntegerTy()) { // Casting to integral
2920 if (SrcTy->isIntegerTy()) // Casting from integral
2922 if (SrcTy->isFloatingPointTy()) // Casting from floating pt
2924 if (SrcTy->isVectorTy()) // Casting from vector
2925 return DestBits == SrcBits;
2926 // Casting from something else
2927 return SrcTy->isPointerTy();
2929 if (DestTy->isFloatingPointTy()) { // Casting to floating pt
2930 if (SrcTy->isIntegerTy()) // Casting from integral
2932 if (SrcTy->isFloatingPointTy()) // Casting from floating pt
2934 if (SrcTy->isVectorTy()) // Casting from vector
2935 return DestBits == SrcBits;
2936 // Casting from something else
2939 if (DestTy->isVectorTy()) // Casting to vector
2940 return DestBits == SrcBits;
2941 if (DestTy->isPointerTy()) { // Casting to pointer
2942 if (SrcTy->isPointerTy()) // Casting from pointer
2944 return SrcTy->isIntegerTy(); // Casting from integral
2946 if (DestTy->isX86_MMXTy()) {
2947 if (SrcTy->isVectorTy())
2948 return DestBits == SrcBits; // 64-bit vector to MMX
2950 } // Casting to something else
2954 bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
2955 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
2958 if (SrcTy == DestTy)
2961 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
2962 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
2963 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
2964 // An element by element cast. Valid if casting the elements is valid.
2965 SrcTy = SrcVecTy->getElementType();
2966 DestTy = DestVecTy->getElementType();
2971 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
2972 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
2973 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
2977 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
2978 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
2980 // Could still have vectors of pointers if the number of elements doesn't
2982 if (SrcBits == 0 || DestBits == 0)
2985 if (SrcBits != DestBits)
2988 if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
2994 bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,
2995 const DataLayout &DL) {
2996 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
2997 if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
2998 return IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy);
2999 if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
3000 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
3001 return IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy);
3003 return isBitCastable(SrcTy, DestTy);
3006 // Provide a way to get a "cast" where the cast opcode is inferred from the
3007 // types and size of the operand. This, basically, is a parallel of the
3008 // logic in the castIsValid function below. This axiom should hold:
3009 // castIsValid( getCastOpcode(Val, Ty), Val, Ty)
3010 // should not assert in castIsValid. In other words, this produces a "correct"
3011 // casting opcode for the arguments passed to it.
3012 // This routine must be kept in sync with isCastable.
3013 Instruction::CastOps
3014 CastInst::getCastOpcode(
3015 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
3016 Type *SrcTy = Src->getType();
3018 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
3019 "Only first class types are castable!");
3021 if (SrcTy == DestTy)
3024 // FIXME: Check address space sizes here
3025 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
3026 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
3027 if (SrcVecTy->getNumElements() == DestVecTy->getNumElements()) {
3028 // An element by element cast. Find the appropriate opcode based on the
3030 SrcTy = SrcVecTy->getElementType();
3031 DestTy = DestVecTy->getElementType();
3034 // Get the bit sizes, we'll need these
3035 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
3036 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
3038 // Run through the possibilities ...
3039 if (DestTy->isIntegerTy()) { // Casting to integral
3040 if (SrcTy->isIntegerTy()) { // Casting from integral
3041 if (DestBits < SrcBits)
3042 return Trunc; // int -> smaller int
3043 else if (DestBits > SrcBits) { // its an extension
3045 return SExt; // signed -> SEXT
3047 return ZExt; // unsigned -> ZEXT
3049 return BitCast; // Same size, No-op cast
3051 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3053 return FPToSI; // FP -> sint
3055 return FPToUI; // FP -> uint
3056 } else if (SrcTy->isVectorTy()) {
3057 assert(DestBits == SrcBits &&
3058 "Casting vector to integer of different width");
3059 return BitCast; // Same size, no-op cast
3061 assert(SrcTy->isPointerTy() &&
3062 "Casting from a value that is not first-class type");
3063 return PtrToInt; // ptr -> int
3065 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
3066 if (SrcTy->isIntegerTy()) { // Casting from integral
3068 return SIToFP; // sint -> FP
3070 return UIToFP; // uint -> FP
3071 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
3072 if (DestBits < SrcBits) {
3073 return FPTrunc; // FP -> smaller FP
3074 } else if (DestBits > SrcBits) {
3075 return FPExt; // FP -> larger FP
3077 return BitCast; // same size, no-op cast
3079 } else if (SrcTy->isVectorTy()) {
3080 assert(DestBits == SrcBits &&
3081 "Casting vector to floating point of different width");
3082 return BitCast; // same size, no-op cast
3084 llvm_unreachable("Casting pointer or non-first class to float");
3085 } else if (DestTy->isVectorTy()) {
3086 assert(DestBits == SrcBits &&
3087 "Illegal cast to vector (wrong type or size)");
3089 } else if (DestTy->isPointerTy()) {
3090 if (SrcTy->isPointerTy()) {
3091 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
3092 return AddrSpaceCast;
3093 return BitCast; // ptr -> ptr
3094 } else if (SrcTy->isIntegerTy()) {
3095 return IntToPtr; // int -> ptr
3097 llvm_unreachable("Casting pointer to other than pointer or int");
3098 } else if (DestTy->isX86_MMXTy()) {
3099 if (SrcTy->isVectorTy()) {
3100 assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
3101 return BitCast; // 64-bit vector to MMX
3103 llvm_unreachable("Illegal cast to X86_MMX");
3105 llvm_unreachable("Casting to type that is not first-class");
3108 //===----------------------------------------------------------------------===//
3109 // CastInst SubClass Constructors
3110 //===----------------------------------------------------------------------===//
3112 /// Check that the construction parameters for a CastInst are correct. This
3113 /// could be broken out into the separate constructors but it is useful to have
3114 /// it in one place and to eliminate the redundant code for getting the sizes
3115 /// of the types involved.
3117 CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
3118 // Check for type sanity on the arguments
3119 Type *SrcTy = S->getType();
3121 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
3122 SrcTy->isAggregateType() || DstTy->isAggregateType())
3125 // Get the size of the types in bits, we'll need this later
3126 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
3127 unsigned DstBitSize = DstTy->getScalarSizeInBits();
3129 // If these are vector types, get the lengths of the vectors (using zero for
3130 // scalar types means that checking that vector lengths match also checks that
3131 // scalars are not being converted to vectors or vectors to scalars).
3132 unsigned SrcLength = SrcTy->isVectorTy() ?
3133 cast<VectorType>(SrcTy)->getNumElements() : 0;
3134 unsigned DstLength = DstTy->isVectorTy() ?
3135 cast<VectorType>(DstTy)->getNumElements() : 0;
3137 // Switch on the opcode provided
3139 default: return false; // This is an input error
3140 case Instruction::Trunc:
3141 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3142 SrcLength == DstLength && SrcBitSize > DstBitSize;
3143 case Instruction::ZExt:
3144 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3145 SrcLength == DstLength && SrcBitSize < DstBitSize;
3146 case Instruction::SExt:
3147 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
3148 SrcLength == DstLength && SrcBitSize < DstBitSize;
3149 case Instruction::FPTrunc:
3150 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3151 SrcLength == DstLength && SrcBitSize > DstBitSize;
3152 case Instruction::FPExt:
3153 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
3154 SrcLength == DstLength && SrcBitSize < DstBitSize;
3155 case Instruction::UIToFP:
3156 case Instruction::SIToFP:
3157 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
3158 SrcLength == DstLength;
3159 case Instruction::FPToUI:
3160 case Instruction::FPToSI:
3161 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
3162 SrcLength == DstLength;
3163 case Instruction::PtrToInt:
3164 if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy))
3166 if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
3167 if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
3169 return SrcTy->getScalarType()->isPointerTy() &&
3170 DstTy->getScalarType()->isIntegerTy();
3171 case Instruction::IntToPtr:
3172 if (isa<VectorType>(SrcTy) != isa<VectorType>(DstTy))
3174 if (VectorType *VT = dyn_cast<VectorType>(SrcTy))
3175 if (VT->getNumElements() != cast<VectorType>(DstTy)->getNumElements())
3177 return SrcTy->getScalarType()->isIntegerTy() &&
3178 DstTy->getScalarType()->isPointerTy();
3179 case Instruction::BitCast: {
3180 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3181 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3183 // BitCast implies a no-op cast of type only. No bits change.
3184 // However, you can't cast pointers to anything but pointers.
3185 if (!SrcPtrTy != !DstPtrTy)
3188 // For non-pointer cases, the cast is okay if the source and destination bit
3189 // widths are identical.
3191 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
3193 // If both are pointers then the address spaces must match.
3194 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
3197 // A vector of pointers must have the same number of elements.
3198 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3199 if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy))
3200 return (SrcVecTy->getNumElements() == DstVecTy->getNumElements());
3207 case Instruction::AddrSpaceCast: {
3208 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
3212 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
3216 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
3219 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
3220 if (VectorType *DstVecTy = dyn_cast<VectorType>(DstTy))
3221 return (SrcVecTy->getNumElements() == DstVecTy->getNumElements());
3231 TruncInst::TruncInst(
3232 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3233 ) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
3234 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3237 TruncInst::TruncInst(
3238 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3239 ) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
3240 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
3244 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3245 ) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
3246 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3250 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3251 ) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
3252 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
3255 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3256 ) : CastInst(Ty, SExt, S, Name, InsertBefore) {
3257 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3261 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3262 ) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
3263 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
3266 FPTruncInst::FPTruncInst(
3267 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3268 ) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
3269 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3272 FPTruncInst::FPTruncInst(
3273 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3274 ) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
3275 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
3278 FPExtInst::FPExtInst(
3279 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3280 ) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
3281 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3284 FPExtInst::FPExtInst(
3285 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3286 ) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
3287 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
3290 UIToFPInst::UIToFPInst(
3291 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3292 ) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
3293 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3296 UIToFPInst::UIToFPInst(
3297 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3298 ) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
3299 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
3302 SIToFPInst::SIToFPInst(
3303 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3304 ) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
3305 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3308 SIToFPInst::SIToFPInst(
3309 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3310 ) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
3311 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
3314 FPToUIInst::FPToUIInst(
3315 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3316 ) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
3317 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3320 FPToUIInst::FPToUIInst(
3321 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3322 ) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
3323 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
3326 FPToSIInst::FPToSIInst(
3327 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3328 ) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
3329 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3332 FPToSIInst::FPToSIInst(
3333 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3334 ) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
3335 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
3338 PtrToIntInst::PtrToIntInst(
3339 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3340 ) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
3341 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3344 PtrToIntInst::PtrToIntInst(
3345 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3346 ) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
3347 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
3350 IntToPtrInst::IntToPtrInst(
3351 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3352 ) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
3353 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3356 IntToPtrInst::IntToPtrInst(
3357 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3358 ) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
3359 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
3362 BitCastInst::BitCastInst(
3363 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3364 ) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
3365 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3368 BitCastInst::BitCastInst(
3369 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3370 ) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
3371 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
3374 AddrSpaceCastInst::AddrSpaceCastInst(
3375 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
3376 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
3377 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3380 AddrSpaceCastInst::AddrSpaceCastInst(
3381 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
3382 ) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
3383 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
3386 //===----------------------------------------------------------------------===//
3388 //===----------------------------------------------------------------------===//
3390 void CmpInst::anchor() {}
3392 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
3393 Value *RHS, const Twine &Name, Instruction *InsertBefore)
3394 : Instruction(ty, op,
3395 OperandTraits<CmpInst>::op_begin(this),
3396 OperandTraits<CmpInst>::operands(this),
3400 setPredicate((Predicate)predicate);
3404 CmpInst::CmpInst(Type *ty, OtherOps op, Predicate predicate, Value *LHS,
3405 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
3406 : Instruction(ty, op,
3407 OperandTraits<CmpInst>::op_begin(this),
3408 OperandTraits<CmpInst>::operands(this),
3412 setPredicate((Predicate)predicate);
3417 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
3418 const Twine &Name, Instruction *InsertBefore) {
3419 if (Op == Instruction::ICmp) {
3421 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
3424 return new ICmpInst(CmpInst::Predicate(predicate),
3429 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
3432 return new FCmpInst(CmpInst::Predicate(predicate),
3437 CmpInst::Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2,
3438 const Twine &Name, BasicBlock *InsertAtEnd) {
3439 if (Op == Instruction::ICmp) {
3440 return new ICmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
3443 return new FCmpInst(*InsertAtEnd, CmpInst::Predicate(predicate),
3447 void CmpInst::swapOperands() {
3448 if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
3451 cast<FCmpInst>(this)->swapOperands();
3454 bool CmpInst::isCommutative() const {
3455 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
3456 return IC->isCommutative();
3457 return cast<FCmpInst>(this)->isCommutative();
3460 bool CmpInst::isEquality() const {
3461 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
3462 return IC->isEquality();
3463 return cast<FCmpInst>(this)->isEquality();
3466 CmpInst::Predicate CmpInst::getInversePredicate(Predicate pred) {
3468 default: llvm_unreachable("Unknown cmp predicate!");
3469 case ICMP_EQ: return ICMP_NE;
3470 case ICMP_NE: return ICMP_EQ;
3471 case ICMP_UGT: return ICMP_ULE;
3472 case ICMP_ULT: return ICMP_UGE;
3473 case ICMP_UGE: return ICMP_ULT;
3474 case ICMP_ULE: return ICMP_UGT;
3475 case ICMP_SGT: return ICMP_SLE;
3476 case ICMP_SLT: return ICMP_SGE;
3477 case ICMP_SGE: return ICMP_SLT;
3478 case ICMP_SLE: return ICMP_SGT;
3480 case FCMP_OEQ: return FCMP_UNE;
3481 case FCMP_ONE: return FCMP_UEQ;
3482 case FCMP_OGT: return FCMP_ULE;
3483 case FCMP_OLT: return FCMP_UGE;
3484 case FCMP_OGE: return FCMP_ULT;
3485 case FCMP_OLE: return FCMP_UGT;
3486 case FCMP_UEQ: return FCMP_ONE;
3487 case FCMP_UNE: return FCMP_OEQ;
3488 case FCMP_UGT: return FCMP_OLE;
3489 case FCMP_ULT: return FCMP_OGE;
3490 case FCMP_UGE: return FCMP_OLT;
3491 case FCMP_ULE: return FCMP_OGT;
3492 case FCMP_ORD: return FCMP_UNO;
3493 case FCMP_UNO: return FCMP_ORD;
3494 case FCMP_TRUE: return FCMP_FALSE;
3495 case FCMP_FALSE: return FCMP_TRUE;
3499 StringRef CmpInst::getPredicateName(Predicate Pred) {
3501 default: return "unknown";
3502 case FCmpInst::FCMP_FALSE: return "false";
3503 case FCmpInst::FCMP_OEQ: return "oeq";
3504 case FCmpInst::FCMP_OGT: return "ogt";
3505 case FCmpInst::FCMP_OGE: return "oge";
3506 case FCmpInst::FCMP_OLT: return "olt";
3507 case FCmpInst::FCMP_OLE: return "ole";
3508 case FCmpInst::FCMP_ONE: return "one";
3509 case FCmpInst::FCMP_ORD: return "ord";
3510 case FCmpInst::FCMP_UNO: return "uno";
3511 case FCmpInst::FCMP_UEQ: return "ueq";
3512 case FCmpInst::FCMP_UGT: return "ugt";
3513 case FCmpInst::FCMP_UGE: return "uge";
3514 case FCmpInst::FCMP_ULT: return "ult";
3515 case FCmpInst::FCMP_ULE: return "ule";
3516 case FCmpInst::FCMP_UNE: return "une";
3517 case FCmpInst::FCMP_TRUE: return "true";
3518 case ICmpInst::ICMP_EQ: return "eq";
3519 case ICmpInst::ICMP_NE: return "ne";
3520 case ICmpInst::ICMP_SGT: return "sgt";
3521 case ICmpInst::ICMP_SGE: return "sge";
3522 case ICmpInst::ICMP_SLT: return "slt";
3523 case ICmpInst::ICMP_SLE: return "sle";
3524 case ICmpInst::ICMP_UGT: return "ugt";
3525 case ICmpInst::ICMP_UGE: return "uge";
3526 case ICmpInst::ICMP_ULT: return "ult";
3527 case ICmpInst::ICMP_ULE: return "ule";
3531 void ICmpInst::anchor() {}
3533 ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
3535 default: llvm_unreachable("Unknown icmp predicate!");
3536 case ICMP_EQ: case ICMP_NE:
3537 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
3539 case ICMP_UGT: return ICMP_SGT;
3540 case ICMP_ULT: return ICMP_SLT;
3541 case ICMP_UGE: return ICMP_SGE;
3542 case ICMP_ULE: return ICMP_SLE;
3546 ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
3548 default: llvm_unreachable("Unknown icmp predicate!");
3549 case ICMP_EQ: case ICMP_NE:
3550 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
3552 case ICMP_SGT: return ICMP_UGT;
3553 case ICMP_SLT: return ICMP_ULT;
3554 case ICMP_SGE: return ICMP_UGE;
3555 case ICMP_SLE: return ICMP_ULE;
3559 CmpInst::Predicate CmpInst::getSwappedPredicate(Predicate pred) {
3561 default: llvm_unreachable("Unknown cmp predicate!");
3562 case ICMP_EQ: case ICMP_NE:
3564 case ICMP_SGT: return ICMP_SLT;
3565 case ICMP_SLT: return ICMP_SGT;
3566 case ICMP_SGE: return ICMP_SLE;
3567 case ICMP_SLE: return ICMP_SGE;
3568 case ICMP_UGT: return ICMP_ULT;
3569 case ICMP_ULT: return ICMP_UGT;
3570 case ICMP_UGE: return ICMP_ULE;
3571 case ICMP_ULE: return ICMP_UGE;
3573 case FCMP_FALSE: case FCMP_TRUE:
3574 case FCMP_OEQ: case FCMP_ONE:
3575 case FCMP_UEQ: case FCMP_UNE:
3576 case FCMP_ORD: case FCMP_UNO:
3578 case FCMP_OGT: return FCMP_OLT;
3579 case FCMP_OLT: return FCMP_OGT;
3580 case FCMP_OGE: return FCMP_OLE;
3581 case FCMP_OLE: return FCMP_OGE;
3582 case FCMP_UGT: return FCMP_ULT;
3583 case FCMP_ULT: return FCMP_UGT;
3584 case FCMP_UGE: return FCMP_ULE;
3585 case FCMP_ULE: return FCMP_UGE;
3589 CmpInst::Predicate CmpInst::getSignedPredicate(Predicate pred) {
3590 assert(CmpInst::isUnsigned(pred) && "Call only with signed predicates!");
3594 llvm_unreachable("Unknown predicate!");
3595 case CmpInst::ICMP_ULT:
3596 return CmpInst::ICMP_SLT;
3597 case CmpInst::ICMP_ULE:
3598 return CmpInst::ICMP_SLE;
3599 case CmpInst::ICMP_UGT:
3600 return CmpInst::ICMP_SGT;
3601 case CmpInst::ICMP_UGE:
3602 return CmpInst::ICMP_SGE;
3606 bool CmpInst::isUnsigned(Predicate predicate) {
3607 switch (predicate) {
3608 default: return false;
3609 case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
3610 case ICmpInst::ICMP_UGE: return true;
3614 bool CmpInst::isSigned(Predicate predicate) {
3615 switch (predicate) {
3616 default: return false;
3617 case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
3618 case ICmpInst::ICMP_SGE: return true;
3622 bool CmpInst::isOrdered(Predicate predicate) {
3623 switch (predicate) {
3624 default: return false;
3625 case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
3626 case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
3627 case FCmpInst::FCMP_ORD: return true;
3631 bool CmpInst::isUnordered(Predicate predicate) {
3632 switch (predicate) {
3633 default: return false;
3634 case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
3635 case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
3636 case FCmpInst::FCMP_UNO: return true;
3640 bool CmpInst::isTrueWhenEqual(Predicate predicate) {
3642 default: return false;
3643 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
3644 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
3648 bool CmpInst::isFalseWhenEqual(Predicate predicate) {
3650 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
3651 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
3652 default: return false;
3656 bool CmpInst::isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2) {
3657 // If the predicates match, then we know the first condition implies the
3666 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
3667 return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
3669 case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
3670 return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
3671 case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
3672 return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
3673 case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
3674 return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
3675 case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
3676 return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
3681 bool CmpInst::isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2) {
3682 return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
3685 //===----------------------------------------------------------------------===//
3686 // SwitchInst Implementation
3687 //===----------------------------------------------------------------------===//
3689 void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
3690 assert(Value && Default && NumReserved);
3691 ReservedSpace = NumReserved;
3692 setNumHungOffUseOperands(2);
3693 allocHungoffUses(ReservedSpace);
3699 /// SwitchInst ctor - Create a new switch instruction, specifying a value to
3700 /// switch on and a default destination. The number of additional cases can
3701 /// be specified here to make memory allocation more efficient. This
3702 /// constructor can also autoinsert before another instruction.
3703 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3704 Instruction *InsertBefore)
3705 : TerminatorInst(Type::getVoidTy(Value->getContext()), Instruction::Switch,
3706 nullptr, 0, InsertBefore) {
3707 init(Value, Default, 2+NumCases*2);
3710 /// SwitchInst ctor - Create a new switch instruction, specifying a value to
3711 /// switch on and a default destination. The number of additional cases can
3712 /// be specified here to make memory allocation more efficient. This
3713 /// constructor also autoinserts at the end of the specified BasicBlock.
3714 SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3715 BasicBlock *InsertAtEnd)
3716 : TerminatorInst(Type::getVoidTy(Value->getContext()), Instruction::Switch,
3717 nullptr, 0, InsertAtEnd) {
3718 init(Value, Default, 2+NumCases*2);
3721 SwitchInst::SwitchInst(const SwitchInst &SI)
3722 : TerminatorInst(SI.getType(), Instruction::Switch, nullptr, 0) {
3723 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
3724 setNumHungOffUseOperands(SI.getNumOperands());
3725 Use *OL = getOperandList();
3726 const Use *InOL = SI.getOperandList();
3727 for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
3729 OL[i+1] = InOL[i+1];
3731 SubclassOptionalData = SI.SubclassOptionalData;
3735 /// addCase - Add an entry to the switch instruction...
3737 void SwitchInst::addCase(ConstantInt *OnVal, BasicBlock *Dest) {
3738 unsigned NewCaseIdx = getNumCases();
3739 unsigned OpNo = getNumOperands();
3740 if (OpNo+2 > ReservedSpace)
3741 growOperands(); // Get more space!
3742 // Initialize some new operands.
3743 assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
3744 setNumHungOffUseOperands(OpNo+2);
3745 CaseHandle Case(this, NewCaseIdx);
3746 Case.setValue(OnVal);
3747 Case.setSuccessor(Dest);
3750 /// removeCase - This method removes the specified case and its successor
3751 /// from the switch instruction.
3752 SwitchInst::CaseIt SwitchInst::removeCase(CaseIt I) {
3753 unsigned idx = I->getCaseIndex();
3755 assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
3757 unsigned NumOps = getNumOperands();
3758 Use *OL = getOperandList();
3760 // Overwrite this case with the end of the list.
3761 if (2 + (idx + 1) * 2 != NumOps) {
3762 OL[2 + idx * 2] = OL[NumOps - 2];
3763 OL[2 + idx * 2 + 1] = OL[NumOps - 1];
3766 // Nuke the last value.
3767 OL[NumOps-2].set(nullptr);
3768 OL[NumOps-2+1].set(nullptr);
3769 setNumHungOffUseOperands(NumOps-2);
3771 return CaseIt(this, idx);
3774 /// growOperands - grow operands - This grows the operand list in response
3775 /// to a push_back style of operation. This grows the number of ops by 3 times.
3777 void SwitchInst::growOperands() {
3778 unsigned e = getNumOperands();
3779 unsigned NumOps = e*3;
3781 ReservedSpace = NumOps;
3782 growHungoffUses(ReservedSpace);
3786 BasicBlock *SwitchInst::getSuccessorV(unsigned idx) const {
3787 return getSuccessor(idx);
3790 unsigned SwitchInst::getNumSuccessorsV() const {
3791 return getNumSuccessors();
3794 void SwitchInst::setSuccessorV(unsigned idx, BasicBlock *B) {
3795 setSuccessor(idx, B);
3798 //===----------------------------------------------------------------------===//
3799 // IndirectBrInst Implementation
3800 //===----------------------------------------------------------------------===//
3802 void IndirectBrInst::init(Value *Address, unsigned NumDests) {
3803 assert(Address && Address->getType()->isPointerTy() &&
3804 "Address of indirectbr must be a pointer");
3805 ReservedSpace = 1+NumDests;
3806 setNumHungOffUseOperands(1);
3807 allocHungoffUses(ReservedSpace);
3813 /// growOperands - grow operands - This grows the operand list in response
3814 /// to a push_back style of operation. This grows the number of ops by 2 times.
3816 void IndirectBrInst::growOperands() {
3817 unsigned e = getNumOperands();
3818 unsigned NumOps = e*2;
3820 ReservedSpace = NumOps;
3821 growHungoffUses(ReservedSpace);
3824 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
3825 Instruction *InsertBefore)
3826 : TerminatorInst(Type::getVoidTy(Address->getContext()),Instruction::IndirectBr,
3827 nullptr, 0, InsertBefore) {
3828 init(Address, NumCases);
3831 IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
3832 BasicBlock *InsertAtEnd)
3833 : TerminatorInst(Type::getVoidTy(Address->getContext()),Instruction::IndirectBr,
3834 nullptr, 0, InsertAtEnd) {
3835 init(Address, NumCases);
3838 IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
3839 : TerminatorInst(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
3840 nullptr, IBI.getNumOperands()) {
3841 allocHungoffUses(IBI.getNumOperands());
3842 Use *OL = getOperandList();
3843 const Use *InOL = IBI.getOperandList();
3844 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
3846 SubclassOptionalData = IBI.SubclassOptionalData;
3849 /// addDestination - Add a destination.
3851 void IndirectBrInst::addDestination(BasicBlock *DestBB) {
3852 unsigned OpNo = getNumOperands();
3853 if (OpNo+1 > ReservedSpace)
3854 growOperands(); // Get more space!
3855 // Initialize some new operands.
3856 assert(OpNo < ReservedSpace && "Growing didn't work!");
3857 setNumHungOffUseOperands(OpNo+1);
3858 getOperandList()[OpNo] = DestBB;
3861 /// removeDestination - This method removes the specified successor from the
3862 /// indirectbr instruction.
3863 void IndirectBrInst::removeDestination(unsigned idx) {
3864 assert(idx < getNumOperands()-1 && "Successor index out of range!");
3866 unsigned NumOps = getNumOperands();
3867 Use *OL = getOperandList();
3869 // Replace this value with the last one.
3870 OL[idx+1] = OL[NumOps-1];
3872 // Nuke the last value.
3873 OL[NumOps-1].set(nullptr);
3874 setNumHungOffUseOperands(NumOps-1);
3877 BasicBlock *IndirectBrInst::getSuccessorV(unsigned idx) const {
3878 return getSuccessor(idx);
3881 unsigned IndirectBrInst::getNumSuccessorsV() const {
3882 return getNumSuccessors();
3885 void IndirectBrInst::setSuccessorV(unsigned idx, BasicBlock *B) {
3886 setSuccessor(idx, B);
3889 //===----------------------------------------------------------------------===//
3890 // cloneImpl() implementations
3891 //===----------------------------------------------------------------------===//
3893 // Define these methods here so vtables don't get emitted into every translation
3894 // unit that uses these classes.
3896 GetElementPtrInst *GetElementPtrInst::cloneImpl() const {
3897 return new (getNumOperands()) GetElementPtrInst(*this);
3900 BinaryOperator *BinaryOperator::cloneImpl() const {
3901 return Create(getOpcode(), Op<0>(), Op<1>());
3904 FCmpInst *FCmpInst::cloneImpl() const {
3905 return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
3908 ICmpInst *ICmpInst::cloneImpl() const {
3909 return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
3912 ExtractValueInst *ExtractValueInst::cloneImpl() const {
3913 return new ExtractValueInst(*this);
3916 InsertValueInst *InsertValueInst::cloneImpl() const {
3917 return new InsertValueInst(*this);
3920 AllocaInst *AllocaInst::cloneImpl() const {
3921 AllocaInst *Result = new AllocaInst(getAllocatedType(),
3922 getType()->getAddressSpace(),
3923 (Value *)getOperand(0), getAlignment());
3924 Result->setUsedWithInAlloca(isUsedWithInAlloca());
3925 Result->setSwiftError(isSwiftError());
3929 LoadInst *LoadInst::cloneImpl() const {
3930 return new LoadInst(getOperand(0), Twine(), isVolatile(),
3931 getAlignment(), getOrdering(), getSynchScope());
3934 StoreInst *StoreInst::cloneImpl() const {
3935 return new StoreInst(getOperand(0), getOperand(1), isVolatile(),
3936 getAlignment(), getOrdering(), getSynchScope());
3940 AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
3941 AtomicCmpXchgInst *Result =
3942 new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
3943 getSuccessOrdering(), getFailureOrdering(),
3945 Result->setVolatile(isVolatile());
3946 Result->setWeak(isWeak());
3950 AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
3951 AtomicRMWInst *Result =
3952 new AtomicRMWInst(getOperation(),getOperand(0), getOperand(1),
3953 getOrdering(), getSynchScope());
3954 Result->setVolatile(isVolatile());
3958 FenceInst *FenceInst::cloneImpl() const {
3959 return new FenceInst(getContext(), getOrdering(), getSynchScope());
3962 TruncInst *TruncInst::cloneImpl() const {
3963 return new TruncInst(getOperand(0), getType());
3966 ZExtInst *ZExtInst::cloneImpl() const {
3967 return new ZExtInst(getOperand(0), getType());
3970 SExtInst *SExtInst::cloneImpl() const {
3971 return new SExtInst(getOperand(0), getType());
3974 FPTruncInst *FPTruncInst::cloneImpl() const {
3975 return new FPTruncInst(getOperand(0), getType());
3978 FPExtInst *FPExtInst::cloneImpl() const {
3979 return new FPExtInst(getOperand(0), getType());
3982 UIToFPInst *UIToFPInst::cloneImpl() const {
3983 return new UIToFPInst(getOperand(0), getType());
3986 SIToFPInst *SIToFPInst::cloneImpl() const {
3987 return new SIToFPInst(getOperand(0), getType());
3990 FPToUIInst *FPToUIInst::cloneImpl() const {
3991 return new FPToUIInst(getOperand(0), getType());
3994 FPToSIInst *FPToSIInst::cloneImpl() const {
3995 return new FPToSIInst(getOperand(0), getType());
3998 PtrToIntInst *PtrToIntInst::cloneImpl() const {
3999 return new PtrToIntInst(getOperand(0), getType());
4002 IntToPtrInst *IntToPtrInst::cloneImpl() const {
4003 return new IntToPtrInst(getOperand(0), getType());
4006 BitCastInst *BitCastInst::cloneImpl() const {
4007 return new BitCastInst(getOperand(0), getType());
4010 AddrSpaceCastInst *AddrSpaceCastInst::cloneImpl() const {
4011 return new AddrSpaceCastInst(getOperand(0), getType());
4014 CallInst *CallInst::cloneImpl() const {
4015 if (hasOperandBundles()) {
4016 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
4017 return new(getNumOperands(), DescriptorBytes) CallInst(*this);
4019 return new(getNumOperands()) CallInst(*this);
4022 SelectInst *SelectInst::cloneImpl() const {
4023 return SelectInst::Create(getOperand(0), getOperand(1), getOperand(2));
4026 VAArgInst *VAArgInst::cloneImpl() const {
4027 return new VAArgInst(getOperand(0), getType());
4030 ExtractElementInst *ExtractElementInst::cloneImpl() const {
4031 return ExtractElementInst::Create(getOperand(0), getOperand(1));
4034 InsertElementInst *InsertElementInst::cloneImpl() const {
4035 return InsertElementInst::Create(getOperand(0), getOperand(1), getOperand(2));
4038 ShuffleVectorInst *ShuffleVectorInst::cloneImpl() const {
4039 return new ShuffleVectorInst(getOperand(0), getOperand(1), getOperand(2));
4042 PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }
4044 LandingPadInst *LandingPadInst::cloneImpl() const {
4045 return new LandingPadInst(*this);
4048 ReturnInst *ReturnInst::cloneImpl() const {
4049 return new(getNumOperands()) ReturnInst(*this);
4052 BranchInst *BranchInst::cloneImpl() const {
4053 return new(getNumOperands()) BranchInst(*this);
4056 SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
4058 IndirectBrInst *IndirectBrInst::cloneImpl() const {
4059 return new IndirectBrInst(*this);
4062 InvokeInst *InvokeInst::cloneImpl() const {
4063 if (hasOperandBundles()) {
4064 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
4065 return new(getNumOperands(), DescriptorBytes) InvokeInst(*this);
4067 return new(getNumOperands()) InvokeInst(*this);
4070 ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }
4072 CleanupReturnInst *CleanupReturnInst::cloneImpl() const {
4073 return new (getNumOperands()) CleanupReturnInst(*this);
4076 CatchReturnInst *CatchReturnInst::cloneImpl() const {
4077 return new (getNumOperands()) CatchReturnInst(*this);
4080 CatchSwitchInst *CatchSwitchInst::cloneImpl() const {
4081 return new CatchSwitchInst(*this);
4084 FuncletPadInst *FuncletPadInst::cloneImpl() const {
4085 return new (getNumOperands()) FuncletPadInst(*this);
4088 UnreachableInst *UnreachableInst::cloneImpl() const {
4089 LLVMContext &Context = getContext();
4090 return new UnreachableInst(Context);