1 //===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file implements the IRBuilder class, which is used as a convenient way
10 // to create LLVM instructions with a consistent and simplified interface.
12 //===----------------------------------------------------------------------===//
#include "llvm/IR/IRBuilder.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;
39 /// CreateGlobalString - Make a new global variable with an initializer that
40 /// has array of i8 type filled in with the nul terminated string value
41 /// specified. If Name is specified, it is the name of the global variable
43 GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
45 unsigned AddressSpace) {
46 Constant *StrConstant = ConstantDataArray::getString(Context, Str);
47 Module &M = *BB->getParent()->getParent();
48 auto *GV = new GlobalVariable(M, StrConstant->getType(), true,
49 GlobalValue::PrivateLinkage, StrConstant, Name,
50 nullptr, GlobalVariable::NotThreadLocal,
52 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
53 GV->setAlignment(Align(1));
57 Type *IRBuilderBase::getCurrentFunctionReturnType() const {
58 assert(BB && BB->getParent() && "No current function!");
59 return BB->getParent()->getReturnType();
62 Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) {
63 auto *PT = cast<PointerType>(Ptr->getType());
64 if (PT->getElementType()->isIntegerTy(8))
67 // Otherwise, we need to insert a bitcast.
68 return CreateBitCast(Ptr, getInt8PtrTy(PT->getAddressSpace()));
71 static CallInst *createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
72 IRBuilderBase *Builder,
73 const Twine &Name = "",
74 Instruction *FMFSource = nullptr) {
75 CallInst *CI = Builder->CreateCall(Callee, Ops, Name);
77 CI->copyFastMathFlags(FMFSource);
81 CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
82 MaybeAlign Align, bool isVolatile,
83 MDNode *TBAATag, MDNode *ScopeTag,
85 Ptr = getCastedInt8PtrValue(Ptr);
86 Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
87 Type *Tys[] = { Ptr->getType(), Size->getType() };
88 Module *M = BB->getParent()->getParent();
89 Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
91 CallInst *CI = createCallHelper(TheFn, Ops, this);
94 cast<MemSetInst>(CI)->setDestAlignment(Align->value());
96 // Set the TBAA info if present.
98 CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
101 CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
104 CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
109 CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
110 Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize,
111 MDNode *TBAATag, MDNode *ScopeTag, MDNode *NoAliasTag) {
113 Ptr = getCastedInt8PtrValue(Ptr);
114 Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
115 Type *Tys[] = {Ptr->getType(), Size->getType()};
116 Module *M = BB->getParent()->getParent();
117 Function *TheFn = Intrinsic::getDeclaration(
118 M, Intrinsic::memset_element_unordered_atomic, Tys);
120 CallInst *CI = createCallHelper(TheFn, Ops, this);
122 cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);
124 // Set the TBAA info if present.
126 CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
129 CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
132 CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
137 CallInst *IRBuilderBase::CreateMemCpy(Value *Dst, MaybeAlign DstAlign,
138 Value *Src, MaybeAlign SrcAlign,
139 Value *Size, bool isVolatile,
140 MDNode *TBAATag, MDNode *TBAAStructTag,
141 MDNode *ScopeTag, MDNode *NoAliasTag) {
142 Dst = getCastedInt8PtrValue(Dst);
143 Src = getCastedInt8PtrValue(Src);
145 Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
146 Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
147 Module *M = BB->getParent()->getParent();
148 Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy, Tys);
150 CallInst *CI = createCallHelper(TheFn, Ops, this);
152 auto* MCI = cast<MemCpyInst>(CI);
154 MCI->setDestAlignment(*DstAlign);
156 MCI->setSourceAlignment(*SrcAlign);
158 // Set the TBAA info if present.
160 CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
162 // Set the TBAA Struct info if present.
164 CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
167 CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
170 CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
175 CallInst *IRBuilderBase::CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign,
176 Value *Src, MaybeAlign SrcAlign,
178 Dst = getCastedInt8PtrValue(Dst);
179 Src = getCastedInt8PtrValue(Src);
180 Value *IsVolatile = getInt1(false);
182 Value *Ops[] = {Dst, Src, Size, IsVolatile};
183 Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
184 Function *F = BB->getParent();
185 Module *M = F->getParent();
186 Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy_inline, Tys);
188 CallInst *CI = createCallHelper(TheFn, Ops, this);
190 auto *MCI = cast<MemCpyInlineInst>(CI);
192 MCI->setDestAlignment(*DstAlign);
194 MCI->setSourceAlignment(*SrcAlign);
199 CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
200 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
201 uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
202 MDNode *ScopeTag, MDNode *NoAliasTag) {
203 assert(DstAlign >= ElementSize &&
204 "Pointer alignment must be at least element size");
205 assert(SrcAlign >= ElementSize &&
206 "Pointer alignment must be at least element size");
207 Dst = getCastedInt8PtrValue(Dst);
208 Src = getCastedInt8PtrValue(Src);
210 Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
211 Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
212 Module *M = BB->getParent()->getParent();
213 Function *TheFn = Intrinsic::getDeclaration(
214 M, Intrinsic::memcpy_element_unordered_atomic, Tys);
216 CallInst *CI = createCallHelper(TheFn, Ops, this);
218 // Set the alignment of the pointer args.
219 auto *AMCI = cast<AtomicMemCpyInst>(CI);
220 AMCI->setDestAlignment(DstAlign);
221 AMCI->setSourceAlignment(SrcAlign);
223 // Set the TBAA info if present.
225 CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
227 // Set the TBAA Struct info if present.
229 CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
232 CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
235 CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
240 CallInst *IRBuilderBase::CreateMemMove(Value *Dst, MaybeAlign DstAlign,
241 Value *Src, MaybeAlign SrcAlign,
242 Value *Size, bool isVolatile,
243 MDNode *TBAATag, MDNode *ScopeTag,
244 MDNode *NoAliasTag) {
245 Dst = getCastedInt8PtrValue(Dst);
246 Src = getCastedInt8PtrValue(Src);
248 Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
249 Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
250 Module *M = BB->getParent()->getParent();
251 Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memmove, Tys);
253 CallInst *CI = createCallHelper(TheFn, Ops, this);
255 auto *MMI = cast<MemMoveInst>(CI);
257 MMI->setDestAlignment(*DstAlign);
259 MMI->setSourceAlignment(*SrcAlign);
261 // Set the TBAA info if present.
263 CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
266 CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
269 CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
274 CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
275 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
276 uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
277 MDNode *ScopeTag, MDNode *NoAliasTag) {
278 assert(DstAlign >= ElementSize &&
279 "Pointer alignment must be at least element size");
280 assert(SrcAlign >= ElementSize &&
281 "Pointer alignment must be at least element size");
282 Dst = getCastedInt8PtrValue(Dst);
283 Src = getCastedInt8PtrValue(Src);
285 Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
286 Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
287 Module *M = BB->getParent()->getParent();
288 Function *TheFn = Intrinsic::getDeclaration(
289 M, Intrinsic::memmove_element_unordered_atomic, Tys);
291 CallInst *CI = createCallHelper(TheFn, Ops, this);
293 // Set the alignment of the pointer args.
294 CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign));
295 CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), SrcAlign));
297 // Set the TBAA info if present.
299 CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
301 // Set the TBAA Struct info if present.
303 CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
306 CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
309 CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
314 static CallInst *getReductionIntrinsic(IRBuilderBase *Builder, Intrinsic::ID ID,
316 Module *M = Builder->GetInsertBlock()->getParent()->getParent();
317 Value *Ops[] = {Src};
318 Type *Tys[] = { Src->getType() };
319 auto Decl = Intrinsic::getDeclaration(M, ID, Tys);
320 return createCallHelper(Decl, Ops, Builder);
323 CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
324 Module *M = GetInsertBlock()->getParent()->getParent();
325 Value *Ops[] = {Acc, Src};
326 Type *Tys[] = {Acc->getType(), Src->getType()};
327 auto Decl = Intrinsic::getDeclaration(
328 M, Intrinsic::experimental_vector_reduce_v2_fadd, Tys);
329 return createCallHelper(Decl, Ops, this);
332 CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
333 Module *M = GetInsertBlock()->getParent()->getParent();
334 Value *Ops[] = {Acc, Src};
335 Type *Tys[] = {Acc->getType(), Src->getType()};
336 auto Decl = Intrinsic::getDeclaration(
337 M, Intrinsic::experimental_vector_reduce_v2_fmul, Tys);
338 return createCallHelper(Decl, Ops, this);
341 CallInst *IRBuilderBase::CreateAddReduce(Value *Src) {
342 return getReductionIntrinsic(this, Intrinsic::experimental_vector_reduce_add,
346 CallInst *IRBuilderBase::CreateMulReduce(Value *Src) {
347 return getReductionIntrinsic(this, Intrinsic::experimental_vector_reduce_mul,
351 CallInst *IRBuilderBase::CreateAndReduce(Value *Src) {
352 return getReductionIntrinsic(this, Intrinsic::experimental_vector_reduce_and,
356 CallInst *IRBuilderBase::CreateOrReduce(Value *Src) {
357 return getReductionIntrinsic(this, Intrinsic::experimental_vector_reduce_or,
361 CallInst *IRBuilderBase::CreateXorReduce(Value *Src) {
362 return getReductionIntrinsic(this, Intrinsic::experimental_vector_reduce_xor,
366 CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) {
367 auto ID = IsSigned ? Intrinsic::experimental_vector_reduce_smax
368 : Intrinsic::experimental_vector_reduce_umax;
369 return getReductionIntrinsic(this, ID, Src);
372 CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) {
373 auto ID = IsSigned ? Intrinsic::experimental_vector_reduce_smin
374 : Intrinsic::experimental_vector_reduce_umin;
375 return getReductionIntrinsic(this, ID, Src);
378 CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src, bool NoNaN) {
379 auto Rdx = getReductionIntrinsic(
380 this, Intrinsic::experimental_vector_reduce_fmax, Src);
384 Rdx->setFastMathFlags(FMF);
389 CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src, bool NoNaN) {
390 auto Rdx = getReductionIntrinsic(
391 this, Intrinsic::experimental_vector_reduce_fmin, Src);
395 Rdx->setFastMathFlags(FMF);
400 CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
401 assert(isa<PointerType>(Ptr->getType()) &&
402 "lifetime.start only applies to pointers.");
403 Ptr = getCastedInt8PtrValue(Ptr);
407 assert(Size->getType() == getInt64Ty() &&
408 "lifetime.start requires the size to be an i64");
409 Value *Ops[] = { Size, Ptr };
410 Module *M = BB->getParent()->getParent();
412 Intrinsic::getDeclaration(M, Intrinsic::lifetime_start, {Ptr->getType()});
413 return createCallHelper(TheFn, Ops, this);
416 CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
417 assert(isa<PointerType>(Ptr->getType()) &&
418 "lifetime.end only applies to pointers.");
419 Ptr = getCastedInt8PtrValue(Ptr);
423 assert(Size->getType() == getInt64Ty() &&
424 "lifetime.end requires the size to be an i64");
425 Value *Ops[] = { Size, Ptr };
426 Module *M = BB->getParent()->getParent();
428 Intrinsic::getDeclaration(M, Intrinsic::lifetime_end, {Ptr->getType()});
429 return createCallHelper(TheFn, Ops, this);
432 CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {
434 assert(isa<PointerType>(Ptr->getType()) &&
435 "invariant.start only applies to pointers.");
436 Ptr = getCastedInt8PtrValue(Ptr);
440 assert(Size->getType() == getInt64Ty() &&
441 "invariant.start requires the size to be an i64");
443 Value *Ops[] = {Size, Ptr};
444 // Fill in the single overloaded type: memory object type.
445 Type *ObjectPtr[1] = {Ptr->getType()};
446 Module *M = BB->getParent()->getParent();
448 Intrinsic::getDeclaration(M, Intrinsic::invariant_start, ObjectPtr);
449 return createCallHelper(TheFn, Ops, this);
452 CallInst *IRBuilderBase::CreateAssumption(Value *Cond) {
453 assert(Cond->getType() == getInt1Ty() &&
454 "an assumption condition must be of type i1");
456 Value *Ops[] = { Cond };
457 Module *M = BB->getParent()->getParent();
458 Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
459 return createCallHelper(FnAssume, Ops, this);
462 /// Create a call to a Masked Load intrinsic.
463 /// \p Ptr - base pointer for the load
464 /// \p Alignment - alignment of the source location
465 /// \p Mask - vector of booleans which indicates what vector lanes should
466 /// be accessed in memory
467 /// \p PassThru - pass-through value that is used to fill the masked-off lanes
469 /// \p Name - name of the result variable
470 CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, Align Alignment,
471 Value *Mask, Value *PassThru,
473 auto *PtrTy = cast<PointerType>(Ptr->getType());
474 Type *DataTy = PtrTy->getElementType();
475 assert(DataTy->isVectorTy() && "Ptr should point to a vector");
476 assert(Mask && "Mask should not be all-ones (null)");
478 PassThru = UndefValue::get(DataTy);
479 Type *OverloadedTypes[] = { DataTy, PtrTy };
480 Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
481 return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
482 OverloadedTypes, Name);
485 /// Create a call to a Masked Store intrinsic.
486 /// \p Val - data to be stored,
487 /// \p Ptr - base pointer for the store
488 /// \p Alignment - alignment of the destination location
489 /// \p Mask - vector of booleans which indicates what vector lanes should
490 /// be accessed in memory
491 CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
492 Align Alignment, Value *Mask) {
493 auto *PtrTy = cast<PointerType>(Ptr->getType());
494 Type *DataTy = PtrTy->getElementType();
495 assert(DataTy->isVectorTy() && "Ptr should point to a vector");
496 assert(Mask && "Mask should not be all-ones (null)");
497 Type *OverloadedTypes[] = { DataTy, PtrTy };
498 Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
499 return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
502 /// Create a call to a Masked intrinsic, with given intrinsic Id,
503 /// an array of operands - Ops, and an array of overloaded types -
505 CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
506 ArrayRef<Value *> Ops,
507 ArrayRef<Type *> OverloadedTypes,
509 Module *M = BB->getParent()->getParent();
510 Function *TheFn = Intrinsic::getDeclaration(M, Id, OverloadedTypes);
511 return createCallHelper(TheFn, Ops, this, Name);
514 /// Create a call to a Masked Gather intrinsic.
515 /// \p Ptrs - vector of pointers for loading
516 /// \p Align - alignment for one element
517 /// \p Mask - vector of booleans which indicates what vector lanes should
518 /// be accessed in memory
519 /// \p PassThru - pass-through value that is used to fill the masked-off lanes
521 /// \p Name - name of the result variable
522 CallInst *IRBuilderBase::CreateMaskedGather(Value *Ptrs, Align Alignment,
523 Value *Mask, Value *PassThru,
525 auto PtrsTy = cast<VectorType>(Ptrs->getType());
526 auto PtrTy = cast<PointerType>(PtrsTy->getElementType());
527 unsigned NumElts = PtrsTy->getNumElements();
528 auto *DataTy = FixedVectorType::get(PtrTy->getElementType(), NumElts);
531 Mask = Constant::getAllOnesValue(
532 FixedVectorType::get(Type::getInt1Ty(Context), NumElts));
535 PassThru = UndefValue::get(DataTy);
537 Type *OverloadedTypes[] = {DataTy, PtrsTy};
538 Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};
540 // We specify only one type when we create this intrinsic. Types of other
541 // arguments are derived from this type.
542 return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
546 /// Create a call to a Masked Scatter intrinsic.
547 /// \p Data - data to be stored,
548 /// \p Ptrs - the vector of pointers, where the \p Data elements should be
550 /// \p Align - alignment for one element
551 /// \p Mask - vector of booleans which indicates what vector lanes should
552 /// be accessed in memory
553 CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
554 Align Alignment, Value *Mask) {
555 auto PtrsTy = cast<VectorType>(Ptrs->getType());
556 auto DataTy = cast<VectorType>(Data->getType());
557 unsigned NumElts = PtrsTy->getNumElements();
560 auto PtrTy = cast<PointerType>(PtrsTy->getElementType());
561 assert(NumElts == DataTy->getNumElements() &&
562 PtrTy->getElementType() == DataTy->getElementType() &&
563 "Incompatible pointer and data types");
567 Mask = Constant::getAllOnesValue(
568 FixedVectorType::get(Type::getInt1Ty(Context), NumElts));
570 Type *OverloadedTypes[] = {DataTy, PtrsTy};
571 Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};
573 // We specify only one type when we create this intrinsic. Types of other
574 // arguments are derived from this type.
575 return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
578 template <typename T0>
579 static std::vector<Value *>
580 getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
581 Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) {
582 std::vector<Value *> Args;
583 Args.push_back(B.getInt64(ID));
584 Args.push_back(B.getInt32(NumPatchBytes));
585 Args.push_back(ActualCallee);
586 Args.push_back(B.getInt32(CallArgs.size()));
587 Args.push_back(B.getInt32(Flags));
588 Args.insert(Args.end(), CallArgs.begin(), CallArgs.end());
589 // GC Transition and Deopt args are now always handled via operand bundle.
590 // They will be removed from the signature of gc.statepoint shortly.
591 Args.push_back(B.getInt32(0));
592 Args.push_back(B.getInt32(0));
593 // GC args are now encoded in the gc-live operand bundle
597 template<typename T1, typename T2, typename T3>
598 static std::vector<OperandBundleDef>
599 getStatepointBundles(Optional<ArrayRef<T1>> TransitionArgs,
600 Optional<ArrayRef<T2>> DeoptArgs,
601 ArrayRef<T3> GCArgs) {
602 std::vector<OperandBundleDef> Rval;
604 SmallVector<Value*, 16> DeoptValues;
605 DeoptValues.insert(DeoptValues.end(), DeoptArgs->begin(), DeoptArgs->end());
606 Rval.emplace_back("deopt", DeoptValues);
608 if (TransitionArgs) {
609 SmallVector<Value*, 16> TransitionValues;
610 TransitionValues.insert(TransitionValues.end(),
611 TransitionArgs->begin(), TransitionArgs->end());
612 Rval.emplace_back("gc-transition", TransitionValues);
615 SmallVector<Value*, 16> LiveValues;
616 LiveValues.insert(LiveValues.end(), GCArgs.begin(), GCArgs.end());
617 Rval.emplace_back("gc-live", LiveValues);
622 template <typename T0, typename T1, typename T2, typename T3>
623 static CallInst *CreateGCStatepointCallCommon(
624 IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
625 Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
626 Optional<ArrayRef<T1>> TransitionArgs,
627 Optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
629 // Extract out the type of the callee.
630 auto *FuncPtrType = cast<PointerType>(ActualCallee->getType());
631 assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
632 "actual callee must be a callable value");
634 Module *M = Builder->GetInsertBlock()->getParent()->getParent();
635 // Fill in the one generic type'd argument (the function is also vararg)
636 Type *ArgTypes[] = { FuncPtrType };
637 Function *FnStatepoint =
638 Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
641 std::vector<Value *> Args =
642 getStatepointArgs(*Builder, ID, NumPatchBytes, ActualCallee, Flags,
645 return Builder->CreateCall(FnStatepoint, Args,
646 getStatepointBundles(TransitionArgs, DeoptArgs,
651 CallInst *IRBuilderBase::CreateGCStatepointCall(
652 uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee,
653 ArrayRef<Value *> CallArgs, Optional<ArrayRef<Value *>> DeoptArgs,
654 ArrayRef<Value *> GCArgs, const Twine &Name) {
655 return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
656 this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
657 CallArgs, None /* No Transition Args */, DeoptArgs, GCArgs, Name);
660 CallInst *IRBuilderBase::CreateGCStatepointCall(
661 uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee, uint32_t Flags,
662 ArrayRef<Use> CallArgs, Optional<ArrayRef<Use>> TransitionArgs,
663 Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
665 return CreateGCStatepointCallCommon<Use, Use, Use, Value *>(
666 this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
667 DeoptArgs, GCArgs, Name);
670 CallInst *IRBuilderBase::CreateGCStatepointCall(
671 uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee,
672 ArrayRef<Use> CallArgs, Optional<ArrayRef<Value *>> DeoptArgs,
673 ArrayRef<Value *> GCArgs, const Twine &Name) {
674 return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
675 this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
676 CallArgs, None, DeoptArgs, GCArgs, Name);
679 template <typename T0, typename T1, typename T2, typename T3>
680 static InvokeInst *CreateGCStatepointInvokeCommon(
681 IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
682 Value *ActualInvokee, BasicBlock *NormalDest, BasicBlock *UnwindDest,
683 uint32_t Flags, ArrayRef<T0> InvokeArgs,
684 Optional<ArrayRef<T1>> TransitionArgs, Optional<ArrayRef<T2>> DeoptArgs,
685 ArrayRef<T3> GCArgs, const Twine &Name) {
686 // Extract out the type of the callee.
687 auto *FuncPtrType = cast<PointerType>(ActualInvokee->getType());
688 assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
689 "actual callee must be a callable value");
691 Module *M = Builder->GetInsertBlock()->getParent()->getParent();
692 // Fill in the one generic type'd argument (the function is also vararg)
693 Function *FnStatepoint = Intrinsic::getDeclaration(
694 M, Intrinsic::experimental_gc_statepoint, {FuncPtrType});
696 std::vector<Value *> Args =
697 getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee, Flags,
700 return Builder->CreateInvoke(FnStatepoint, NormalDest, UnwindDest, Args,
701 getStatepointBundles(TransitionArgs, DeoptArgs,
706 InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
707 uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
708 BasicBlock *NormalDest, BasicBlock *UnwindDest,
709 ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Value *>> DeoptArgs,
710 ArrayRef<Value *> GCArgs, const Twine &Name) {
711 return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
712 this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
713 uint32_t(StatepointFlags::None), InvokeArgs, None /* No Transition Args*/,
714 DeoptArgs, GCArgs, Name);
717 InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
718 uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
719 BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
720 ArrayRef<Use> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
721 Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs, const Twine &Name) {
722 return CreateGCStatepointInvokeCommon<Use, Use, Use, Value *>(
723 this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
724 InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
727 InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
728 uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
729 BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
730 Optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs, const Twine &Name) {
731 return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
732 this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
733 uint32_t(StatepointFlags::None), InvokeArgs, None, DeoptArgs, GCArgs,
737 CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
740 Intrinsic::ID ID = Intrinsic::experimental_gc_result;
741 Module *M = BB->getParent()->getParent();
742 Type *Types[] = {ResultType};
743 Function *FnGCResult = Intrinsic::getDeclaration(M, ID, Types);
745 Value *Args[] = {Statepoint};
746 return createCallHelper(FnGCResult, Args, this, Name);
749 CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
754 Module *M = BB->getParent()->getParent();
755 Type *Types[] = {ResultType};
756 Function *FnGCRelocate =
757 Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, Types);
759 Value *Args[] = {Statepoint,
760 getInt32(BaseOffset),
761 getInt32(DerivedOffset)};
762 return createCallHelper(FnGCRelocate, Args, this, Name);
765 CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
766 Instruction *FMFSource,
768 Module *M = BB->getModule();
769 Function *Fn = Intrinsic::getDeclaration(M, ID, {V->getType()});
770 return createCallHelper(Fn, {V}, this, Name, FMFSource);
773 CallInst *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
775 Instruction *FMFSource,
777 Module *M = BB->getModule();
778 Function *Fn = Intrinsic::getDeclaration(M, ID, { LHS->getType() });
779 return createCallHelper(Fn, {LHS, RHS}, this, Name, FMFSource);
782 CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
783 ArrayRef<Type *> Types,
784 ArrayRef<Value *> Args,
785 Instruction *FMFSource,
787 Module *M = BB->getModule();
788 Function *Fn = Intrinsic::getDeclaration(M, ID, Types);
789 return createCallHelper(Fn, Args, this, Name, FMFSource);
792 CallInst *IRBuilderBase::CreateConstrainedFPBinOp(
793 Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource,
794 const Twine &Name, MDNode *FPMathTag,
795 Optional<RoundingMode> Rounding,
796 Optional<fp::ExceptionBehavior> Except) {
797 Value *RoundingV = getConstrainedFPRounding(Rounding);
798 Value *ExceptV = getConstrainedFPExcept(Except);
800 FastMathFlags UseFMF = FMF;
802 UseFMF = FMFSource->getFastMathFlags();
804 CallInst *C = CreateIntrinsic(ID, {L->getType()},
805 {L, R, RoundingV, ExceptV}, nullptr, Name);
806 setConstrainedFPCallAttr(C);
807 setFPAttrs(C, FPMathTag, UseFMF);
811 Value *IRBuilderBase::CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
812 const Twine &Name, MDNode *FPMathTag) {
813 if (Instruction::isBinaryOp(Opc)) {
814 assert(Ops.size() == 2 && "Invalid number of operands!");
815 return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc),
816 Ops[0], Ops[1], Name, FPMathTag);
818 if (Instruction::isUnaryOp(Opc)) {
819 assert(Ops.size() == 1 && "Invalid number of operands!");
820 return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc),
821 Ops[0], Name, FPMathTag);
823 llvm_unreachable("Unexpected opcode!");
826 CallInst *IRBuilderBase::CreateConstrainedFPCast(
827 Intrinsic::ID ID, Value *V, Type *DestTy,
828 Instruction *FMFSource, const Twine &Name, MDNode *FPMathTag,
829 Optional<RoundingMode> Rounding,
830 Optional<fp::ExceptionBehavior> Except) {
831 Value *ExceptV = getConstrainedFPExcept(Except);
833 FastMathFlags UseFMF = FMF;
835 UseFMF = FMFSource->getFastMathFlags();
838 bool HasRoundingMD = false;
842 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
843 case Intrinsic::INTRINSIC: \
844 HasRoundingMD = ROUND_MODE; \
846 #include "llvm/IR/ConstrainedOps.def"
849 Value *RoundingV = getConstrainedFPRounding(Rounding);
850 C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
853 C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
856 setConstrainedFPCallAttr(C);
858 if (isa<FPMathOperator>(C))
859 setFPAttrs(C, FPMathTag, UseFMF);
863 Value *IRBuilderBase::CreateFCmpHelper(
864 CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name,
865 MDNode *FPMathTag, bool IsSignaling) {
866 if (IsFPConstrained) {
867 auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
868 : Intrinsic::experimental_constrained_fcmp;
869 return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
872 if (auto *LC = dyn_cast<Constant>(LHS))
873 if (auto *RC = dyn_cast<Constant>(RHS))
874 return Insert(Folder.CreateFCmp(P, LC, RC), Name);
875 return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
878 CallInst *IRBuilderBase::CreateConstrainedFPCmp(
879 Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
880 const Twine &Name, Optional<fp::ExceptionBehavior> Except) {
881 Value *PredicateV = getConstrainedFPPredicate(P);
882 Value *ExceptV = getConstrainedFPExcept(Except);
884 CallInst *C = CreateIntrinsic(ID, {L->getType()},
885 {L, R, PredicateV, ExceptV}, nullptr, Name);
886 setConstrainedFPCallAttr(C);
890 CallInst *IRBuilderBase::CreateConstrainedFPCall(
891 Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
892 Optional<RoundingMode> Rounding,
893 Optional<fp::ExceptionBehavior> Except) {
894 llvm::SmallVector<Value *, 6> UseArgs;
896 for (auto *OneArg : Args)
897 UseArgs.push_back(OneArg);
898 bool HasRoundingMD = false;
899 switch (Callee->getIntrinsicID()) {
902 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
903 case Intrinsic::INTRINSIC: \
904 HasRoundingMD = ROUND_MODE; \
906 #include "llvm/IR/ConstrainedOps.def"
909 UseArgs.push_back(getConstrainedFPRounding(Rounding));
910 UseArgs.push_back(getConstrainedFPExcept(Except));
912 CallInst *C = CreateCall(Callee, UseArgs, Name);
913 setConstrainedFPCallAttr(C);
917 Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
918 const Twine &Name, Instruction *MDFrom) {
919 if (auto *CC = dyn_cast<Constant>(C))
920 if (auto *TC = dyn_cast<Constant>(True))
921 if (auto *FC = dyn_cast<Constant>(False))
922 return Insert(Folder.CreateSelect(CC, TC, FC), Name);
924 SelectInst *Sel = SelectInst::Create(C, True, False);
926 MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
927 MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
928 Sel = addBranchMetadata(Sel, Prof, Unpred);
930 if (isa<FPMathOperator>(Sel))
931 setFPAttrs(Sel, nullptr /* MDNode* */, FMF);
932 return Insert(Sel, Name);
935 Value *IRBuilderBase::CreatePtrDiff(Value *LHS, Value *RHS,
937 assert(LHS->getType() == RHS->getType() &&
938 "Pointer subtraction operand types must match!");
939 auto *ArgType = cast<PointerType>(LHS->getType());
940 Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
941 Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
942 Value *Difference = CreateSub(LHS_int, RHS_int);
943 return CreateExactSDiv(Difference,
944 ConstantExpr::getSizeOf(ArgType->getElementType()),
948 Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) {
949 assert(isa<PointerType>(Ptr->getType()) &&
950 "launder.invariant.group only applies to pointers.");
951 // FIXME: we could potentially avoid casts to/from i8*.
952 auto *PtrType = Ptr->getType();
953 auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
954 if (PtrType != Int8PtrTy)
955 Ptr = CreateBitCast(Ptr, Int8PtrTy);
956 Module *M = BB->getParent()->getParent();
957 Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
958 M, Intrinsic::launder_invariant_group, {Int8PtrTy});
960 assert(FnLaunderInvariantGroup->getReturnType() == Int8PtrTy &&
961 FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
963 "LaunderInvariantGroup should take and return the same type");
965 CallInst *Fn = CreateCall(FnLaunderInvariantGroup, {Ptr});
967 if (PtrType != Int8PtrTy)
968 return CreateBitCast(Fn, PtrType);
972 Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) {
973 assert(isa<PointerType>(Ptr->getType()) &&
974 "strip.invariant.group only applies to pointers.");
976 // FIXME: we could potentially avoid casts to/from i8*.
977 auto *PtrType = Ptr->getType();
978 auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
979 if (PtrType != Int8PtrTy)
980 Ptr = CreateBitCast(Ptr, Int8PtrTy);
981 Module *M = BB->getParent()->getParent();
982 Function *FnStripInvariantGroup = Intrinsic::getDeclaration(
983 M, Intrinsic::strip_invariant_group, {Int8PtrTy});
985 assert(FnStripInvariantGroup->getReturnType() == Int8PtrTy &&
986 FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
988 "StripInvariantGroup should take and return the same type");
990 CallInst *Fn = CreateCall(FnStripInvariantGroup, {Ptr});
992 if (PtrType != Int8PtrTy)
993 return CreateBitCast(Fn, PtrType);
997 Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V,
999 assert(NumElts > 0 && "Cannot splat to an empty vector!");
1001 // First insert it into an undef vector so we can shuffle it.
1002 Type *I32Ty = getInt32Ty();
1003 Value *Undef = UndefValue::get(FixedVectorType::get(V->getType(), NumElts));
1004 V = CreateInsertElement(Undef, V, ConstantInt::get(I32Ty, 0),
1005 Name + ".splatinsert");
1007 // Shuffle the value across the desired number of elements.
1009 ConstantAggregateZero::get(FixedVectorType::get(I32Ty, NumElts));
1010 return CreateShuffleVector(V, Undef, Zeros, Name + ".splat");
1013 Value *IRBuilderBase::CreateExtractInteger(
1014 const DataLayout &DL, Value *From, IntegerType *ExtractedTy,
1015 uint64_t Offset, const Twine &Name) {
1016 auto *IntTy = cast<IntegerType>(From->getType());
1017 assert(DL.getTypeStoreSize(ExtractedTy) + Offset <=
1018 DL.getTypeStoreSize(IntTy) &&
1019 "Element extends past full value");
1020 uint64_t ShAmt = 8 * Offset;
1022 if (DL.isBigEndian())
1023 ShAmt = 8 * (DL.getTypeStoreSize(IntTy) -
1024 DL.getTypeStoreSize(ExtractedTy) - Offset);
1026 V = CreateLShr(V, ShAmt, Name + ".shift");
1028 assert(ExtractedTy->getBitWidth() <= IntTy->getBitWidth() &&
1029 "Cannot extract to a larger integer!");
1030 if (ExtractedTy != IntTy) {
1031 V = CreateTrunc(V, ExtractedTy, Name + ".trunc");
1036 Value *IRBuilderBase::CreatePreserveArrayAccessIndex(
1037 Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex,
1039 assert(isa<PointerType>(Base->getType()) &&
1040 "Invalid Base ptr type for preserve.array.access.index.");
1041 auto *BaseType = Base->getType();
1043 Value *LastIndexV = getInt32(LastIndex);
1044 Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
1045 SmallVector<Value *, 4> IdxList;
1046 for (unsigned I = 0; I < Dimension; ++I)
1047 IdxList.push_back(Zero);
1048 IdxList.push_back(LastIndexV);
1051 GetElementPtrInst::getGEPReturnType(ElTy, Base, IdxList);
1053 Module *M = BB->getParent()->getParent();
1054 Function *FnPreserveArrayAccessIndex = Intrinsic::getDeclaration(
1055 M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});
1057 Value *DimV = getInt32(Dimension);
1059 CreateCall(FnPreserveArrayAccessIndex, {Base, DimV, LastIndexV});
1061 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
1066 Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
1067 Value *Base, unsigned FieldIndex, MDNode *DbgInfo) {
1068 assert(isa<PointerType>(Base->getType()) &&
1069 "Invalid Base ptr type for preserve.union.access.index.");
1070 auto *BaseType = Base->getType();
1072 Module *M = BB->getParent()->getParent();
1073 Function *FnPreserveUnionAccessIndex = Intrinsic::getDeclaration(
1074 M, Intrinsic::preserve_union_access_index, {BaseType, BaseType});
1076 Value *DIIndex = getInt32(FieldIndex);
1078 CreateCall(FnPreserveUnionAccessIndex, {Base, DIIndex});
1080 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
1085 Value *IRBuilderBase::CreatePreserveStructAccessIndex(
1086 Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex,
1088 assert(isa<PointerType>(Base->getType()) &&
1089 "Invalid Base ptr type for preserve.struct.access.index.");
1090 auto *BaseType = Base->getType();
1092 Value *GEPIndex = getInt32(Index);
1093 Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
1095 GetElementPtrInst::getGEPReturnType(ElTy, Base, {Zero, GEPIndex});
1097 Module *M = BB->getParent()->getParent();
1098 Function *FnPreserveStructAccessIndex = Intrinsic::getDeclaration(
1099 M, Intrinsic::preserve_struct_access_index, {ResultType, BaseType});
1101 Value *DIIndex = getInt32(FieldIndex);
1102 CallInst *Fn = CreateCall(FnPreserveStructAccessIndex,
1103 {Base, GEPIndex, DIIndex});
1105 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
1110 CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(
1111 const DataLayout &DL, Value *PtrValue, Value *Mask, Type *IntPtrTy,
1112 Value *OffsetValue, Value **TheCheck) {
1113 Value *PtrIntValue = CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
1116 bool IsOffsetZero = false;
1117 if (const auto *CI = dyn_cast<ConstantInt>(OffsetValue))
1118 IsOffsetZero = CI->isZero();
1120 if (!IsOffsetZero) {
1121 if (OffsetValue->getType() != IntPtrTy)
1122 OffsetValue = CreateIntCast(OffsetValue, IntPtrTy, /*isSigned*/ true,
1124 PtrIntValue = CreateSub(PtrIntValue, OffsetValue, "offsetptr");
1128 Value *Zero = ConstantInt::get(IntPtrTy, 0);
1129 Value *MaskedPtr = CreateAnd(PtrIntValue, Mask, "maskedptr");
1130 Value *InvCond = CreateICmpEQ(MaskedPtr, Zero, "maskcond");
1132 *TheCheck = InvCond;
1134 return CreateAssumption(InvCond);
1137 CallInst *IRBuilderBase::CreateAlignmentAssumption(
1138 const DataLayout &DL, Value *PtrValue, unsigned Alignment,
1139 Value *OffsetValue, Value **TheCheck) {
1140 assert(isa<PointerType>(PtrValue->getType()) &&
1141 "trying to create an alignment assumption on a non-pointer?");
1142 assert(Alignment != 0 && "Invalid Alignment");
1143 auto *PtrTy = cast<PointerType>(PtrValue->getType());
1144 Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
1146 Value *Mask = ConstantInt::get(IntPtrTy, Alignment - 1);
1147 return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
1148 OffsetValue, TheCheck);
1151 CallInst *IRBuilderBase::CreateAlignmentAssumption(
1152 const DataLayout &DL, Value *PtrValue, Value *Alignment,
1153 Value *OffsetValue, Value **TheCheck) {
1154 assert(isa<PointerType>(PtrValue->getType()) &&
1155 "trying to create an alignment assumption on a non-pointer?");
1156 auto *PtrTy = cast<PointerType>(PtrValue->getType());
1157 Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
1159 if (Alignment->getType() != IntPtrTy)
1160 Alignment = CreateIntCast(Alignment, IntPtrTy, /*isSigned*/ false,
1163 Value *Mask = CreateSub(Alignment, ConstantInt::get(IntPtrTy, 1), "mask");
1165 return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
1166 OffsetValue, TheCheck);
1169 IRBuilderDefaultInserter::~IRBuilderDefaultInserter() {}
1170 IRBuilderCallbackInserter::~IRBuilderCallbackInserter() {}
1171 IRBuilderFolder::~IRBuilderFolder() {}
1172 void ConstantFolder::anchor() {}
1173 void NoFolder::anchor() {}