//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//
14 #include "CodeGenFunction.h"
16 #include "CGRecordLayout.h"
17 #include "CodeGenModule.h"
18 #include "clang/AST/ASTContext.h"
19 #include "clang/CodeGen/CGFunctionInfo.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/IR/DataLayout.h"
22 #include "llvm/IR/Intrinsics.h"
23 #include "llvm/IR/Operator.h"
25 using namespace clang;
26 using namespace CodeGen;
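
// AtomicInfo gathers the layout facts needed to lower an operation on an
// atomic l-value: the size and alignment of both the atomic storage and the
// contained value, how the value is evaluated (scalar, complex, or
// aggregate), and whether the target can perform the access inline or must
// fall back to an __atomic_* library call.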
namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
    LValue LVal;
    CGBitFieldInfo BFI;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
        : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
          EvaluationKind(TEK_Scalar), UseLibcall(true) {
      assert(!lvalue.isGlobalReg());
      ASTContext &C = CGF.getContext();
      if (lvalue.isSimple()) {
        AtomicTy = lvalue.getType();
        if (auto *ATy = AtomicTy->getAs<AtomicType>())
          ValueTy = ATy->getValueType();
        else
          ValueTy = AtomicTy;
        EvaluationKind = CGF.getEvaluationKind(ValueTy);

        uint64_t ValueAlignInBits;
        uint64_t AtomicAlignInBits;
        TypeInfo ValueTI = C.getTypeInfo(ValueTy);
        ValueSizeInBits = ValueTI.Width;
        ValueAlignInBits = ValueTI.Align;

        TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
        AtomicSizeInBits = AtomicTI.Width;
        AtomicAlignInBits = AtomicTI.Align;

        assert(ValueSizeInBits <= AtomicSizeInBits);
        assert(ValueAlignInBits <= AtomicAlignInBits);

        AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
        ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
        if (lvalue.getAlignment().isZero())
          lvalue.setAlignment(AtomicAlign);

        LVal = lvalue;
      } else if (lvalue.isBitField()) {
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        auto &OrigBFI = lvalue.getBitFieldInfo();
        auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
        AtomicSizeInBits = C.toBits(
            C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
                .RoundUpToAlignment(lvalue.getAlignment()));
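        // For example, with a 4-byte (32-bit) alignment, a 17-bit field at
        // bit offset 24 occupies bits 24..40, so the storage unit used for
        // the atomic access is (24 + 17 + 7) / 8 = 6 bytes, rounded up to
        // the next multiple of the alignment: 8 bytes (64 bits).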
        auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldAddr());
        auto OffsetInChars =
            (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
            lvalue.getAlignment();
        VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
            VoidPtrAddr, OffsetInChars.getQuantity());
        auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            VoidPtrAddr,
            CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
            "atomic_bitfield_base");
        BFI = OrigBFI;
        BFI.Offset = Offset;
        BFI.StorageSize = AtomicSizeInBits;
        BFI.StorageOffset += OffsetInChars;
        LVal = LValue::MakeBitfield(Addr, BFI, lvalue.getType(),
                                    lvalue.getAlignment());
        LVal.setTBAAInfo(lvalue.getTBAAInfo());
        AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
        if (AtomicTy.isNull()) {
          llvm::APInt Size(
              /*numBits=*/32,
              C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
          AtomicTy = C.getConstantArrayType(C.CharTy, Size, ArrayType::Normal,
                                            /*IndexTypeQuals=*/0);
        }
        AtomicAlign = ValueAlign = lvalue.getAlignment();
      } else if (lvalue.isVectorElt()) {
        ValueTy = lvalue.getType()->getAs<VectorType>()->getElementType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = lvalue.getType();
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        AtomicAlign = ValueAlign = lvalue.getAlignment();
        LVal = lvalue;
      } else {
        assert(lvalue.isExtVectorElt());
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
            lvalue.getType(), lvalue.getExtVectorAddr()
                                  ->getType()
                                  ->getPointerElementType()
                                  ->getVectorNumElements());
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        AtomicAlign = ValueAlign = lvalue.getAlignment();
        LVal = lvalue;
      }
      UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
          AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }
    const LValue &getAtomicLValue() const { return LVal; }
    llvm::Value *getAtomicAddress() const {
      if (LVal.isSimple())
        return LVal.getAddress();
      else if (LVal.isBitField())
        return LVal.getBitFieldAddr();
      else if (LVal.isVectorElt())
        return LVal.getVectorAddr();
      assert(LVal.isExtVectorElt());
      return LVal.getExtVectorAddr();
    }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary() const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr, AggValueSlot resultSlot,
                               SourceLocation loc, bool AsValue) const;

    /// \brief Converts an rvalue to an integer value.
    llvm::Value *convertRValueToInt(RValue RVal) const;

    RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                     AggValueSlot ResultSlot,
                                     SourceLocation Loc, bool AsValue) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue() const {
      assert(LVal.isSimple());
      llvm::Value *addr = getAtomicAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(nullptr, addr, 0);

      return LValue::MakeAddr(addr, getValueType(), LVal.getAlignment(),
                              CGF.getContext(), LVal.getTBAAInfo());
    }

    /// \brief Emits atomic load.
    /// \returns Loaded value.
    RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                          bool AsValue, llvm::AtomicOrdering AO,
                          bool IsVolatile);

    /// \brief Emits atomic compare-and-exchange sequence.
    /// \param Expected Expected value.
    /// \param Desired Desired value.
    /// \param Success Atomic ordering for success operation.
    /// \param Failure Atomic ordering for failed operation.
    /// \param IsWeak true if atomic operation is weak, false otherwise.
    /// \returns Pair of values: previous value from storage (value type) and
    /// boolean flag (i1 type) with true if success and false otherwise.
    std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange(
        RValue Expected, RValue Desired,
        llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
        llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
        bool IsWeak = false);

    /// \brief Emits atomic update.
    /// \param AO Atomic ordering.
    /// \param UpdateOp Update operation for the current lvalue.
    void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
    /// \brief Emits atomic update.
    /// \param AO Atomic ordering.
    void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                          bool IsVolatile);

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

    /// \brief Translates LLVM atomic ordering to GNU atomic ordering for
    /// libcalls.
    static AtomicExpr::AtomicOrderingKind
    translateAtomicOrdering(const llvm::AtomicOrdering AO);

  private:
    bool requiresMemSetZero(llvm::Type *type) const;

    /// \brief Creates temp alloca for intermediate operations on atomic value.
    llvm::Value *CreateTempAlloca() const;

    /// \brief Emits atomic load as a libcall.
    void EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,
                               llvm::AtomicOrdering AO, bool IsVolatile);
    /// \brief Emits atomic load as LLVM instruction.
    llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
    /// \brief Emits atomic compare-and-exchange op as a libcall.
    llvm::Value *EmitAtomicCompareExchangeLibcall(
        llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
        llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
        llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent);
    /// \brief Emits atomic compare-and-exchange op as LLVM instruction.
    std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
        llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
        llvm::AtomicOrdering Success = llvm::SequentiallyConsistent,
        llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent,
        bool IsWeak = false);
    /// \brief Emit atomic update as libcalls.
    void
    EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    /// \brief Emit atomic update as LLVM instructions.
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    /// \brief Emit atomic update as libcalls.
    void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                 bool IsVolatile);
    /// \brief Emit atomic update as LLVM instructions.
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                            bool IsVolatile);
  };
}

AtomicExpr::AtomicOrderingKind
AtomicInfo::translateAtomicOrdering(const llvm::AtomicOrdering AO) {
  switch (AO) {
  case llvm::Unordered:
  case llvm::NotAtomic:
  case llvm::Monotonic:
    return AtomicExpr::AO_ABI_memory_order_relaxed;
  case llvm::Acquire:
    return AtomicExpr::AO_ABI_memory_order_acquire;
  case llvm::Release:
    return AtomicExpr::AO_ABI_memory_order_release;
  case llvm::AcquireRelease:
    return AtomicExpr::AO_ABI_memory_order_acq_rel;
  case llvm::SequentiallyConsistent:
    return AtomicExpr::AO_ABI_memory_order_seq_cst;
  }
  llvm_unreachable("Unhandled AtomicOrdering");
}

llvm::Value *AtomicInfo::CreateTempAlloca() const {
  auto *TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                : AtomicTy,
      "atomic-temp");
  TempAlloca->setAlignment(getAtomicAlignment().getQuantity());
  // Cast to pointer to value type for bit-fields.
  if (LVal.isBitField())
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress()->getType());
  return TempAlloca;
}
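
// Build and emit a call to one of the __atomic_* runtime library routines:
// arrange a free-function call with the given result type and argument list,
// get or create the runtime function, and emit the call.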
static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
      CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
          FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern. User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(LVal.isSimple());
  llvm::Value *addr = LVal.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(
      addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
      CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
      LVal.getAlignment().getQuantity());
  return true;
}

static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              llvm::Value *Dest, llvm::Value *Ptr,
                              llvm::Value *Val1, llvm::Value *Val2,
                              uint64_t Size, unsigned Align,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder) {
  // Load the expected and desired operands from their temporaries.
  llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1);
  Expected->setAlignment(Align);
  llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2);
  Desired->setAlignment(Align);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation, we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if Expected isn't equal to Old, otherwise branch to the
  // exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1);
  StoreExpected->setAlignment(Align);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
}

/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically
/// known) failure ordering.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, llvm::Value *Dest,
                                        llvm::Value *Ptr, llvm::Value *Val1,
                                        llvm::Value *Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size, unsigned Align,
                                        llvm::AtomicOrdering SuccessOrder) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    switch (FO->getSExtValue()) {
    default:
      FailureOrder = llvm::Monotonic;
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      FailureOrder = llvm::Acquire;
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      FailureOrder = llvm::SequentiallyConsistent;
      break;
    }
    if (FailureOrder >= SuccessOrder) {
      // Don't assert on undefined behaviour.
      FailureOrder =
          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align,
                      SuccessOrder, FailureOrder);
    return;
  }

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, Align, SuccessOrder, llvm::Monotonic);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::Acquire);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, Align, SuccessOrder, llvm::SequentiallyConsistent);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}
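
// EmitAtomicOp lowers one AtomicExpr opcode at a fixed ordering: loads and
// stores become ordered LLVM load/store instructions, compare-exchange goes
// through the failure-ordering helper above, and everything else maps onto
// an atomicrmw instruction (with a fixup afterwards for the __atomic_*_fetch
// forms, which return the new value rather than the old one).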
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
                         llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, unsigned Align,
                         llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Align, Order);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Align, Order);
    } else {
      // Create all the relevant BB's
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB =
          CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Align, Order);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}
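
// Optimized (size-suffixed) __atomic_*_N libcalls take the value operand
// directly, coerced to a same-sized integer; the generic ones take it by
// reference as a void*. AddDirectArgument appends the operand to the call in
// whichever form the chosen libcall expects.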
static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc, CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
    ValTy =
        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
                                                SizeInBits)->getPointerTo();
    Val = CGF.EmitLoadOfScalar(CGF.Builder.CreateBitCast(Val, IPtrTy), false,
                               Align, CGF.getContext().getPointerType(ValTy),
                               Loc);
    // Coerce the value into an appropriately sized integer type.
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
             CGF.getContext().VoidPtrTy);
  }
}
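
// EmitAtomicExpr is the entry point for the __c11_atomic_* and __atomic_*
// builtins. It evaluates the operands, then lowers the operation one of
// three ways: as an __atomic_* libcall (when the size and alignment disagree
// or the size exceeds the target's inline-atomic width), directly via
// EmitAtomicOp (when the ordering is a compile-time constant), or as a
// switch over all valid orderings (when the ordering is dynamic).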
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
      getTarget().getMaxAtomicInlineWidth();
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr,
              *Val2 = nullptr;
  llvm::Value *Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getNumSubExprs() == 6)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  QualType RValTy = E->getType().getUnqualifiedType();

  auto GetDest = [&] {
    if (!RValTy->isVoidType() && !Dest) {
      Dest = CreateMemTemp(RValTy, ".atomicdst");
    }
    return Dest;
  };

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter.
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // The atomic address is the first or second parameter.
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
        MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare-and-exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
                        E->getExprLoc(), sizeChars);
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
                        E->getExprLoc(), sizeChars);
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                        E->getExprLoc(), sizeChars);
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        // The function returns an appropriately sized integer type.
        RetTy = getContext().getIntTypeForBitwidth(
            getContext().toBits(sizeChars), /*Signed=*/false);
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)), getContext().VoidPtrTy);
      }
    }
    // The order is always the last parameter.
    Args.add(RValue::get(Order),
             getContext().IntTy);

    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
    // The value is returned directly from the libcall.
    if (HaveRetTy && !RetTy->isVoidType())
      return Res;
    // The value is returned via an explicit out param.
    if (RetTy->isVoidType())
      return RValue::get(nullptr);
    // The value is returned directly for optimized libcalls but the caller
    // expects an out-param.
    if (UseOptimizedLibcall) {
      llvm::Value *ResVal = Res.getScalarVal();
      llvm::StoreInst *StoreDest = Builder.CreateStore(
          ResVal,
          Builder.CreateBitCast(GetDest(), ResVal->getType()->getPointerTo()));
      StoreDest->setAlignment(Align);
    }
    return convertTempToRValue(Dest, RValTy, E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *ITy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8);
  llvm::Value *OrigDest = GetDest();
  Ptr = Builder.CreateBitCast(
      Ptr, ITy->getPointerTo(Ptr->getType()->getPointerAddressSpace()));
  if (Val1) Val1 = Builder.CreateBitCast(Val1, ITy->getPointerTo());
  if (Val2) Val2 = Builder.CreateBitCast(Val2, ITy->getPointerTo());
  if (Dest && !E->isCmpXChg())
    Dest = Builder.CreateBitCast(Dest, ITy->getPointerTo());

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                   Size, Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume),
                AcquireBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
                 Size, Align, llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail,
               Size, Align, llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);
  return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
}

llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
      cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot,
                                       SourceLocation loc, bool AsValue) const {
  if (LVal.isSimple()) {
    if (EvaluationKind == TEK_Aggregate)
      return resultSlot.asRValue();

    // Drill into the padding structure if we have one.
    if (hasPadding())
      addr = CGF.Builder.CreateStructGEP(nullptr, addr, 0);

    // Otherwise, just convert the temporary to an r-value using the
    // normal conversion routine.
    return CGF.convertTempToRValue(addr, getValueType(), loc);
  }
  if (!AsValue)
    // Get RValue from temp memory as atomic for non-simple lvalues.
    return RValue::get(
        CGF.Builder.CreateAlignedLoad(addr, AtomicAlign.getQuantity()));
  if (LVal.isBitField())
    return CGF.EmitLoadOfBitfieldLValue(LValue::MakeBitfield(
        addr, LVal.getBitFieldInfo(), LVal.getType(), LVal.getAlignment()));
  if (LVal.isVectorElt())
    return CGF.EmitLoadOfLValue(LValue::MakeVectorElt(addr, LVal.getVectorIdx(),
                                                      LVal.getType(),
                                                      LVal.getAlignment()),
                                loc);
  assert(LVal.isExtVectorElt());
  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
      addr, LVal.getExtVectorElts(), LVal.getType(), LVal.getAlignment()));
}
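
// The inverse of convertRValueToInt: take the integer produced by a native
// atomic instruction and rebuild an RValue of the value type (AsValue) or of
// the whole atomic type, going through a temporary only when a direct
// bitcast, inttoptr, or from-memory conversion will not do.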
RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                             AggValueSlot ResultSlot,
                                             SourceLocation Loc,
                                             bool AsValue) const {
  // Try not to go through memory in the easy cases.
  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
  if (getEvaluationKind() == TEK_Scalar &&
      (((!LVal.isBitField() ||
         LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
        !hasPadding()) ||
       !AsValue)) {
    auto *ValTy = AsValue
                      ? CGF.ConvertTypeForMem(ValueTy)
                      : getAtomicAddress()->getType()->getPointerElementType();
    if (ValTy->isIntegerTy()) {
      assert(IntVal->getType() == ValTy && "Different integer types.");
      return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
    } else if (ValTy->isPointerTy())
      return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
    else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
  }

  // Create a temporary. This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *Temp;
  bool TempIsVolatile = false;
  CharUnits TempAlignment;
  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
    assert(!ResultSlot.isIgnored());
    Temp = ResultSlot.getAddr();
    TempAlignment = getValueAlignment();
    TempIsVolatile = ResultSlot.isVolatile();
  } else {
    Temp = CreateTempAlloca();
    TempAlignment = getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *CastTemp = emitCastToAtomicIntPointer(Temp);
  CGF.Builder.CreateAlignedStore(IntVal, CastTemp, TempAlignment.getQuantity())
      ->setVolatile(TempIsVolatile);

  return convertTempToRValue(Temp, ResultSlot, Loc, AsValue);
}

void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddrForLoaded,
                                       llvm::AtomicOrdering AO, bool) {
  // void __atomic_load(size_t size, void *mem, void *return, int order);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicAddress())),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddrForLoaded)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, translateAtomicOrdering(AO))),
           CGF.getContext().IntTy);
  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
}
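
// Native path: emit an ordered LLVM load of the atomic storage,
// reinterpreted as an integer of the atomic width, with alignment,
// volatility, and TBAA decoration.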
llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile) {
  // Okay, we're doing this natively.
  llvm::Value *Addr = emitCastToAtomicIntPointer(getAtomicAddress());
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);

  // Other decoration.
  Load->setAlignment(getAtomicAlignment().getQuantity());
  if (IsVolatile)
    Load->setVolatile(true);
  if (LVal.getTBAAInfo())
    CGF.CGM.DecorateInstruction(Load, LVal.getTBAAInfo());
  return Load;
}

/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// the operation can be performed without a libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  AtomicInfo AI(*this, LV);
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline;
}

/// A type is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* we know the access is volatile
/// and the operation can be performed without a libcall.
bool CodeGenFunction::typeIsSuitableForInlineAtomic(QualType Ty,
                                                    bool IsVolatile) const {
  // An atomic is inline if we don't need to use a libcall (e.g. it is builtin).
  bool AtomicIsInline = getContext().getTargetInfo().hasBuiltinAtomic(
      getContext().getTypeSize(Ty), getContext().getTypeAlign(Ty));
  return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline;
}

RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  bool IsVolatile = LV.isVolatileQualified();
  if (LV.getType()->isAtomicType()) {
    AO = llvm::SequentiallyConsistent;
  } else {
    AO = llvm::Acquire;
    IsVolatile = true;
  }
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
}

RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    llvm::Value *TempAddr;
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
      assert(getEvaluationKind() == TEK_Aggregate);
      TempAddr = ResultSlot.getAddr();
    } else
      TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr, AO, IsVolatile);

    // Okay, turn that back into the original value or whole atomic (for
    // non-simple lvalues) type.
    return convertTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

  // If we're ignoring an aggregate return, don't do anything.
  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
    return RValue::getAggregate(nullptr, false);

  // Okay, turn that back into the original value or atomic (for non-simple
  // lvalues) type.
  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}

/// Emit a load from an l-value of atomic type. Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(LVal.isSimple());
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding. Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(getAtomicAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || LVal.isVolatileQualified()),
                          LVal.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue TempLVal = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
  }
}

/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType(),
                                     getAtomicAlignment());
  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  return TempLV.getAddress();
}

llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
    llvm::Value *Value = RVal.getScalarVal();
    if (isa<llvm::IntegerType>(Value->getType()))
      return CGF.EmitToMemory(Value, ValueTy);

    llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
        CGF.getLLVMContext(),
        LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
    if (isa<llvm::PointerType>(Value->getType()))
      return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
    else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
      return CGF.Builder.CreateBitCast(Value, InputIntTy);
  }

  // Otherwise, we need to go through memory.
  // Put the r-value in memory.
  llvm::Value *Addr = materializeRValue(RVal);

  // Cast the temporary to the atomic int type and pull a value out.
  Addr = emitCastToAtomicIntPointer(Addr);
  return CGF.Builder.CreateAlignedLoad(Addr,
                                       getAtomicAlignment().getQuantity());
}
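
// Native compare-and-exchange: a single cmpxchg instruction on the
// integer-typed atomic storage, returning the loaded (previous) value and
// the i1 success flag.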
1343 std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1344 llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
1345 llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
1346 // Do the atomic store.
1347 auto *Addr = emitCastToAtomicIntPointer(getAtomicAddress());
1348 auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal,
1350 // Other decoration.
1351 Inst->setVolatile(LVal.isVolatileQualified());
1352 Inst->setWeak(IsWeak);
1354 // Okay, turn that back into the original value type.
1355 auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1356 auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1357 return std::make_pair(PreviousVal, SuccessFailureVal);

llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
  //                                void *desired, int success, int failure);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicAddress())),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, translateAtomicOrdering(Success))),
           CGF.getContext().IntTy);
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, translateAtomicOrdering(Failure))),
           CGF.getContext().IntTy);
  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
                                              CGF.getContext().BoolTy, Args);

  return SuccessFailureRVal.getScalarVal();
}
1387 std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1388 RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
1389 llvm::AtomicOrdering Failure, bool IsWeak) {
1390 if (Failure >= Success)
1391 // Don't assert on undefined behavior.
1392 Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
1394 // Check whether we should use a library call.
1395 if (shouldUseLibcall()) {
1396 // Produce a source address.
1397 auto *ExpectedAddr = materializeRValue(Expected);
1398 auto *DesiredAddr = materializeRValue(Desired);
1399 auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr,
1401 return std::make_pair(
1402 convertTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1403 SourceLocation(), /*AsValue=*/false),
1407 // If we've got a scalar value of the right size, try to avoid going
1409 auto *ExpectedVal = convertRValueToInt(Expected);
1410 auto *DesiredVal = convertRValueToInt(Desired);
1411 auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1413 return std::make_pair(
1414 ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
1415 SourceLocation(), /*AsValue=*/false),
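
// Apply UpdateOp to the old value and store the result into DesiredAddr,
// building l-values that view the right piece (whole value, bit-field, or
// vector element) of both the old and the desired storage. This is the body
// of the compare-and-swap update loops below.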
static void
EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
                      const llvm::function_ref<RValue(RValue)> &UpdateOp,
                      llvm::Value *DesiredAddr) {
  llvm::Value *Ptr = nullptr;
  LValue UpdateLVal;
  RValue UpRVal;
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  if (AtomicLVal.isSimple()) {
    UpRVal = OldRVal;
    DesiredLVal =
        LValue::MakeAddr(DesiredAddr, AtomicLVal.getType(),
                         AtomicLVal.getAlignment(), CGF.CGM.getContext());
  } else {
    // Build new lvalue for temp address.
    Ptr = Atomics.materializeRValue(OldRVal);
    if (AtomicLVal.isBitField()) {
      UpdateLVal =
          LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(), AtomicLVal.getAlignment());
      DesiredLVal =
          LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(), AtomicLVal.getAlignment());
    } else if (AtomicLVal.isVectorElt()) {
      UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
                                         AtomicLVal.getType(),
                                         AtomicLVal.getAlignment());
      DesiredLVal = LValue::MakeVectorElt(
          DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
          AtomicLVal.getAlignment());
    } else {
      assert(AtomicLVal.isExtVectorElt());
      UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
                                            AtomicLVal.getType(),
                                            AtomicLVal.getAlignment());
      DesiredLVal = LValue::MakeExtVectorElt(
          DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
          AtomicLVal.getAlignment());
    }
    UpdateLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
    DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
    UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
  }
  // Store new value in the corresponding memory area.
  RValue NewRVal = UpdateOp(UpRVal);
  if (NewRVal.isScalar()) {
    CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
  } else {
    assert(NewRVal.isComplex());
    CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
                           /*isInit=*/false);
  }
}
1475 llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1477 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1479 llvm::Value *ExpectedAddr = CreateTempAlloca();
1481 EmitAtomicLoadLibcall(ExpectedAddr, AO, IsVolatile);
1482 auto *ContBB = CGF.createBasicBlock("atomic_cont");
1483 auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1484 CGF.EmitBlock(ContBB);
1485 auto *DesiredAddr = CreateTempAlloca();
1486 if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1488 getAtomicAddress()->getType()->getPointerElementType())) {
1489 auto *OldVal = CGF.Builder.CreateAlignedLoad(
1490 ExpectedAddr, getAtomicAlignment().getQuantity());
1491 CGF.Builder.CreateAlignedStore(OldVal, DesiredAddr,
1492 getAtomicAlignment().getQuantity());
1494 auto OldRVal = convertTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1495 SourceLocation(), /*AsValue=*/false);
1496 EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
1498 EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, AO, Failure);
1499 CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1500 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);

void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // For non-simple lvalues perform compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  auto *NewAtomicAddr = CreateTempAlloca();
  auto *NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(
          getAtomicAddress()->getType()->getPointerElementType())) {
    CGF.Builder.CreateAlignedStore(PHI, NewAtomicIntAddr,
                                   getAtomicAlignment().getQuantity());
  }
  auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateAlignedLoad(
      NewAtomicIntAddr, getAtomicAlignment().getQuantity());
  // Try to write the new value using a cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
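
// The following overloads implement the same compare-and-swap update loop
// for the case where the new value is a known RValue rather than a function
// of the old value, so only the desired l-value needs to be rebuilt.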
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue UpdateRVal, llvm::Value *DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  // Build a new lvalue for the temp address.
  if (AtomicLVal.isBitField()) {
    DesiredLVal =
        LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                             AtomicLVal.getType(), AtomicLVal.getAlignment());
  } else if (AtomicLVal.isVectorElt()) {
    DesiredLVal =
        LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
                              AtomicLVal.getType(), AtomicLVal.getAlignment());
  } else {
    assert(AtomicLVal.isExtVectorElt());
    DesiredLVal = LValue::MakeExtVectorElt(
        DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
        AtomicLVal.getAlignment());
  }
  DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo());
  // Store the new value into the corresponding memory area.
  assert(UpdateRVal.isScalar());
  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
}
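
// Libcall-based variant of the update loop for a precomputed rvalue; used for
// stores to non-simple atomic lvalues when inline atomics are unavailable.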
void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  llvm::Value *ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr, AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  auto *DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(
          getAtomicAddress()->getType()->getPointerElementType())) {
    auto *OldVal = CGF.Builder.CreateAlignedLoad(
        ExpectedAddr, getAtomicAlignment().getQuantity());
    CGF.Builder.CreateAlignedStore(OldVal, DesiredAddr,
                                   getAtomicAlignment().getQuantity());
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
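
// Native-instruction variant of the rvalue update loop above.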
void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // For non-simple lvalues, perform a compare-and-swap loop.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  auto *NewAtomicAddr = CreateTempAlloca();
  auto *NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(
          getAtomicAddress()->getType()->getPointerElementType())) {
    CGF.Builder.CreateAlignedStore(PHI, NewAtomicIntAddr,
                                   getAtomicAlignment().getQuantity());
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateAlignedLoad(
      NewAtomicIntAddr, getAtomicAlignment().getQuantity());
  // Try to write the new value using a cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
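
// Dispatch an atomic read-modify-write to the libcall or native path.
// Illustrative caller (hypothetical, not from this file): an emitter for an
// "atomic update" construct could pass a lambda computing new-from-old, e.g.
//   Atomics.EmitAtomicUpdate(
//       llvm::Monotonic,
//       [&](RValue Old) {
//         return RValue::get(CGF.Builder.CreateAdd(Old.getScalarVal(), Inc));
//       },
//       /*IsVolatile=*/false);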
void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}
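
// Same dispatch for the precomputed-rvalue form.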
void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}
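
// Store to an atomic l-value with a default ordering: sequentially consistent
// for true atomic types; otherwise a release store that is also forced to be
// volatile.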
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
                                      bool isInit) {
  bool IsVolatile = lvalue.isVolatileQualified();
  llvm::AtomicOrdering AO;
  if (lvalue.getType()->isAtomicType()) {
    AO = llvm::SequentiallyConsistent;
  } else {
    AO = llvm::Release;
    IsVolatile = true;
  }
  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
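///
/// For example (a sketch, not from this file): for a 3-byte struct S,
/// _Atomic(struct S) is typically padded out to 4 bytes, and the aggregate
/// r-value handed in here must supply all 4 bytes, padding included.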
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

  // If this is an initialization, just put the value there normally.
  if (LVal.isSimple()) {
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      // Produce a source address.
      llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

      // void __atomic_store(size_t size, void *mem, void *val, int order)
      CallArgList args;
      args.add(RValue::get(atomics.getAtomicSizeValue()),
               getContext().getSizeType());
      args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicAddress())),
               getContext().VoidPtrTy);
      args.add(RValue::get(EmitCastToVoidPtr(srcAddr)), getContext().VoidPtrTy);
      args.add(RValue::get(llvm::ConstantInt::get(
                   IntTy, AtomicInfo::translateAtomicOrdering(AO))),
               getContext().IntTy);
      emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
      return;
    }

    // Okay, we're doing this natively.
    llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    llvm::Value *addr =
        atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
    intValue = Builder.CreateIntCast(
        intValue, addr->getType()->getPointerElementType(), /*isSigned=*/false);
    llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

    // Initializations don't need to be atomic.
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    store->setAlignment(dest.getAlignment().getQuantity());
    if (IsVolatile)
      store->setVolatile(true);
    if (dest.getTBAAInfo())
      CGM.DecorateInstruction(store, dest.getTBAAInfo());
    return;
  }

  // Emit a simple atomic update operation.
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}

/// Emit a compare-and-exchange op for atomic type.
///
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddr()->getType()->getPointerElementType() ==
             Obj.getAddress()->getType()->getPointerElementType());
  assert(!Desired.isAggregate() ||
         Desired.getAggregateAddr()->getType()->getPointerElementType() ==
             Obj.getAddress()->getType()->getPointerElementType());
  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
                                           IsWeak);
}
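
// Public entry point: wrap LVal in an AtomicInfo and emit the update loop.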
void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}
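
// Emit the initialization of an object of atomic type from `init`.
// Initialization is not itself an atomic operation, so the value is simply
// constructed or copied in place.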
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value));
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased,
                                        Zeroed ? AggValueSlot::IsZeroed :
                                                 AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}