//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
    LValue LVal;
    CGBitFieldInfo BFI;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
        : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
          EvaluationKind(TEK_Scalar), UseLibcall(true) {
      assert(!lvalue.isGlobalReg());
      ASTContext &C = CGF.getContext();
      if (lvalue.isSimple()) {
        AtomicTy = lvalue.getType();
        if (auto *ATy = AtomicTy->getAs<AtomicType>())
          ValueTy = ATy->getValueType();
        else
          ValueTy = AtomicTy;
        EvaluationKind = CGF.getEvaluationKind(ValueTy);

        uint64_t ValueAlignInBits;
        uint64_t AtomicAlignInBits;
        TypeInfo ValueTI = C.getTypeInfo(ValueTy);
        ValueSizeInBits = ValueTI.Width;
        ValueAlignInBits = ValueTI.Align;

        TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
        AtomicSizeInBits = AtomicTI.Width;
        AtomicAlignInBits = AtomicTI.Align;

        assert(ValueSizeInBits <= AtomicSizeInBits);
        assert(ValueAlignInBits <= AtomicAlignInBits);

        AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
        ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
        if (lvalue.getAlignment().isZero())
          lvalue.setAlignment(AtomicAlign);

        LVal = lvalue;
      } else if (lvalue.isBitField()) {
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        auto &OrigBFI = lvalue.getBitFieldInfo();
        auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
        AtomicSizeInBits = C.toBits(
            C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
                .alignTo(lvalue.getAlignment()));
        auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
        auto OffsetInChars =
            (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
            lvalue.getAlignment();
        VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
            VoidPtrAddr, OffsetInChars.getQuantity());
        auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
            VoidPtrAddr,
            CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
            "atomic_bitfield_base");
        BFI = OrigBFI;
        BFI.Offset = Offset;
        BFI.StorageSize = AtomicSizeInBits;
        BFI.StorageOffset += OffsetInChars;
        LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
                                    BFI, lvalue.getType(), lvalue.getBaseInfo(),
                                    lvalue.getTBAAInfo());
        AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
        if (AtomicTy.isNull()) {
          llvm::APInt Size(
              /*numBits=*/32,
              C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
          AtomicTy =
              C.getConstantArrayType(C.CharTy, Size, nullptr, ArrayType::Normal,
                                     /*IndexTypeQuals=*/0);
        }
        AtomicAlign = ValueAlign = lvalue.getAlignment();
      } else if (lvalue.isVectorElt()) {
        ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = lvalue.getType();
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        AtomicAlign = ValueAlign = lvalue.getAlignment();
        LVal = lvalue;
      } else {
        assert(lvalue.isExtVectorElt());
        ValueTy = lvalue.getType();
        ValueSizeInBits = C.getTypeSize(ValueTy);
        AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
            lvalue.getType(), lvalue.getExtVectorAddress()
                                  .getElementType()->getVectorNumElements());
        AtomicSizeInBits = C.getTypeSize(AtomicTy);
        AtomicAlign = ValueAlign = lvalue.getAlignment();
        LVal = lvalue;
      }
      UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
          AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }
    const LValue &getAtomicLValue() const { return LVal; }
    llvm::Value *getAtomicPointer() const {
      if (LVal.isSimple())
        return LVal.getPointer(CGF);
      else if (LVal.isBitField())
        return LVal.getBitFieldPointer();
      else if (LVal.isVectorElt())
        return LVal.getVectorPointer();
      assert(LVal.isExtVectorElt());
      return LVal.getExtVectorPointer();
    }
    Address getAtomicAddress() const {
      return Address(getAtomicPointer(), getAtomicAlignment());
    }

    Address getAtomicAddressAsAtomicIntPointer() const {
      return emitCastToAtomicIntPointer(getAtomicAddress());
    }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
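    /// For instance (an illustrative assumption, not taken from this file):
    /// a target may round _Atomic(struct {char c[3];}) up to four bytes,
    /// leaving one byte of padding after the three-byte value.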
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    bool emitMemSetZeroIfNecessary() const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for atomic
    /// operations on the underlying storage.
    Address emitCastToAtomicIntPointer(Address Addr) const;

    /// If Addr is compatible with the iN that will be used for an atomic
    /// operation, bitcast it. Otherwise, create a temporary that is suitable
    /// and copy the value across.
    Address convertToAtomicIntPointer(Address Addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
                                     SourceLocation loc, bool AsValue) const;

    /// Converts an rvalue to an integer value.
    llvm::Value *convertRValueToInt(RValue RVal) const;

    RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                     AggValueSlot ResultSlot,
                                     SourceLocation Loc, bool AsValue) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue() const {
      assert(LVal.isSimple());
      Address addr = getAtomicAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
                              LVal.getBaseInfo(), LVal.getTBAAInfo());
    }

    /// Emits atomic load.
    /// \returns Loaded value.
    RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                          bool AsValue, llvm::AtomicOrdering AO,
                          bool IsVolatile);

    /// Emits atomic compare-and-exchange sequence.
    /// \param Expected Expected value.
    /// \param Desired Desired value.
    /// \param Success Atomic ordering for success operation.
    /// \param Failure Atomic ordering for failed operation.
    /// \param IsWeak true if atomic operation is weak, false otherwise.
    /// \returns Pair of values: previous value from storage (value type) and
    /// boolean flag (i1 type) with true if success and false otherwise.
    std::pair<RValue, llvm::Value *>
    EmitAtomicCompareExchange(RValue Expected, RValue Desired,
                              llvm::AtomicOrdering Success =
                                  llvm::AtomicOrdering::SequentiallyConsistent,
                              llvm::AtomicOrdering Failure =
                                  llvm::AtomicOrdering::SequentiallyConsistent,
                              bool IsWeak = false);

    /// Emits atomic update.
    /// \param AO Atomic ordering.
    /// \param UpdateOp Update operation for the current lvalue.
    void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
    /// Emits atomic update.
    /// \param AO Atomic ordering.
    void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                          bool IsVolatile);

    /// Materialize an atomic r-value in atomic-layout memory.
    Address materializeRValue(RValue rvalue) const;

    /// Creates temp alloca for intermediate operations on atomic value.
    Address CreateTempAlloca() const;
  private:
    bool requiresMemSetZero(llvm::Type *type) const;

    /// Emits atomic load as a libcall.
    void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                               llvm::AtomicOrdering AO, bool IsVolatile);
    /// Emits atomic load as LLVM instruction.
    llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
    /// Emits atomic compare-and-exchange op as a libcall.
    llvm::Value *EmitAtomicCompareExchangeLibcall(
        llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
        llvm::AtomicOrdering Success =
            llvm::AtomicOrdering::SequentiallyConsistent,
        llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent);
    /// Emits atomic compare-and-exchange op as LLVM instruction.
    std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
        llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
        llvm::AtomicOrdering Success =
            llvm::AtomicOrdering::SequentiallyConsistent,
        llvm::AtomicOrdering Failure =
            llvm::AtomicOrdering::SequentiallyConsistent,
        bool IsWeak = false);
    /// Emit atomic update as libcalls.
    void
    EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    /// Emit atomic update as LLVM instructions.
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                            const llvm::function_ref<RValue(RValue)> &UpdateOp,
                            bool IsVolatile);
    /// Emit atomic update as libcalls.
    void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                 bool IsVolatile);
    /// Emit atomic update as LLVM instructions.
    void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                            bool IsVolatile);
  };
} // end anonymous namespace

Address AtomicInfo::CreateTempAlloca() const {
  Address TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                : AtomicTy,
      getAtomicAlignment(),
      "atomic-temp");
  // Cast to pointer to value type for bit-fields.
  if (LVal.isBitField())
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress().getType());
  return TempAlloca;
}

static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
      CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::FunctionCallee fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  auto callee = CGCallee::forDirect(fn);
  return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

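// Illustrative case (a sketch; the exact types are target-dependent): on
// x86-64, _Atomic(long double) is commonly 16 bytes, but an x86_fp80 store
// writes only 10 of them, so the check above fails and the tail bytes must
// be zero-initialized before the value is usable in atomic compares.
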
/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  User beware.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(LVal.isSimple());
  llvm::Value *addr = LVal.getPointer(CGF);
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return false;

  CGF.Builder.CreateMemSet(
      addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
      CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
      LVal.getAlignment().getAsAlign());
  return true;
}

static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              Address Dest, Address Ptr,
                              Address Val1, Address Val2,
                              uint64_t Size,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder,
                              llvm::SyncScope::ID Scope) {
  // Load the expected and desired operands for the compare-exchange.
  llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
  llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
      Scope);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation; we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if Expected isn't equal to Old, otherwise branch to the
  // exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  CGF.Builder.CreateStore(Old, Val1);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
}

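// For reference, the helper above produces IR of roughly this shape (a
// sketch, not verbatim compiler output):
//   %pair = cmpxchg iN* %ptr, iN %expected, iN %desired success_ord fail_ord
//   %old  = extractvalue { iN, i1 } %pair, 0
//   %ok   = extractvalue { iN, i1 } %pair, 1
//   br i1 %ok, label %cmpxchg.continue, label %cmpxchg.store_expected
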
/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically known)
/// FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, Address Dest, Address Ptr,
                                        Address Val1, Address Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size,
                                        llvm::AtomicOrdering SuccessOrder,
                                        llvm::SyncScope::ID Scope) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    auto FOS = FO->getSExtValue();
    if (!llvm::isValidAtomicOrderingCABI(FOS))
      FailureOrder = llvm::AtomicOrdering::Monotonic;
    else
      switch ((llvm::AtomicOrderingCABI)FOS) {
      case llvm::AtomicOrderingCABI::relaxed:
      case llvm::AtomicOrderingCABI::release:
      case llvm::AtomicOrderingCABI::acq_rel:
        FailureOrder = llvm::AtomicOrdering::Monotonic;
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        FailureOrder = llvm::AtomicOrdering::Acquire;
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
        break;
      }
    if (isStrongerThan(FailureOrder, SuccessOrder)) {
      // Don't assert on undefined behavior "failure argument shall be no
      // stronger than the success argument".
      FailureOrder =
          llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
    }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                      FailureOrder, Scope);
    return;
  }

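  // Summary of the constant-ordering path above: relaxed/release/acq_rel
  // fail as monotonic, consume/acquire fail as acquire, and seq_cst fails as
  // seq_cst; the result is then clamped so the failure ordering is never
  // stronger than the success ordering.
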
  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
      SuccessOrder != llvm::AtomicOrdering::Release)
    AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
    SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);

  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);

  // Emit all the different atomics

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
  CGF.Builder.CreateBr(ContBB);

  if (AcquireBB) {
    CGF.Builder.SetInsertPoint(AcquireBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                      Size, SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                AcquireBB);
    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
                AcquireBB);
  }
  if (SeqCstBB) {
    CGF.Builder.SetInsertPoint(SeqCstBB);
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                      llvm::AtomicOrdering::SequentiallyConsistent, Scope);
    CGF.Builder.CreateBr(ContBB);
    SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
                SeqCstBB);
  }

  CGF.Builder.SetInsertPoint(ContBB);
}

/// Duplicate the atomic min/max operation in conventional IR for the builtin
/// variants that return the new rather than the original value.
static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
                                         AtomicExpr::AtomicOp Op,
                                         bool IsSigned,
                                         llvm::Value *OldVal,
                                         llvm::Value *RHS) {
  llvm::CmpInst::Predicate Pred;
  switch (Op) {
  default:
    llvm_unreachable("Unexpected min/max operation");
  case AtomicExpr::AO__atomic_max_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
    break;
  case AtomicExpr::AO__atomic_min_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
    break;
  }
  llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
  return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
}

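// Example (a sketch): for __atomic_max_fetch(&x, v, order), the atomicrmw
// instruction returns the value x held *before* the operation, so the helper
// above recomputes max(old, v) in ordinary IR to recover the value that was
// actually stored.
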
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::SyncScope::ID Scope) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  bool PostOpMinMax = false;
  unsigned PostOp = 0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Order, Scope);
    } else {
      // Create all the relevant BB's
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB =
          CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order, Scope);
    Load->setVolatile(E->isVolatile());
    CGF.Builder.CreateStore(Load, Dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order, Scope);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_min_fetch:
    PostOpMinMax = true;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
    Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
                                                  : llvm::AtomicRMWInst::UMin;
    break;

  case AtomicExpr::AO__atomic_max_fetch:
    PostOpMinMax = true;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
    Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
                                                  : llvm::AtomicRMWInst::UMax;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And; // the NOT is special cased below
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOpMinMax)
    Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
                                  E->getValueType()->isSignedIntegerType(),
                                  RMWI, LoadVal1);
  else if (PostOp)
    Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
                                     LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  CGF.Builder.CreateStore(Result, Dest);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static Address
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::Value *Scope) {
  auto ScopeModel = Expr->getScopeModel();

  // LLVM atomic instructions always have a synch scope. If the clang atomic
  // expression has no scope operand, use the default LLVM synch scope.
  if (!ScopeModel) {
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
    return;
  }

  // Handle constant scope.
  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
    auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
        CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
        Order, CGF.CGM.getLLVMContext());
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, SCID);
    return;
  }

  // Handle non-constant scope.
  auto &Builder = CGF.Builder;
  auto Scopes = ScopeModel->getRuntimeValues();
  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
  for (auto S : Scopes)
    BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);

  llvm::BasicBlock *ContBB =
      CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);

  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
  // If an unsupported synch scope is encountered at run time, assume a
  // fallback synch scope value.
  auto FallBack = ScopeModel->getFallBackValue();
  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
  for (auto S : Scopes) {
    auto *B = BB[S];
    if (S != FallBack)
      SI->addCase(Builder.getInt32(S), B);

    Builder.SetInsertPoint(B);
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order,
                 CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
                                                         ScopeModel->map(S),
                                                         Order,
                                                         CGF.getLLVMContext()));
    Builder.CreateBr(ContBB);
  }

  Builder.SetInsertPoint(ContBB);
}

static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                  SourceLocation Loc, CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
    ValTy =
        CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
    llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
                                                SizeInBits)->getPointerTo();
    Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
    Val = CGF.EmitLoadOfScalar(Ptr, false,
                               CGF.getContext().getPointerType(ValTy),
                               Loc);
    // Coerce the value into an appropriately sized integer type.
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
             CGF.getContext().VoidPtrTy);
  }
}

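// Illustration of the two argument styles (call shapes assumed from the GCC
// libatomic ABI, a sketch rather than exact prototypes): optimized libcalls
// take the value directly, e.g.
//   uint32_t __atomic_fetch_add_4(void *mem, uint32_t val, int order);
// while the generic forms pass everything by reference, e.g.
//   void __atomic_exchange(size_t size, void *mem, void *val, void *ret,
//                          int order);
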
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;

  Address Val1 = Address::invalid();
  Address Val2 = Address::invalid();
  Address Dest = Address::invalid();
  Address Ptr = EmitPointerWithAlignment(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
      E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
    LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  CharUnits sizeChars, alignChars;
  std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();

  bool Oversized = getContext().toBits(sizeChars) > MaxInlineWidthInBits;
  bool Misaligned = (Ptr.getAlignment() % sizeChars) != 0;
  bool UseLibcall = Misaligned | Oversized;
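  // For example (illustrative): a 16-byte _Atomic struct on a target whose
  // maximum inline atomic width is 8 bytes is Oversized, and an int at an odd
  // offset inside a packed struct is Misaligned; either condition forces the
  // libcall path below.
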
  if (UseLibcall) {
    CGM.getDiags().Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
        << !Oversized;
  }

  llvm::Value *Order = EmitScalarExpr(E->getOrder());
  llvm::Value *Scope =
      E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled above with EmitAtomicInit!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    Dest = EmitPointerWithAlignment(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitPointerWithAlignment(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
      Val1 = Temp;
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
      break;
    }
    LLVM_FALLTHROUGH;
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_min:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  QualType RValTy = E->getType().getUnqualifiedType();

  // The inlined atomics only function on iN types, where N is a power of 2. We
  // need to make sure (via temporaries if necessary) that all incoming values
  // are compatible.
  LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
  AtomicInfo Atomics(*this, AtomicVal);

  Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
  if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
  if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
  if (Dest.isValid())
    Dest = Atomics.emitCastToAtomicIntPointer(Dest);
  else if (E->isCmpXChg())
    Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
  else if (!RValTy->isVoidType())
    Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_init:
    case AtomicExpr::AO__opencl_atomic_init:
      llvm_unreachable("Already handled above with EmitAtomicInit!");

    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__opencl_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__opencl_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__opencl_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_nand:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__opencl_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_min:
    case AtomicExpr::AO__opencl_atomic_fetch_max:
    case AtomicExpr::AO__atomic_fetch_xor:
    case AtomicExpr::AO__c11_atomic_fetch_max:
    case AtomicExpr::AO__c11_atomic_fetch_min:
    case AtomicExpr::AO__atomic_add_fetch:
    case AtomicExpr::AO__atomic_and_fetch:
    case AtomicExpr::AO__atomic_nand_fetch:
    case AtomicExpr::AO__atomic_or_fetch:
    case AtomicExpr::AO__atomic_sub_fetch:
    case AtomicExpr::AO__atomic_xor_fetch:
    case AtomicExpr::AO__atomic_fetch_max:
    case AtomicExpr::AO__atomic_fetch_min:
    case AtomicExpr::AO__atomic_max_fetch:
    case AtomicExpr::AO__atomic_min_fetch:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;

    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_exchange:
    case AtomicExpr::AO__atomic_compare_exchange:
      // Use the generic version if we don't know that the operand will be
      // suitably aligned for the optimized version.
      if (Misaligned)
        break;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__opencl_atomic_load:
    case AtomicExpr::AO__opencl_atomic_store:
    case AtomicExpr::AO__opencl_atomic_exchange:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_load_n:
    case AtomicExpr::AO__atomic_store_n:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      // Only use optimized library calls for sizes for which they exist.
      // FIXME: Size == 16 optimized library functions exist too.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter.
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // Atomic address is the first or second parameter.
    // The OpenCL atomic library functions only accept pointer arguments to
    // generic address space.
    auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
      if (!E->isOpenCL())
        return V;
      auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
      if (AS == LangAS::opencl_generic)
        return V;
      auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
      auto T = V->getType();
      auto *DestType = T->getPointerElementType()->getPointerTo(DestAS);

      return getTargetHooks().performAddrSpaceCast(
          *this, V, AS, LangAS::opencl_generic, DestType, false);
    };

    Args.add(RValue::get(CastToGenericAddrSpace(
                 EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
             getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
        MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
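    // Note: for pointer operands the arithmetic libcalls are handed the
    // pointer's integer representation, so LoweredMemTy is the target's
    // intptr type rather than T* (the C11 sizeof(T) scaling happened above).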
    QualType RetTy;
    bool HaveRetTy = false;
    llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
    bool PostOpMinMax = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_init:
    case AtomicExpr::AO__opencl_atomic_init:
      llvm_unreachable("Already handled!");

    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(
          RValue::get(CastToGenericAddrSpace(
              EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
          getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      Args.add(RValue::get(Order), getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__opencl_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__opencl_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__opencl_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_add_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_add_fetch:
      PostOp = llvm::Instruction::Add;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__opencl_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        LoweredMemTy, E->getExprLoc(), sizeChars);
      break;
    // T __atomic_and_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_and_fetch:
      PostOp = llvm::Instruction::And;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__opencl_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    // T __atomic_or_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_or_fetch:
      PostOp = llvm::Instruction::Or;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__opencl_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    // T __atomic_sub_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_sub_fetch:
      PostOp = llvm::Instruction::Sub;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__opencl_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        LoweredMemTy, E->getExprLoc(), sizeChars);
      break;
    // T __atomic_xor_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_xor_fetch:
      PostOp = llvm::Instruction::Xor;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    case AtomicExpr::AO__atomic_min_fetch:
      PostOpMinMax = true;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_min:
    case AtomicExpr::AO__atomic_fetch_min:
    case AtomicExpr::AO__opencl_atomic_fetch_min:
      LibCallName = E->getValueType()->isSignedIntegerType()
                        ? "__atomic_fetch_min"
                        : "__atomic_fetch_umin";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        LoweredMemTy, E->getExprLoc(), sizeChars);
      break;
    case AtomicExpr::AO__atomic_max_fetch:
      PostOpMinMax = true;
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__c11_atomic_fetch_max:
    case AtomicExpr::AO__atomic_fetch_max:
    case AtomicExpr::AO__opencl_atomic_fetch_max:
      LibCallName = E->getValueType()->isSignedIntegerType()
                        ? "__atomic_fetch_max"
                        : "__atomic_fetch_umax";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        LoweredMemTy, E->getExprLoc(), sizeChars);
      break;
    // T __atomic_nand_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_nand_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_nand_fetch:
      PostOp = llvm::Instruction::And; // the NOT is special cased below
      LLVM_FALLTHROUGH;
    case AtomicExpr::AO__atomic_fetch_nand:
      LibCallName = "__atomic_fetch_nand";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        MemTy, E->getExprLoc(), sizeChars);
      break;
    }

    if (E->isOpenCL()) {
      LibCallName = std::string("__opencl") +
                    StringRef(LibCallName).drop_front(1).str();
    }
    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
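    // e.g. "__atomic_fetch_add" on a 4-byte operand becomes
    // "__atomic_fetch_add_4", and under OpenCL "__atomic_load" becomes
    // "__opencl_atomic_load".
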
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        // The function returns an appropriately sized integer type.
        RetTy = getContext().getIntTypeForBitwidth(
            getContext().toBits(sizeChars), /*Signed=*/false);
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
                 getContext().VoidPtrTy);
      }
    }
    // Order is always the last parameter.
    Args.add(RValue::get(Order),
             getContext().IntTy);
    if (E->isOpenCL())
      Args.add(RValue::get(Scope), getContext().IntTy);

    // PostOp is only needed for the atomic_*_fetch operations, and
    // thus is only needed for and implemented in the
    // UseOptimizedLibcall codepath.
    assert(UseOptimizedLibcall || (!PostOp && !PostOpMinMax));

    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
    // The value is returned directly from the libcall.
    if (E->isCmpXChg())
      return Res;

    // The value is returned directly for optimized libcalls but the expr
    // provided an out-param.
    if (UseOptimizedLibcall && Res.getScalarVal()) {
      llvm::Value *ResVal = Res.getScalarVal();
      if (PostOpMinMax) {
        llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
        ResVal = EmitPostAtomicMinMax(Builder, E->getOp(),
                                      E->getValueType()->isSignedIntegerType(),
                                      ResVal, LoadVal1);
      } else if (PostOp) {
        llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
        ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
      }
      if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
        ResVal = Builder.CreateNot(ResVal);

      Builder.CreateStore(
          ResVal,
          Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
    }

    if (RValTy->isVoidType())
      return RValue::get(nullptr);

    return convertTempToRValue(
        Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
        RValTy, E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  if (isa<llvm::ConstantInt>(Order)) {
    auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    // We should not ever get to a case where the ordering isn't a valid C ABI
    // value, but it's hard to enforce that in general.
    if (llvm::isValidAtomicOrderingCABI(ord))
      switch ((llvm::AtomicOrderingCABI)ord) {
      case llvm::AtomicOrderingCABI::relaxed:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Monotonic, Scope);
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        if (IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Acquire, Scope);
        break;
      case llvm::AtomicOrderingCABI::release:
        if (IsLoad)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Release, Scope);
        break;
      case llvm::AtomicOrderingCABI::acq_rel:
        if (IsLoad || IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::AcquireRelease, Scope);
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::SequentiallyConsistent, Scope);
        break;
      }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);

    return convertTempToRValue(
        Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
                                        Dest.getAddressSpace())),
        RValTy, E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::Monotonic, Scope);
  Builder.CreateBr(ContBB);
  if (AcquireBB) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                AcquireBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
                AcquireBB);
  }
  if (ReleaseBB) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);

  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
  return convertTempToRValue(
      Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
                                      Dest.getAddressSpace())),
      RValTy, E->getExprLoc());
}

Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
  unsigned addrspace =
      cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
  llvm::Type *Ty = Addr.getElementType();
  uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
  if (SourceSizeInBits != AtomicSizeInBits) {
    Address Tmp = CreateTempAlloca();
    CGF.Builder.CreateMemCpy(Tmp, Addr,
                             std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
    Addr = Tmp;
  }

  return emitCastToAtomicIntPointer(Addr);
}

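// Example (a sketch): a 12-byte value operated on through a 16-byte (i128)
// atomic slot is first copied into a freshly allocated 16-byte temporary;
// the memcpy length is the smaller of the two sizes, so only the bytes both
// sides actually share are transferred.
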
RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
                                             AggValueSlot resultSlot,
                                             SourceLocation loc,
                                             bool asValue) const {
  if (LVal.isSimple()) {
    if (EvaluationKind == TEK_Aggregate)
      return resultSlot.asRValue();

    // Drill into the padding structure if we have one.
    if (hasPadding())
      addr = CGF.Builder.CreateStructGEP(addr, 0);

    // Otherwise, just convert the temporary to an r-value using the
    // normal conversion routine.
    return CGF.convertTempToRValue(addr, getValueType(), loc);
  }
  if (!asValue)
    // Get RValue from temp memory as atomic for non-simple lvalues.
    return RValue::get(CGF.Builder.CreateLoad(addr));
  if (LVal.isBitField())
    return CGF.EmitLoadOfBitfieldLValue(
        LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
                             LVal.getBaseInfo(), TBAAAccessInfo()), loc);
  if (LVal.isVectorElt())
    return CGF.EmitLoadOfLValue(
        LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
                              LVal.getBaseInfo(), TBAAAccessInfo()), loc);
  assert(LVal.isExtVectorElt());
  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
      addr, LVal.getExtVectorElts(), LVal.getType(),
      LVal.getBaseInfo(), TBAAAccessInfo()));
}

RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                             AggValueSlot ResultSlot,
                                             SourceLocation Loc,
                                             bool AsValue) const {
  // Try to avoid a round trip through memory in the easy cases.
  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
  if (getEvaluationKind() == TEK_Scalar &&
      (((!LVal.isBitField() ||
         LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
        !hasPadding()) ||
       !AsValue)) {
    auto *ValTy = AsValue
                      ? CGF.ConvertTypeForMem(ValueTy)
                      : getAtomicAddress().getType()->getPointerElementType();
    if (ValTy->isIntegerTy()) {
      assert(IntVal->getType() == ValTy && "Different integer types.");
      return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
    } else if (ValTy->isPointerTy())
      return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
    else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  Address Temp = Address::invalid();
  bool TempIsVolatile = false;
  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
    assert(!ResultSlot.isIgnored());
    Temp = ResultSlot.getAddress();
    TempIsVolatile = ResultSlot.isVolatile();
  } else {
    Temp = CreateTempAlloca();
  }

  // Slam the integer into the temporary.
  Address CastTemp = emitCastToAtomicIntPointer(Temp);
  CGF.Builder.CreateStore(IntVal, CastTemp)
      ->setVolatile(TempIsVolatile);

  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
}

void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                                       llvm::AtomicOrdering AO, bool) {
  // void __atomic_load(size_t size, void *mem, void *return, int order);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
           CGF.getContext().VoidPtrTy);
  Args.add(
      RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
      CGF.getContext().IntTy);
  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
}

llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile) {
  // Okay, we're doing this natively.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);

  // Other decoration.
  if (IsVolatile)
    Load->setVolatile(true);
  CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
  return Load;
}

/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// such an operation can be performed without a libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  if (!CGM.getCodeGenOpts().MSVolatile) return false;
  AtomicInfo AI(*this, LV);
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  // MSVC doesn't seem to do this for types wider than a pointer.
  if (getContext().getTypeSize(LV.getType()) >
      getContext().getTypeSize(getContext().getIntPtrType()))
    return false;
  return IsVolatile && AtomicIsInline;
}

RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  bool IsVolatile = LV.isVolatileQualified();
  if (LV.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Acquire;
    IsVolatile = true;
  }
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
}

RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    Address TempAddr = Address::invalid();
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
      assert(getEvaluationKind() == TEK_Aggregate);
      TempAddr = ResultSlot.getAddress();
    } else
      TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);

    // Okay, turn that back into the original value or whole atomic (for
    // non-simple lvalues) type.
    return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

  // If we're ignoring an aggregate return, don't do anything.
  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
    return RValue::getAggregate(Address::invalid(), false);

  // Okay, turn that back into the original value or atomic (for non-simple
  // lvalues) type.
  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(LVal.isSimple());

  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
    LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
                                    getAtomicType());
    bool IsVolatile = rvalue.isVolatileQualified() ||
                      LVal.isVolatileQualified();
    CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
                          AggValueSlot::DoesNotOverlap, IsVolatile);
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue TempLVal = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
  }
}

/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
Address AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddress();

  // Otherwise, make a temporary and materialize into it.
  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  return TempLV.getAddress(CGF);
}

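/// Convert an r-value to an integer of the atomic width so it can feed a
/// native atomic instruction. Scalars of the right size are converted
/// directly (for example, a float would be bitcast to i32 and a pointer
/// would go through ptrtoint); anything else is spilled to memory and
/// reloaded through an integer pointer of the atomic width.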
llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
    llvm::Value *Value = RVal.getScalarVal();
    if (isa<llvm::IntegerType>(Value->getType()))
      return CGF.EmitToMemory(Value, ValueTy);
    else {
      llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
          CGF.getLLVMContext(),
          LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
      if (isa<llvm::PointerType>(Value->getType()))
        return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
      else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
        return CGF.Builder.CreateBitCast(Value, InputIntTy);
    }
  }
  // Otherwise, we need to go through memory.
  // Put the r-value in memory.
  Address Addr = materializeRValue(RVal);

  // Cast the temporary to the atomic int type and pull a value out.
  Addr = emitCastToAtomicIntPointer(Addr);
  return CGF.Builder.CreateLoad(Addr);
}

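/// Emit a native cmpxchg on the atomic's integer representation and unpack
/// the {value, success} result pair. A rough sketch of the IR for a 4-byte
/// atomic (illustrative names):
///   %pair = cmpxchg i32* %addr, i32 %expected, i32 %desired seq_cst seq_cst
///   %prev = extractvalue { i32, i1 } %pair, 0
///   %ok   = extractvalue { i32, i1 } %pair, 1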
std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic compare-and-exchange.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
                                               ExpectedVal, DesiredVal,
                                               Success, Failure);
  // Other decoration.
  Inst->setVolatile(LVal.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
  return std::make_pair(PreviousVal, SuccessFailureVal);
}

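/// Emit a call to the __atomic_compare_exchange runtime routine and return
/// the boolean success flag. At the C level the call is roughly:
///   bool ok = __atomic_compare_exchange(size, &obj, &expected, &desired,
///                                       success_order, failure_order);
/// On failure the routine writes the observed value back through the
/// expected pointer, which is what lets the CAS loops below converge
/// without issuing a separate reload.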
llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
  //                                void *desired, int success, int failure);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
           CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
           CGF.getContext().IntTy);
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
           CGF.getContext().IntTy);
  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
                                              CGF.getContext().BoolTy, Args);

  return SuccessFailureRVal.getScalarVal();
}

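/// Emit a compare-and-exchange, returning the previous value as an RValue
/// together with the success flag. The failure ordering is first clamped to
/// the strongest ordering permitted by the success ordering, then the
/// operation is dispatched to the libcall or the native cmpxchg path.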
std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  if (isStrongerThan(Failure, Success))
    // Don't assert on undefined behavior "failure argument shall be no
    // stronger than the success argument".
    Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);

  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    // Produce a source address.
    Address ExpectedAddr = materializeRValue(Expected);
    Address DesiredAddr = materializeRValue(Desired);
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                                 DesiredAddr.getPointer(),
                                                 Success, Failure);
    return std::make_pair(
        convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
                                  SourceLocation(), /*AsValue=*/false),
        Res);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedVal = convertRValueToInt(Expected);
  auto *DesiredVal = convertRValueToInt(Desired);
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
                                         Failure, IsWeak);
  return std::make_pair(
      ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
                                SourceLocation(), /*AsValue=*/false),
      Res.second);
}

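/// Apply \p UpdateOp to the previously loaded value \p OldRVal and store the
/// result into \p DesiredAddr. For non-simple atomic l-values (bit-fields,
/// vector elements, ext-vector elements) the update is applied to the
/// projected element within the materialized atomic value.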
static void
EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
                      const llvm::function_ref<RValue(RValue)> &UpdateOp,
                      Address DesiredAddr) {
  RValue UpRVal;
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  if (AtomicLVal.isSimple()) {
    UpRVal = OldRVal;
    DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
  } else {
    // Build new lvalue for temp address.
    Address Ptr = Atomics.materializeRValue(OldRVal);
    LValue UpdateLVal;
    if (AtomicLVal.isBitField()) {
      UpdateLVal =
          LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(),
                               AtomicLVal.getBaseInfo(),
                               AtomicLVal.getTBAAInfo());
      DesiredLVal =
          LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                               AtomicLVal.getTBAAInfo());
    } else if (AtomicLVal.isVectorElt()) {
      UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
                                         AtomicLVal.getType(),
                                         AtomicLVal.getBaseInfo(),
                                         AtomicLVal.getTBAAInfo());
      DesiredLVal = LValue::MakeVectorElt(
          DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
          AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
    } else {
      assert(AtomicLVal.isExtVectorElt());
      UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
                                            AtomicLVal.getType(),
                                            AtomicLVal.getBaseInfo(),
                                            AtomicLVal.getTBAAInfo());
      DesiredLVal = LValue::MakeExtVectorElt(
          DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
          AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
    }
    UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
  }
  // Store new value in the corresponding memory area.
  RValue NewRVal = UpdateOp(UpRVal);
  if (NewRVal.isScalar()) {
    CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
  } else {
    assert(NewRVal.isComplex());
    CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
                           /*isInit=*/false);
  }
}

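/// Emit a read-modify-write loop built on the __atomic_* runtime routines.
/// A rough sketch of the emitted control flow, in pseudocode:
///   __atomic_load(size, &obj, &expected, AO);
///   atomic_cont:
///     desired = UpdateOp(expected);
///     ok = __atomic_compare_exchange(size, &obj, &expected, &desired,
///                                    AO, Failure);
///     br ok, atomic_exit, atomic_cont
///   atomic_exit: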
void AtomicInfo::EmitAtomicUpdateLibcall(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
                                           AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       DesiredAddr.getPointer(),
                                       AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

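/// Emit a native compare-and-swap loop for an atomic read-modify-write.
/// A sketch of the emitted control flow (not the literal IR):
///   %old = load atomic ... AO
///   atomic_cont:
///     %phi = phi [ %old, entry ], [ %prev, atomic_cont ]
///     ; apply UpdateOp to %phi, producing %desired
///     %pair = cmpxchg ..., %phi, %desired AO Failure
///     %prev = extractvalue %pair, 0
///     %ok   = extractvalue %pair, 1
///     br %ok, atomic_exit, atomic_cont
///   atomic_exit: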
void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // For non-simple lvalues perform compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write new value using cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

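/// Store the fixed r-value \p UpdateRVal into \p DesiredAddr, projecting it
/// into the bit-field, vector-element, or ext-vector-element position of the
/// atomic l-value; used by the CAS-based store paths below, where a plain
/// store to a non-simple atomic l-value must go through compare-and-swap.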
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue UpdateRVal, Address DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  // Build new lvalue for temp address.
  if (AtomicLVal.isBitField()) {
    DesiredLVal =
        LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                             AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                             AtomicLVal.getTBAAInfo());
  } else if (AtomicLVal.isVectorElt()) {
    DesiredLVal =
        LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
                              AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                              AtomicLVal.getTBAAInfo());
  } else {
    assert(AtomicLVal.isExtVectorElt());
    DesiredLVal = LValue::MakeExtVectorElt(
        DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
        AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
  }
  // Store new value in the corresponding memory area.
  assert(UpdateRVal.isScalar());
  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
}

void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       DesiredAddr.getPointer(),
                                       AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
  // For non-simple lvalues perform compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write new value using cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}

void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}

void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}

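/// Emit an atomic store, deriving the memory ordering from the l-value:
/// stores to _Atomic types use sequentially consistent ordering, while
/// /volatile:ms volatile stores are lowered as release stores.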
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
                                      bool isInit) {
  bool IsVolatile = lvalue.isVolatileQualified();
  llvm::AtomicOrdering AO;
  if (lvalue.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Release;
    IsVolatile = true;
  }
  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddress().getElementType() ==
             dest.getAddress(*this).getElementType());

  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

  // If this is an initialization, just put the value there normally.
  if (LVal.isSimple()) {
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      // Produce a source address.
      Address srcAddr = atomics.materializeRValue(rvalue);

      // void __atomic_store(size_t size, void *mem, void *val, int order)
      CallArgList args;
      args.add(RValue::get(atomics.getAtomicSizeValue()),
               getContext().getSizeType());
      args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
               getContext().VoidPtrTy);
      args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
               getContext().VoidPtrTy);
      args.add(
          RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
          getContext().IntTy);
      emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
      return;
    }

    // Okay, we're doing this natively.
    llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    Address addr =
        atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
    intValue = Builder.CreateIntCast(
        intValue, addr.getElementType(), /*isSigned=*/false);
    llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

    // Initializations don't need to be atomic.
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    if (IsVolatile)
      store->setVolatile(true);
    CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
    return;
  }

  // Emit simple atomic update operation.
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}

/// Emit a compare-and-exchange op for atomic type.
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddress().getElementType() ==
             Obj.getAddress(*this).getElementType());
  assert(!Desired.isAggregate() ||
         Desired.getAggregateAddress().getElementType() ==
             Obj.getAddress(*this).getElementType());
  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
                                           IsWeak);
}

void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}

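/// Emit the initialization of an atomic object. Initialization does not need
/// to be atomic, so e.g. '_Atomic(int) x = 42;' becomes an ordinary store of
/// 42, plus any zeroing of padding required to widen the value to the
/// atomic width.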
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value));
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(
        dest, *this, AggValueSlot::IsNotDestructed,
        AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
        AggValueSlot::DoesNotOverlap,
        Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}