//==-- AArch64ExpandPseudoInsts.cpp - Expand pseudo instructions --*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that expands pseudo instructions into target
// instructions to allow proper scheduling and other late optimizations. This
// pass should be run after register allocation but before the post-regalloc
// scheduling pass.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64AddressingModes.h"
#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

#define AARCH64_EXPAND_PSEUDO_NAME "AArch64 pseudo instruction expansion pass"
namespace {
class AArch64ExpandPseudo : public MachineFunctionPass {
public:
  static char ID;
  AArch64ExpandPseudo() : MachineFunctionPass(ID) {
    initializeAArch64ExpandPseudoPass(*PassRegistry::getPassRegistry());
  }

  const AArch64InstrInfo *TII;

  bool runOnMachineFunction(MachineFunction &Fn) override;

  StringRef getPassName() const override { return AARCH64_EXPAND_PSEUDO_NAME; }

private:
  bool expandMBB(MachineBasicBlock &MBB);
  bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                MachineBasicBlock::iterator &NextMBBI);
  bool expandMOVImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                    unsigned BitSize);

  bool expandCMP_SWAP(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                      unsigned LdarOp, unsigned StlrOp, unsigned CmpOp,
                      unsigned ExtendImm, unsigned ZeroReg,
                      MachineBasicBlock::iterator &NextMBBI);
  bool expandCMP_SWAP_128(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI,
                          MachineBasicBlock::iterator &NextMBBI);
};
char AArch64ExpandPseudo::ID = 0;
} // end anonymous namespace

INITIALIZE_PASS(AArch64ExpandPseudo, "aarch64-expand-pseudo",
                AARCH64_EXPAND_PSEUDO_NAME, false, false)
/// \brief Transfer implicit operands on the pseudo instruction to the
/// instructions created from the expansion.
static void transferImpOps(MachineInstr &OldMI, MachineInstrBuilder &UseMI,
                           MachineInstrBuilder &DefMI) {
  const MCInstrDesc &Desc = OldMI.getDesc();
  for (unsigned i = Desc.getNumOperands(), e = OldMI.getNumOperands(); i != e;
       ++i) {
    const MachineOperand &MO = OldMI.getOperand(i);
    assert(MO.isReg() && MO.getReg());
    if (MO.isUse())
      UseMI.addOperand(MO);
    else
      DefMI.addOperand(MO);
  }
}
/// \brief Helper function which extracts the specified 16-bit chunk from a
/// 64-bit value.
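/// Chunk 0 covers bits [15:0] and chunk 3 covers bits [63:48]; e.g. chunk 1 of
/// 0x0123456789ABCDEF is 0x89AB.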
static uint64_t getChunk(uint64_t Imm, unsigned ChunkIdx) {
  assert(ChunkIdx < 4 && "Out of range chunk index specified!");

  return (Imm >> (ChunkIdx * 16)) & 0xFFFF;
}
/// \brief Helper function which replicates a 16-bit chunk within a 64-bit
/// value. Indices correspond to element numbers in a v4i16.
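///
/// For example, replicateChunk(0x00AA00BB00CC00DD, 0, 2) copies chunk 0 into
/// chunk 2 and yields 0x00AA00DD00CC00DD.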
static uint64_t replicateChunk(uint64_t Imm, unsigned FromIdx, unsigned ToIdx) {
  assert((FromIdx < 4) && (ToIdx < 4) && "Out of range chunk index specified!");
  const unsigned ShiftAmt = ToIdx * 16;

  // Replicate the source chunk to the destination position.
  const uint64_t Chunk = getChunk(Imm, FromIdx) << ShiftAmt;
  // Clear the destination chunk.
  Imm &= ~(0xFFFFLL << ShiftAmt);
  // Insert the replicated chunk.
  return Imm | Chunk;
}
/// \brief Helper function which tries to materialize a 64-bit value with an
/// ORR + MOVK instruction sequence.
static bool tryOrrMovk(uint64_t UImm, uint64_t OrrImm, MachineInstr &MI,
                       MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator &MBBI,
                       const AArch64InstrInfo *TII, unsigned ChunkIdx) {
  assert(ChunkIdx < 4 && "Out of range chunk index specified!");
  const unsigned ShiftAmt = ChunkIdx * 16;

  uint64_t Encoding;
  if (AArch64_AM::processLogicalImmediate(OrrImm, 64, Encoding)) {
    // Create the ORR-immediate instruction.
    MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXri))
            .addOperand(MI.getOperand(0))
            .addReg(AArch64::XZR)
            .addImm(Encoding);

    // Create the MOVK instruction.
    const unsigned Imm16 = getChunk(UImm, ChunkIdx);
    const unsigned DstReg = MI.getOperand(0).getReg();
    const bool DstIsDead = MI.getOperand(0).isDead();
    MachineInstrBuilder MIB1 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
            .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
            .addReg(DstReg)
            .addImm(Imm16)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt));

    transferImpOps(MI, MIB, MIB1);
    MI.eraseFromParent();
    return true;
  }

  return false;
}
/// \brief Check whether the given 16-bit chunk replicated to full 64-bit width
/// can be materialized with an ORR instruction.
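///
/// For example, the chunk 0x00FF replicates to 0x00FF00FF00FF00FF, which is a
/// valid logical immediate, while the replication of 0x1234 is not.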
static bool canUseOrr(uint64_t Chunk, uint64_t &Encoding) {
  Chunk = (Chunk << 48) | (Chunk << 32) | (Chunk << 16) | Chunk;

  return AArch64_AM::processLogicalImmediate(Chunk, 64, Encoding);
}
/// \brief Check for identical 16-bit chunks within the constant and if so
/// materialize them with a single ORR instruction. The remaining one or two
/// 16-bit chunks will be materialized with MOVK instructions.
///
/// This allows us to materialize constants like |A|B|A|A| or |A|B|C|A| (order
/// of the chunks doesn't matter), assuming |A|A|A|A| can be materialized with
/// an ORR instruction.
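///
/// For example, 0x00FF123400FF00FF contains three copies of the chunk 0x00FF
/// and becomes:
///
///   orr  x0, xzr, #0x00ff00ff00ff00ff
///   movk x0, #0x1234, lsl #32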
static bool tryToreplicateChunks(uint64_t UImm, MachineInstr &MI,
                                 MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator &MBBI,
                                 const AArch64InstrInfo *TII) {
  typedef DenseMap<uint64_t, unsigned> CountMap;
  CountMap Counts;

  // Scan the constant and count how often every chunk occurs.
  for (unsigned Idx = 0; Idx < 4; ++Idx)
    ++Counts[getChunk(UImm, Idx)];

  // Traverse the chunks to find one which occurs more than once.
  for (CountMap::const_iterator Chunk = Counts.begin(), End = Counts.end();
       Chunk != End; ++Chunk) {
    const uint64_t ChunkVal = Chunk->first;
    const unsigned Count = Chunk->second;

    uint64_t Encoding = 0;

    // We are looking for chunks which have two or three instances and can be
    // materialized with an ORR instruction.
    if ((Count != 2 && Count != 3) || !canUseOrr(ChunkVal, Encoding))
      continue;

    const bool CountThree = Count == 3;
    // Create the ORR-immediate instruction.
    MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXri))
            .addOperand(MI.getOperand(0))
            .addReg(AArch64::XZR)
            .addImm(Encoding);

    const unsigned DstReg = MI.getOperand(0).getReg();
    const bool DstIsDead = MI.getOperand(0).isDead();

    unsigned ShiftAmt = 0;
    uint64_t Imm16 = 0;
    // Find the first chunk not materialized with the ORR instruction.
    for (; ShiftAmt < 64; ShiftAmt += 16) {
      Imm16 = (UImm >> ShiftAmt) & 0xFFFF;

      if (Imm16 != ChunkVal)
        break;
    }

    // Create the first MOVK instruction.
    MachineInstrBuilder MIB1 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
            .addReg(DstReg,
                    RegState::Define | getDeadRegState(DstIsDead && CountThree))
            .addReg(DstReg)
            .addImm(Imm16)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt));

    // In case we have three instances the whole constant is now materialized
    // and we can exit.
    if (CountThree) {
      transferImpOps(MI, MIB, MIB1);
      MI.eraseFromParent();
      return true;
    }

    // Find the remaining chunk which needs to be materialized.
    for (ShiftAmt += 16; ShiftAmt < 64; ShiftAmt += 16) {
      Imm16 = (UImm >> ShiftAmt) & 0xFFFF;

      if (Imm16 != ChunkVal)
        break;
    }

    // Create the second MOVK instruction.
    MachineInstrBuilder MIB2 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
            .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
            .addReg(DstReg)
            .addImm(Imm16)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt));

    transferImpOps(MI, MIB, MIB2);
    MI.eraseFromParent();
    return true;
  }

  return false;
}
/// \brief Check whether this chunk matches the pattern '1...0...'. This pattern
/// starts a contiguous sequence of ones if we look at the bits from the LSB
/// towards the MSB.
static bool isStartChunk(uint64_t Chunk) {
  if (Chunk == 0 || Chunk == UINT64_MAX)
    return false;

  return isMask_64(~Chunk);
}

/// \brief Check whether this chunk matches the pattern '0...1...'. This pattern
/// ends a contiguous sequence of ones if we look at the bits from the LSB
/// towards the MSB.
static bool isEndChunk(uint64_t Chunk) {
  if (Chunk == 0 || Chunk == UINT64_MAX)
    return false;

  return isMask_64(Chunk);
}

/// \brief Clear or set all bits in the chunk at the given index.
static uint64_t updateImm(uint64_t Imm, unsigned Idx, bool Clear) {
  const uint64_t Mask = 0xFFFF;

  if (Clear)
    // Clear chunk in the immediate.
    Imm &= ~(Mask << (Idx * 16));
  else
    // Set all bits in the immediate for the particular chunk.
    Imm |= Mask << (Idx * 16);

  return Imm;
}
/// \brief Check whether the constant contains a sequence of contiguous ones,
/// which might be interrupted by one or two chunks. If so, materialize the
/// sequence of contiguous ones with an ORR instruction.
/// Materialize the chunks which are either interrupting the sequence or outside
/// of the sequence with a MOVK instruction.
///
/// Assume S is a chunk which starts the sequence (1...0...) and E is a chunk
/// which ends the sequence (0...1...). Then we are looking for constants which
/// contain at least one S and one E chunk.
/// E.g. |E|A|B|S|, |A|E|B|S| or |A|B|E|S|.
///
/// We are also looking for constants like |S|A|B|E| where the contiguous
/// sequence of ones wraps around the MSB into the LSB.
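///
/// For example, 0x000FFFFF1234FF00 has a contiguous sequence of ones from bit
/// 8 up to bit 51, interrupted by the chunk 0x1234, and becomes:
///
///   orr  x0, xzr, #0x000fffffffffff00
///   movk x0, #0x1234, lsl #16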
static bool trySequenceOfOnes(uint64_t UImm, MachineInstr &MI,
                              MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator &MBBI,
                              const AArch64InstrInfo *TII) {
  const int NotSet = -1;
  const uint64_t Mask = 0xFFFF;

  int StartIdx = NotSet;
  int EndIdx = NotSet;
  // Try to find the chunks which start/end a contiguous sequence of ones.
  for (int Idx = 0; Idx < 4; ++Idx) {
    int64_t Chunk = getChunk(UImm, Idx);
    // Sign extend the 16-bit chunk to 64-bit.
    Chunk = (Chunk << 48) >> 48;

    if (isStartChunk(Chunk))
      StartIdx = Idx;
    else if (isEndChunk(Chunk))
      EndIdx = Idx;
  }

  // Early exit in case we can't find a start/end chunk.
  if (StartIdx == NotSet || EndIdx == NotSet)
    return false;

  // Outside of the contiguous sequence of ones everything needs to be zero.
  uint64_t Outside = 0;
  // Chunks between the start and end chunk need to have all their bits set.
  uint64_t Inside = Mask;

  // If our contiguous sequence of ones wraps around from the MSB into the LSB,
  // just swap indices and pretend we are materializing a contiguous sequence
  // of zeros surrounded by a contiguous sequence of ones.
  if (StartIdx > EndIdx) {
    std::swap(StartIdx, EndIdx);
    std::swap(Outside, Inside);
  }

  uint64_t OrrImm = UImm;
  int FirstMovkIdx = NotSet;
  int SecondMovkIdx = NotSet;

  // Find out which chunks we need to patch up to obtain a contiguous sequence
  // of ones.
  for (int Idx = 0; Idx < 4; ++Idx) {
    const uint64_t Chunk = getChunk(UImm, Idx);

    // Check whether we are looking at a chunk which is not part of the
    // contiguous sequence of ones.
    if ((Idx < StartIdx || EndIdx < Idx) && Chunk != Outside) {
      OrrImm = updateImm(OrrImm, Idx, Outside == 0);

      // Remember the index we need to patch.
      if (FirstMovkIdx == NotSet)
        FirstMovkIdx = Idx;
      else
        SecondMovkIdx = Idx;

      // Check whether we are looking at a chunk which is part of the
      // contiguous sequence of ones.
    } else if (Idx > StartIdx && Idx < EndIdx && Chunk != Inside) {
      OrrImm = updateImm(OrrImm, Idx, Inside != Mask);

      // Remember the index we need to patch.
      if (FirstMovkIdx == NotSet)
        FirstMovkIdx = Idx;
      else
        SecondMovkIdx = Idx;
    }
  }
  assert(FirstMovkIdx != NotSet && "Constant materializable with single ORR!");

  // Create the ORR-immediate instruction.
  uint64_t Encoding = 0;
  AArch64_AM::processLogicalImmediate(OrrImm, 64, Encoding);
  MachineInstrBuilder MIB =
      BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXri))
          .addOperand(MI.getOperand(0))
          .addReg(AArch64::XZR)
          .addImm(Encoding);

  const unsigned DstReg = MI.getOperand(0).getReg();
  const bool DstIsDead = MI.getOperand(0).isDead();

  const bool SingleMovk = SecondMovkIdx == NotSet;
  // Create the first MOVK instruction.
  MachineInstrBuilder MIB1 =
      BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
          .addReg(DstReg,
                  RegState::Define | getDeadRegState(DstIsDead && SingleMovk))
          .addReg(DstReg)
          .addImm(getChunk(UImm, FirstMovkIdx))
          .addImm(
              AArch64_AM::getShifterImm(AArch64_AM::LSL, FirstMovkIdx * 16));

  // Early exit in case we only need to emit a single MOVK instruction.
  if (SingleMovk) {
    transferImpOps(MI, MIB, MIB1);
    MI.eraseFromParent();
    return true;
  }

  // Create the second MOVK instruction.
  MachineInstrBuilder MIB2 =
      BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::MOVKXi))
          .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
          .addReg(DstReg)
          .addImm(getChunk(UImm, SecondMovkIdx))
          .addImm(
              AArch64_AM::getShifterImm(AArch64_AM::LSL, SecondMovkIdx * 16));

  transferImpOps(MI, MIB, MIB2);
  MI.eraseFromParent();
  return true;
}
/// \brief Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more
/// real move-immediate instructions to synthesize the immediate.
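///
/// The expansion first tries a single ORR-immediate; for 64-bit constants it
/// then tries an ORR followed by one or two MOVKs (tryOrrMovk,
/// tryToreplicateChunks, trySequenceOfOnes) and finally falls back to a
/// MOVZ/MOVN followed by up to three MOVKs.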
bool AArch64ExpandPseudo::expandMOVImm(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       unsigned BitSize) {
  MachineInstr &MI = *MBBI;
  unsigned DstReg = MI.getOperand(0).getReg();
  uint64_t Imm = MI.getOperand(1).getImm();
  const unsigned Mask = 0xFFFF;

  if (DstReg == AArch64::XZR || DstReg == AArch64::WZR) {
    // Useless def, and we don't want to risk creating an invalid ORR (which
    // would really write to sp).
    MI.eraseFromParent();
    return true;
  }

  // Try a MOVI instruction (aka ORR-immediate with the zero register).
  uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
  uint64_t Encoding;
  if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
    unsigned Opc = (BitSize == 32 ? AArch64::ORRWri : AArch64::ORRXri);
    MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc))
            .addOperand(MI.getOperand(0))
            .addReg(BitSize == 32 ? AArch64::WZR : AArch64::XZR)
            .addImm(Encoding);
    transferImpOps(MI, MIB, MIB);
    MI.eraseFromParent();
    return true;
  }

  // Scan the immediate and count the number of 16-bit chunks which are either
  // all ones or all zeros.
  unsigned OneChunks = 0;
  unsigned ZeroChunks = 0;
  for (unsigned Shift = 0; Shift < BitSize; Shift += 16) {
    const unsigned Chunk = (Imm >> Shift) & Mask;
    if (Chunk == Mask)
      OneChunks++;
    else if (Chunk == 0)
      ZeroChunks++;
  }
  // Since we can't materialize the constant with a single ORR instruction,
  // let's see whether we can materialize 3/4 of the constant with an ORR
  // instruction and use an additional MOVK instruction to materialize the
  // remaining 1/4.
  //
  // We are looking for constants with a pattern like: |A|X|B|X| or |X|A|X|B|.
  //
  // E.g. assuming |A|X|A|X| is a pattern which can be materialized with ORR,
  // we would create the following instruction sequence:
  //
  // ORR x0, xzr, |A|X|A|X|
  // MOVK x0, |B|, LSL #16
  //
  // Only look at 64-bit constants which can't be materialized with a single
  // instruction e.g. which have less than either three all zero or all one
  // chunks.
  //
  // Ignore 32-bit constants here, they always can be materialized with a
  // MOVZ/MOVN + MOVK pair. Since the 32-bit constant can't be materialized
  // with a single ORR, the best sequence we can achieve is an ORR + MOVK pair.
  // Thus we fall back to the default code below which in the best case creates
  // a single MOVZ/MOVN instruction (in case one chunk is all zero or all one).
  if (BitSize == 64 && OneChunks < 3 && ZeroChunks < 3) {
    // If we interpret the 64-bit constant as a v4i16, are elements 0 and 2
    // identical?
    if (getChunk(UImm, 0) == getChunk(UImm, 2)) {
      // See if we can come up with a constant which can be materialized with
      // ORR-immediate by replicating element 3 into element 1.
      uint64_t OrrImm = replicateChunk(UImm, 3, 1);
      if (tryOrrMovk(UImm, OrrImm, MI, MBB, MBBI, TII, 1))
        return true;

      // See if we can come up with a constant which can be materialized with
      // ORR-immediate by replicating element 1 into element 3.
      OrrImm = replicateChunk(UImm, 1, 3);
      if (tryOrrMovk(UImm, OrrImm, MI, MBB, MBBI, TII, 3))
        return true;

      // If we interpret the 64-bit constant as a v4i16, are elements 1 and 3
      // identical?
    } else if (getChunk(UImm, 1) == getChunk(UImm, 3)) {
      // See if we can come up with a constant which can be materialized with
      // ORR-immediate by replicating element 2 into element 0.
      uint64_t OrrImm = replicateChunk(UImm, 2, 0);
      if (tryOrrMovk(UImm, OrrImm, MI, MBB, MBBI, TII, 0))
        return true;

      // See if we can come up with a constant which can be materialized with
      // ORR-immediate by replicating element 0 into element 2.
      OrrImm = replicateChunk(UImm, 0, 2);
      if (tryOrrMovk(UImm, OrrImm, MI, MBB, MBBI, TII, 2))
        return true;
    }
  }

  // Check for identical 16-bit chunks within the constant and if so materialize
  // them with a single ORR instruction. The remaining one or two 16-bit chunks
  // will be materialized with MOVK instructions.
  if (BitSize == 64 && tryToreplicateChunks(UImm, MI, MBB, MBBI, TII))
    return true;

  // Check whether the constant contains a sequence of contiguous ones, which
  // might be interrupted by one or two chunks. If so, materialize the sequence
  // of contiguous ones with an ORR instruction. Materialize the chunks which
  // are either interrupting the sequence or outside of the sequence with a
  // MOVK instruction.
  if (BitSize == 64 && trySequenceOfOnes(UImm, MI, MBB, MBBI, TII))
    return true;
  // Use a MOVZ or MOVN instruction to set the high bits, followed by one or
  // more MOVK instructions to insert additional 16-bit portions into the
  // lower bits.
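  // For example, 0x0012000000003456 becomes:
  //
  //   movz x0, #0x12, lsl #48
  //   movk x0, #0x3456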
  bool isNeg = false;

  // Use MOVN to materialize the high bits if we have more all one chunks
  // than all zero chunks.
  if (OneChunks > ZeroChunks) {
    isNeg = true;
    Imm = ~Imm;
  }

  unsigned FirstOpc;
  if (BitSize == 32) {
    Imm &= (1LL << 32) - 1;
    FirstOpc = (isNeg ? AArch64::MOVNWi : AArch64::MOVZWi);
  } else {
    FirstOpc = (isNeg ? AArch64::MOVNXi : AArch64::MOVZXi);
  }
  unsigned Shift = 0;     // LSL amount for high bits with MOVZ/MOVN
  unsigned LastShift = 0; // LSL amount for last MOVK
  if (Imm != 0) {
    unsigned LZ = countLeadingZeros(Imm);
    unsigned TZ = countTrailingZeros(Imm);
    Shift = ((63 - LZ) / 16) * 16;
    LastShift = (TZ / 16) * 16;
  }
  unsigned Imm16 = (Imm >> Shift) & Mask;
  bool DstIsDead = MI.getOperand(0).isDead();
  MachineInstrBuilder MIB1 =
      BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(FirstOpc))
          .addReg(DstReg, RegState::Define |
                              getDeadRegState(DstIsDead && Shift == LastShift))
          .addImm(Imm16)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift));

  // If a MOVN was used for the high bits of a negative value, flip the rest
  // of the bits back for use with MOVK.
  if (isNeg)
    Imm = ~Imm;

  if (Shift == LastShift) {
    transferImpOps(MI, MIB1, MIB1);
    MI.eraseFromParent();
    return true;
  }

  MachineInstrBuilder MIB2;
  unsigned Opc = (BitSize == 32 ? AArch64::MOVKWi : AArch64::MOVKXi);
  while (Shift != LastShift) {
    Shift -= 16;
    Imm16 = (Imm >> Shift) & Mask;
    if (Imm16 == (isNeg ? Mask : 0))
      continue; // This 16-bit portion is already set correctly.
    MIB2 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc))
               .addReg(DstReg,
                       RegState::Define |
                           getDeadRegState(DstIsDead && Shift == LastShift))
               .addReg(DstReg)
               .addImm(Imm16)
               .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, Shift));
  }

  transferImpOps(MI, MIB1, MIB2);
  MI.eraseFromParent();
  return true;
}
static void addPostLoopLiveIns(MachineBasicBlock *MBB, LivePhysRegs &LiveRegs) {
  for (auto I = LiveRegs.begin(); I != LiveRegs.end(); ++I)
    MBB->addLiveIn(*I);
}
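/// \brief Expand a CMP_SWAP pseudo into a load-exclusive/store-exclusive retry
/// loop spread over three new basic blocks (load-compare, store, done); the
/// emitted sequence is shown in the block comments below.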
bool AArch64ExpandPseudo::expandCMP_SWAP(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned LdarOp,
    unsigned StlrOp, unsigned CmpOp, unsigned ExtendImm, unsigned ZeroReg,
    MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();
  MachineOperand &Dest = MI.getOperand(0);
  unsigned StatusReg = MI.getOperand(1).getReg();
  MachineOperand &Addr = MI.getOperand(2);
  MachineOperand &Desired = MI.getOperand(3);
  MachineOperand &New = MI.getOperand(4);

  LivePhysRegs LiveRegs(&TII->getRegisterInfo());
  LiveRegs.addLiveOuts(MBB);
  for (auto I = std::prev(MBB.end()); I != MBBI; --I)
    LiveRegs.stepBackward(*I);

  MachineFunction *MF = MBB.getParent();
  auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  MF->insert(++MBB.getIterator(), LoadCmpBB);
  MF->insert(++LoadCmpBB->getIterator(), StoreBB);
  MF->insert(++StoreBB->getIterator(), DoneBB);

  // .Lloadcmp:
  //     ldaxr xDest, [xAddr]
  //     cmp xDest, xDesired
  //     b.ne .Ldone
  LoadCmpBB->addLiveIn(Addr.getReg());
  LoadCmpBB->addLiveIn(Dest.getReg());
  LoadCmpBB->addLiveIn(Desired.getReg());
  addPostLoopLiveIns(LoadCmpBB, LiveRegs);

  BuildMI(LoadCmpBB, DL, TII->get(LdarOp), Dest.getReg())
      .addReg(Addr.getReg());
  BuildMI(LoadCmpBB, DL, TII->get(CmpOp), ZeroReg)
      .addReg(Dest.getReg(), getKillRegState(Dest.isDead()))
      .addOperand(Desired)
      .addImm(ExtendImm);
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::Bcc))
      .addImm(AArch64CC::NE)
      .addMBB(DoneBB)
      .addReg(AArch64::NZCV, RegState::Implicit | RegState::Kill);
  LoadCmpBB->addSuccessor(DoneBB);
  LoadCmpBB->addSuccessor(StoreBB);

  // .Lstore:
  //     stlxr wStatus, xNew, [xAddr]
  //     cbnz wStatus, .Lloadcmp
  StoreBB->addLiveIn(Addr.getReg());
  StoreBB->addLiveIn(New.getReg());
  addPostLoopLiveIns(StoreBB, LiveRegs);

  BuildMI(StoreBB, DL, TII->get(StlrOp), StatusReg)
      .addOperand(New)
      .addOperand(Addr);
  BuildMI(StoreBB, DL, TII->get(AArch64::CBNZW))
      .addReg(StatusReg, RegState::Kill)
      .addMBB(LoadCmpBB);
  StoreBB->addSuccessor(LoadCmpBB);
  StoreBB->addSuccessor(DoneBB);

  DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
  DoneBB->transferSuccessors(&MBB);
  addPostLoopLiveIns(DoneBB, LiveRegs);

  MBB.addSuccessor(LoadCmpBB);

  NextMBBI = MBB.end();
  MI.eraseFromParent();
  return true;
}
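/// \brief Expand a 128-bit CMP_SWAP pseudo into an LDAXP/STLXP retry loop. The
/// low and high halves are compared separately and the results combined into
/// wStatus before the conditional branch.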
bool AArch64ExpandPseudo::expandCMP_SWAP_128(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();
  MachineOperand &DestLo = MI.getOperand(0);
  MachineOperand &DestHi = MI.getOperand(1);
  unsigned StatusReg = MI.getOperand(2).getReg();
  MachineOperand &Addr = MI.getOperand(3);
  MachineOperand &DesiredLo = MI.getOperand(4);
  MachineOperand &DesiredHi = MI.getOperand(5);
  MachineOperand &NewLo = MI.getOperand(6);
  MachineOperand &NewHi = MI.getOperand(7);

  LivePhysRegs LiveRegs(&TII->getRegisterInfo());
  LiveRegs.addLiveOuts(MBB);
  for (auto I = std::prev(MBB.end()); I != MBBI; --I)
    LiveRegs.stepBackward(*I);

  MachineFunction *MF = MBB.getParent();
  auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  MF->insert(++MBB.getIterator(), LoadCmpBB);
  MF->insert(++LoadCmpBB->getIterator(), StoreBB);
  MF->insert(++StoreBB->getIterator(), DoneBB);

  // .Lloadcmp:
  //     ldaxp xDestLo, xDestHi, [xAddr]
  //     cmp xDestLo, xDesiredLo
  //     sbcs xDestHi, xDesiredHi
  //     cbnz wStatus, .Ldone
  LoadCmpBB->addLiveIn(Addr.getReg());
  LoadCmpBB->addLiveIn(DestLo.getReg());
  LoadCmpBB->addLiveIn(DestHi.getReg());
  LoadCmpBB->addLiveIn(DesiredLo.getReg());
  LoadCmpBB->addLiveIn(DesiredHi.getReg());
  addPostLoopLiveIns(LoadCmpBB, LiveRegs);

  BuildMI(LoadCmpBB, DL, TII->get(AArch64::LDAXPX))
      .addReg(DestLo.getReg(), RegState::Define)
      .addReg(DestHi.getReg(), RegState::Define)
      .addReg(Addr.getReg());
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR)
      .addReg(DestLo.getReg(), getKillRegState(DestLo.isDead()))
      .addOperand(DesiredLo)
      .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::CSINCWr), StatusReg)
      .addUse(AArch64::WZR)
      .addUse(AArch64::WZR)
      .addImm(AArch64CC::EQ);
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR)
      .addReg(DestHi.getReg(), getKillRegState(DestHi.isDead()))
      .addOperand(DesiredHi)
      .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::CSINCWr), StatusReg)
      .addUse(StatusReg, RegState::Kill)
      .addUse(StatusReg, RegState::Kill)
      .addImm(AArch64CC::EQ);
  BuildMI(LoadCmpBB, DL, TII->get(AArch64::CBNZW))
      .addUse(StatusReg, RegState::Kill)
      .addMBB(DoneBB);
  LoadCmpBB->addSuccessor(DoneBB);
  LoadCmpBB->addSuccessor(StoreBB);

  // .Lstore:
  //     stlxp wStatus, xNewLo, xNewHi, [xAddr]
  //     cbnz wStatus, .Lloadcmp
  StoreBB->addLiveIn(Addr.getReg());
  StoreBB->addLiveIn(NewLo.getReg());
  StoreBB->addLiveIn(NewHi.getReg());
  addPostLoopLiveIns(StoreBB, LiveRegs);
  BuildMI(StoreBB, DL, TII->get(AArch64::STLXPX), StatusReg)
      .addOperand(NewLo)
      .addOperand(NewHi)
      .addOperand(Addr);
  BuildMI(StoreBB, DL, TII->get(AArch64::CBNZW))
      .addReg(StatusReg, RegState::Kill)
      .addMBB(LoadCmpBB);
  StoreBB->addSuccessor(LoadCmpBB);
  StoreBB->addSuccessor(DoneBB);

  DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
  DoneBB->transferSuccessors(&MBB);
  addPostLoopLiveIns(DoneBB, LiveRegs);

  MBB.addSuccessor(LoadCmpBB);

  NextMBBI = MBB.end();
  MI.eraseFromParent();
  return true;
}
/// \brief If MBBI references a pseudo instruction that should be expanded here,
/// do the expansion and return true. Otherwise return false.
bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI,
                                   MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    break;

  case AArch64::ADDWrr:
  case AArch64::SUBWrr:
  case AArch64::ADDXrr:
  case AArch64::SUBXrr:
  case AArch64::ADDSWrr:
  case AArch64::SUBSWrr:
  case AArch64::ADDSXrr:
  case AArch64::SUBSXrr:
  case AArch64::ANDWrr:
  case AArch64::ANDXrr:
  case AArch64::BICWrr:
  case AArch64::BICXrr:
  case AArch64::ANDSWrr:
  case AArch64::ANDSXrr:
  case AArch64::BICSWrr:
  case AArch64::BICSXrr:
  case AArch64::EONWrr:
  case AArch64::EONXrr:
  case AArch64::EORWrr:
  case AArch64::EORXrr:
  case AArch64::ORNWrr:
  case AArch64::ORNXrr:
  case AArch64::ORRWrr:
  case AArch64::ORRXrr: {

    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("unexpected opcode");
    case AArch64::ADDWrr:  Opcode = AArch64::ADDWrs; break;
    case AArch64::SUBWrr:  Opcode = AArch64::SUBWrs; break;
    case AArch64::ADDXrr:  Opcode = AArch64::ADDXrs; break;
    case AArch64::SUBXrr:  Opcode = AArch64::SUBXrs; break;
    case AArch64::ADDSWrr: Opcode = AArch64::ADDSWrs; break;
    case AArch64::SUBSWrr: Opcode = AArch64::SUBSWrs; break;
    case AArch64::ADDSXrr: Opcode = AArch64::ADDSXrs; break;
    case AArch64::SUBSXrr: Opcode = AArch64::SUBSXrs; break;
    case AArch64::ANDWrr:  Opcode = AArch64::ANDWrs; break;
    case AArch64::ANDXrr:  Opcode = AArch64::ANDXrs; break;
    case AArch64::BICWrr:  Opcode = AArch64::BICWrs; break;
    case AArch64::BICXrr:  Opcode = AArch64::BICXrs; break;
    case AArch64::ANDSWrr: Opcode = AArch64::ANDSWrs; break;
    case AArch64::ANDSXrr: Opcode = AArch64::ANDSXrs; break;
    case AArch64::BICSWrr: Opcode = AArch64::BICSWrs; break;
    case AArch64::BICSXrr: Opcode = AArch64::BICSXrs; break;
    case AArch64::EONWrr:  Opcode = AArch64::EONWrs; break;
    case AArch64::EONXrr:  Opcode = AArch64::EONXrs; break;
    case AArch64::EORWrr:  Opcode = AArch64::EORWrs; break;
    case AArch64::EORXrr:  Opcode = AArch64::EORXrs; break;
    case AArch64::ORNWrr:  Opcode = AArch64::ORNWrs; break;
    case AArch64::ORNXrr:  Opcode = AArch64::ORNXrs; break;
    case AArch64::ORRWrr:  Opcode = AArch64::ORRWrs; break;
    case AArch64::ORRXrr:  Opcode = AArch64::ORRXrs; break;
    }
    MachineInstrBuilder MIB1 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opcode),
                MI.getOperand(0).getReg())
            .addOperand(MI.getOperand(1))
            .addOperand(MI.getOperand(2))
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    transferImpOps(MI, MIB1, MIB1);
    MI.eraseFromParent();
    return true;
  }
  case AArch64::LOADgot: {
    // Expand into ADRP + LDR.
    unsigned DstReg = MI.getOperand(0).getReg();
    const MachineOperand &MO1 = MI.getOperand(1);
    unsigned Flags = MO1.getTargetFlags();
    MachineInstrBuilder MIB1 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg);
    MachineInstrBuilder MIB2 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::LDRXui))
            .addOperand(MI.getOperand(0))
            .addReg(DstReg);

    if (MO1.isGlobal()) {
      MIB1.addGlobalAddress(MO1.getGlobal(), 0, Flags | AArch64II::MO_PAGE);
      MIB2.addGlobalAddress(MO1.getGlobal(), 0,
                            Flags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
    } else if (MO1.isSymbol()) {
      MIB1.addExternalSymbol(MO1.getSymbolName(), Flags | AArch64II::MO_PAGE);
      MIB2.addExternalSymbol(MO1.getSymbolName(),
                             Flags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
    } else {
      assert(MO1.isCPI() &&
             "Only expect globals, externalsymbols, or constant pools");
      MIB1.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
                                Flags | AArch64II::MO_PAGE);
      MIB2.addConstantPoolIndex(MO1.getIndex(), MO1.getOffset(),
                                Flags | AArch64II::MO_PAGEOFF |
                                    AArch64II::MO_NC);
    }

    transferImpOps(MI, MIB1, MIB2);
    MI.eraseFromParent();
    return true;
  }
  case AArch64::MOVaddr:
  case AArch64::MOVaddrJT:
  case AArch64::MOVaddrCP:
  case AArch64::MOVaddrBA:
  case AArch64::MOVaddrTLS:
  case AArch64::MOVaddrEXT: {
    // Expand into ADRP + ADD.
    unsigned DstReg = MI.getOperand(0).getReg();
    MachineInstrBuilder MIB1 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg)
            .addOperand(MI.getOperand(1));

    MachineInstrBuilder MIB2 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADDXri))
            .addOperand(MI.getOperand(0))
            .addReg(DstReg)
            .addOperand(MI.getOperand(2))
            .addImm(0);

    transferImpOps(MI, MIB1, MIB2);
    MI.eraseFromParent();
    return true;
  }
  case AArch64::MOVi32imm:
    return expandMOVImm(MBB, MBBI, 32);
  case AArch64::MOVi64imm:
    return expandMOVImm(MBB, MBBI, 64);
  case AArch64::RET_ReallyLR: {
    // Hiding the LR use with RET_ReallyLR may lead to extra kills in the
    // function and missing live-ins. We are fine in practice because callee
    // saved register handling ensures the register value is restored before
    // RET, but we need the undef flag here to appease the MachineVerifier
    // liveness checks.
    MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::RET))
            .addReg(AArch64::LR, RegState::Undef);
    transferImpOps(MI, MIB, MIB);
    MI.eraseFromParent();
    return true;
  }
  case AArch64::CMP_SWAP_8:
    return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRB, AArch64::STLXRB,
                          AArch64::SUBSWrx,
                          AArch64_AM::getArithExtendImm(AArch64_AM::UXTB, 0),
                          AArch64::WZR, NextMBBI);
  case AArch64::CMP_SWAP_16:
    return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRH, AArch64::STLXRH,
                          AArch64::SUBSWrx,
                          AArch64_AM::getArithExtendImm(AArch64_AM::UXTH, 0),
                          AArch64::WZR, NextMBBI);
  case AArch64::CMP_SWAP_32:
    return expandCMP_SWAP(MBB, MBBI, AArch64::LDAXRW, AArch64::STLXRW,
                          AArch64::SUBSWrs,
                          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0),
                          AArch64::WZR, NextMBBI);
  case AArch64::CMP_SWAP_64:
    return expandCMP_SWAP(MBB, MBBI,
                          AArch64::LDAXRX, AArch64::STLXRX, AArch64::SUBSXrs,
                          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0),
                          AArch64::XZR, NextMBBI);
  case AArch64::CMP_SWAP_128:
    return expandCMP_SWAP_128(MBB, MBBI, NextMBBI);
  }
  return false;
}
/// \brief Iterate over the instructions in basic block MBB and expand any
/// pseudo instructions. Return true if anything was modified.
bool AArch64ExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
  while (MBBI != E) {
    MachineBasicBlock::iterator NMBBI = std::next(MBBI);
    Modified |= expandMI(MBB, MBBI, NMBBI);
    MBBI = NMBBI;
  }

  return Modified;
}

bool AArch64ExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());

  bool Modified = false;
  for (auto &MBB : MF)
    Modified |= expandMBB(MBB);
  return Modified;
}

/// \brief Returns an instance of the pseudo instruction expansion pass.
FunctionPass *llvm::createAArch64ExpandPseudoPass() {
  return new AArch64ExpandPseudo();
}