1 //===-- PPCFastISel.cpp - PowerPC FastISel implementation -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the PowerPC-specific support for the FastISel class. Some
11 // of the target-specific code is generated by tablegen in the file
12 // PPCGenFastISel.inc, which is #included here.
14 //===----------------------------------------------------------------------===//
16 #define DEBUG_TYPE "ppcfastisel"
18 #include "PPCISelLowering.h"
19 #include "PPCSubtarget.h"
20 #include "PPCTargetMachine.h"
21 #include "MCTargetDesc/PPCPredicates.h"
22 #include "llvm/ADT/Optional.h"
23 #include "llvm/CodeGen/CallingConvLower.h"
24 #include "llvm/CodeGen/FastISel.h"
25 #include "llvm/CodeGen/FunctionLoweringInfo.h"
26 #include "llvm/CodeGen/MachineConstantPool.h"
27 #include "llvm/CodeGen/MachineFrameInfo.h"
28 #include "llvm/CodeGen/MachineInstrBuilder.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/IR/CallingConv.h"
31 #include "llvm/IR/GlobalAlias.h"
32 #include "llvm/IR/GlobalVariable.h"
33 #include "llvm/IR/IntrinsicInst.h"
34 #include "llvm/IR/Operator.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/GetElementPtrTypeIterator.h"
37 #include "llvm/Target/TargetLowering.h"
38 #include "llvm/Target/TargetMachine.h"
40 //===----------------------------------------------------------------------===//
43 // FastLowerArguments: Handle simple cases.
44 // PPCMaterializeGV: Handle TLS.
45 // SelectCall: Handle function pointers.
46 // SelectCall: Handle multi-register return values.
47 // SelectCall: Optimize away nops for local calls.
48 // processCallArgs: Handle bit-converted arguments.
49 // finishCall: Handle multi-register return values.
50 // PPCComputeAddress: Handle parameter references as FrameIndex's.
51 // PPCEmitCmp: Handle immediate as operand 1.
52 // SelectCall: Handle small byval arguments.
53 // SelectIntrinsicCall: Implement.
54 // SelectSelect: Implement.
55 // Consider factoring isTypeLegal into the base class.
56 // Implement switches and jump tables.
58 //===----------------------------------------------------------------------===//
63 typedef struct Address {
76 // Innocuous defaults for our address.
    Address() : BaseType(RegBase), Offset(0) { Base.Reg = 0; }
  } Address;
83 class PPCFastISel : public FastISel {
85 const TargetMachine &TM;
86 const TargetInstrInfo &TII;
87 const TargetLowering &TLI;
88 const PPCSubtarget &PPCSubTarget;
92 explicit PPCFastISel(FunctionLoweringInfo &FuncInfo,
93 const TargetLibraryInfo *LibInfo)
94 : FastISel(FuncInfo, LibInfo),
95 TM(FuncInfo.MF->getTarget()),
96 TII(*TM.getInstrInfo()),
97 TLI(*TM.getTargetLowering()),
        PPCSubTarget(
         *((static_cast<const PPCTargetMachine *>(&TM))->getSubtargetImpl())
        ),
101 Context(&FuncInfo.Fn->getContext()) { }
103 // Backend specific FastISel code.
105 virtual bool TargetSelectInstruction(const Instruction *I);
106 virtual unsigned TargetMaterializeConstant(const Constant *C);
107 virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI);
    virtual bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                                     const LoadInst *LI);
110 virtual bool FastLowerArguments();
111 virtual unsigned FastEmit_i(MVT Ty, MVT RetTy, unsigned Opc, uint64_t Imm);
112 virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
113 const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     uint64_t Imm);
116 virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode,
117 const TargetRegisterClass *RC,
118 unsigned Op0, bool Op0IsKill);
119 virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
120 const TargetRegisterClass *RC,
121 unsigned Op0, bool Op0IsKill,
122 unsigned Op1, bool Op1IsKill);
124 // Instruction selection routines.
126 bool SelectLoad(const Instruction *I);
127 bool SelectStore(const Instruction *I);
128 bool SelectBranch(const Instruction *I);
129 bool SelectIndirectBr(const Instruction *I);
130 bool SelectCmp(const Instruction *I);
131 bool SelectFPExt(const Instruction *I);
132 bool SelectFPTrunc(const Instruction *I);
133 bool SelectIToFP(const Instruction *I, bool IsSigned);
134 bool SelectFPToI(const Instruction *I, bool IsSigned);
135 bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
136 bool SelectCall(const Instruction *I);
137 bool SelectRet(const Instruction *I);
138 bool SelectTrunc(const Instruction *I);
139 bool SelectIntExt(const Instruction *I);
143 bool isTypeLegal(Type *Ty, MVT &VT);
144 bool isLoadTypeLegal(Type *Ty, MVT &VT);
145 bool PPCEmitCmp(const Value *Src1Value, const Value *Src2Value,
146 bool isZExt, unsigned DestReg);
147 bool PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
148 const TargetRegisterClass *RC, bool IsZExt = true,
149 unsigned FP64LoadOpc = PPC::LFD);
150 bool PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr);
151 bool PPCComputeAddress(const Value *Obj, Address &Addr);
    void PPCSimplifyAddress(Address &Addr, MVT VT, bool &UseOffset,
                            unsigned &IndexReg);
154 bool PPCEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
155 unsigned DestReg, bool IsZExt);
156 unsigned PPCMaterializeFP(const ConstantFP *CFP, MVT VT);
157 unsigned PPCMaterializeGV(const GlobalValue *GV, MVT VT);
158 unsigned PPCMaterializeInt(const Constant *C, MVT VT);
159 unsigned PPCMaterialize32BitInt(int64_t Imm,
160 const TargetRegisterClass *RC);
161 unsigned PPCMaterialize64BitInt(int64_t Imm,
162 const TargetRegisterClass *RC);
163 unsigned PPCMoveToIntReg(const Instruction *I, MVT VT,
164 unsigned SrcReg, bool IsSigned);
165 unsigned PPCMoveToFPReg(MVT VT, unsigned SrcReg, bool IsSigned);
167 // Call handling routines.
169 bool processCallArgs(SmallVectorImpl<Value*> &Args,
170 SmallVectorImpl<unsigned> &ArgRegs,
171 SmallVectorImpl<MVT> &ArgVTs,
172 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<unsigned> &RegArgs,
                         CallingConv::ID CC, unsigned &NumBytes,
                         bool IsVarArg);
177 void finishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
178 const Instruction *I, CallingConv::ID CC,
179 unsigned &NumBytes, bool IsVarArg);
180 CCAssignFn *usePPC32CCs(unsigned Flag);
183 #include "PPCGenFastISel.inc"
187 } // end anonymous namespace
189 #include "PPCGenCallingConv.inc"
191 // Function whose sole purpose is to kill compiler warnings
192 // stemming from unused functions included from PPCGenCallingConv.inc.
193 CCAssignFn *PPCFastISel::usePPC32CCs(unsigned Flag) {
  if (Flag == 1)
    return CC_PPC32_SVR4;
  else if (Flag == 2)
    return CC_PPC32_SVR4_ByVal;
  else
    return CC_PPC32_SVR4_VarArg;
}
204 static Optional<PPC::Predicate> getComparePred(CmpInst::Predicate Pred) {
206 // These are not representable with any single compare.
207 case CmpInst::FCMP_FALSE:
208 case CmpInst::FCMP_UEQ:
209 case CmpInst::FCMP_UGT:
210 case CmpInst::FCMP_UGE:
211 case CmpInst::FCMP_ULT:
212 case CmpInst::FCMP_ULE:
213 case CmpInst::FCMP_UNE:
214 case CmpInst::FCMP_TRUE:
216 return Optional<PPC::Predicate>();
218 case CmpInst::FCMP_OEQ:
219 case CmpInst::ICMP_EQ:
222 case CmpInst::FCMP_OGT:
223 case CmpInst::ICMP_UGT:
224 case CmpInst::ICMP_SGT:
227 case CmpInst::FCMP_OGE:
228 case CmpInst::ICMP_UGE:
229 case CmpInst::ICMP_SGE:
232 case CmpInst::FCMP_OLT:
233 case CmpInst::ICMP_ULT:
234 case CmpInst::ICMP_SLT:
237 case CmpInst::FCMP_OLE:
238 case CmpInst::ICMP_ULE:
239 case CmpInst::ICMP_SLE:
242 case CmpInst::FCMP_ONE:
243 case CmpInst::ICMP_NE:
246 case CmpInst::FCMP_ORD:
249 case CmpInst::FCMP_UNO:
254 // Determine whether the type Ty is simple enough to be handled by
255 // fast-isel, and return its equivalent machine type in VT.
256 // FIXME: Copied directly from ARM -- factor into base class?
257 bool PPCFastISel::isTypeLegal(Type *Ty, MVT &VT) {
258 EVT Evt = TLI.getValueType(Ty, true);
260 // Only handle simple types.
261 if (Evt == MVT::Other || !Evt.isSimple()) return false;
262 VT = Evt.getSimpleVT();
264 // Handle all legal types, i.e. a register that will directly hold this
266 return TLI.isTypeLegal(VT);
269 // Determine whether the type Ty is simple enough to be handled by
270 // fast-isel as a load target, and return its equivalent machine type in VT.
271 bool PPCFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
272 if (isTypeLegal(Ty, VT)) return true;
  // If this is a type that can be sign- or zero-extended to a basic operation,
  // go ahead and accept it now.
276 if (VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) {
283 // Given a value Obj, create an Address object Addr that represents its
284 // address. Return false if we can't handle it.
285 bool PPCFastISel::PPCComputeAddress(const Value *Obj, Address &Addr) {
286 const User *U = NULL;
287 unsigned Opcode = Instruction::UserOp1;
288 if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
289 // Don't walk into other basic blocks unless the object is an alloca from
290 // another block, otherwise it may not have a virtual register assigned.
291 if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
292 FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
293 Opcode = I->getOpcode();
296 } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
297 Opcode = C->getOpcode();
304 case Instruction::BitCast:
305 // Look through bitcasts.
306 return PPCComputeAddress(U->getOperand(0), Addr);
307 case Instruction::IntToPtr:
308 // Look past no-op inttoptrs.
309 if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
310 return PPCComputeAddress(U->getOperand(0), Addr);
312 case Instruction::PtrToInt:
313 // Look past no-op ptrtoints.
314 if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
315 return PPCComputeAddress(U->getOperand(0), Addr);
317 case Instruction::GetElementPtr: {
318 Address SavedAddr = Addr;
319 long TmpOffset = Addr.Offset;
321 // Iterate through the GEP folding the constants into offsets where
323 gep_type_iterator GTI = gep_type_begin(U);
324 for (User::const_op_iterator II = U->op_begin() + 1, IE = U->op_end();
325 II != IE; ++II, ++GTI) {
326 const Value *Op = *II;
327 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
328 const StructLayout *SL = TD.getStructLayout(STy);
329 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
330 TmpOffset += SL->getElementOffset(Idx);
332 uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
334 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
335 // Constant-offset addressing.
336 TmpOffset += CI->getSExtValue() * S;
339 if (canFoldAddIntoGEP(U, Op)) {
340 // A compatible add with a constant operand. Fold the constant.
          ConstantInt *CI =
            cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
343 TmpOffset += CI->getSExtValue() * S;
344 // Iterate on the other operand.
345 Op = cast<AddOperator>(Op)->getOperand(0);
349 goto unsupported_gep;
354 // Try to grab the base operand now.
355 Addr.Offset = TmpOffset;
356 if (PPCComputeAddress(U->getOperand(0), Addr)) return true;
358 // We failed, restore everything and try the other options.
364 case Instruction::Alloca: {
365 const AllocaInst *AI = cast<AllocaInst>(Obj);
366 DenseMap<const AllocaInst*, int>::iterator SI =
367 FuncInfo.StaticAllocaMap.find(AI);
368 if (SI != FuncInfo.StaticAllocaMap.end()) {
369 Addr.BaseType = Address::FrameIndexBase;
370 Addr.Base.FI = SI->second;
377 // FIXME: References to parameters fall through to the behavior
378 // below. They should be able to reference a frame index since
379 // they are stored to the stack, so we can get "ld rx, offset(r1)"
380 // instead of "addi ry, r1, offset / ld rx, 0(ry)". Obj will
381 // just contain the parameter. Try to handle this with a FI.
383 // Try to get this in a register if nothing else has worked.
384 if (Addr.Base.Reg == 0)
385 Addr.Base.Reg = getRegForValue(Obj);
387 // Prevent assignment of base register to X0, which is inappropriate
388 // for loads and stores alike.
389 if (Addr.Base.Reg != 0)
390 MRI.setRegClass(Addr.Base.Reg, &PPC::G8RC_and_G8RC_NOX0RegClass);
392 return Addr.Base.Reg != 0;
395 // Fix up some addresses that can't be used directly. For example, if
396 // an offset won't fit in an instruction field, we may need to move it
397 // into an index register.
398 void PPCFastISel::PPCSimplifyAddress(Address &Addr, MVT VT, bool &UseOffset,
399 unsigned &IndexReg) {
401 // Check whether the offset fits in the instruction field.
  if (!isInt<16>(Addr.Offset))
    UseOffset = false;
405 // If this is a stack pointer and the offset needs to be simplified then
406 // put the alloca address into a register, set the base type back to
407 // register and continue. This should almost never happen.
408 if (!UseOffset && Addr.BaseType == Address::FrameIndexBase) {
409 unsigned ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
410 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ADDI8),
411 ResultReg).addFrameIndex(Addr.Base.FI).addImm(0);
412 Addr.Base.Reg = ResultReg;
413 Addr.BaseType = Address::RegBase;
417 IntegerType *OffsetTy = ((VT == MVT::i32) ? Type::getInt32Ty(*Context)
418 : Type::getInt64Ty(*Context));
419 const ConstantInt *Offset =
420 ConstantInt::getSigned(OffsetTy, (int64_t)(Addr.Offset));
421 IndexReg = PPCMaterializeInt(Offset, MVT::i64);
422 assert(IndexReg && "Unexpected error in PPCMaterializeInt!");
426 // Emit a load instruction if possible, returning true if we succeeded,
427 // otherwise false. See commentary below for how the register class of
428 // the load is determined.
429 bool PPCFastISel::PPCEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
430 const TargetRegisterClass *RC,
431 bool IsZExt, unsigned FP64LoadOpc) {
  unsigned Opc;
  bool UseOffset = true;
435 // If ResultReg is given, it determines the register class of the load.
436 // Otherwise, RC is the register class to use. If the result of the
437 // load isn't anticipated in this block, both may be zero, in which
438 // case we must make a conservative guess. In particular, don't assign
439 // R0 or X0 to the result register, as the result may be used in a load,
440 // store, add-immediate, or isel that won't permit this. (Though
441 // perhaps the spill and reload of live-exit values would handle this?)
442 const TargetRegisterClass *UseRC =
443 (ResultReg ? MRI.getRegClass(ResultReg) :
445 (VT == MVT::f64 ? &PPC::F8RCRegClass :
446 (VT == MVT::f32 ? &PPC::F4RCRegClass :
447 (VT == MVT::i64 ? &PPC::G8RC_and_G8RC_NOX0RegClass :
448 &PPC::GPRC_and_GPRC_NOR0RegClass)))));
450 bool Is32BitInt = UseRC->hasSuperClassEq(&PPC::GPRCRegClass);
452 switch (VT.SimpleTy) {
453 default: // e.g., vector types not handled
456 Opc = Is32BitInt ? PPC::LBZ : PPC::LBZ8;
      Opc = (IsZExt ?
              (Is32BitInt ? PPC::LHZ : PPC::LHZ8) :
461 (Is32BitInt ? PPC::LHA : PPC::LHA8));
      Opc = (IsZExt ?
              (Is32BitInt ? PPC::LWZ : PPC::LWZ8) :
466 (Is32BitInt ? PPC::LWA_32 : PPC::LWA));
      if ((Opc == PPC::LWA || Opc == PPC::LWA_32) && ((Addr.Offset & 3) != 0))
        UseOffset = false;
472 assert(UseRC->hasSuperClassEq(&PPC::G8RCRegClass) &&
473 "64-bit load with 32-bit target??");
474 UseOffset = ((Addr.Offset & 3) == 0);
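      // Note: ld/lwa are DS-form instructions, so their 16-bit displacement
      // must be a multiple of 4; otherwise we fall back to the indexed
      // (X-form) load selected further below.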
484 // If necessary, materialize the offset into a register and use
485 // the indexed form. Also handle stack pointers with special needs.
486 unsigned IndexReg = 0;
487 PPCSimplifyAddress(Addr, VT, UseOffset, IndexReg);
489 ResultReg = createResultReg(UseRC);
491 // Note: If we still have a frame index here, we know the offset is
492 // in range, as otherwise PPCSimplifyAddress would have converted it
494 if (Addr.BaseType == Address::FrameIndexBase) {
496 MachineMemOperand *MMO =
497 FuncInfo.MF->getMachineMemOperand(
498 MachinePointerInfo::getFixedStack(Addr.Base.FI, Addr.Offset),
499 MachineMemOperand::MOLoad, MFI.getObjectSize(Addr.Base.FI),
500 MFI.getObjectAlignment(Addr.Base.FI));
502 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
503 .addImm(Addr.Offset).addFrameIndex(Addr.Base.FI).addMemOperand(MMO);
505 // Base reg with offset in range.
506 } else if (UseOffset) {
508 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
509 .addImm(Addr.Offset).addReg(Addr.Base.Reg);
513 // Get the RR opcode corresponding to the RI one. FIXME: It would be
514 // preferable to use the ImmToIdxMap from PPCRegisterInfo.cpp, but it
515 // is hard to get at.
517 default: llvm_unreachable("Unexpected opcode!");
518 case PPC::LBZ: Opc = PPC::LBZX; break;
519 case PPC::LBZ8: Opc = PPC::LBZX8; break;
520 case PPC::LHZ: Opc = PPC::LHZX; break;
521 case PPC::LHZ8: Opc = PPC::LHZX8; break;
522 case PPC::LHA: Opc = PPC::LHAX; break;
523 case PPC::LHA8: Opc = PPC::LHAX8; break;
524 case PPC::LWZ: Opc = PPC::LWZX; break;
525 case PPC::LWZ8: Opc = PPC::LWZX8; break;
526 case PPC::LWA: Opc = PPC::LWAX; break;
527 case PPC::LWA_32: Opc = PPC::LWAX_32; break;
528 case PPC::LD: Opc = PPC::LDX; break;
529 case PPC::LFS: Opc = PPC::LFSX; break;
530 case PPC::LFD: Opc = PPC::LFDX; break;
532 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
533 .addReg(Addr.Base.Reg).addReg(IndexReg);
539 // Attempt to fast-select a load instruction.
540 bool PPCFastISel::SelectLoad(const Instruction *I) {
541 // FIXME: No atomic loads are supported.
542 if (cast<LoadInst>(I)->isAtomic())
545 // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(I->getType(), VT))
    return false;
550 // See if we can handle this address.
  Address Addr;
  if (!PPCComputeAddress(I->getOperand(0), Addr))
    return false;
555 // Look at the currently assigned register for this instruction
556 // to determine the required register class. This is necessary
557 // to constrain RA from using R0/X0 when this is not legal.
558 unsigned AssignedReg = FuncInfo.ValueMap[I];
559 const TargetRegisterClass *RC =
560 AssignedReg ? MRI.getRegClass(AssignedReg) : 0;
562 unsigned ResultReg = 0;
563 if (!PPCEmitLoad(VT, ResultReg, Addr, RC))
565 UpdateValueMap(I, ResultReg);
569 // Emit a store instruction to store SrcReg at Addr.
570 bool PPCFastISel::PPCEmitStore(MVT VT, unsigned SrcReg, Address &Addr) {
571 assert(SrcReg && "Nothing to store!");
  unsigned Opc;
  bool UseOffset = true;
575 const TargetRegisterClass *RC = MRI.getRegClass(SrcReg);
576 bool Is32BitInt = RC->hasSuperClassEq(&PPC::GPRCRegClass);
578 switch (VT.SimpleTy) {
579 default: // e.g., vector types not handled
582 Opc = Is32BitInt ? PPC::STB : PPC::STB8;
585 Opc = Is32BitInt ? PPC::STH : PPC::STH8;
588 assert(Is32BitInt && "Not GPRC for i32??");
593 UseOffset = ((Addr.Offset & 3) == 0);
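    // As with ld, std is a DS-form instruction: its displacement must be a
    // multiple of 4, so an unaligned offset forces the X-form store below.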
603 // If necessary, materialize the offset into a register and use
604 // the indexed form. Also handle stack pointers with special needs.
605 unsigned IndexReg = 0;
606 PPCSimplifyAddress(Addr, VT, UseOffset, IndexReg);
608 // Note: If we still have a frame index here, we know the offset is
609 // in range, as otherwise PPCSimplifyAddress would have converted it
611 if (Addr.BaseType == Address::FrameIndexBase) {
612 MachineMemOperand *MMO =
613 FuncInfo.MF->getMachineMemOperand(
614 MachinePointerInfo::getFixedStack(Addr.Base.FI, Addr.Offset),
615 MachineMemOperand::MOStore, MFI.getObjectSize(Addr.Base.FI),
616 MFI.getObjectAlignment(Addr.Base.FI));
618 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc)).addReg(SrcReg)
619 .addImm(Addr.Offset).addFrameIndex(Addr.Base.FI).addMemOperand(MMO);
621 // Base reg with offset in range.
622 } else if (UseOffset)
623 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
624 .addReg(SrcReg).addImm(Addr.Offset).addReg(Addr.Base.Reg);
628 // Get the RR opcode corresponding to the RI one. FIXME: It would be
629 // preferable to use the ImmToIdxMap from PPCRegisterInfo.cpp, but it
630 // is hard to get at.
632 default: llvm_unreachable("Unexpected opcode!");
633 case PPC::STB: Opc = PPC::STBX; break;
634 case PPC::STH : Opc = PPC::STHX; break;
635 case PPC::STW : Opc = PPC::STWX; break;
636 case PPC::STB8: Opc = PPC::STBX8; break;
637 case PPC::STH8: Opc = PPC::STHX8; break;
638 case PPC::STW8: Opc = PPC::STWX8; break;
639 case PPC::STD: Opc = PPC::STDX; break;
640 case PPC::STFS: Opc = PPC::STFSX; break;
641 case PPC::STFD: Opc = PPC::STFDX; break;
643 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc))
644 .addReg(SrcReg).addReg(Addr.Base.Reg).addReg(IndexReg);
650 // Attempt to fast-select a store instruction.
651 bool PPCFastISel::SelectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);
  unsigned SrcReg;

  // FIXME: No atomic stores are supported.
656 if (cast<StoreInst>(I)->isAtomic())
659 // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(Op0->getType(), VT))
    return false;
664 // Get the value to be stored into a register.
665 SrcReg = getRegForValue(Op0);
669 // See if we can handle this address.
  Address Addr;
  if (!PPCComputeAddress(I->getOperand(1), Addr))
    return false;
674 if (!PPCEmitStore(VT, SrcReg, Addr))
680 // Attempt to fast-select a branch instruction.
681 bool PPCFastISel::SelectBranch(const Instruction *I) {
682 const BranchInst *BI = cast<BranchInst>(I);
683 MachineBasicBlock *BrBB = FuncInfo.MBB;
684 MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
685 MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
687 // For now, just try the simplest case where it's fed by a compare.
688 if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
689 Optional<PPC::Predicate> OptPPCPred = getComparePred(CI->getPredicate());
    if (!OptPPCPred)
      return false;

    PPC::Predicate PPCPred = OptPPCPred.getValue();
695 // Take advantage of fall-through opportunities.
696 if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
      std::swap(TBB, FBB);
      PPCPred = PPC::InvertPredicate(PPCPred);
    }
701 unsigned CondReg = createResultReg(&PPC::CRRCRegClass);
703 if (!PPCEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned(),
707 BuildMI(*BrBB, FuncInfo.InsertPt, DL, TII.get(PPC::BCC))
708 .addImm(PPCPred).addReg(CondReg).addMBB(TBB);
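    // PPC::BCC takes the predicate code, the CR register holding the compare
    // result, and the taken block; the not-taken successor is reached by the
    // unconditional branch (or fall-through) emitted just below.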
709 FastEmitBranch(FBB, DL);
710 FuncInfo.MBB->addSuccessor(TBB);
713 } else if (const ConstantInt *CI =
714 dyn_cast<ConstantInt>(BI->getCondition())) {
715 uint64_t Imm = CI->getZExtValue();
716 MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
717 FastEmitBranch(Target, DL);
721 // FIXME: ARM looks for a case where the block containing the compare
722 // has been split from the block containing the branch. If this happens,
723 // there is a vreg available containing the result of the compare. I'm
724 // not sure we can do much, as we've lost the predicate information with
  // the compare instruction -- we have a 4-bit CR but don't know which bit
  // to test here.
730 // Attempt to emit a compare of the two source values. Signed and unsigned
731 // comparisons are supported. Return false if we can't handle it.
732 bool PPCFastISel::PPCEmitCmp(const Value *SrcValue1, const Value *SrcValue2,
733 bool IsZExt, unsigned DestReg) {
734 Type *Ty = SrcValue1->getType();
735 EVT SrcEVT = TLI.getValueType(Ty, true);
736 if (!SrcEVT.isSimple())
738 MVT SrcVT = SrcEVT.getSimpleVT();
740 // See if operand 2 is an immediate encodeable in the compare.
741 // FIXME: Operands are not in canonical order at -O0, so an immediate
742 // operand in position 1 is a lost opportunity for now. We are
743 // similar to ARM in this regard.
  long Imm = 0;
  bool UseImm = false;

  // Only 16-bit integer constants can be represented in compares for
748 // PowerPC. Others will be materialized into a register.
749 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(SrcValue2)) {
750 if (SrcVT == MVT::i64 || SrcVT == MVT::i32 || SrcVT == MVT::i16 ||
751 SrcVT == MVT::i8 || SrcVT == MVT::i1) {
752 const APInt &CIVal = ConstInt->getValue();
753 Imm = (IsZExt) ? (long)CIVal.getZExtValue() : (long)CIVal.getSExtValue();
      if ((IsZExt && isUInt<16>(Imm)) || (!IsZExt && isInt<16>(Imm)))
        UseImm = true;
  unsigned CmpOpc;
  bool NeedsExt = false;
761 switch (SrcVT.SimpleTy) {
762 default: return false;
764 CmpOpc = PPC::FCMPUS;
767 CmpOpc = PPC::FCMPUD;
773 // Intentional fall-through.
      if (!UseImm)
        CmpOpc = IsZExt ? PPC::CMPLW : PPC::CMPW;
      else
        CmpOpc = IsZExt ? PPC::CMPLWI : PPC::CMPWI;
      if (!UseImm)
        CmpOpc = IsZExt ? PPC::CMPLD : PPC::CMPD;
      else
        CmpOpc = IsZExt ? PPC::CMPLDI : PPC::CMPDI;
788 unsigned SrcReg1 = getRegForValue(SrcValue1);
792 unsigned SrcReg2 = 0;
794 SrcReg2 = getRegForValue(SrcValue2);
800 unsigned ExtReg = createResultReg(&PPC::GPRCRegClass);
801 if (!PPCEmitIntExt(SrcVT, SrcReg1, MVT::i32, ExtReg, IsZExt))
806 unsigned ExtReg = createResultReg(&PPC::GPRCRegClass);
807 if (!PPCEmitIntExt(SrcVT, SrcReg2, MVT::i32, ExtReg, IsZExt))
  if (!UseImm)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc), DestReg)
      .addReg(SrcReg1).addReg(SrcReg2);
  else
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CmpOpc), DestReg)
      .addReg(SrcReg1).addImm(Imm);
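  // Note the asymmetry: cmpwi/cmpdi sign-extend their 16-bit immediate while
  // cmplwi/cmpldi treat it as unsigned, which is why the isInt<16>/isUInt<16>
  // check above depends on IsZExt.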
823 // Attempt to fast-select a floating-point extend instruction.
824 bool PPCFastISel::SelectFPExt(const Instruction *I) {
825 Value *Src = I->getOperand(0);
826 EVT SrcVT = TLI.getValueType(Src->getType(), true);
827 EVT DestVT = TLI.getValueType(I->getType(), true);
829 if (SrcVT != MVT::f32 || DestVT != MVT::f64)
832 unsigned SrcReg = getRegForValue(Src);
836 // No code is generated for a FP extend.
837 UpdateValueMap(I, SrcReg);
841 // Attempt to fast-select a floating-point truncate instruction.
842 bool PPCFastISel::SelectFPTrunc(const Instruction *I) {
843 Value *Src = I->getOperand(0);
844 EVT SrcVT = TLI.getValueType(Src->getType(), true);
845 EVT DestVT = TLI.getValueType(I->getType(), true);
847 if (SrcVT != MVT::f64 || DestVT != MVT::f32)
850 unsigned SrcReg = getRegForValue(Src);
854 // Round the result to single precision.
855 unsigned DestReg = createResultReg(&PPC::F4RCRegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::FRSP), DestReg)
    .addReg(SrcReg);
859 UpdateValueMap(I, DestReg);
863 // Move an i32 or i64 value in a GPR to an f64 value in an FPR.
864 // FIXME: When direct register moves are implemented (see PowerISA 2.08),
865 // those should be used instead of moving via a stack slot when the
866 // subtarget permits.
867 // FIXME: The code here is sloppy for the 4-byte case. Can use a 4-byte
868 // stack slot and 4-byte store/load sequence. Or just sext the 4-byte
869 // case to 8 bytes which produces tighter code but wastes stack space.
870 unsigned PPCFastISel::PPCMoveToFPReg(MVT SrcVT, unsigned SrcReg,
873 // If necessary, extend 32-bit int to 64-bit.
874 if (SrcVT == MVT::i32) {
875 unsigned TmpReg = createResultReg(&PPC::G8RCRegClass);
    if (!PPCEmitIntExt(MVT::i32, SrcReg, MVT::i64, TmpReg, !IsSigned))
      return 0;
    SrcReg = TmpReg;
  }
881 // Get a stack slot 8 bytes wide, aligned on an 8-byte boundary.
883 Addr.BaseType = Address::FrameIndexBase;
884 Addr.Base.FI = MFI.CreateStackObject(8, 8, false);
886 // Store the value from the GPR.
887 if (!PPCEmitStore(MVT::i64, SrcReg, Addr))
890 // Load the integer value into an FPR. The kind of load used depends
891 // on a number of conditions.
892 unsigned LoadOpc = PPC::LFD;
  if (SrcVT == MVT::i32) {
    if (!IsSigned) {
      LoadOpc = PPC::LFIWZX;
      Addr.Offset = 4;
    } else if (PPCSubTarget.hasLFIWAX()) {
      LoadOpc = PPC::LFIWAX;
      Addr.Offset = 4;
    }
  }
904 const TargetRegisterClass *RC = &PPC::F8RCRegClass;
905 unsigned ResultReg = 0;
906 if (!PPCEmitLoad(MVT::f64, ResultReg, Addr, RC, !IsSigned, LoadOpc))
912 // Attempt to fast-select an integer-to-floating-point conversion.
913 bool PPCFastISel::SelectIToFP(const Instruction *I, bool IsSigned) {
  MVT DstVT;
  Type *DstTy = I->getType();
916 if (!isTypeLegal(DstTy, DstVT))
919 if (DstVT != MVT::f32 && DstVT != MVT::f64)
922 Value *Src = I->getOperand(0);
923 EVT SrcEVT = TLI.getValueType(Src->getType(), true);
924 if (!SrcEVT.isSimple())
927 MVT SrcVT = SrcEVT.getSimpleVT();
929 if (SrcVT != MVT::i8 && SrcVT != MVT::i16 &&
930 SrcVT != MVT::i32 && SrcVT != MVT::i64)
933 unsigned SrcReg = getRegForValue(Src);
937 // We can only lower an unsigned convert if we have the newer
938 // floating-point conversion operations.
939 if (!IsSigned && !PPCSubTarget.hasFPCVT())
942 // FIXME: For now we require the newer floating-point conversion operations
943 // (which are present only on P7 and A2 server models) when converting
944 // to single-precision float. Otherwise we have to generate a lot of
945 // fiddly code to avoid double rounding. If necessary, the fiddly code
946 // can be found in PPCTargetLowering::LowerINT_TO_FP().
947 if (DstVT == MVT::f32 && !PPCSubTarget.hasFPCVT())
950 // Extend the input if necessary.
951 if (SrcVT == MVT::i8 || SrcVT == MVT::i16) {
952 unsigned TmpReg = createResultReg(&PPC::G8RCRegClass);
    if (!PPCEmitIntExt(SrcVT, SrcReg, MVT::i64, TmpReg, !IsSigned))
      return false;
    SrcReg = TmpReg;
  }
959 // Move the integer value to an FPR.
960 unsigned FPReg = PPCMoveToFPReg(SrcVT, SrcReg, IsSigned);
964 // Determine the opcode for the conversion.
965 const TargetRegisterClass *RC = &PPC::F8RCRegClass;
  unsigned DestReg = createResultReg(RC);

  unsigned Opc;
  if (DstVT == MVT::f32)
    Opc = IsSigned ? PPC::FCFIDS : PPC::FCFIDUS;
  else
    Opc = IsSigned ? PPC::FCFID : PPC::FCFIDU;
974 // Generate the convert.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
    .addReg(FPReg);
978 UpdateValueMap(I, DestReg);
982 // Move the floating-point value in SrcReg into an integer destination
983 // register, and return the register (or zero if we can't handle it).
984 // FIXME: When direct register moves are implemented (see PowerISA 2.08),
985 // those should be used instead of moving via a stack slot when the
986 // subtarget permits.
987 unsigned PPCFastISel::PPCMoveToIntReg(const Instruction *I, MVT VT,
988 unsigned SrcReg, bool IsSigned) {
989 // Get a stack slot 8 bytes wide, aligned on an 8-byte boundary.
990 // Note that if have STFIWX available, we could use a 4-byte stack
991 // slot for i32, but this being fast-isel we'll just go with the
992 // easiest code gen possible.
994 Addr.BaseType = Address::FrameIndexBase;
995 Addr.Base.FI = MFI.CreateStackObject(8, 8, false);
997 // Store the value from the FPR.
998 if (!PPCEmitStore(MVT::f64, SrcReg, Addr))
1001 // Reload it into a GPR. If we want an i32, modify the address
1002 // to have a 4-byte offset so we load from the right place.
1006 // Look at the currently assigned register for this instruction
1007 // to determine the required register class.
1008 unsigned AssignedReg = FuncInfo.ValueMap[I];
1009 const TargetRegisterClass *RC =
1010 AssignedReg ? MRI.getRegClass(AssignedReg) : 0;
1012 unsigned ResultReg = 0;
1013 if (!PPCEmitLoad(VT, ResultReg, Addr, RC, !IsSigned))
1019 // Attempt to fast-select a floating-point-to-integer conversion.
1020 bool PPCFastISel::SelectFPToI(const Instruction *I, bool IsSigned) {
  MVT DstVT, SrcVT;
  Type *DstTy = I->getType();
1023 if (!isTypeLegal(DstTy, DstVT))
1026 if (DstVT != MVT::i32 && DstVT != MVT::i64)
1029 // If we don't have FCTIDUZ and we need it, punt to SelectionDAG.
  if (DstVT == MVT::i64 && !IsSigned && !PPCSubTarget.hasFPCVT())
    return false;
1033 Value *Src = I->getOperand(0);
1034 Type *SrcTy = Src->getType();
1035 if (!isTypeLegal(SrcTy, SrcVT))
1038 if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
1041 unsigned SrcReg = getRegForValue(Src);
1045 // Convert f32 to f64 if necessary. This is just a meaningless copy
1046 // to get the register class right. COPY_TO_REGCLASS is needed since
1047 // a COPY from F4RC to F8RC is converted to a F4RC-F4RC copy downstream.
1048 const TargetRegisterClass *InRC = MRI.getRegClass(SrcReg);
1049 if (InRC == &PPC::F4RCRegClass) {
1050 unsigned TmpReg = createResultReg(&PPC::F8RCRegClass);
1051 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1052 TII.get(TargetOpcode::COPY_TO_REGCLASS), TmpReg)
      .addReg(SrcReg).addImm(PPC::F8RCRegClassID);
    SrcReg = TmpReg;
  }
1057 // Determine the opcode for the conversion, which takes place
1058 // entirely within FPRs.
  unsigned DestReg = createResultReg(&PPC::F8RCRegClass);

  unsigned Opc;
  if (DstVT == MVT::i32)
    if (IsSigned)
      Opc = PPC::FCTIWZ;
    else
      Opc = PPCSubTarget.hasFPCVT() ? PPC::FCTIWUZ : PPC::FCTIDZ;
  else
    Opc = IsSigned ? PPC::FCTIDZ : PPC::FCTIDUZ;
1070 // Generate the convert.
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
    .addReg(SrcReg);
1074 // Now move the integer value from a float register to an integer register.
1075 unsigned IntReg = PPCMoveToIntReg(I, DstVT, DestReg, IsSigned);
1079 UpdateValueMap(I, IntReg);
1083 // Attempt to fast-select a binary integer operation that isn't already
1084 // handled automatically.
1085 bool PPCFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
1086 EVT DestVT = TLI.getValueType(I->getType(), true);
1088 // We can get here in the case when we have a binary operation on a non-legal
1089 // type and the target independent selector doesn't know how to handle it.
1090 if (DestVT != MVT::i16 && DestVT != MVT::i8)
1093 // Look at the currently assigned register for this instruction
1094 // to determine the required register class. If there is no register,
1095 // make a conservative choice (don't assign R0).
1096 unsigned AssignedReg = FuncInfo.ValueMap[I];
1097 const TargetRegisterClass *RC =
1098 (AssignedReg ? MRI.getRegClass(AssignedReg) :
1099 &PPC::GPRC_and_GPRC_NOR0RegClass);
  bool IsGPRC = RC->hasSuperClassEq(&PPC::GPRCRegClass);

  unsigned Opc;
1103 switch (ISDOpcode) {
1104 default: return false;
1106 Opc = IsGPRC ? PPC::ADD4 : PPC::ADD8;
1109 Opc = IsGPRC ? PPC::OR : PPC::OR8;
1112 Opc = IsGPRC ? PPC::SUBF : PPC::SUBF8;
1116 unsigned ResultReg = createResultReg(RC ? RC : &PPC::G8RCRegClass);
1117 unsigned SrcReg1 = getRegForValue(I->getOperand(0));
1118 if (SrcReg1 == 0) return false;
1120 // Handle case of small immediate operand.
1121 if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(1))) {
1122 const APInt &CIVal = ConstInt->getValue();
1123 int Imm = (int)CIVal.getSExtValue();
1125 if (isInt<16>(Imm)) {
1128 llvm_unreachable("Missing case!");
1131 MRI.setRegClass(SrcReg1, &PPC::GPRC_and_GPRC_NOR0RegClass);
1135 MRI.setRegClass(SrcReg1, &PPC::G8RC_and_G8RC_NOX0RegClass);
1148 MRI.setRegClass(SrcReg1, &PPC::GPRC_and_GPRC_NOR0RegClass);
1157 MRI.setRegClass(SrcReg1, &PPC::G8RC_and_G8RC_NOX0RegClass);
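      // The D-form add immediate (addi/addi8) reads a literal zero when its
      // RA field encodes register 0, so the source must stay out of R0/X0;
      // hence the NOR0/NOX0 register-class constraints above.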
1164 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
1165 .addReg(SrcReg1).addImm(Imm);
1166 UpdateValueMap(I, ResultReg);
1173 unsigned SrcReg2 = getRegForValue(I->getOperand(1));
1174 if (SrcReg2 == 0) return false;
1176 // Reverse operands for subtract-from.
1177 if (ISDOpcode == ISD::SUB)
1178 std::swap(SrcReg1, SrcReg2);
1180 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
1181 .addReg(SrcReg1).addReg(SrcReg2);
1182 UpdateValueMap(I, ResultReg);
1186 // Handle arguments to a call that we're attempting to fast-select.
1187 // Return false if the arguments are too complex for us at the moment.
1188 bool PPCFastISel::processCallArgs(SmallVectorImpl<Value*> &Args,
1189 SmallVectorImpl<unsigned> &ArgRegs,
1190 SmallVectorImpl<MVT> &ArgVTs,
1191 SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<unsigned> &RegArgs,
                                  CallingConv::ID CC, unsigned &NumBytes,
                                  bool IsVarArg) {
1196 SmallVector<CCValAssign, 16> ArgLocs;
1197 CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, TM, ArgLocs, *Context);
1198 CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_PPC64_ELF_FIS);
1200 // Bail out if we can't handle any of the arguments.
1201 for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
1202 CCValAssign &VA = ArgLocs[I];
1203 MVT ArgVT = ArgVTs[VA.getValNo()];
1205 // Skip vector arguments for now, as well as long double and
1206 // uint128_t, and anything that isn't passed in a register.
1207 if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64 ||
1208 !VA.isRegLoc() || VA.needsCustom())
1211 // Skip bit-converted arguments for now.
1212 if (VA.getLocInfo() == CCValAssign::BCvt)
1216 // Get a count of how many bytes are to be pushed onto the stack.
1217 NumBytes = CCInfo.getNextStackOffset();
1219 // Issue CALLSEQ_START.
1220 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
          TII.get(TII.getCallFrameSetupOpcode()))
    .addImm(NumBytes);
1224 // Prepare to assign register arguments. Every argument uses up a
1225 // GPR protocol register even if it's passed in a floating-point
1227 unsigned NextGPR = PPC::X3;
1228 unsigned NextFPR = PPC::F1;
1230 // Process arguments.
1231 for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
1232 CCValAssign &VA = ArgLocs[I];
1233 unsigned Arg = ArgRegs[VA.getValNo()];
1234 MVT ArgVT = ArgVTs[VA.getValNo()];
1236 // Handle argument promotion and bitcasts.
1237 switch (VA.getLocInfo()) {
1239 llvm_unreachable("Unknown loc info!");
1240 case CCValAssign::Full:
1242 case CCValAssign::SExt: {
1243 MVT DestVT = VA.getLocVT();
1244 const TargetRegisterClass *RC =
1245 (DestVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
1246 unsigned TmpReg = createResultReg(RC);
1247 if (!PPCEmitIntExt(ArgVT, Arg, DestVT, TmpReg, /*IsZExt*/false))
1248 llvm_unreachable("Failed to emit a sext!");
1253 case CCValAssign::AExt:
1254 case CCValAssign::ZExt: {
1255 MVT DestVT = VA.getLocVT();
1256 const TargetRegisterClass *RC =
1257 (DestVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
1258 unsigned TmpReg = createResultReg(RC);
1259 if (!PPCEmitIntExt(ArgVT, Arg, DestVT, TmpReg, /*IsZExt*/true))
1260 llvm_unreachable("Failed to emit a zext!");
1265 case CCValAssign::BCvt: {
1266 // FIXME: Not yet handled.
1267 llvm_unreachable("Should have bailed before getting here!");
1272 // Copy this argument to the appropriate register.
1274 if (ArgVT == MVT::f32 || ArgVT == MVT::f64) {
1280 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
1281 ArgReg).addReg(Arg);
1282 RegArgs.push_back(ArgReg);
1288 // For a call that we've determined we can fast-select, finish the
1289 // call sequence and generate a copy to obtain the return value (if any).
1290 void PPCFastISel::finishCall(MVT RetVT, SmallVectorImpl<unsigned> &UsedRegs,
1291 const Instruction *I, CallingConv::ID CC,
1292 unsigned &NumBytes, bool IsVarArg) {
  // Issue CALLSEQ_END.
1294 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1295 TII.get(TII.getCallFrameDestroyOpcode()))
1296 .addImm(NumBytes).addImm(0);
1298 // Next, generate a copy to obtain the return value.
1299 // FIXME: No multi-register return values yet, though I don't foresee
1300 // any real difficulties there.
1301 if (RetVT != MVT::isVoid) {
1302 SmallVector<CCValAssign, 16> RVLocs;
1303 CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
1304 CCInfo.AnalyzeCallResult(RetVT, RetCC_PPC64_ELF_FIS);
1305 CCValAssign &VA = RVLocs[0];
1306 assert(RVLocs.size() == 1 && "No support for multi-reg return values!");
1307 assert(VA.isRegLoc() && "Can only return in registers!");
1309 MVT DestVT = VA.getValVT();
1310 MVT CopyVT = DestVT;
1312 // Ints smaller than a register still arrive in a full 64-bit
1313 // register, so make sure we recognize this.
    if (RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32)
      CopyVT = MVT::i64;
1317 unsigned SourcePhysReg = VA.getLocReg();
1318 unsigned ResultReg = 0;
1320 if (RetVT == CopyVT) {
1321 const TargetRegisterClass *CpyRC = TLI.getRegClassFor(CopyVT);
1322 ResultReg = createResultReg(CpyRC);
1324 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1325 TII.get(TargetOpcode::COPY), ResultReg)
1326 .addReg(SourcePhysReg);
1328 // If necessary, round the floating result to single precision.
1329 } else if (CopyVT == MVT::f64) {
1330 ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
1331 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::FRSP),
1332 ResultReg).addReg(SourcePhysReg);
1334 // If only the low half of a general register is needed, generate
1335 // a GPRC copy instead of a G8RC copy. (EXTRACT_SUBREG can't be
1336 // used along the fast-isel path (not lowered), and downstream logic
1337 // also doesn't like a direct subreg copy on a physical reg.)
1338 } else if (RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32) {
1339 ResultReg = createResultReg(&PPC::GPRCRegClass);
1340 // Convert physical register from G8RC to GPRC.
1341 SourcePhysReg -= PPC::X0 - PPC::R0;
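      // This relies on the X0..X31 and R0..R31 enumerators being laid out in
      // parallel, so the subtraction maps an X register to its 32-bit R alias.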
1342 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1343 TII.get(TargetOpcode::COPY), ResultReg)
1344 .addReg(SourcePhysReg);
1347 assert(ResultReg && "ResultReg unset!");
1348 UsedRegs.push_back(SourcePhysReg);
1349 UpdateValueMap(I, ResultReg);
1353 // Attempt to fast-select a call instruction.
1354 bool PPCFastISel::SelectCall(const Instruction *I) {
1355 const CallInst *CI = cast<CallInst>(I);
1356 const Value *Callee = CI->getCalledValue();
1358 // Can't handle inline asm.
1359 if (isa<InlineAsm>(Callee))
1362 // Allow SelectionDAG isel to handle tail calls.
1363 if (CI->isTailCall())
1366 // Obtain calling convention.
1367 ImmutableCallSite CS(CI);
1368 CallingConv::ID CC = CS.getCallingConv();
1370 PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
1371 FunctionType *FTy = cast<FunctionType>(PT->getElementType());
1372 bool IsVarArg = FTy->isVarArg();
1374 // Not ready for varargs yet.
1378 // Handle simple calls for now, with legal return types and
1379 // those that can be extended.
1380 Type *RetTy = I->getType();
  MVT RetVT;
  if (RetTy->isVoidTy())
1383 RetVT = MVT::isVoid;
  else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
           RetVT != MVT::i8)
    return false;
1388 // FIXME: No multi-register return values yet.
1389 if (RetVT != MVT::isVoid && RetVT != MVT::i8 && RetVT != MVT::i16 &&
1390 RetVT != MVT::i32 && RetVT != MVT::i64 && RetVT != MVT::f32 &&
1391 RetVT != MVT::f64) {
1392 SmallVector<CCValAssign, 16> RVLocs;
1393 CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
1394 CCInfo.AnalyzeCallResult(RetVT, RetCC_PPC64_ELF_FIS);
1395 if (RVLocs.size() > 1)
1399 // Bail early if more than 8 arguments, as we only currently
1400 // handle arguments passed in registers.
1401 unsigned NumArgs = CS.arg_size();
1405 // Set up the argument vectors.
1406 SmallVector<Value*, 8> Args;
1407 SmallVector<unsigned, 8> ArgRegs;
1408 SmallVector<MVT, 8> ArgVTs;
1409 SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
1411 Args.reserve(NumArgs);
1412 ArgRegs.reserve(NumArgs);
1413 ArgVTs.reserve(NumArgs);
1414 ArgFlags.reserve(NumArgs);
  for (ImmutableCallSite::arg_iterator II = CS.arg_begin(), IE = CS.arg_end();
       II != IE; ++II) {
1418 // FIXME: ARM does something for intrinsic calls here, check into that.
1420 unsigned AttrIdx = II - CS.arg_begin() + 1;
1422 // Only handle easy calls for now. It would be reasonably easy
1423 // to handle <= 8-byte structures passed ByVal in registers, but we
1424 // have to ensure they are right-justified in the register.
1425 if (CS.paramHasAttr(AttrIdx, Attribute::InReg) ||
1426 CS.paramHasAttr(AttrIdx, Attribute::StructRet) ||
1427 CS.paramHasAttr(AttrIdx, Attribute::Nest) ||
1428 CS.paramHasAttr(AttrIdx, Attribute::ByVal))
1431 ISD::ArgFlagsTy Flags;
1432 if (CS.paramHasAttr(AttrIdx, Attribute::SExt))
1434 if (CS.paramHasAttr(AttrIdx, Attribute::ZExt))
1437 Type *ArgTy = (*II)->getType();
    MVT ArgVT;
    if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8)
      return false;
1442 if (ArgVT.isVector())
1445 unsigned Arg = getRegForValue(*II);
1449 unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
1450 Flags.setOrigAlign(OriginalAlignment);
1452 Args.push_back(*II);
1453 ArgRegs.push_back(Arg);
1454 ArgVTs.push_back(ArgVT);
1455 ArgFlags.push_back(Flags);
1458 // Process the arguments.
1459 SmallVector<unsigned, 8> RegArgs;
  unsigned NumBytes;
  if (!processCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
1463 RegArgs, CC, NumBytes, IsVarArg))
1466 // FIXME: No handling for function pointers yet. This requires
1467 // implementing the function descriptor (OPD) setup.
1468 const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
1472 // Build direct call with NOP for TOC restore.
1473 // FIXME: We can and should optimize away the NOP for local calls.
1474 MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1475 TII.get(PPC::BL8_NOP));
1477 MIB.addGlobalAddress(GV);
1479 // Add implicit physical register uses to the call.
1480 for (unsigned II = 0, IE = RegArgs.size(); II != IE; ++II)
1481 MIB.addReg(RegArgs[II], RegState::Implicit);
1483 // Add a register mask with the call-preserved registers. Proper
1484 // defs for return values will be added by setPhysRegsDeadExcept().
1485 MIB.addRegMask(TRI.getCallPreservedMask(CC));
1487 // Finish off the call including any return values.
1488 SmallVector<unsigned, 4> UsedRegs;
1489 finishCall(RetVT, UsedRegs, I, CC, NumBytes, IsVarArg);
1491 // Set all unused physregs defs as dead.
1492 static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
1497 // Attempt to fast-select a return instruction.
1498 bool PPCFastISel::SelectRet(const Instruction *I) {
1500 if (!FuncInfo.CanLowerReturn)
1503 const ReturnInst *Ret = cast<ReturnInst>(I);
1504 const Function &F = *I->getParent()->getParent();
1506 // Build a list of return value registers.
1507 SmallVector<unsigned, 4> RetRegs;
1508 CallingConv::ID CC = F.getCallingConv();
1510 if (Ret->getNumOperands() > 0) {
1511 SmallVector<ISD::OutputArg, 4> Outs;
1512 GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);
1514 // Analyze operands of the call, assigning locations to each operand.
1515 SmallVector<CCValAssign, 16> ValLocs;
1516 CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs, *Context);
1517 CCInfo.AnalyzeReturn(Outs, RetCC_PPC64_ELF_FIS);
1518 const Value *RV = Ret->getOperand(0);
1520 // FIXME: Only one output register for now.
1521 if (ValLocs.size() > 1)
1524 // Special case for returning a constant integer of any size.
1525 // Materialize the constant as an i64 and copy it to the return
1526 // register. This avoids an unnecessary extend or truncate.
1527 if (isa<ConstantInt>(*RV)) {
1528 const Constant *C = cast<Constant>(RV);
1529 unsigned SrcReg = PPCMaterializeInt(C, MVT::i64);
1530 unsigned RetReg = ValLocs[0].getLocReg();
1531 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
1532 RetReg).addReg(SrcReg);
1533 RetRegs.push_back(RetReg);
1536 unsigned Reg = getRegForValue(RV);
1541 // Copy the result values into the output registers.
1542 for (unsigned i = 0; i < ValLocs.size(); ++i) {
1544 CCValAssign &VA = ValLocs[i];
1545 assert(VA.isRegLoc() && "Can only return in registers!");
1546 RetRegs.push_back(VA.getLocReg());
1547 unsigned SrcReg = Reg + VA.getValNo();
1549 EVT RVEVT = TLI.getValueType(RV->getType());
1550 if (!RVEVT.isSimple())
1552 MVT RVVT = RVEVT.getSimpleVT();
1553 MVT DestVT = VA.getLocVT();
1555 if (RVVT != DestVT && RVVT != MVT::i8 &&
1556 RVVT != MVT::i16 && RVVT != MVT::i32)
1559 if (RVVT != DestVT) {
1560 switch (VA.getLocInfo()) {
1562 llvm_unreachable("Unknown loc info!");
1563 case CCValAssign::Full:
1564 llvm_unreachable("Full value assign but types don't match?");
1565 case CCValAssign::AExt:
1566 case CCValAssign::ZExt: {
1567 const TargetRegisterClass *RC =
1568 (DestVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
1569 unsigned TmpReg = createResultReg(RC);
1570 if (!PPCEmitIntExt(RVVT, SrcReg, DestVT, TmpReg, true))
1575 case CCValAssign::SExt: {
1576 const TargetRegisterClass *RC =
1577 (DestVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
1578 unsigned TmpReg = createResultReg(RC);
1579 if (!PPCEmitIntExt(RVVT, SrcReg, DestVT, TmpReg, false))
1587 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
              TII.get(TargetOpcode::COPY), RetRegs[i])
        .addReg(SrcReg);
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
                                    TII.get(PPC::BLR));
1597 for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
1598 MIB.addReg(RetRegs[i], RegState::Implicit);
1603 // Attempt to emit an integer extend of SrcReg into DestReg. Both
1604 // signed and zero extensions are supported. Return false if we
1606 bool PPCFastISel::PPCEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
1607 unsigned DestReg, bool IsZExt) {
1608 if (DestVT != MVT::i32 && DestVT != MVT::i64)
1610 if (SrcVT != MVT::i8 && SrcVT != MVT::i16 && SrcVT != MVT::i32)
  // Signed extensions use EXTSB, EXTSH, EXTSW.
  if (!IsZExt) {
    unsigned Opc;
1616 if (SrcVT == MVT::i8)
1617 Opc = (DestVT == MVT::i32) ? PPC::EXTSB : PPC::EXTSB8_32_64;
1618 else if (SrcVT == MVT::i16)
1619 Opc = (DestVT == MVT::i32) ? PPC::EXTSH : PPC::EXTSH8_32_64;
1621 assert(DestVT == MVT::i64 && "Signed extend from i32 to i32??");
1622 Opc = PPC::EXTSW_32_64;
1624 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
1627 // Unsigned 32-bit extensions use RLWINM.
1628 } else if (DestVT == MVT::i32) {
1630 if (SrcVT == MVT::i8)
1633 assert(SrcVT == MVT::i16 && "Unsigned extend from i32 to i32??");
1636 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::RLWINM),
1638 .addReg(SrcReg).addImm(/*SH=*/0).addImm(MB).addImm(/*ME=*/31);
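    // RLWINM with SH=0 and a mask ending at bit 31 simply clears the high
    // bits: a mask beginning at bit 24 keeps the low 8 bits (zext from i8),
    // and one beginning at bit 16 keeps the low 16 bits (zext from i16).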
1640 // Unsigned 64-bit extensions use RLDICL (with a 32-bit source).
1643 if (SrcVT == MVT::i8)
1645 else if (SrcVT == MVT::i16)
1649 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
1650 TII.get(PPC::RLDICL_32_64), DestReg)
1651 .addReg(SrcReg).addImm(/*SH=*/0).addImm(MB);
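    // Likewise, RLDICL with SH=0 keeps bits MB..63: MB=56 zero-extends an i8,
    // MB=48 an i16, and MB=32 an i32, clearing everything above.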
1657 // Attempt to fast-select an indirect branch instruction.
1658 bool PPCFastISel::SelectIndirectBr(const Instruction *I) {
1659 unsigned AddrReg = getRegForValue(I->getOperand(0));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::MTCTR8))
    .addReg(AddrReg);
1665 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::BCTR8));
1667 const IndirectBrInst *IB = cast<IndirectBrInst>(I);
1668 for (unsigned i = 0, e = IB->getNumSuccessors(); i != e; ++i)
1669 FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[IB->getSuccessor(i)]);
1674 // Attempt to fast-select an integer truncate instruction.
1675 bool PPCFastISel::SelectTrunc(const Instruction *I) {
1676 Value *Src = I->getOperand(0);
1677 EVT SrcVT = TLI.getValueType(Src->getType(), true);
1678 EVT DestVT = TLI.getValueType(I->getType(), true);
1680 if (SrcVT != MVT::i64 && SrcVT != MVT::i32 && SrcVT != MVT::i16)
1683 if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
1686 unsigned SrcReg = getRegForValue(Src);
1690 // The only interesting case is when we need to switch register classes.
1691 if (SrcVT == MVT::i64) {
1692 unsigned ResultReg = createResultReg(&PPC::GPRCRegClass);
1693 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            ResultReg).addReg(SrcReg, 0, PPC::sub_32);
    SrcReg = ResultReg;
  }
1698 UpdateValueMap(I, SrcReg);
1702 // Attempt to fast-select an integer extend instruction.
1703 bool PPCFastISel::SelectIntExt(const Instruction *I) {
1704 Type *DestTy = I->getType();
1705 Value *Src = I->getOperand(0);
1706 Type *SrcTy = Src->getType();
1708 bool IsZExt = isa<ZExtInst>(I);
1709 unsigned SrcReg = getRegForValue(Src);
1710 if (!SrcReg) return false;
1712 EVT SrcEVT, DestEVT;
1713 SrcEVT = TLI.getValueType(SrcTy, true);
1714 DestEVT = TLI.getValueType(DestTy, true);
1715 if (!SrcEVT.isSimple())
1717 if (!DestEVT.isSimple())
1720 MVT SrcVT = SrcEVT.getSimpleVT();
1721 MVT DestVT = DestEVT.getSimpleVT();
1723 // If we know the register class needed for the result of this
1724 // instruction, use it. Otherwise pick the register class of the
1725 // correct size that does not contain X0/R0, since we don't know
1726 // whether downstream uses permit that assignment.
1727 unsigned AssignedReg = FuncInfo.ValueMap[I];
1728 const TargetRegisterClass *RC =
1729 (AssignedReg ? MRI.getRegClass(AssignedReg) :
1730 (DestVT == MVT::i64 ? &PPC::G8RC_and_G8RC_NOX0RegClass :
1731 &PPC::GPRC_and_GPRC_NOR0RegClass));
1732 unsigned ResultReg = createResultReg(RC);
1734 if (!PPCEmitIntExt(SrcVT, SrcReg, DestVT, ResultReg, IsZExt))
1737 UpdateValueMap(I, ResultReg);
1741 // Attempt to fast-select an instruction that wasn't handled by
1742 // the table-generated machinery.
1743 bool PPCFastISel::TargetSelectInstruction(const Instruction *I) {
1745 switch (I->getOpcode()) {
1746 case Instruction::Load:
1747 return SelectLoad(I);
1748 case Instruction::Store:
1749 return SelectStore(I);
1750 case Instruction::Br:
1751 return SelectBranch(I);
1752 case Instruction::IndirectBr:
1753 return SelectIndirectBr(I);
1754 case Instruction::FPExt:
1755 return SelectFPExt(I);
1756 case Instruction::FPTrunc:
1757 return SelectFPTrunc(I);
1758 case Instruction::SIToFP:
1759 return SelectIToFP(I, /*IsSigned*/ true);
1760 case Instruction::UIToFP:
1761 return SelectIToFP(I, /*IsSigned*/ false);
1762 case Instruction::FPToSI:
1763 return SelectFPToI(I, /*IsSigned*/ true);
1764 case Instruction::FPToUI:
1765 return SelectFPToI(I, /*IsSigned*/ false);
1766 case Instruction::Add:
1767 return SelectBinaryIntOp(I, ISD::ADD);
1768 case Instruction::Or:
1769 return SelectBinaryIntOp(I, ISD::OR);
1770 case Instruction::Sub:
1771 return SelectBinaryIntOp(I, ISD::SUB);
1772 case Instruction::Call:
      if (isa<IntrinsicInst>(I))
        return false;
1775 return SelectCall(I);
1776 case Instruction::Ret:
1777 return SelectRet(I);
1778 case Instruction::Trunc:
1779 return SelectTrunc(I);
1780 case Instruction::ZExt:
1781 case Instruction::SExt:
1782 return SelectIntExt(I);
1783 // Here add other flavors of Instruction::XXX that automated
1784 // cases don't catch. For example, switches are terminators
1785 // that aren't yet handled.
1792 // Materialize a floating-point constant into a register, and return
1793 // the register number (or zero if we failed to handle it).
1794 unsigned PPCFastISel::PPCMaterializeFP(const ConstantFP *CFP, MVT VT) {
1795 // No plans to handle long double here.
1796 if (VT != MVT::f32 && VT != MVT::f64)
1799 // All FP constants are loaded from the constant pool.
1800 unsigned Align = TD.getPrefTypeAlignment(CFP->getType());
1801 assert(Align > 0 && "Unexpectedly missing alignment information!");
1802 unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Align);
1803 unsigned DestReg = createResultReg(TLI.getRegClassFor(VT));
1804 CodeModel::Model CModel = TM.getCodeModel();
1806 MachineMemOperand *MMO =
1807 FuncInfo.MF->getMachineMemOperand(
1808 MachinePointerInfo::getConstantPool(), MachineMemOperand::MOLoad,
1809 (VT == MVT::f32) ? 4 : 8, Align);
1811 unsigned Opc = (VT == MVT::f32) ? PPC::LFS : PPC::LFD;
1812 unsigned TmpReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
1814 // For small code model, generate a LF[SD](0, LDtocCPT(Idx, X2)).
1815 if (CModel == CodeModel::Small || CModel == CodeModel::JITDefault) {
1816 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::LDtocCPT),
            TmpReg)
      .addConstantPoolIndex(Idx).addReg(PPC::X2);
1819 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
1820 .addImm(0).addReg(TmpReg).addMemOperand(MMO);
1822 // Otherwise we generate LF[SD](Idx[lo], ADDIStocHA(X2, Idx)).
1823 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ADDIStocHA),
1824 TmpReg).addReg(PPC::X2).addConstantPoolIndex(Idx);
1825 // But for large code model, we must generate a LDtocL followed
1827 if (CModel == CodeModel::Large) {
1828 unsigned TmpReg2 = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
1829 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::LDtocL),
1830 TmpReg2).addConstantPoolIndex(Idx).addReg(TmpReg);
1831 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
1832 .addImm(0).addReg(TmpReg2);
1834 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), DestReg)
1835 .addConstantPoolIndex(Idx, 0, PPCII::MO_TOC_LO)
1837 .addMemOperand(MMO);
1843 // Materialize the address of a global value into a register, and return
1844 // the register number (or zero if we failed to handle it).
1845 unsigned PPCFastISel::PPCMaterializeGV(const GlobalValue *GV, MVT VT) {
1846 assert(VT == MVT::i64 && "Non-address!");
1847 const TargetRegisterClass *RC = &PPC::G8RC_and_G8RC_NOX0RegClass;
1848 unsigned DestReg = createResultReg(RC);
1850 // Global values may be plain old object addresses, TLS object
1851 // addresses, constant pool entries, or jump tables. How we generate
1852 // code for these may depend on small, medium, or large code model.
1853 CodeModel::Model CModel = TM.getCodeModel();
1855 // FIXME: Jump tables are not yet required because fast-isel doesn't
1856 // handle switches; if that changes, we need them as well. For now,
1857 // what follows assumes everything's a generic (or TLS) global address.
1858 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
1860 // If GV is an alias, use the aliasee for determining thread-locality.
1861 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
1862 GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal(false));
1865 // FIXME: We don't yet handle the complexity of TLS.
  bool IsTLS = GVar && GVar->isThreadLocal();
  if (IsTLS)
    return 0;
1870 // For small code model, generate a simple TOC load.
1871 if (CModel == CodeModel::Small || CModel == CodeModel::JITDefault)
1872 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::LDtoc), DestReg)
1873 .addGlobalAddress(GV).addReg(PPC::X2);
1875 // If the address is an externally defined symbol, a symbol with
1876 // common or externally available linkage, a function address, or a
1877 // jump table address (not yet needed), or if we are generating code
1878 // for large code model, we generate:
1879 // LDtocL(GV, ADDIStocHA(%X2, GV))
1880 // Otherwise we generate:
1881 // ADDItocL(ADDIStocHA(%X2, GV), GV)
1882 // Either way, start with the ADDIStocHA:
    unsigned HighPartReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ADDIStocHA),
            HighPartReg).addReg(PPC::X2).addGlobalAddress(GV);

    // !GVar implies a function address.  An external variable is one
    // without an initializer.
    // If/when switches are implemented, jump tables should be handled
    // on the "if" path here.
    if (CModel == CodeModel::Large || !GVar || !GVar->hasInitializer() ||
        GVar->hasCommonLinkage() || GVar->hasAvailableExternallyLinkage())
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::LDtocL),
              DestReg).addGlobalAddress(GV).addReg(HighPartReg);
    else
      // Otherwise generate the ADDItocL.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ADDItocL),
              DestReg).addReg(HighPartReg).addGlobalAddress(GV);
  }

  return DestReg;
}

// Materialize a 32-bit integer constant into a register, and return
// the register number (or zero if we failed to handle it).
unsigned PPCFastISel::PPCMaterialize32BitInt(int64_t Imm,
                                             const TargetRegisterClass *RC) {
  unsigned Lo = Imm & 0xFFFF;
  unsigned Hi = (Imm >> 16) & 0xFFFF;

  unsigned ResultReg = createResultReg(RC);
  bool IsGPRC = RC->hasSuperClassEq(&PPC::GPRCRegClass);
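  // A value that fits in a signed 16-bit immediate needs only a single LI.
  // Otherwise, LIS places the high halfword (value << 16) in the register
  // and ORI ORs in the low halfword; when the low halfword is zero the LIS
  // alone suffices.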
  if (isInt<16>(Imm))
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(IsGPRC ? PPC::LI : PPC::LI8), ResultReg)
      .addImm(Imm);
  else if (Lo) {
    // Both Lo and Hi have nonzero bits.
    unsigned TmpReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(IsGPRC ? PPC::LIS : PPC::LIS8), TmpReg)
      .addImm(Hi);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(IsGPRC ? PPC::ORI : PPC::ORI8), ResultReg)
      .addReg(TmpReg).addImm(Lo);
  } else
    // Just the Hi bits.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(IsGPRC ? PPC::LIS : PPC::LIS8), ResultReg)
      .addImm(Hi);

  return ResultReg;
}

// Materialize a 64-bit integer constant into a register, and return
// the register number (or zero if we failed to handle it).
unsigned PPCFastISel::PPCMaterialize64BitInt(int64_t Imm,
                                             const TargetRegisterClass *RC) {
  unsigned Remainder = 0;
  unsigned Shift = 0;
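  // Overall strategy: materialize the high-order 32 bits (or the value
  // shifted right past its trailing zeros) with the 32-bit path, shift the
  // result into place with RLDICR (a left shift), then OR in any remaining
  // halfwords with ORIS8/ORI8.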
  // If the value doesn't fit in 32 bits, see if we can shift it
  // so that it fits in 32 bits.
  if (!isInt<32>(Imm)) {
    Shift = countTrailingZeros<uint64_t>(Imm);
    int64_t ImmSh = static_cast<uint64_t>(Imm) >> Shift;
    if (isInt<32>(ImmSh))
      Imm = ImmSh;
    else {
      Remainder = Imm;
      Shift = 32;
      Imm >>= 32;
    }
  }

  // Handle the high-order 32 bits (if shifted) or the whole 32 bits
  // (if not shifted).
  unsigned TmpReg1 = PPCMaterialize32BitInt(Imm, RC);
  if (!Shift)
    return TmpReg1;

  // If upper 32 bits were not zero, we've built them and need to shift
  // them into place.
  unsigned TmpReg2;
  if (Imm) {
    TmpReg2 = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::RLDICR),
            TmpReg2).addReg(TmpReg1).addImm(Shift).addImm(63 - Shift);
  } else
    TmpReg2 = TmpReg1;

  unsigned TmpReg3, Hi, Lo;
  if ((Hi = (Remainder >> 16) & 0xFFFF)) {
    TmpReg3 = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ORIS8),
            TmpReg3).addReg(TmpReg2).addImm(Hi);
  } else
    TmpReg3 = TmpReg2;

  if ((Lo = Remainder & 0xFFFF)) {
    unsigned ResultReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ORI8),
            ResultReg).addReg(TmpReg3).addImm(Lo);
    return ResultReg;
  }

  return TmpReg3;
}

// Materialize an integer constant into a register, and return
// the register number (or zero if we failed to handle it).
unsigned PPCFastISel::PPCMaterializeInt(const Constant *C, MVT VT) {
  if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16 &&
      VT != MVT::i8 && VT != MVT::i1)
    return 0;

  const TargetRegisterClass *RC = ((VT == MVT::i64) ? &PPC::G8RCRegClass :
                                   &PPC::GPRCRegClass);

  // If the constant is in range, use a load-immediate.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (isInt<16>(CI->getSExtValue())) {
    unsigned Opc = (VT == MVT::i64) ? PPC::LI8 : PPC::LI;
    unsigned ImmReg = createResultReg(RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ImmReg)
      .addImm(CI->getSExtValue());
    return ImmReg;
  }

  // Construct the constant piecewise.
  int64_t Imm = CI->getZExtValue();
  if (VT == MVT::i64)
    return PPCMaterialize64BitInt(Imm, RC);
  else if (VT == MVT::i32)
    return PPCMaterialize32BitInt(Imm, RC);

  return 0;
}

// Materialize a constant into a register, and return the register
// number (or zero if we failed to handle it).
unsigned PPCFastISel::TargetMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple()) return 0;
  MVT VT = CEVT.getSimpleVT();

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return PPCMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return PPCMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return PPCMaterializeInt(C, VT);

  return 0;
}

// Materialize the address created by an alloca into a register, and
// return the register number (or zero if we failed to handle it).
unsigned PPCFastISel::TargetMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  if (SI != FuncInfo.StaticAllocaMap.end()) {
    unsigned ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);
    // The frame index is resolved to a real stack offset during frame
    // lowering.
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(PPC::ADDI8),
            ResultReg).addFrameIndex(SI->second).addImm(0);
    return ResultReg;
  }

  return 0;
}

// Fold loads into extends when possible.
// FIXME: We can have multiple redundant extend/trunc instructions
// following a load.  The folding only picks up one.  Extend this
// to check subsequent instructions for the same pattern and remove
// them.  Thus ResultReg should be the def reg for the last redundant
// instruction in a chain, and all intervening instructions can be
// removed from parent.  Change test/CodeGen/PowerPC/fast-isel-fold.ll
// to add ELF64-NOT: rldicl to the appropriate tests when this works.
bool PPCFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                                      const LoadInst *LI) {
  // Verify we have a legal type before going any further.
  MVT VT;
  if (!isLoadTypeLegal(LI->getType(), VT))
    return false;

  // Combine load followed by zero- or sign-extend.
  bool IsZExt = false;
  switch(MI->getOpcode()) {
    default:
      return false;
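    // RLDICL/RLWINM here are the rotate-and-mask forms used for zero
    // extension: the mask-begin operand (MB, operand 3) determines how many
    // low-order bits survive the mask.  For the patterns fast-isel emits,
    // the extension is redundant after a zero-extending load whenever the
    // mask keeps at least as many low-order bits as the load produces; the
    // per-width checks below encode exactly that.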
    case PPC::RLDICL:
    case PPC::RLDICL_32_64: {
      IsZExt = true;
      unsigned MB = MI->getOperand(3).getImm();
      if ((VT == MVT::i8 && MB <= 56) ||
          (VT == MVT::i16 && MB <= 48) ||
          (VT == MVT::i32 && MB <= 32))
        break;
      return false;
    }

    case PPC::RLWINM:
    case PPC::RLWINM8: {
      IsZExt = true;
      unsigned MB = MI->getOperand(3).getImm();
      if ((VT == MVT::i8 && MB <= 24) ||
          (VT == MVT::i16 && MB <= 16))
        break;
      return false;
    }

    case PPC::EXTSB:
    case PPC::EXTSB8:
    case PPC::EXTSB8_32_64:
      /* There is no sign-extending load-byte instruction. */
      return false;

    case PPC::EXTSH:
    case PPC::EXTSH8:
    case PPC::EXTSH8_32_64: {
      if (VT != MVT::i16 && VT != MVT::i8)
        return false;
      break;
    }

    case PPC::EXTSW:
    case PPC::EXTSW_32_64: {
      if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8)
        return false;
      break;
    }
  }

  // See if we can handle this address.
  Address Addr;
  if (!PPCComputeAddress(LI->getOperand(0), Addr))
    return false;

  unsigned ResultReg = MI->getOperand(0).getReg();
  if (!PPCEmitLoad(VT, ResultReg, Addr, 0, IsZExt))
    return false;

  MI->eraseFromParent();
  return true;
}

// Attempt to lower call arguments in a faster way than done by
// the selection DAG code.
bool PPCFastISel::FastLowerArguments() {
  // Defer to normal argument lowering for now.  It's reasonably
  // efficient.  Consider doing something like ARM to handle the
  // case where all args fit in registers, no varargs, no float
  // or vector args.
  return false;
}

// Handle materializing integer constants into a register.  This is not
// automatically generated for PowerPC, so must be explicitly created here.
unsigned PPCFastISel::FastEmit_i(MVT Ty, MVT VT, unsigned Opc, uint64_t Imm) {
  if (Opc != ISD::Constant)
    return 0;

  if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16 &&
      VT != MVT::i8 && VT != MVT::i1)
    return 0;

  const TargetRegisterClass *RC = ((VT == MVT::i64) ? &PPC::G8RCRegClass :
                                   &PPC::GPRCRegClass);
  if (VT == MVT::i64)
    return PPCMaterialize64BitInt(Imm, RC);
  else
    return PPCMaterialize32BitInt(Imm, RC);
}

// Override for ADDI and ADDI8 to set the correct register class
// on RHS operand 0.  The automatic infrastructure naively assumes
// GPRC for i32 and G8RC for i64; the concept of "no R0" is lost
// for these cases.  At the moment, none of the other automatically
// generated RI instructions require special treatment.  However, once
// SelectSelect is implemented, "isel" requires similar handling.
//
// Also be conservative about the output register class.  Avoid
// assigning R0 or X0 to the output register for GPRC and G8RC
// register classes, as any such result could be used in ADDI, etc.,
// where those regs have another meaning.
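// (In ADDI and other D-form instructions that use RA as a base or addend,
// an RA operand of R0/X0 is read as the constant zero rather than the
// register's contents, which is why both the input and the result must be
// kept out of R0/X0.)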
unsigned PPCFastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, bool Op0IsKill,
                                      uint64_t Imm) {
  if (MachineInstOpcode == PPC::ADDI)
    MRI.setRegClass(Op0, &PPC::GPRC_and_GPRC_NOR0RegClass);
  else if (MachineInstOpcode == PPC::ADDI8)
    MRI.setRegClass(Op0, &PPC::G8RC_and_G8RC_NOX0RegClass);

  const TargetRegisterClass *UseRC =
    (RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
     (RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));

  return FastISel::FastEmitInst_ri(MachineInstOpcode, UseRC,
                                   Op0, Op0IsKill, Imm);
}

// Override for instructions with one register operand to avoid use of
// R0/X0.  The automatic infrastructure isn't aware of the context so
// we must be conservative.
unsigned PPCFastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass* RC,
                                     unsigned Op0, bool Op0IsKill) {
  const TargetRegisterClass *UseRC =
    (RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
     (RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));

  return FastISel::FastEmitInst_r(MachineInstOpcode, UseRC, Op0, Op0IsKill);
}

// Override for instructions with two register operands to avoid use
// of R0/X0.  The automatic infrastructure isn't aware of the context
// so we must be conservative.
unsigned PPCFastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass* RC,
                                      unsigned Op0, bool Op0IsKill,
                                      unsigned Op1, bool Op1IsKill) {
  const TargetRegisterClass *UseRC =
    (RC == &PPC::GPRCRegClass ? &PPC::GPRC_and_GPRC_NOR0RegClass :
     (RC == &PPC::G8RCRegClass ? &PPC::G8RC_and_G8RC_NOX0RegClass : RC));

  return FastISel::FastEmitInst_rr(MachineInstOpcode, UseRC, Op0, Op0IsKill,
                                   Op1, Op1IsKill);
}

// Create the fast instruction selector for PowerPC64 ELF.
FastISel *PPC::createFastISel(FunctionLoweringInfo &FuncInfo,
                              const TargetLibraryInfo *LibInfo) {
  const TargetMachine &TM = FuncInfo.MF->getTarget();

  // Only available on 64-bit ELF for now.
  const PPCSubtarget *Subtarget = &TM.getSubtarget<PPCSubtarget>();
  if (Subtarget->isPPC64() && Subtarget->isSVR4ABI())
    return new PPCFastISel(FuncInfo, LibInfo);

  return 0;
}