//===--- HexagonBitTracker.cpp --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#include "Hexagon.h"
#include "HexagonInstrInfo.h"
#include "HexagonRegisterInfo.h"
#include "HexagonTargetMachine.h"
#include "HexagonBitTracker.h"

using namespace llvm;

typedef BitTracker BT;

HexagonEvaluator::HexagonEvaluator(const HexagonRegisterInfo &tri,
                                   MachineRegisterInfo &mri,
                                   const HexagonInstrInfo &tii,
                                   MachineFunction &mf)
    : MachineEvaluator(tri, mri), MF(mf), MFI(mf.getFrameInfo()), TII(tii) {
  // Populate the VRX map (VR to extension-type).
  // Go over all the formal parameters of the function. If a given parameter
  // P is sign- or zero-extended, locate the virtual register holding that
  // parameter and create an entry in the VRX map indicating the type of
  // extension (and the source type).
  // This is a bit complicated to do accurately, since the memory layout
  // information is necessary to precisely determine whether an aggregate
  // parameter will be passed in a register or in memory. What is given in
  // MRI is the association between the physical register that is live-in
  // (i.e. holds an argument), and the virtual register that this value will
  // be copied into. This, by itself, is not sufficient to map back the
  // virtual register to a formal parameter from Function (since consecutive
  // live-ins from MRI may not correspond to consecutive formal parameters
  // from Function). To avoid the complications with in-memory arguments,
  // only consider the initial sequence of formal parameters that are known
  // to be passed via registers.
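  // Illustrative example (not from the original source): for a function
  //   define i32 @f(i16 signext %a, i8 zeroext %b)
  // the first two arguments arrive in R0 and R1. If %a is copied from R0
  // into a virtual register vA, VRX will map vA -> (SExt, 16), and the
  // virtual register holding %b will map to (ZExt, 8).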
  unsigned InVirtReg, InPhysReg = 0;
  const Function &F = *MF.getFunction();
  typedef Function::const_arg_iterator arg_iterator;
  for (arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    const Argument &Arg = *I;
    Type *ATy = Arg.getType();
    unsigned Width = 0;
    if (ATy->isIntegerTy())
      Width = ATy->getIntegerBitWidth();
    else if (ATy->isPointerTy())
      Width = 32;
    // If pointer size is not set through target data, it will default to
    // Module::AnyPointerSize.
    if (Width == 0 || Width > 64)
      break;
    // Argument attribute indices are 1-based in this AttributeSet API.
    unsigned AttrIdx = Arg.getArgNo() + 1;
    AttributeSet Attrs = F.getAttributes();
    if (Attrs.hasAttribute(AttrIdx, Attribute::ByVal))
      continue;
    InPhysReg = getNextPhysReg(InPhysReg, Width);
    if (!InPhysReg)
      break;
    InVirtReg = getVirtRegFor(InPhysReg);
    if (!InVirtReg)
      continue;
    if (Attrs.hasAttribute(AttrIdx, Attribute::SExt))
      VRX.insert(std::make_pair(InVirtReg, ExtType(ExtType::SExt, Width)));
    else if (Attrs.hasAttribute(AttrIdx, Attribute::ZExt))
      VRX.insert(std::make_pair(InVirtReg, ExtType(ExtType::ZExt, Width)));
  }
}
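
// mask() reports which bit range a subregister covers within its
// super-register. A worked example (illustrative, not from the original
// source): for a 64-bit DoubleRegs register D0 = R1:R0, the low subregister
// (R0) maps to bits [0,31] and the high one (R1) to bits [32,63]; the HVX
// double vector classes get the same split at their respective widths.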
BT::BitMask HexagonEvaluator::mask(unsigned Reg, unsigned Sub) const {
  if (Sub == 0)
    return MachineEvaluator::mask(Reg, 0);
  using namespace Hexagon;
  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  unsigned ID = RC->getID();
  uint16_t RW = getRegBitWidth(RegisterRef(Reg, Sub));
  auto &HRI = static_cast<const HexagonRegisterInfo&>(TRI);
  bool IsSubLo = (Sub == HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo));
  switch (ID) {
    case DoubleRegsRegClassID:
    case VecDblRegsRegClassID:
    case VecDblRegs128BRegClassID:
      return IsSubLo ? BT::BitMask(0, RW-1)
                     : BT::BitMask(RW, 2*RW-1);
    default:
      break;
  }
#ifndef NDEBUG
  dbgs() << PrintReg(Reg, &TRI, Sub) << '\n';
#endif
  llvm_unreachable("Unexpected register/subregister");
}

namespace {

// Helper that exposes the operands of a MachineInstr as a vector of
// register references, with non-register operands left default-constructed.
class RegisterRefs {
  std::vector<BT::RegisterRef> Vector;

public:
  RegisterRefs(const MachineInstr &MI) : Vector(MI.getNumOperands()) {
    for (unsigned i = 0, n = Vector.size(); i < n; ++i) {
      const MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg())
        Vector[i] = BT::RegisterRef(MO);
      // For indices that don't correspond to registers, the entry will
      // remain constructed via the default constructor.
    }
  }

  size_t size() const { return Vector.size(); }

  const BT::RegisterRef &operator[](unsigned n) const {
    // The main purpose of this operator is to assert with bad argument.
    assert(n < Vector.size());
    return Vector[n];
  }
};

} // end anonymous namespace
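
// The main evaluator: computes the output cells of a single MachineInstr
// from the cells of its register inputs. Opcodes that are not modeled here
// fall back to MachineEvaluator::evaluate() in the default case of the
// switch below.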
bool HexagonEvaluator::evaluate(const MachineInstr &MI,
                                const CellMapType &Inputs,
                                CellMapType &Outputs) const {
  unsigned NumDefs = 0;

  // Sanity verification: there should not be any defs with subregisters.
  for (unsigned i = 0, n = MI.getNumOperands(); i < n; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    NumDefs++;
    assert(MO.getSubReg() == 0);
  }

  if (NumDefs == 0)
    return false;
  using namespace Hexagon;
  unsigned Opc = MI.getOpcode();

  if (MI.mayLoad()) {
    switch (Opc) {
      // These instructions may be marked as mayLoad, but they are generating
      // immediate values, so skip them.
      case CONST32:
      case CONST64:
        break;
      default:
        return evaluateLoad(MI, Inputs, Outputs);
    }
  }

  // Check COPY instructions that copy formal parameters into virtual
  // registers. Such parameters can be sign- or zero-extended at the
  // call site, and we should take advantage of this knowledge. The MRI
  // keeps a list of pairs of live-in physical and virtual registers,
  // which provides information about which virtual registers will hold
  // the argument values. The function will still contain instructions
  // defining those virtual registers, and in practice those are COPY
  // instructions from a physical to a virtual register. In such cases,
  // applying the argument extension to the virtual register can be seen
  // as simply mirroring the extension that had already been applied to
  // the physical register at the call site. If the defining instruction
  // was not a COPY, it would not be clear how to mirror that extension
  // on the callee's side. For that reason, only check COPY instructions
  // for potential extensions.
  if (MI.isCopy()) {
    if (evaluateFormalCopy(MI, Inputs, Outputs))
      return true;
  }

  // Beyond this point, if any operand is a global, skip that instruction.
  // The reason is that certain instructions that can take an immediate
  // operand can also have a global symbol in that operand. To avoid
  // checking what kind of operand a given instruction has individually
  // for each instruction, do it here. Global symbols as operands generally
  // do not provide any useful information.
  for (unsigned i = 0, n = MI.getNumOperands(); i < n; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (MO.isGlobal() || MO.isBlockAddress() || MO.isSymbol() || MO.isJTI() ||
        MO.isCPI())
      return false;
  }

  RegisterRefs Reg(MI);
#define op(i) MI.getOperand(i)
#define rc(i) RegisterCell::ref(getCell(Reg[i], Inputs))
#define im(i) MI.getOperand(i).getImm()
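
  // Shorthand used throughout the opcode switch below: op(i) is the i-th
  // machine operand, rc(i) is the tracked register cell of the i-th operand,
  // and im(i) is the i-th operand as an immediate. For example (illustrative
  // only), "Rd = add(Rs, #imm)" is evaluated as eADD(rc(1), eIMM(im(2), W0)),
  // as in the A2_addi case below.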

  // If the instruction has no register operands, skip it.
  if (Reg.size() == 0)
    return false;

  // Record result for register in operand 0.
  auto rr0 = [this,Reg] (const BT::RegisterCell &Val, CellMapType &Outputs)
        -> bool {
    putCell(Reg[0], Val, Outputs);
    return true;
  };
  // Get the cell corresponding to the N-th operand.
  auto cop = [this, &Reg, &MI, &Inputs](unsigned N,
                                        uint16_t W) -> BT::RegisterCell {
    const MachineOperand &Op = MI.getOperand(N);
    if (Op.isImm())
      return eIMM(Op.getImm(), W);
    if (!Op.isReg())
      return RegisterCell::self(0, W);
    assert(getRegBitWidth(Reg[N]) == W && "Register width mismatch");
    return rc(N);
  };
  // Extract RW low bits of the cell.
  auto lo = [this] (const BT::RegisterCell &RC, uint16_t RW)
        -> BT::RegisterCell {
    assert(RW <= RC.width());
    return eXTR(RC, 0, RW);
  };
  // Extract RW high bits of the cell.
  auto hi = [this] (const BT::RegisterCell &RC, uint16_t RW)
        -> BT::RegisterCell {
    uint16_t W = RC.width();
    assert(RW <= W);
    return eXTR(RC, W-RW, W);
  };
  // Extract N-th halfword (counting from the least significant position).
  auto half = [this] (const BT::RegisterCell &RC, unsigned N)
        -> BT::RegisterCell {
    assert(N*16+16 <= RC.width());
    return eXTR(RC, N*16, N*16+16);
  };
  // Shuffle bits (pick even/odd from cells and merge into result).
  auto shuffle = [this] (const BT::RegisterCell &Rs, const BT::RegisterCell &Rt,
                         uint16_t BW, bool Odd) -> BT::RegisterCell {
    uint16_t I = Odd, Ws = Rs.width();
    assert(Ws == Rt.width());
    RegisterCell RC = eXTR(Rt, I*BW, I*BW+BW).cat(eXTR(Rs, I*BW, I*BW+BW));
    I += 2;
    while (I*BW < Ws) {
      RC.cat(eXTR(Rt, I*BW, I*BW+BW)).cat(eXTR(Rs, I*BW, I*BW+BW));
      I += 2;
    }
    return RC;
  };
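  // Worked example (illustrative, not from the original source): for
  // S2_shuffeb, shuffle(rc(1), rc(2), 8, false) produces a cell whose bytes
  // are Rt.b[0], Rs.b[0], Rt.b[2], Rs.b[2], ... counting from the least
  // significant byte; the Odd flag starts the pick at byte 1 instead.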

  // The bitwidth of the 0th operand. In most (if not all) of the
  // instructions below, the 0th operand is the defined register.
  // Pre-compute the bitwidth here, because it is needed in many cases
  // below.
  uint16_t W0 = (Reg[0].Reg != 0) ? getRegBitWidth(Reg[0]) : 0;

  switch (Opc) {
    // Transfer immediate:

    case A2_tfrsi:
    case A2_tfrpi:
    case CONST32:
    case CONST64:
      return rr0(eIMM(im(1), W0), Outputs);
    case PS_false:
      return rr0(RegisterCell(W0).fill(0, W0, BT::BitValue::Zero), Outputs);
    case PS_true:
      return rr0(RegisterCell(W0).fill(0, W0, BT::BitValue::One), Outputs);
    case PS_fi: {
      int FI = op(1).getIndex();
      int Off = op(2).getImm();
      unsigned A = MFI.getObjectAlignment(FI) + std::abs(Off);
      unsigned L = Log2_32(A);
      RegisterCell RC = RegisterCell::self(Reg[0].Reg, W0);
      RC.fill(0, L, BT::BitValue::Zero);
      return rr0(RC, Outputs);
    }

    // Transfer register:

    case A2_tfr:
    case A2_tfrp:
    case C2_pxfer_map:
      return rr0(rc(1), Outputs);
    case C2_tfrpr: {
      uint16_t RW = W0;
      uint16_t PW = 8; // XXX Pred size: getRegBitWidth(Reg[1]);
      assert(PW <= RW);
      RegisterCell PC = eXTR(rc(1), 0, PW);
      RegisterCell RC = RegisterCell(RW).insert(PC, BT::BitMask(0, PW-1));
      RC.fill(PW, RW, BT::BitValue::Zero);
      return rr0(RC, Outputs);
    }
    case C2_tfrrp: {
      RegisterCell RC = RegisterCell::self(Reg[0].Reg, W0);
      W0 = 8; // XXX Pred size
      return rr0(eINS(RC, eXTR(rc(1), 0, W0), 0), Outputs);
    }

    // Arithmetic:

    case A2_addsp: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      assert(W0 == 64 && W1 == 32);
      RegisterCell CW = RegisterCell(W0).insert(rc(1), BT::BitMask(0, W1-1));
      RegisterCell RC = eADD(eSXT(CW, W1), rc(2));
      return rr0(RC, Outputs);
    }
    case A2_add:
    case A2_addp:
      return rr0(eADD(rc(1), rc(2)), Outputs);
    case A2_addi:
      return rr0(eADD(rc(1), eIMM(im(2), W0)), Outputs);
    case S4_addi_asl_ri: {
      RegisterCell RC = eADD(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_addi_lsr_ri: {
      RegisterCell RC = eADD(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_addaddi: {
      RegisterCell RC = eADD(rc(1), eADD(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M4_mpyri_addi: {
      RegisterCell M  = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eADD(eIMM(im(1), W0), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyrr_addi: {
      RegisterCell M  = eMLS(rc(2), rc(3));
      RegisterCell RC = eADD(eIMM(im(1), W0), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyri_addr_u2: {
      RegisterCell M  = eMLS(eIMM(im(2), W0), rc(3));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyri_addr: {
      RegisterCell M  = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyrr_addr: {
      RegisterCell M  = eMLS(rc(2), rc(3));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case S4_subaddi: {
      RegisterCell RC = eADD(rc(1), eSUB(eIMM(im(2), W0), rc(3)));
      return rr0(RC, Outputs);
    }
    case M2_accii: {
      RegisterCell RC = eADD(rc(1), eADD(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M2_acci: {
      RegisterCell RC = eADD(rc(1), eADD(rc(2), rc(3)));
      return rr0(RC, Outputs);
    }
    case M2_subacc: {
      RegisterCell RC = eADD(rc(1), eSUB(rc(2), rc(3)));
      return rr0(RC, Outputs);
    }
    case S2_addasl_rrri: {
      RegisterCell RC = eADD(rc(1), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case C4_addipc: {
      RegisterCell RPC = RegisterCell::self(Reg[0].Reg, W0);
      RPC.fill(0, 2, BT::BitValue::Zero);
      return rr0(eADD(RPC, eIMM(im(2), W0)), Outputs);
    }

    case A2_sub:
    case A2_subp:
      return rr0(eSUB(rc(1), rc(2)), Outputs);
    case A2_subri:
      return rr0(eSUB(eIMM(im(1), W0), rc(2)), Outputs);
    case S4_subi_asl_ri: {
      RegisterCell RC = eSUB(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_subi_lsr_ri: {
      RegisterCell RC = eSUB(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case M2_naccii: {
      RegisterCell RC = eSUB(rc(1), eADD(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M2_nacci: {
      RegisterCell RC = eSUB(rc(1), eADD(rc(2), rc(3)));
      return rr0(RC, Outputs);
    }
    // 32-bit negation is done by "Rd = A2_subri 0, Rs"
    case A2_negp:
      return rr0(eSUB(eIMM(0, W0), rc(1)), Outputs);

    case M2_mpy_up: {
      RegisterCell M = eMLS(rc(1), rc(2));
      return rr0(hi(M, W0), Outputs);
    }
    case M2_dpmpyss_s0:
      return rr0(eMLS(rc(1), rc(2)), Outputs);
    case M2_dpmpyss_acc_s0:
      return rr0(eADD(rc(1), eMLS(rc(2), rc(3))), Outputs);
    case M2_dpmpyss_nac_s0:
      return rr0(eSUB(rc(1), eMLS(rc(2), rc(3))), Outputs);
    case M2_mpyi: {
      RegisterCell M = eMLS(rc(1), rc(2));
      return rr0(lo(M, W0), Outputs);
    }
    case M2_macsip: {
      RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M2_macsin: {
      RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eSUB(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M2_maci: {
      RegisterCell M = eMLS(rc(2), rc(3));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M2_mpysmi: {
      RegisterCell M = eMLS(rc(1), eIMM(im(2), W0));
      return rr0(lo(M, 32), Outputs);
    }
    case M2_mpysin: {
      RegisterCell M = eMLS(rc(1), eIMM(-im(2), W0));
      return rr0(lo(M, 32), Outputs);
    }
    case M2_mpysip: {
      RegisterCell M = eMLS(rc(1), eIMM(im(2), W0));
      return rr0(lo(M, 32), Outputs);
    }
    case M2_mpyu_up: {
      RegisterCell M = eMLU(rc(1), rc(2));
      return rr0(hi(M, W0), Outputs);
    }
    case M2_dpmpyuu_s0:
      return rr0(eMLU(rc(1), rc(2)), Outputs);
    case M2_dpmpyuu_acc_s0:
      return rr0(eADD(rc(1), eMLU(rc(2), rc(3))), Outputs);
    case M2_dpmpyuu_nac_s0:
      return rr0(eSUB(rc(1), eMLU(rc(2), rc(3))), Outputs);

    // Logical/bitwise:

    case A2_andir:
      return rr0(eAND(rc(1), eIMM(im(2), W0)), Outputs);
    case A2_and:
    case A2_andp:
      return rr0(eAND(rc(1), rc(2)), Outputs);
    case A4_andn:
    case A4_andnp:
      return rr0(eAND(rc(1), eNOT(rc(2))), Outputs);
    case S4_andi_asl_ri: {
      RegisterCell RC = eAND(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_andi_lsr_ri: {
      RegisterCell RC = eAND(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case M4_and_and:
      return rr0(eAND(rc(1), eAND(rc(2), rc(3))), Outputs);
    case M4_and_andn:
      return rr0(eAND(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case M4_and_or:
      return rr0(eAND(rc(1), eORL(rc(2), rc(3))), Outputs);
    case M4_and_xor:
      return rr0(eAND(rc(1), eXOR(rc(2), rc(3))), Outputs);
    case A2_orir:
      return rr0(eORL(rc(1), eIMM(im(2), W0)), Outputs);
    case A2_or:
    case A2_orp:
      return rr0(eORL(rc(1), rc(2)), Outputs);
    case A4_orn:
    case A4_ornp:
      return rr0(eORL(rc(1), eNOT(rc(2))), Outputs);
    case S4_ori_asl_ri: {
      RegisterCell RC = eORL(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_ori_lsr_ri: {
      RegisterCell RC = eORL(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case M4_or_and:
      return rr0(eORL(rc(1), eAND(rc(2), rc(3))), Outputs);
    case M4_or_andn:
      return rr0(eORL(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case S4_or_andi:
    case S4_or_andix: {
      RegisterCell RC = eORL(rc(1), eAND(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case S4_or_ori: {
      RegisterCell RC = eORL(rc(1), eORL(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M4_or_or:
      return rr0(eORL(rc(1), eORL(rc(2), rc(3))), Outputs);
    case M4_or_xor:
      return rr0(eORL(rc(1), eXOR(rc(2), rc(3))), Outputs);
    case A2_xor:
    case A2_xorp:
      return rr0(eXOR(rc(1), rc(2)), Outputs);
    case M4_xor_and:
      return rr0(eXOR(rc(1), eAND(rc(2), rc(3))), Outputs);
    case M4_xor_andn:
      return rr0(eXOR(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case M4_xor_or:
      return rr0(eXOR(rc(1), eORL(rc(2), rc(3))), Outputs);
    case M4_xor_xacc:
      return rr0(eXOR(rc(1), eXOR(rc(2), rc(3))), Outputs);
    case A2_not:
    case A2_notp:
      return rr0(eNOT(rc(1)), Outputs);

    case S2_asl_i_r:
    case S2_asl_i_p:
      return rr0(eASL(rc(1), im(2)), Outputs);
    case A2_aslh:
      return rr0(eASL(rc(1), 16), Outputs);
    case S2_asl_i_r_acc:
    case S2_asl_i_p_acc:
      return rr0(eADD(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_nac:
    case S2_asl_i_p_nac:
      return rr0(eSUB(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_and:
    case S2_asl_i_p_and:
      return rr0(eAND(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_or:
    case S2_asl_i_p_or:
      return rr0(eORL(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_xacc:
    case S2_asl_i_p_xacc:
      return rr0(eXOR(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_vh:
    case S2_asl_i_vw:
      // TODO
      break;

    case S2_asr_i_r:
    case S2_asr_i_p:
      return rr0(eASR(rc(1), im(2)), Outputs);
    case A2_asrh:
      return rr0(eASR(rc(1), 16), Outputs);
    case S2_asr_i_r_acc:
    case S2_asr_i_p_acc:
      return rr0(eADD(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_nac:
    case S2_asr_i_p_nac:
      return rr0(eSUB(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_and:
    case S2_asr_i_p_and:
      return rr0(eAND(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_or:
    case S2_asr_i_p_or:
      return rr0(eORL(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_rnd: {
      // The input is first sign-extended to 64 bits, then the output
      // is truncated back to 32 bits.
      assert(W0 == 32);
      RegisterCell XC = eSXT(rc(1).cat(eIMM(0, W0)), W0);
      RegisterCell RC = eASR(eADD(eASR(XC, im(2)), eIMM(1, 2*W0)), 1);
      return rr0(eXTR(RC, 0, W0), Outputs);
    }
    case S2_asr_i_r_rnd_goodsyntax: {
      int64_t S = im(2);
      if (S == 0)
        return rr0(rc(1), Outputs);
      // Result: S2_asr_i_r_rnd Rs, u5-1
      RegisterCell XC = eSXT(rc(1).cat(eIMM(0, W0)), W0);
      RegisterCell RC = eLSR(eADD(eASR(XC, S-1), eIMM(1, 2*W0)), 1);
      return rr0(eXTR(RC, 0, W0), Outputs);
    }
    case S2_asr_i_svw_trun:
      // TODO
      break;

    case S2_lsr_i_r:
    case S2_lsr_i_p:
      return rr0(eLSR(rc(1), im(2)), Outputs);
    case S2_lsr_i_r_acc:
    case S2_lsr_i_p_acc:
      return rr0(eADD(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_nac:
    case S2_lsr_i_p_nac:
      return rr0(eSUB(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_and:
    case S2_lsr_i_p_and:
      return rr0(eAND(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_or:
    case S2_lsr_i_p_or:
      return rr0(eORL(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_xacc:
    case S2_lsr_i_p_xacc:
      return rr0(eXOR(rc(1), eLSR(rc(2), im(3))), Outputs);

    case S2_clrbit_i: {
      RegisterCell RC = rc(1);
      RC[im(2)] = BT::BitValue::Zero;
      return rr0(RC, Outputs);
    }
    case S2_setbit_i: {
      RegisterCell RC = rc(1);
      RC[im(2)] = BT::BitValue::One;
      return rr0(RC, Outputs);
    }
    case S2_togglebit_i: {
      RegisterCell RC = rc(1);
      uint16_t BX = im(2);
      RC[BX] = RC[BX].is(0) ? BT::BitValue::One
                            : RC[BX].is(1) ? BT::BitValue::Zero
                                           : BT::BitValue::self();
      return rr0(RC, Outputs);
    }

    case A4_bitspliti: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      uint16_t BX = im(2);
      // Res.uw[1] = Rs[bx+1:], Res.uw[0] = Rs[0:bx]
      const BT::BitValue Zero = BT::BitValue::Zero;
      RegisterCell RZ = RegisterCell(W0).fill(BX, W1, Zero)
                                        .fill(W1+(W1-BX), W0, Zero);
      RegisterCell BF1 = eXTR(rc(1), 0, BX), BF2 = eXTR(rc(1), BX, W1);
      RegisterCell RC = eINS(eINS(RZ, BF1, 0), BF2, W1);
      return rr0(RC, Outputs);
    }
    case S4_extract:
    case S4_extractp:
    case S2_extractu:
    case S2_extractup: {
      uint16_t Wd = im(2), Of = im(3);
      assert(Wd <= W0);
      if (Wd == 0)
        return rr0(eIMM(0, W0), Outputs);
      // If the width extends beyond the register size, pad the register
      // with 0s.
      RegisterCell Pad = (Wd+Of > W0) ? rc(1).cat(eIMM(0, Wd+Of-W0)) : rc(1);
      RegisterCell Ext = eXTR(Pad, Of, Wd+Of);
      // Ext is short, need to extend it with 0s or sign bit.
      RegisterCell RC = RegisterCell(W0).insert(Ext, BT::BitMask(0, Wd-1));
      if (Opc == S2_extractu || Opc == S2_extractup)
        return rr0(eZXT(RC, Wd), Outputs);
      return rr0(eSXT(RC, Wd), Outputs);
    }
    case S2_insert:
    case S2_insertp: {
      uint16_t Wd = im(3), Of = im(4);
      assert(Wd < W0 && Of < W0);
      // If Wd+Of exceeds W0, the inserted bits are truncated.
      if (Wd+Of > W0)
        Wd = W0-Of;
      if (Wd == 0)
        return rr0(rc(1), Outputs);
      return rr0(eINS(rc(1), eXTR(rc(2), 0, Wd), Of), Outputs);
    }

    case A2_combineii:
    case A4_combineii:
    case A4_combineir:
    case A4_combineri:
    case A2_combinew:
    case V6_vcombine:
    case V6_vcombine_128B:
      assert(W0 % 2 == 0);
      return rr0(cop(2, W0/2).cat(cop(1, W0/2)), Outputs);
    case A2_combine_ll:
    case A2_combine_lh:
    case A2_combine_hl:
    case A2_combine_hh: {
      assert(W0 == 32);
      assert(getRegBitWidth(Reg[1]) == 32 && getRegBitWidth(Reg[2]) == 32);
      // Low half in the output is 0 for _ll and _hl, 1 otherwise:
      unsigned LoH = !(Opc == A2_combine_ll || Opc == A2_combine_hl);
      // High half in the output is 0 for _ll and _lh, 1 otherwise:
      unsigned HiH = !(Opc == A2_combine_ll || Opc == A2_combine_lh);
      RegisterCell R1 = rc(1);
      RegisterCell R2 = rc(2);
      RegisterCell RC = half(R2, LoH).cat(half(R1, HiH));
      return rr0(RC, Outputs);
    }
    case S2_packhl: {
      assert(W0 == 64);
      assert(getRegBitWidth(Reg[1]) == 32 && getRegBitWidth(Reg[2]) == 32);
      RegisterCell R1 = rc(1);
      RegisterCell R2 = rc(2);
      RegisterCell RC = half(R2, 0).cat(half(R1, 0)).cat(half(R2, 1))
                                   .cat(half(R1, 1));
      return rr0(RC, Outputs);
    }
    case S2_shuffeb: {
      RegisterCell RC = shuffle(rc(1), rc(2), 8, false);
      return rr0(RC, Outputs);
    }
    case S2_shuffeh: {
      RegisterCell RC = shuffle(rc(1), rc(2), 16, false);
      return rr0(RC, Outputs);
    }
    case S2_shuffob: {
      RegisterCell RC = shuffle(rc(1), rc(2), 8, true);
      return rr0(RC, Outputs);
    }
    case S2_shuffoh: {
      RegisterCell RC = shuffle(rc(1), rc(2), 16, true);
      return rr0(RC, Outputs);
    }
    case C2_mask: {
      uint16_t WR = W0;
      uint16_t WP = 8; // XXX Pred size: getRegBitWidth(Reg[1]);
      assert(WR == 64 && WP == 8);
      RegisterCell R1 = rc(1);
      RegisterCell RC(WR);
      for (uint16_t i = 0; i < WP; ++i) {
        const BT::BitValue &V = R1[i];
        BT::BitValue F = (V.is(0) || V.is(1)) ? V : BT::BitValue::self();
        RC.fill(i*8, i*8+8, F);
      }
      return rr0(RC, Outputs);
    }

    // Mux:

    case C2_muxir:
    case C2_muxri:
    case C2_mux:
    case C2_muxii: {
      BT::BitValue PC0 = rc(1)[0];
      RegisterCell R2 = cop(2, W0);
      RegisterCell R3 = cop(3, W0);
      if (PC0.is(0) || PC0.is(1))
        return rr0(RegisterCell::ref(PC0 ? R2 : R3), Outputs);
      R2.meet(R3, Reg[0].Reg);
      return rr0(R2, Outputs);
    }
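    // When the predicate bit is known, the mux selects one input cell
    // outright; otherwise the two cells are combined with meet(), which
    // (a sketch of the intent) keeps only the bits on which both inputs
    // agree -- see BitTracker's RegisterCell::meet for exact semantics.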
    case C2_vmux:
      // TODO
      break;

    // Sign- and zero-extension:

    case A2_sxtb:
      return rr0(eSXT(rc(1), 8), Outputs);
    case A2_sxth:
      return rr0(eSXT(rc(1), 16), Outputs);
    case A2_sxtw: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      assert(W0 == 64 && W1 == 32);
      RegisterCell RC = eSXT(rc(1).cat(eIMM(0, W1)), W1);
      return rr0(RC, Outputs);
    }
    case A2_zxtb:
      return rr0(eZXT(rc(1), 8), Outputs);
    case A2_zxth:
      return rr0(eZXT(rc(1), 16), Outputs);

    // Bit count:

    case S2_cl0:
    case S2_cl0p:
      // Always produce a 32-bit result.
      return rr0(eCLB(rc(1), 0/*bit*/, 32), Outputs);
    case S2_cl1:
    case S2_cl1p:
      return rr0(eCLB(rc(1), 1/*bit*/, 32), Outputs);
    case S2_clb:
    case S2_clbp: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      RegisterCell R1 = rc(1);
      BT::BitValue TV = R1[W1-1];
      if (TV.is(0) || TV.is(1))
        return rr0(eCLB(R1, TV, 32), Outputs);
      break;
    }
    case S2_ct0:
    case S2_ct0p:
      return rr0(eCTB(rc(1), 0/*bit*/, 32), Outputs);
    case S2_ct1:
    case S2_ct1p:
      return rr0(eCTB(rc(1), 1/*bit*/, 32), Outputs);
    case S5_popcountp:
      // TODO
      break;

    case C2_all8: {
      RegisterCell P1 = rc(1);
      bool Has0 = false, All1 = true;
      for (uint16_t i = 0; i < 8/*XXX*/; ++i) {
        if (!P1[i].is(1))
          All1 = false;
        if (!P1[i].is(0))
          continue;
        Has0 = true;
        break;
      }
      if (!Has0 && !All1)
        break;
      RegisterCell RC(W0);
      RC.fill(0, W0, (All1 ? BT::BitValue::One : BT::BitValue::Zero));
      return rr0(RC, Outputs);
    }
    case C2_any8: {
      RegisterCell P1 = rc(1);
      bool Has1 = false, All0 = true;
      for (uint16_t i = 0; i < 8/*XXX*/; ++i) {
        if (!P1[i].is(0))
          All0 = false;
        if (!P1[i].is(1))
          continue;
        Has1 = true;
        break;
      }
      if (!Has1 && !All0)
        break;
      RegisterCell RC(W0);
      RC.fill(0, W0, (Has1 ? BT::BitValue::One : BT::BitValue::Zero));
      return rr0(RC, Outputs);
    }
    case C2_and:
      return rr0(eAND(rc(1), rc(2)), Outputs);
    case C2_andn:
      return rr0(eAND(rc(1), eNOT(rc(2))), Outputs);
    case C2_not:
      return rr0(eNOT(rc(1)), Outputs);
    case C2_or:
      return rr0(eORL(rc(1), rc(2)), Outputs);
    case C2_orn:
      return rr0(eORL(rc(1), eNOT(rc(2))), Outputs);
    case C2_xor:
      return rr0(eXOR(rc(1), rc(2)), Outputs);
    case C4_and_and:
      return rr0(eAND(rc(1), eAND(rc(2), rc(3))), Outputs);
    case C4_and_andn:
      return rr0(eAND(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case C4_and_or:
      return rr0(eAND(rc(1), eORL(rc(2), rc(3))), Outputs);
    case C4_and_orn:
      return rr0(eAND(rc(1), eORL(rc(2), eNOT(rc(3)))), Outputs);
    case C4_or_and:
      return rr0(eORL(rc(1), eAND(rc(2), rc(3))), Outputs);
    case C4_or_andn:
      return rr0(eORL(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case C4_or_or:
      return rr0(eORL(rc(1), eORL(rc(2), rc(3))), Outputs);
    case C4_or_orn:
      return rr0(eORL(rc(1), eORL(rc(2), eNOT(rc(3)))), Outputs);
    case C2_bitsclr:
    case C2_bitsclri:
    case C2_bitsset:
    case C4_nbitsclr:
    case C4_nbitsclri:
    case C4_nbitsset:
      // TODO
      break;
    case S2_tstbit_i:
    case S4_ntstbit_i: {
      BT::BitValue V = rc(1)[im(2)];
      if (V.is(0) || V.is(1)) {
        // If instruction is S2_tstbit_i, test for 1, otherwise test for 0.
        bool TV = (Opc == S2_tstbit_i);
        BT::BitValue F = V.is(TV) ? BT::BitValue::One : BT::BitValue::Zero;
        return rr0(RegisterCell(W0).fill(0, W0, F), Outputs);
      }
      break;
    }

    default:
      return MachineEvaluator::evaluate(MI, Inputs, Outputs);
  }

  #undef im
  #undef rc
  #undef op
  return false;
}

bool HexagonEvaluator::evaluate(const MachineInstr &BI,
                                const CellMapType &Inputs,
                                BranchTargetList &Targets,
                                bool &FallsThru) const {
  // We need to evaluate one branch at a time. TII::analyzeBranch checks
  // all the branches in a basic block at once, so we cannot use it.
  unsigned Opc = BI.getOpcode();
  bool SimpleBranch = false;
  bool Negated = false;
  switch (Opc) {
    case Hexagon::J2_jumpf:
    case Hexagon::J2_jumpfpt:
    case Hexagon::J2_jumpfnew:
    case Hexagon::J2_jumpfnewpt:
      Negated = true;
    case Hexagon::J2_jumpt:
    case Hexagon::J2_jumptpt:
    case Hexagon::J2_jumptnew:
    case Hexagon::J2_jumptnewpt:
      // Simple branch: if([!]Pn) jump ...
      // i.e. Op0 = predicate, Op1 = branch target.
      SimpleBranch = true;
      break;
    case Hexagon::J2_jump:
      Targets.insert(BI.getOperand(0).getMBB());
      FallsThru = false;
      return true;
    default:
      // If the branch is of unknown type, assume that all successors are
      // executable.
      return false;
  }

  if (!SimpleBranch)
    return false;

  // BI is a conditional branch if we got here.
  RegisterRef PR = BI.getOperand(0);
  RegisterCell PC = getCell(PR, Inputs);
  const BT::BitValue &Test = PC[0];

  // If the condition is neither true nor false, then it's unknown.
  if (!Test.is(0) && !Test.is(1))
    return false;

  // "Test.is(!Negated)" means "branch condition is true".
  if (!Test.is(!Negated)) {
    // Condition known to be false.
    FallsThru = true;
    return true;
  }

  Targets.insert(BI.getOperand(1).getMBB());
  FallsThru = false;
  return true;
}
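
// Loads are not tracked through memory: the loaded bits are modeled as
// unknown "self" bits, and only the sign-/zero-extension of the result is
// recorded. Example (illustrative, not from the original source): for
// "Rd = memub(...)" the low 8 bits of Rd become self-references and bits
// 8..31 become constant zero.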
bool HexagonEvaluator::evaluateLoad(const MachineInstr &MI,
                                    const CellMapType &Inputs,
                                    CellMapType &Outputs) const {
  if (TII.isPredicated(MI))
    return false;
  assert(MI.mayLoad() && "A load that mayn't?");

  unsigned Opc = MI.getOpcode();
  uint16_t BitNum;
  bool SignEx;
  using namespace Hexagon;

  switch (Opc) {
    default:
      return false;

#if 0
    // memb_fifo
    case L2_loadalignb_pbr:
    case L2_loadalignb_pcr:
    case L2_loadalignb_pi:
    // memh_fifo
    case L2_loadalignh_pbr:
    case L2_loadalignh_pcr:
    case L2_loadalignh_pi:
    // membh
    case L2_loadbsw2_pbr:
    case L2_loadbsw2_pci:
    case L2_loadbsw2_pcr:
    case L2_loadbsw4_pbr:
    case L2_loadbsw4_pci:
    case L2_loadbsw4_pcr:
    // memubh
    case L2_loadbzw2_pbr:
    case L2_loadbzw2_pci:
    case L2_loadbzw2_pcr:
    case L2_loadbzw4_pbr:
    case L2_loadbzw4_pci:
    case L2_loadbzw4_pcr:
#endif

    case L2_loadrb_io:
    case L2_loadrb_pbr:
    case L2_loadrb_pci:
    case L2_loadrb_pcr:
    case L2_loadrb_pi:
      BitNum = 8;
      SignEx = true;
      break;

    case L2_loadrub_io:
    case L2_loadrub_pbr:
    case L2_loadrub_pci:
    case L2_loadrub_pcr:
    case L2_loadrub_pi:
      BitNum = 8;
      SignEx = false;
      break;

    case L2_loadrh_io:
    case L2_loadrh_pbr:
    case L2_loadrh_pci:
    case L2_loadrh_pcr:
    case L2_loadrh_pi:
      BitNum = 16;
      SignEx = true;
      break;

    case L2_loadruh_io:
    case L2_loadruh_pbr:
    case L2_loadruh_pci:
    case L2_loadruh_pcr:
    case L2_loadruh_pi:
      BitNum = 16;
      SignEx = false;
      break;

    case L2_loadri_io:
    case L2_loadri_pbr:
    case L2_loadri_pci:
    case L2_loadri_pcr:
    case L2_loadri_pi:
    case L2_loadw_locked:
      BitNum = 32;
      SignEx = true;
      break;

    case L2_loadrd_io:
    case L2_loadrd_pbr:
    case L2_loadrd_pci:
    case L2_loadrd_pcr:
    case L2_loadrd_pi:
    case L4_loadd_locked:
      BitNum = 64;
      SignEx = true;
      break;
  }

  const MachineOperand &MD = MI.getOperand(0);
  assert(MD.isReg() && MD.isDef());
  RegisterRef RD = MD;

  uint16_t W = getRegBitWidth(RD);
  assert(W >= BitNum && BitNum > 0);
  RegisterCell Res(W);

  for (uint16_t i = 0; i < BitNum; ++i)
    Res[i] = BT::BitValue::self(BT::BitRef(RD.Reg, i));

  if (SignEx) {
    const BT::BitValue &Sign = Res[BitNum-1];
    for (uint16_t i = BitNum; i < W; ++i)
      Res[i] = BT::BitValue::ref(Sign);
  } else {
    for (uint16_t i = BitNum; i < W; ++i)
      Res[i] = BT::BitValue::Zero;
  }

  putCell(RD, Res, Outputs);
  return true;
}
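
// Example (illustrative, not from the original source): for a formal
// parameter "i16 signext %a" copied from R0 into a virtual register vA,
// the cell of vA is first set to R0's cell, and then replaced with
// eSXT(cell, 16), making bits 16..31 references to bit 15.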
bool HexagonEvaluator::evaluateFormalCopy(const MachineInstr &MI,
                                          const CellMapType &Inputs,
                                          CellMapType &Outputs) const {
  // If MI defines a formal parameter, but is not a copy (loads are handled
  // in evaluateLoad), then it's not clear what to do.
  assert(MI.isCopy());

  RegisterRef RD = MI.getOperand(0);
  RegisterRef RS = MI.getOperand(1);
  assert(RD.Sub == 0);
  if (!TargetRegisterInfo::isPhysicalRegister(RS.Reg))
    return false;
  RegExtMap::const_iterator F = VRX.find(RD.Reg);
  if (F == VRX.end())
    return false;

  uint16_t EW = F->second.Width;
  // Store RD's cell into the map. This will associate the cell with a virtual
  // register, and make zero-/sign-extends possible (otherwise we would be
  // extending "self" bit values, which will have no effect, since "self"
  // values cannot be references to anything).
  putCell(RD, getCell(RS, Inputs), Outputs);

  RegisterCell Res;
  // Read RD's cell from the outputs instead of RS's cell from the inputs:
  if (F->second.Type == ExtType::SExt)
    Res = eSXT(getCell(RD, Outputs), EW);
  else if (F->second.Type == ExtType::ZExt)
    Res = eZXT(getCell(RD, Outputs), EW);

  putCell(RD, Res, Outputs);
  return true;
}
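
// Hexagon passes the leading arguments in R0..R5 (32-bit) and D0..D2
// (64-bit), where each Dn aliases a pair of Rn registers. A sketch of the
// traversal below (assuming that convention): after R1, the next 64-bit
// parameter register is D1 (D0 = R1:R0 is already consumed), and after D1
// the next 32-bit parameter register is R4.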
unsigned HexagonEvaluator::getNextPhysReg(unsigned PReg, unsigned Width) const {
  using namespace Hexagon;
  bool Is64 = DoubleRegsRegClass.contains(PReg);
  assert(PReg == 0 || Is64 || IntRegsRegClass.contains(PReg));

  static const unsigned Phys32[] = { R0, R1, R2, R3, R4, R5 };
  static const unsigned Phys64[] = { D0, D1, D2 };
  const unsigned Num32 = sizeof(Phys32)/sizeof(unsigned);
  const unsigned Num64 = sizeof(Phys64)/sizeof(unsigned);

  // Return the first parameter register of the required width.
  if (PReg == 0)
    return (Width <= 32) ? Phys32[0] : Phys64[0];

  // Set Idx32, Idx64 in such a way that Idx+1 would give the index of the
  // next register.
  unsigned Idx32 = 0, Idx64 = 0;
  if (!Is64) {
    while (Idx32 < Num32) {
      if (Phys32[Idx32] == PReg)
        break;
      Idx32++;
    }
    Idx64 = Idx32/2;
  } else {
    while (Idx64 < Num64) {
      if (Phys64[Idx64] == PReg)
        break;
      Idx64++;
    }
    Idx32 = Idx64*2+1;
  }

  if (Width <= 32)
    return (Idx32+1 < Num32) ? Phys32[Idx32+1] : 0;
  return (Idx64+1 < Num64) ? Phys64[Idx64+1] : 0;
}
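
// Map a live-in physical register to the virtual register it is copied
// into. MRI keeps these as (physreg, virtreg) pairs; for example, a live-in
// R0 holding the first argument would return the vreg that the entry-block
// COPY writes.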
unsigned HexagonEvaluator::getVirtRegFor(unsigned PReg) const {
  typedef MachineRegisterInfo::livein_iterator iterator;
  for (iterator I = MRI.livein_begin(), E = MRI.livein_end(); I != E; ++I) {
    if (I->first == PReg)
      return I->second;
  }
  return 0;
}