//===--- HexagonBitTracker.cpp --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#include "Hexagon.h"
#include "HexagonInstrInfo.h"
#include "HexagonRegisterInfo.h"
#include "HexagonTargetMachine.h"
#include "HexagonBitTracker.h"

using namespace llvm;

typedef BitTracker BT;
HexagonEvaluator::HexagonEvaluator(const HexagonRegisterInfo &tri,
      MachineRegisterInfo &mri, const HexagonInstrInfo &tii,
      MachineFunction &mf)
    : MachineEvaluator(tri, mri), MF(mf), MFI(*mf.getFrameInfo()), TII(tii) {
  // Populate the VRX map (VR to extension-type).
  // Go over all the formal parameters of the function. If a given parameter
  // P is sign- or zero-extended, locate the virtual register holding that
  // parameter and create an entry in the VRX map indicating the type of
  // extension (and the source type).
  // This is a bit complicated to do accurately, since the memory layout
  // information is necessary to precisely determine whether an aggregate
  // parameter will be passed in a register or in memory. What is given in
  // MRI is the association between the physical register that is live-in
  // (i.e. holds an argument), and the virtual register that this value will
  // be copied into. This, by itself, is not sufficient to map back the
  // virtual register to a formal parameter from Function (since consecutive
  // live-ins from MRI may not correspond to consecutive formal parameters
  // from Function). To avoid the complications with in-memory arguments,
  // only consider the initial sequence of formal parameters that are known
  // to be passed via registers.
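  // For example (an illustrative signature, not taken from any particular
  // module): given
  //   define i32 @f(i16 signext %a, i8 zeroext %b)
  // the loop below would record the virtual registers copied from the first
  // two argument registers as ExtType(SExt, 16) and ExtType(ZExt, 8),
  // respectively.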
  unsigned InVirtReg, InPhysReg = 0;
  const Function &F = *MF.getFunction();
  typedef Function::const_arg_iterator arg_iterator;
  for (arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    const Argument &Arg = *I;
    Type *ATy = Arg.getType();
    unsigned Width = 0;
    if (ATy->isIntegerTy())
      Width = ATy->getIntegerBitWidth();
    else if (ATy->isPointerTy())
      Width = 32;
    // If pointer size is not set through target data, it will default to
    // Module::AnyPointerSize.
    if (Width == 0 || Width > 64)
      break;
    InPhysReg = getNextPhysReg(InPhysReg, Width);
    if (!InPhysReg)
      break;
    InVirtReg = getVirtRegFor(InPhysReg);
    if (!InVirtReg)
      continue;
    AttributeSet Attrs = F.getAttributes();
    unsigned AttrIdx = Arg.getArgNo()+1;
    if (Attrs.hasAttribute(AttrIdx, Attribute::SExt))
      VRX.insert(std::make_pair(InVirtReg, ExtType(ExtType::SExt, Width)));
    else if (Attrs.hasAttribute(AttrIdx, Attribute::ZExt))
      VRX.insert(std::make_pair(InVirtReg, ExtType(ExtType::ZExt, Width)));
  }
}
BT::BitMask HexagonEvaluator::mask(unsigned Reg, unsigned Sub) const {
  if (Sub == 0)
    return MachineEvaluator::mask(Reg, 0);
  using namespace Hexagon;
  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
  unsigned ID = RC->getID();
  uint16_t RW = getRegBitWidth(RegisterRef(Reg, Sub));
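  // A subregister of a 64-bit pair covers one half of it; e.g. for D0,
  // which aliases the pair R1:R0 (illustration), the low subregister maps
  // to bits [0,31] and the high one to bits [32,63].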
  switch (ID) {
    case DoubleRegsRegClassID:
      return (Sub == subreg_loreg) ? BT::BitMask(0, RW-1)
                                   : BT::BitMask(RW, 2*RW-1);
    default:
      break;
  }
#ifndef NDEBUG
  dbgs() << PrintReg(Reg, &TRI, Sub) << '\n';
#endif
  llvm_unreachable("Unexpected register/subregister");
}
namespace {
struct RegisterRefs : public std::vector<BT::RegisterRef> {
  typedef std::vector<BT::RegisterRef> Base;
  RegisterRefs(const MachineInstr *MI);
  const BT::RegisterRef &operator[](unsigned n) const {
    // The main purpose of this operator is to assert when given a bad
    // argument.
    assert(n < size());
    return Base::operator[](n);
  }
};
}

RegisterRefs::RegisterRefs(const MachineInstr *MI)
    : Base(MI->getNumOperands()) {
  for (unsigned i = 0, n = size(); i < n; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg())
      at(i) = BT::RegisterRef(MO);
    // For indices that don't correspond to registers, the entry will
    // remain constructed via the default constructor.
  }
}
bool HexagonEvaluator::evaluate(const MachineInstr *MI,
      const CellMapType &Inputs, CellMapType &Outputs) const {
  unsigned NumDefs = 0;

  // Sanity verification: there should not be any defs with subregisters.
  for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    NumDefs++;
    assert(MO.getSubReg() == 0);
  }

  if (NumDefs == 0)
    return false;

  if (MI->mayLoad())
    return evaluateLoad(MI, Inputs, Outputs);
  // Check COPY instructions that copy formal parameters into virtual
  // registers. Such parameters can be sign- or zero-extended at the
  // call site, and we should take advantage of this knowledge. The MRI
  // keeps a list of pairs of live-in physical and virtual registers,
  // which provides information about which virtual registers will hold
  // the argument values. The function will still contain instructions
  // defining those virtual registers, and in practice those are COPY
  // instructions from a physical to a virtual register. In such cases,
  // applying the argument extension to the virtual register can be seen
  // as simply mirroring the extension that had already been applied to
  // the physical register at the call site. If the defining instruction
  // was not a COPY, it would not be clear how to mirror that extension
  // on the callee's side. For that reason, only check COPY instructions
  // for potential extensions.
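  // For instance (illustrative only):
  //   %vreg1 = COPY %R0
  // where %R0 carries an i16 signext argument: the extension recorded for
  // %vreg1 in VRX mirrors the sign-extension performed at the call site.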
  if (MI->isCopy()) {
    if (evaluateFormalCopy(MI, Inputs, Outputs))
      return true;
  }
  // Beyond this point, if any operand is a global, skip that instruction.
  // The reason is that certain instructions that can take an immediate
  // operand can also have a global symbol in that operand. To avoid
  // checking what kind of operand a given instruction has individually
  // for each instruction, do it here. Global symbols as operands generally
  // do not provide any useful information.
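  // For example (illustration): a transfer-immediate may be written either
  // as "r0 = #255" or as "r0 = ##some_global"; only the former says
  // anything about the resulting bits.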
  for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isGlobal() || MO.isBlockAddress() || MO.isSymbol() || MO.isJTI() ||
        MO.isCPI())
      return false;
  }
  RegisterRefs Reg(MI);
  unsigned Opc = MI->getOpcode();
  using namespace Hexagon;
#define op(i) MI->getOperand(i)
#define rc(i) RegisterCell::ref(getCell(Reg[i],Inputs))
#define im(i) MI->getOperand(i).getImm()

  // If the instruction has no register operands, skip it.
  if (Reg.size() == 0)
    return false;

  // Record result for register in operand 0.
  auto rr0 = [this,Reg] (const BT::RegisterCell &Val, CellMapType &Outputs)
        -> bool {
    putCell(Reg[0], Val, Outputs);
    return true;
  };
  // Get the cell corresponding to the N-th operand.
  auto cop = [this,Reg,MI,Inputs] (unsigned N, uint16_t W)
        -> BT::RegisterCell {
    const MachineOperand &Op = MI->getOperand(N);
    if (Op.isImm())
      return eIMM(Op.getImm(), W);
    if (!Op.isReg())
      return RegisterCell::self(0, W);
    assert(getRegBitWidth(Reg[N]) == W && "Register width mismatch");
    return rc(N);
  };
  // Extract RW low bits of the cell.
  auto lo = [this] (const BT::RegisterCell &RC, uint16_t RW)
        -> BT::RegisterCell {
    assert(RW <= RC.width());
    return eXTR(RC, 0, RW);
  };
  // Extract RW high bits of the cell.
  auto hi = [this] (const BT::RegisterCell &RC, uint16_t RW)
        -> BT::RegisterCell {
    uint16_t W = RC.width();
    assert(RW <= W);
    return eXTR(RC, W-RW, W);
  };
  // Extract N-th halfword (counting from the least significant position).
  auto half = [this] (const BT::RegisterCell &RC, unsigned N)
        -> BT::RegisterCell {
    assert(N*16+16 <= RC.width());
    return eXTR(RC, N*16, N*16+16);
  };
  // Shuffle bits (pick even/odd from cells and merge into result).
  auto shuffle = [this] (const BT::RegisterCell &Rs, const BT::RegisterCell &Rt,
                         uint16_t BW, bool Odd) -> BT::RegisterCell {
    uint16_t I = Odd, Ws = Rs.width();
    assert(Ws == Rt.width());
    RegisterCell RC = eXTR(Rt, I*BW, I*BW+BW).cat(eXTR(Rs, I*BW, I*BW+BW));
    I += 2;
    while (I*BW < Ws) {
      RC.cat(eXTR(Rt, I*BW, I*BW+BW)).cat(eXTR(Rs, I*BW, I*BW+BW));
      I += 2;
    }
    return RC;
  };
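  // E.g. shuffle(Rs, Rt, 8, false) on 32-bit cells picks the even bytes of
  // both inputs and interleaves them; reading from the least significant
  // byte up, the result is { Rt.b0, Rs.b0, Rt.b2, Rs.b2 }.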
  // The bitwidth of the 0th operand. In most (if not all) of the
  // instructions below, the 0th operand is the defined register.
  // Pre-compute the bitwidth here, because it is needed in many cases
  // below.
  uint16_t W0 = (Reg[0].Reg != 0) ? getRegBitWidth(Reg[0]) : 0;

  switch (Opc) {
    // Transfer immediate:

    case A2_tfrsi:
    case A2_tfrpi:
    case CONST32_Float_Real:
    case CONST32_Int_Real:
    case CONST64_Float_Real:
    case CONST64_Int_Real:
      return rr0(eIMM(im(1), W0), Outputs);
    case TFR_PdFalse:
      return rr0(RegisterCell(W0).fill(0, W0, BT::BitValue::Zero), Outputs);
    case TFR_PdTrue:
      return rr0(RegisterCell(W0).fill(0, W0, BT::BitValue::One), Outputs);
    case TFR_FI: {
      int FI = op(1).getIndex();
      int Off = op(2).getImm();
      unsigned A = MFI.getObjectAlignment(FI) + std::abs(Off);
      unsigned L = Log2_32(A);
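      // E.g. (illustration): an 8-byte aligned object with offset 0 gives
      // A = 8 and L = 3, so the three low bits of the address are known
      // to be zero.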
      RegisterCell RC = RegisterCell::self(Reg[0].Reg, W0);
      RC.fill(0, L, BT::BitValue::Zero);
      return rr0(RC, Outputs);
    }
    // Transfer register:

    case A2_tfr:
    case A2_tfrp:
    case C2_pxfer_map:
      return rr0(rc(1), Outputs);
    case C2_tfrpr: {
      uint16_t RW = W0;
      uint16_t PW = 8; // XXX Pred size: getRegBitWidth(Reg[1]);
      assert(PW <= RW);
      RegisterCell PC = eXTR(rc(1), 0, PW);
      RegisterCell RC = RegisterCell(RW).insert(PC, BT::BitMask(0, PW-1));
      RC.fill(PW, RW, BT::BitValue::Zero);
      return rr0(RC, Outputs);
    }
    case C2_tfrrp: {
      RegisterCell RC = RegisterCell::self(Reg[0].Reg, W0);
      W0 = 8; // XXX Pred size
      return rr0(eINS(RC, eXTR(rc(1), 0, W0), 0), Outputs);
    }
    // Arithmetic:

    case A2_addsp: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      assert(W0 == 64 && W1 == 32);
      RegisterCell CW = RegisterCell(W0).insert(rc(1), BT::BitMask(0, W1-1));
      RegisterCell RC = eADD(eSXT(CW, W1), rc(2));
      return rr0(RC, Outputs);
    }
    case A2_add:
    case A2_addp:
      return rr0(eADD(rc(1), rc(2)), Outputs);
    case A2_addi:
      return rr0(eADD(rc(1), eIMM(im(2), W0)), Outputs);
    case S4_addi_asl_ri: {
      RegisterCell RC = eADD(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_addi_lsr_ri: {
      RegisterCell RC = eADD(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_addaddi: {
      RegisterCell RC = eADD(rc(1), eADD(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M4_mpyri_addi: {
      RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eADD(eIMM(im(1), W0), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyrr_addi: {
      RegisterCell M = eMLS(rc(2), rc(3));
      RegisterCell RC = eADD(eIMM(im(1), W0), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyri_addr_u2: {
      RegisterCell M = eMLS(eIMM(im(2), W0), rc(3));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyri_addr: {
      RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M4_mpyrr_addr: {
      RegisterCell M = eMLS(rc(2), rc(3));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case S4_subaddi: {
      RegisterCell RC = eADD(rc(1), eSUB(eIMM(im(2), W0), rc(3)));
      return rr0(RC, Outputs);
    }
    case M2_accii: {
      RegisterCell RC = eADD(rc(1), eADD(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M2_acci: {
      RegisterCell RC = eADD(rc(1), eADD(rc(2), rc(3)));
      return rr0(RC, Outputs);
    }
    case M2_subacc: {
      RegisterCell RC = eADD(rc(1), eSUB(rc(2), rc(3)));
      return rr0(RC, Outputs);
    }
    case S2_addasl_rrri: {
      RegisterCell RC = eADD(rc(1), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case C4_addipc: {
      RegisterCell RPC = RegisterCell::self(Reg[0].Reg, W0);
      RPC.fill(0, 2, BT::BitValue::Zero);
      return rr0(eADD(RPC, eIMM(im(2), W0)), Outputs);
    }
    case A2_sub:
    case A2_subp:
      return rr0(eSUB(rc(1), rc(2)), Outputs);
    case A2_subri:
      return rr0(eSUB(eIMM(im(1), W0), rc(2)), Outputs);
    case S4_subi_asl_ri: {
      RegisterCell RC = eSUB(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_subi_lsr_ri: {
      RegisterCell RC = eSUB(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case M2_naccii: {
      RegisterCell RC = eSUB(rc(1), eADD(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M2_nacci: {
      RegisterCell RC = eSUB(rc(1), eADD(rc(2), rc(3)));
      return rr0(RC, Outputs);
    }
    // 32-bit negation is done by "Rd = A2_subri 0, Rs"
    case A2_negp:
      return rr0(eSUB(eIMM(0, W0), rc(1)), Outputs);
    case M2_mpy_up: {
      RegisterCell M = eMLS(rc(1), rc(2));
      return rr0(hi(M, W0), Outputs);
    }
    case M2_dpmpyss_s0:
      return rr0(eMLS(rc(1), rc(2)), Outputs);
    case M2_dpmpyss_acc_s0:
      return rr0(eADD(rc(1), eMLS(rc(2), rc(3))), Outputs);
    case M2_dpmpyss_nac_s0:
      return rr0(eSUB(rc(1), eMLS(rc(2), rc(3))), Outputs);
    case M2_mpyi: {
      RegisterCell M = eMLS(rc(1), rc(2));
      return rr0(lo(M, W0), Outputs);
    }
    case M2_macsip: {
      RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M2_macsin: {
      RegisterCell M = eMLS(rc(2), eIMM(im(3), W0));
      RegisterCell RC = eSUB(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M2_maci: {
      RegisterCell M = eMLS(rc(2), rc(3));
      RegisterCell RC = eADD(rc(1), lo(M, W0));
      return rr0(RC, Outputs);
    }
    case M2_mpysmi: {
      RegisterCell M = eMLS(rc(1), eIMM(im(2), W0));
      return rr0(lo(M, 32), Outputs);
    }
    case M2_mpysin: {
      RegisterCell M = eMLS(rc(1), eIMM(-im(2), W0));
      return rr0(lo(M, 32), Outputs);
    }
    case M2_mpysip: {
      RegisterCell M = eMLS(rc(1), eIMM(im(2), W0));
      return rr0(lo(M, 32), Outputs);
    }
    case M2_mpyu_up: {
      RegisterCell M = eMLU(rc(1), rc(2));
      return rr0(hi(M, W0), Outputs);
    }
    case M2_dpmpyuu_s0:
      return rr0(eMLU(rc(1), rc(2)), Outputs);
    case M2_dpmpyuu_acc_s0:
      return rr0(eADD(rc(1), eMLU(rc(2), rc(3))), Outputs);
    case M2_dpmpyuu_nac_s0:
      return rr0(eSUB(rc(1), eMLU(rc(2), rc(3))), Outputs);
    // Logical:

    case A2_andir:
      return rr0(eAND(rc(1), eIMM(im(2), W0)), Outputs);
    case A2_and:
    case A2_andp:
      return rr0(eAND(rc(1), rc(2)), Outputs);
    case A4_andn:
    case A4_andnp:
      return rr0(eAND(rc(1), eNOT(rc(2))), Outputs);
    case S4_andi_asl_ri: {
      RegisterCell RC = eAND(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_andi_lsr_ri: {
      RegisterCell RC = eAND(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case M4_and_and:
      return rr0(eAND(rc(1), eAND(rc(2), rc(3))), Outputs);
    case M4_and_andn:
      return rr0(eAND(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case M4_and_or:
      return rr0(eAND(rc(1), eORL(rc(2), rc(3))), Outputs);
    case M4_and_xor:
      return rr0(eAND(rc(1), eXOR(rc(2), rc(3))), Outputs);
    case A2_orir:
      return rr0(eORL(rc(1), eIMM(im(2), W0)), Outputs);
    case A2_or:
    case A2_orp:
      return rr0(eORL(rc(1), rc(2)), Outputs);
    case A4_orn:
    case A4_ornp:
      return rr0(eORL(rc(1), eNOT(rc(2))), Outputs);
    case S4_ori_asl_ri: {
      RegisterCell RC = eORL(eIMM(im(1), W0), eASL(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case S4_ori_lsr_ri: {
      RegisterCell RC = eORL(eIMM(im(1), W0), eLSR(rc(2), im(3)));
      return rr0(RC, Outputs);
    }
    case M4_or_and:
      return rr0(eORL(rc(1), eAND(rc(2), rc(3))), Outputs);
    case M4_or_andn:
      return rr0(eORL(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case S4_or_andi:
    case S4_or_andix: {
      RegisterCell RC = eORL(rc(1), eAND(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case S4_or_ori: {
      RegisterCell RC = eORL(rc(1), eORL(rc(2), eIMM(im(3), W0)));
      return rr0(RC, Outputs);
    }
    case M4_or_or:
      return rr0(eORL(rc(1), eORL(rc(2), rc(3))), Outputs);
    case M4_or_xor:
      return rr0(eORL(rc(1), eXOR(rc(2), rc(3))), Outputs);
    case A2_xor:
    case A2_xorp:
      return rr0(eXOR(rc(1), rc(2)), Outputs);
    case M4_xor_and:
      return rr0(eXOR(rc(1), eAND(rc(2), rc(3))), Outputs);
    case M4_xor_andn:
      return rr0(eXOR(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case M4_xor_or:
      return rr0(eXOR(rc(1), eORL(rc(2), rc(3))), Outputs);
    case M4_xor_xacc:
      return rr0(eXOR(rc(1), eXOR(rc(2), rc(3))), Outputs);
    case A2_notp:
      return rr0(eNOT(rc(1)), Outputs);
    case S2_asl_i_r:
    case S2_asl_i_p:
      return rr0(eASL(rc(1), im(2)), Outputs);
    case A2_aslh:
      return rr0(eASL(rc(1), 16), Outputs);
    case S2_asl_i_r_acc:
    case S2_asl_i_p_acc:
      return rr0(eADD(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_nac:
    case S2_asl_i_p_nac:
      return rr0(eSUB(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_and:
    case S2_asl_i_p_and:
      return rr0(eAND(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_or:
    case S2_asl_i_p_or:
      return rr0(eORL(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asl_i_r_xacc:
    case S2_asl_i_p_xacc:
      return rr0(eXOR(rc(1), eASL(rc(2), im(3))), Outputs);
    case S2_asr_i_r:
    case S2_asr_i_p:
      return rr0(eASR(rc(1), im(2)), Outputs);
    case A2_asrh:
      return rr0(eASR(rc(1), 16), Outputs);
    case S2_asr_i_r_acc:
    case S2_asr_i_p_acc:
      return rr0(eADD(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_nac:
    case S2_asr_i_p_nac:
      return rr0(eSUB(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_and:
    case S2_asr_i_p_and:
      return rr0(eAND(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_or:
    case S2_asr_i_p_or:
      return rr0(eORL(rc(1), eASR(rc(2), im(3))), Outputs);
    case S2_asr_i_r_rnd: {
      // The input is first sign-extended to 64 bits, then the output
      // is truncated back to 32 bits.
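      // Worked example (illustration): with Rs = 11 and #u5 = 1, the
      // computation below yields ((11 >> 1) + 1) >> 1 = 3.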
      RegisterCell XC = eSXT(rc(1).cat(eIMM(0, W0)), W0);
      RegisterCell RC = eASR(eADD(eASR(XC, im(2)), eIMM(1, 2*W0)), 1);
      return rr0(eXTR(RC, 0, W0), Outputs);
    }
    case S2_asr_i_r_rnd_goodsyntax: {
      int64_t S = im(2);
      if (S == 0)
        return rr0(rc(1), Outputs);
      // Result: S2_asr_i_r_rnd Rs, u5-1
      RegisterCell XC = eSXT(rc(1).cat(eIMM(0, W0)), W0);
      RegisterCell RC = eLSR(eADD(eASR(XC, S-1), eIMM(1, 2*W0)), 1);
      return rr0(eXTR(RC, 0, W0), Outputs);
    }
    case S2_asr_i_svw_trun:
    case S2_lsr_i_r:
    case S2_lsr_i_p:
      return rr0(eLSR(rc(1), im(2)), Outputs);
    case S2_lsr_i_r_acc:
    case S2_lsr_i_p_acc:
      return rr0(eADD(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_nac:
    case S2_lsr_i_p_nac:
      return rr0(eSUB(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_and:
    case S2_lsr_i_p_and:
      return rr0(eAND(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_or:
    case S2_lsr_i_p_or:
      return rr0(eORL(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_lsr_i_r_xacc:
    case S2_lsr_i_p_xacc:
      return rr0(eXOR(rc(1), eLSR(rc(2), im(3))), Outputs);
    case S2_clrbit_i: {
      RegisterCell RC = rc(1);
      RC[im(2)] = BT::BitValue::Zero;
      return rr0(RC, Outputs);
    }
    case S2_setbit_i: {
      RegisterCell RC = rc(1);
      RC[im(2)] = BT::BitValue::One;
      return rr0(RC, Outputs);
    }
    case S2_togglebit_i: {
      RegisterCell RC = rc(1);
      uint16_t BX = im(2);
      RC[BX] = RC[BX].is(0) ? BT::BitValue::One
             : RC[BX].is(1) ? BT::BitValue::Zero
                            : BT::BitValue::self();
      return rr0(RC, Outputs);
    }
    case A4_bitspliti: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      uint16_t BX = im(2);
      // Res.uw[1] = Rs[bx+1:], Res.uw[0] = Rs[0:bx]
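      // E.g. (illustration): for bx = 8, the low word of the result is
      // zxt(Rs[7:0]) and the high word is zxt(Rs[31:8]).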
      const BT::BitValue Zero = BT::BitValue::Zero;
      RegisterCell RZ = RegisterCell(W0).fill(BX, W1, Zero)
                                        .fill(W1+(W1-BX), W0, Zero);
      RegisterCell BF1 = eXTR(rc(1), 0, BX), BF2 = eXTR(rc(1), BX, W1);
      RegisterCell RC = eINS(eINS(RZ, BF1, 0), BF2, W1);
      return rr0(RC, Outputs);
    }
    case S2_extractu:
    case S2_extractup:
    case S4_extract:
    case S4_extractp: {
      uint16_t Wd = im(2), Of = im(3);
      if (Wd == 0)
        return rr0(eIMM(0, W0), Outputs);
      // If the width extends beyond the register size, pad the register
      // with zero bits.
      RegisterCell Pad = (Wd+Of > W0) ? rc(1).cat(eIMM(0, Wd+Of-W0)) : rc(1);
      RegisterCell Ext = eXTR(Pad, Of, Wd+Of);
      // Ext is short, need to extend it with 0s or sign bit.
      RegisterCell RC = RegisterCell(W0).insert(Ext, BT::BitMask(0, Wd-1));
      if (Opc == S2_extractu || Opc == S2_extractup)
        return rr0(eZXT(RC, Wd), Outputs);
      return rr0(eSXT(RC, Wd), Outputs);
    }
    case S2_insert:
    case S2_insertp: {
      uint16_t Wd = im(3), Of = im(4);
      assert(Wd < W0 && Of < W0);
      // If Wd+Of exceeds W0, the inserted bits are truncated.
      if (Wd == 0)
        return rr0(rc(1), Outputs);
      return rr0(eINS(rc(1), eXTR(rc(2), 0, Wd), Of), Outputs);
    }
    case A2_combineii:
    case A4_combineii:
    case A4_combineir:
    case A4_combineri:
    case A2_combinew:
      return rr0(cop(2, W0/2).cat(cop(1, W0/2)), Outputs);
    case A2_combine_ll:
    case A2_combine_lh:
    case A2_combine_hl:
    case A2_combine_hh: {
      assert(W0 == 32);
      assert(getRegBitWidth(Reg[1]) == 32 && getRegBitWidth(Reg[2]) == 32);
      // Low half in the output is 0 for _ll and _hl, 1 otherwise:
      unsigned LoH = !(Opc == A2_combine_ll || Opc == A2_combine_hl);
      // High half in the output is 0 for _ll and _lh, 1 otherwise:
      unsigned HiH = !(Opc == A2_combine_ll || Opc == A2_combine_lh);
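      // E.g. for A2_combine_hl the result's low halfword comes from the
      // low half of the second source and its high halfword from the
      // high half of the first source.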
      RegisterCell R1 = rc(1);
      RegisterCell R2 = rc(2);
      RegisterCell RC = half(R2, LoH).cat(half(R1, HiH));
      return rr0(RC, Outputs);
    }
    case S2_packhl: {
      assert(getRegBitWidth(Reg[1]) == 32 && getRegBitWidth(Reg[2]) == 32);
      RegisterCell R1 = rc(1);
      RegisterCell R2 = rc(2);
      RegisterCell RC = half(R2, 0).cat(half(R1, 0)).cat(half(R2, 1))
                                   .cat(half(R1, 1));
      return rr0(RC, Outputs);
    }
    case S2_shuffeb: {
      RegisterCell RC = shuffle(rc(1), rc(2), 8, false);
      return rr0(RC, Outputs);
    }
    case S2_shuffeh: {
      RegisterCell RC = shuffle(rc(1), rc(2), 16, false);
      return rr0(RC, Outputs);
    }
    case S2_shuffob: {
      RegisterCell RC = shuffle(rc(1), rc(2), 8, true);
      return rr0(RC, Outputs);
    }
    case S2_shuffoh: {
      RegisterCell RC = shuffle(rc(1), rc(2), 16, true);
      return rr0(RC, Outputs);
    }
    case C2_mask: {
      uint16_t WR = W0;
      uint16_t WP = 8; // XXX Pred size: getRegBitWidth(Reg[1]);
      assert(WR == 64 && WP == 8);
      RegisterCell R1 = rc(1);
      RegisterCell RC(WR);
      for (uint16_t i = 0; i < WP; ++i) {
        const BT::BitValue &V = R1[i];
        BT::BitValue F = (V.is(0) || V.is(1)) ? V : BT::BitValue::self();
        RC.fill(i*8, i*8+8, F);
      }
      return rr0(RC, Outputs);
    }
    case C2_mux:
    case C2_muxii:
    case C2_muxir:
    case C2_muxri: {
      BT::BitValue PC0 = rc(1)[0];
      RegisterCell R2 = cop(2, W0);
      RegisterCell R3 = cop(3, W0);
      if (PC0.is(0) || PC0.is(1))
        return rr0(RegisterCell::ref(PC0 ? R2 : R3), Outputs);
      R2.meet(R3, Reg[0].Reg);
      return rr0(R2, Outputs);
    }
    // Sign- and zero-extension:

    case A2_sxtb:
      return rr0(eSXT(rc(1), 8), Outputs);
    case A2_sxth:
      return rr0(eSXT(rc(1), 16), Outputs);
    case A2_sxtw: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      assert(W0 == 64 && W1 == 32);
      RegisterCell RC = eSXT(rc(1).cat(eIMM(0, W1)), W1);
      return rr0(RC, Outputs);
    }
    case A2_zxtb:
      return rr0(eZXT(rc(1), 8), Outputs);
    case A2_zxth:
      return rr0(eZXT(rc(1), 16), Outputs);
    // Count leading/trailing bits:

    case S2_cl0:
    case S2_cl0p:
      // Always produce a 32-bit result.
      return rr0(eCLB(rc(1), 0/*bit*/, 32), Outputs);
    case S2_cl1:
    case S2_cl1p:
      return rr0(eCLB(rc(1), 1/*bit*/, 32), Outputs);
    case S2_clb:
    case S2_clbp: {
      uint16_t W1 = getRegBitWidth(Reg[1]);
      RegisterCell R1 = rc(1);
      BT::BitValue TV = R1[W1-1];
      if (TV.is(0) || TV.is(1))
        return rr0(eCLB(R1, TV, 32), Outputs);
      break;
    }
    case S2_ct0:
    case S2_ct0p:
      return rr0(eCTB(rc(1), 0/*bit*/, 32), Outputs);
    case S2_ct1:
    case S2_ct1p:
      return rr0(eCTB(rc(1), 1/*bit*/, 32), Outputs);
    case C2_all8: {
      RegisterCell P1 = rc(1);
      bool Has0 = false, All1 = true;
      for (uint16_t i = 0; i < 8/*XXX*/; ++i) {
        const BT::BitValue &V = P1[i];
        if (V.is(0))
          Has0 = true;
        else if (!V.is(1))
          All1 = false;
      }
      if (!Has0 && !All1)
        break;
      RegisterCell RC(W0);
      RC.fill(0, W0, (All1 ? BT::BitValue::One : BT::BitValue::Zero));
      return rr0(RC, Outputs);
    }
    case C2_any8: {
      RegisterCell P1 = rc(1);
      bool Has1 = false, All0 = true;
      for (uint16_t i = 0; i < 8/*XXX*/; ++i) {
        const BT::BitValue &V = P1[i];
        if (V.is(1))
          Has1 = true;
        else if (!V.is(0))
          All0 = false;
      }
      if (!Has1 && !All0)
        break;
      RegisterCell RC(W0);
      RC.fill(0, W0, (Has1 ? BT::BitValue::One : BT::BitValue::Zero));
      return rr0(RC, Outputs);
    }
    case C2_and:
      return rr0(eAND(rc(1), rc(2)), Outputs);
    case C2_andn:
      return rr0(eAND(rc(1), eNOT(rc(2))), Outputs);
    case C2_not:
      return rr0(eNOT(rc(1)), Outputs);
    case C2_or:
      return rr0(eORL(rc(1), rc(2)), Outputs);
    case C2_orn:
      return rr0(eORL(rc(1), eNOT(rc(2))), Outputs);
    case C2_xor:
      return rr0(eXOR(rc(1), rc(2)), Outputs);
    case C4_and_and:
      return rr0(eAND(rc(1), eAND(rc(2), rc(3))), Outputs);
    case C4_and_andn:
      return rr0(eAND(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case C4_and_or:
      return rr0(eAND(rc(1), eORL(rc(2), rc(3))), Outputs);
    case C4_and_orn:
      return rr0(eAND(rc(1), eORL(rc(2), eNOT(rc(3)))), Outputs);
    case C4_or_and:
      return rr0(eORL(rc(1), eAND(rc(2), rc(3))), Outputs);
    case C4_or_andn:
      return rr0(eORL(rc(1), eAND(rc(2), eNOT(rc(3)))), Outputs);
    case C4_or_or:
      return rr0(eORL(rc(1), eORL(rc(2), rc(3))), Outputs);
    case C4_or_orn:
      return rr0(eORL(rc(1), eORL(rc(2), eNOT(rc(3)))), Outputs);
    case S2_tstbit_i:
    case S4_ntstbit_i: {
      BT::BitValue V = rc(1)[im(2)];
      if (V.is(0) || V.is(1)) {
        // If instruction is S2_tstbit_i, test for 1, otherwise test for 0.
        bool TV = (Opc == S2_tstbit_i);
        BT::BitValue F = V.is(TV) ? BT::BitValue::One : BT::BitValue::Zero;
        return rr0(RegisterCell(W0).fill(0, W0, F), Outputs);
      }
      break;
    }
  }

  return MachineEvaluator::evaluate(MI, Inputs, Outputs);

#undef im
#undef rc
#undef op
}
bool HexagonEvaluator::evaluate(const MachineInstr *BI,
      const CellMapType &Inputs, BranchTargetList &Targets,
      bool &FallsThru) const {
  // We need to evaluate one branch at a time. TII::AnalyzeBranch checks
  // all the branches in a basic block at once, so we cannot use it.
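  // E.g. (illustration): "if (!p0) jump .LBB0_3" is a J2_jumpf with the
  // predicate as operand 0 and the target block as operand 1, while
  // J2_jump is unconditional.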
  unsigned Opc = BI->getOpcode();
  bool SimpleBranch = false;
  bool Negated = false;
  switch (Opc) {
    case Hexagon::J2_jumpf:
    case Hexagon::J2_jumpfnew:
    case Hexagon::J2_jumpfnewpt:
      Negated = true;
    case Hexagon::J2_jumpt:
    case Hexagon::J2_jumptnew:
    case Hexagon::J2_jumptnewpt:
      // Simple branch:  if([!]Pn) jump ...
      // i.e. Op0 = predicate, Op1 = branch target.
      SimpleBranch = true;
      break;
    case Hexagon::J2_jump:
      Targets.insert(BI->getOperand(0).getMBB());
      FallsThru = false;
      return true;
    default:
      // If the branch is of unknown type, assume that all successors are
      // executable.
      return false;
  }

  if (!SimpleBranch)
    return false;
  // BI is a conditional branch if we got here.
  RegisterRef PR = BI->getOperand(0);
  RegisterCell PC = getCell(PR, Inputs);
  const BT::BitValue &Test = PC[0];

  // If the condition is neither true nor false, then it's unknown.
  if (!Test.is(0) && !Test.is(1))
    return false;

  // "Test.is(!Negated)" means "branch condition is true".
  if (!Test.is(!Negated)) {
    // Condition known to be false.
    FallsThru = true;
    return true;
  }

  Targets.insert(BI->getOperand(1).getMBB());
  FallsThru = false;
  return true;
}
bool HexagonEvaluator::evaluateLoad(const MachineInstr *MI,
      const CellMapType &Inputs, CellMapType &Outputs) const {
  if (TII.isPredicated(MI))
    return false;
  assert(MI->mayLoad() && "A load that mayn't?");
  unsigned Opc = MI->getOpcode();

  uint16_t BitNum;
  bool SignEx;
  using namespace Hexagon;

  switch (Opc) {
    default:
      return false;
#if 0
    // memb_fifo
    case L2_loadalignb_pbr:
    case L2_loadalignb_pcr:
    case L2_loadalignb_pi:
    // memh_fifo
    case L2_loadalignh_pbr:
    case L2_loadalignh_pcr:
    case L2_loadalignh_pi:
    // membh
    case L2_loadbsw2_pbr:
    case L2_loadbsw2_pci:
    case L2_loadbsw2_pcr:
    case L2_loadbsw2_pi:
    case L2_loadbsw4_pbr:
    case L2_loadbsw4_pci:
    case L2_loadbsw4_pcr:
    case L2_loadbsw4_pi:
    // memubh
    case L2_loadbzw2_pbr:
    case L2_loadbzw2_pci:
    case L2_loadbzw2_pcr:
    case L2_loadbzw2_pi:
    case L2_loadbzw4_pbr:
    case L2_loadbzw4_pci:
    case L2_loadbzw4_pcr:
    case L2_loadbzw4_pi:
#endif
    case L4_loadrub_abs:
      BitNum = 8;
      SignEx = false;
      break;
    case L2_loadruh_pbr:
    case L2_loadruh_pci:
    case L2_loadruh_pcr:
    case L4_loadruh_abs:
      BitNum = 16;
      SignEx = false;
      break;
    case L2_loadw_locked:
      BitNum = 32;
      SignEx = true;
      break;
    case L4_loadd_locked:
      BitNum = 64;
      SignEx = true;
      break;
  }
  const MachineOperand &MD = MI->getOperand(0);
  assert(MD.isReg() && MD.isDef());
  RegisterRef RD = MD;

  uint16_t W = getRegBitWidth(RD);
  assert(W >= BitNum && BitNum > 0);
  RegisterCell Res(W);

  for (uint16_t i = 0; i < BitNum; ++i)
    Res[i] = BT::BitValue::self(BT::BitRef(RD.Reg, i));
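  // E.g. (illustration): for an 8-bit load into a 32-bit register, bits
  // 0..7 are the loaded bits; below, bits 8..31 become copies of bit 7
  // for a sign-extending load, or known zeros for a zero-extending one.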
  if (SignEx) {
    const BT::BitValue &Sign = Res[BitNum-1];
    for (uint16_t i = BitNum; i < W; ++i)
      Res[i] = BT::BitValue::ref(Sign);
  } else {
    for (uint16_t i = BitNum; i < W; ++i)
      Res[i] = BT::BitValue::Zero;
  }

  putCell(RD, Res, Outputs);
  return true;
}
bool HexagonEvaluator::evaluateFormalCopy(const MachineInstr *MI,
      const CellMapType &Inputs, CellMapType &Outputs) const {
  // If MI defines a formal parameter, but is not a copy (loads are handled
  // in evaluateLoad), then it's not clear what to do.
  assert(MI->isCopy());
  RegisterRef RD = MI->getOperand(0);
  RegisterRef RS = MI->getOperand(1);
  assert(RD.Sub == 0);
  if (!TargetRegisterInfo::isPhysicalRegister(RS.Reg))
    return false;
  RegExtMap::const_iterator F = VRX.find(RD.Reg);
  if (F == VRX.end())
    return false;

  uint16_t EW = F->second.Width;
  // Store RD's cell into the map. This will associate the cell with a virtual
  // register, and make zero-/sign-extends possible (otherwise we would be
  // extending "self" bit values, which will have no effect, since "self"
  // values cannot be references to anything).
  putCell(RD, getCell(RS, Inputs), Outputs);

  RegisterCell Res;
  // Read RD's cell from the outputs instead of RS's cell from the inputs:
  if (F->second.Type == ExtType::SExt)
    Res = eSXT(getCell(RD, Outputs), EW);
  else if (F->second.Type == ExtType::ZExt)
    Res = eZXT(getCell(RD, Outputs), EW);

  putCell(RD, Res, Outputs);
  return true;
}
unsigned HexagonEvaluator::getNextPhysReg(unsigned PReg, unsigned Width) const {
  using namespace Hexagon;
  bool Is64 = DoubleRegsRegClass.contains(PReg);
  assert(PReg == 0 || Is64 || IntRegsRegClass.contains(PReg));

  static const unsigned Phys32[] = { R0, R1, R2, R3, R4, R5 };
  static const unsigned Phys64[] = { D0, D1, D2 };
  const unsigned Num32 = sizeof(Phys32)/sizeof(unsigned);
  const unsigned Num64 = sizeof(Phys64)/sizeof(unsigned);

  // Return the first parameter register of the required width.
  if (PReg == 0)
    return (Width <= 32) ? Phys32[0] : Phys64[0];
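  // E.g. (illustration): getNextPhysReg(R0, 64) returns D1, because D0
  // aliases the pair R1:R0, which is already claimed by R0.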
  // Set Idx32, Idx64 in such a way that Idx+1 would give the index of the
  // next register.
  unsigned Idx32 = 0, Idx64 = 0;
  if (!Is64) {
    while (Idx32 < Num32) {
      if (Phys32[Idx32] == PReg)
        break;
      ++Idx32;
    }
    Idx64 = Idx32/2;
  } else {
    while (Idx64 < Num64) {
      if (Phys64[Idx64] == PReg)
        break;
      ++Idx64;
    }
    Idx32 = Idx64*2+1;
  }

  if (Width <= 32)
    return (Idx32+1 < Num32) ? Phys32[Idx32+1] : 0;
  return (Idx64+1 < Num64) ? Phys64[Idx64+1] : 0;
}
unsigned HexagonEvaluator::getVirtRegFor(unsigned PReg) const {
  typedef MachineRegisterInfo::livein_iterator iterator;
  for (iterator I = MRI.livein_begin(), E = MRI.livein_end(); I != E; ++I) {
    if (I->first == PReg)
      return I->second;
  }
  return 0;
}