1 //===----------- PPCVSXSwapRemoval.cpp - Remove VSX LE Swaps -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===---------------------------------------------------------------------===//
10 // This pass analyzes vector computations and removes unnecessary
11 // doubleword swaps (xxswapd instructions). This pass is performed
12 // only for little-endian VSX code generation.
14 // For this specific case, loads and stores of v4i32, v4f32, v2i64,
15 // and v2f64 vectors are inefficient. These are implemented using
16 // the lxvd2x and stxvd2x instructions, which invert the order of
17 // doublewords in a vector register. Thus code generation inserts
18 // an xxswapd after each such load, and prior to each such store.
20 // The extra xxswapd instructions reduce performance. The purpose
21 // of this pass is to reduce the number of xxswapd instructions
22 // required for correctness.
24 // The primary insight is that much code that operates on vectors
25 // does not care about the relative order of elements in a register,
26 // so long as the correct memory order is preserved. If we have a
27 // computation where all input values are provided by lxvd2x/xxswapd,
28 // all outputs are stored using xxswapd/lxvd2x, and all intermediate
29 // computations are lane-insensitive (independent of element order),
30 // then all the xxswapd instructions associated with the loads and
31 // stores may be removed without changing observable semantics.
33 // This pass uses standard equivalence class infrastructure to create
34 // maximal webs of computations fitting the above description. Each
35 // such web is then optimized by removing its unnecessary xxswapd
38 // There are some lane-sensitive operations for which we can still
39 // permit the optimization, provided we modify those operations
40 // accordingly. Such operations are identified as using "special
41 // handling" within this module.
43 //===---------------------------------------------------------------------===//
45 #include "PPCInstrInfo.h"
47 #include "PPCInstrBuilder.h"
48 #include "PPCTargetMachine.h"
49 #include "llvm/ADT/DenseMap.h"
50 #include "llvm/ADT/EquivalenceClasses.h"
51 #include "llvm/CodeGen/MachineFunctionPass.h"
52 #include "llvm/CodeGen/MachineInstrBuilder.h"
53 #include "llvm/CodeGen/MachineRegisterInfo.h"
54 #include "llvm/Support/Debug.h"
55 #include "llvm/Support/Format.h"
56 #include "llvm/Support/raw_ostream.h"
60 #define DEBUG_TYPE "ppc-vsx-swaps"
63 void initializePPCVSXSwapRemovalPass(PassRegistry&);
68 // A PPCVSXSwapEntry is created for each machine instruction that
69 // is relevant to a vector computation.
70 struct PPCVSXSwapEntry {
71 // Pointer to the instruction.
74 // Unique ID (position in the swap vector).
77 // Attributes of this node.
78 unsigned int IsLoad : 1;
79 unsigned int IsStore : 1;
80 unsigned int IsSwap : 1;
81 unsigned int MentionsPhysVR : 1;
82 unsigned int IsSwappable : 1;
83 unsigned int MentionsPartialVR : 1;
84 unsigned int SpecialHandling : 3;
85 unsigned int WebRejected : 1;
86 unsigned int WillRemove : 1;
100 struct PPCVSXSwapRemoval : public MachineFunctionPass {
103 const PPCInstrInfo *TII;
105 MachineRegisterInfo *MRI;
107 // Swap entries are allocated in a vector for better performance.
108 std::vector<PPCVSXSwapEntry> SwapVector;
110 // A mapping is maintained between machine instructions and
111 // their swap entries. The key is the address of the MI.
112 DenseMap<MachineInstr*, int> SwapMap;
114 // Equivalence classes are used to gather webs of related computation.
115 // Swap entries are represented by their VSEId fields.
116 EquivalenceClasses<int> *EC;
118 PPCVSXSwapRemoval() : MachineFunctionPass(ID) {
119 initializePPCVSXSwapRemovalPass(*PassRegistry::getPassRegistry());
123 // Initialize data structures.
124 void initialize(MachineFunction &MFParm);
126 // Walk the machine instructions to gather vector usage information.
127 // Return true iff vector mentions are present.
128 bool gatherVectorInstructions();
130 // Add an entry to the swap vector and swap map.
131 int addSwapEntry(MachineInstr *MI, PPCVSXSwapEntry &SwapEntry);
133 // Hunt backwards through COPY and SUBREG_TO_REG chains for a
134 // source register. VecIdx indicates the swap vector entry to
135 // mark as mentioning a physical register if the search leads
137 unsigned lookThruCopyLike(unsigned SrcReg, unsigned VecIdx);
139 // Generate equivalence classes for related computations (webs).
142 // Analyze webs and determine those that cannot be optimized.
143 void recordUnoptimizableWebs();
145 // Record which swap instructions can be safely removed.
146 void markSwapsForRemoval();
148 // Remove swaps and update other instructions requiring special
149 // handling. Return true iff any changes are made.
152 // Update instructions requiring special handling.
153 void handleSpecialSwappables(int EntryIdx);
155 // Dump a description of the entries in the swap vector.
156 void dumpSwapVector();
158 // Return true iff the given register is in the given class.
159 bool isRegInClass(unsigned Reg, const TargetRegisterClass *RC) {
160 if (TargetRegisterInfo::isVirtualRegister(Reg))
161 return RC->hasSubClassEq(MRI->getRegClass(Reg));
162 if (RC->contains(Reg))
167 // Return true iff the given register is a full vector register.
168 bool isVecReg(unsigned Reg) {
169 return (isRegInClass(Reg, &PPC::VSRCRegClass) ||
170 isRegInClass(Reg, &PPC::VRRCRegClass));
173 // Return true iff the given register is a partial vector register.
174 bool isScalarVecReg(unsigned Reg) {
175 return (isRegInClass(Reg, &PPC::VSFRCRegClass) ||
176 isRegInClass(Reg, &PPC::VSSRCRegClass));
179 // Return true iff the given register mentions all or part of a
180 // vector register. Also sets Partial to true if the mention
181 // is for just the floating-point register overlap of the register.
182 bool isAnyVecReg(unsigned Reg, bool &Partial) {
183 if (isScalarVecReg(Reg))
185 return isScalarVecReg(Reg) || isVecReg(Reg);
189 // Main entry point for this pass.
190 bool runOnMachineFunction(MachineFunction &MF) override {
191 // If we don't have VSX on the subtarget, don't do anything.
192 const PPCSubtarget &STI = MF.getSubtarget<PPCSubtarget>();
196 bool Changed = false;
199 if (gatherVectorInstructions()) {
201 recordUnoptimizableWebs();
202 markSwapsForRemoval();
203 Changed = removeSwaps();
206 // FIXME: See the allocation of EC in initialize().
212 // Initialize data structures for this pass. In particular, clear the
213 // swap vector and allocate the equivalence class mapping before
214 // processing each function.
215 void PPCVSXSwapRemoval::initialize(MachineFunction &MFParm) {
217 MRI = &MF->getRegInfo();
218 TII = static_cast<const PPCInstrInfo*>(MF->getSubtarget().getInstrInfo());
220 // An initial vector size of 256 appears to work well in practice.
221 // Small/medium functions with vector content tend not to incur a
222 // reallocation at this size. Three of the vector tests in
223 // projects/test-suite reallocate, which seems like a reasonable rate.
224 const int InitialVectorSize(256);
226 SwapVector.reserve(InitialVectorSize);
228 // FIXME: Currently we allocate EC each time because we don't have
229 // access to the set representation on which to call clear(). Should
230 // consider adding a clear() method to the EquivalenceClasses class.
231 EC = new EquivalenceClasses<int>;
234 // Create an entry in the swap vector for each instruction that mentions
235 // a full vector register, recording various characteristics of the
236 // instructions there.
237 bool PPCVSXSwapRemoval::gatherVectorInstructions() {
238 bool RelevantFunction = false;
240 for (MachineBasicBlock &MBB : *MF) {
241 for (MachineInstr &MI : MBB) {
243 bool RelevantInstr = false;
244 bool Partial = false;
246 for (const MachineOperand &MO : MI.operands()) {
249 unsigned Reg = MO.getReg();
250 if (isAnyVecReg(Reg, Partial)) {
251 RelevantInstr = true;
259 RelevantFunction = true;
261 // Create a SwapEntry initialized to zeros, then fill in the
262 // instruction and ID fields before pushing it to the back
263 // of the swap vector.
264 PPCVSXSwapEntry SwapEntry{};
265 int VecIdx = addSwapEntry(&MI, SwapEntry);
267 switch(MI.getOpcode()) {
269 // Unless noted otherwise, an instruction is considered
270 // safe for the optimization. There are a large number of
271 // such true-SIMD instructions (all vector math, logical,
272 // select, compare, etc.). However, if the instruction
273 // mentions a partial vector register and does not have
274 // special handling defined, it is not swappable.
276 SwapVector[VecIdx].MentionsPartialVR = 1;
278 SwapVector[VecIdx].IsSwappable = 1;
280 case PPC::XXPERMDI: {
281 // This is a swap if it is of the form XXPERMDI t, s, s, 2.
282 // Unfortunately, MachineCSE ignores COPY and SUBREG_TO_REG, so we
283 // can also see XXPERMDI t, SUBREG_TO_REG(s), SUBREG_TO_REG(s), 2,
284 // for example. We have to look through chains of COPY and
285 // SUBREG_TO_REG to find the real source value for comparison.
286 // If the real source value is a physical register, then mark the
287 // XXPERMDI as mentioning a physical register.
288 int immed = MI.getOperand(3).getImm();
290 unsigned trueReg1 = lookThruCopyLike(MI.getOperand(1).getReg(),
292 unsigned trueReg2 = lookThruCopyLike(MI.getOperand(2).getReg(),
294 if (trueReg1 == trueReg2)
295 SwapVector[VecIdx].IsSwap = 1;
297 // We can still handle these if the two registers are not
298 // identical, by adjusting the form of the XXPERMDI.
299 SwapVector[VecIdx].IsSwappable = 1;
300 SwapVector[VecIdx].SpecialHandling = SHValues::SH_XXPERMDI;
302 // This is a doubleword splat if it is of the form
303 // XXPERMDI t, s, s, 0 or XXPERMDI t, s, s, 3. As above we
304 // must look through chains of copy-likes to find the source
305 // register. We turn off the marking for mention of a physical
306 // register, because splatting it is safe; the optimization
307 // will not swap the value in the physical register. Whether
308 // or not the two input registers are identical, we can handle
309 // these by adjusting the form of the XXPERMDI.
310 } else if (immed == 0 || immed == 3) {
312 SwapVector[VecIdx].IsSwappable = 1;
313 SwapVector[VecIdx].SpecialHandling = SHValues::SH_XXPERMDI;
315 unsigned trueReg1 = lookThruCopyLike(MI.getOperand(1).getReg(),
317 unsigned trueReg2 = lookThruCopyLike(MI.getOperand(2).getReg(),
319 if (trueReg1 == trueReg2)
320 SwapVector[VecIdx].MentionsPhysVR = 0;
323 // We can still handle these by adjusting the form of the XXPERMDI.
324 SwapVector[VecIdx].IsSwappable = 1;
325 SwapVector[VecIdx].SpecialHandling = SHValues::SH_XXPERMDI;
330 // Non-permuting loads are currently unsafe. We can use special
331 // handling for this in the future. By not marking these as
332 // IsSwap, we ensure computations containing them will be rejected
334 SwapVector[VecIdx].IsLoad = 1;
338 // Permuting loads are marked as both load and swap, and are
339 // safe for optimization.
340 SwapVector[VecIdx].IsLoad = 1;
341 SwapVector[VecIdx].IsSwap = 1;
344 // Non-permuting stores are currently unsafe. We can use special
345 // handling for this in the future. By not marking these as
346 // IsSwap, we ensure computations containing them will be rejected
348 SwapVector[VecIdx].IsStore = 1;
352 // Permuting stores are marked as both store and swap, and are
353 // safe for optimization.
354 SwapVector[VecIdx].IsStore = 1;
355 SwapVector[VecIdx].IsSwap = 1;
358 // These are fine provided they are moving between full vector
360 if (isVecReg(MI.getOperand(0).getReg()) &&
361 isVecReg(MI.getOperand(1).getReg()))
362 SwapVector[VecIdx].IsSwappable = 1;
363 // If we have a copy from one scalar floating-point register
364 // to another, we can accept this even if it is a physical
365 // register. The only way this gets involved is if it feeds
366 // a SUBREG_TO_REG, which is handled by introducing a swap.
367 else if (isScalarVecReg(MI.getOperand(0).getReg()) &&
368 isScalarVecReg(MI.getOperand(1).getReg()))
369 SwapVector[VecIdx].IsSwappable = 1;
371 case PPC::SUBREG_TO_REG: {
372 // These are fine provided they are moving between full vector
373 // register classes. If they are moving from a scalar
374 // floating-point class to a vector class, we can handle those
375 // as well, provided we introduce a swap. It is generally the
376 // case that we will introduce fewer swaps than we remove, but
377 // (FIXME) a cost model could be used. However, introduced
378 // swaps could potentially be CSEd, so this is not trivial.
379 if (isVecReg(MI.getOperand(0).getReg()) &&
380 isVecReg(MI.getOperand(2).getReg()))
381 SwapVector[VecIdx].IsSwappable = 1;
382 else if (isVecReg(MI.getOperand(0).getReg()) &&
383 isScalarVecReg(MI.getOperand(2).getReg())) {
384 SwapVector[VecIdx].IsSwappable = 1;
385 SwapVector[VecIdx].SpecialHandling = SHValues::SH_COPYSCALAR;
392 // Splats are lane-sensitive, but we can use special handling
393 // to adjust the source lane for the splat. This is not yet
394 // implemented. When it is, we need to uncomment the following:
395 SwapVector[VecIdx].IsSwappable = 1;
396 SwapVector[VecIdx].SpecialHandling = SHValues::SH_SPLAT;
398 // The presence of the following lane-sensitive operations in a
399 // web will kill the optimization, at least for now. For these
400 // we do nothing, causing the optimization to fail.
401 // FIXME: Some of these could be permitted with special handling,
402 // and will be phased in as time permits.
403 // FIXME: There is no simple and maintainable way to express a set
404 // of opcodes having a common attribute in TableGen. Should this
405 // change, this is a prime candidate to use such a mechanism.
407 case PPC::EXTRACT_SUBREG:
408 case PPC::INSERT_SUBREG:
409 case PPC::COPY_TO_REGCLASS:
422 case PPC::VCIPHERLAST:
442 case PPC::VNCIPHERLAST:
467 case PPC::VSHASIGMAD:
468 case PPC::VSHASIGMAW:
489 // XXSLDWI could be replaced by a general permute with one of three
490 // permute control vectors (for shift values 1, 2, 3). However,
491 // VPERM has a more restrictive register class.
499 if (RelevantFunction) {
500 DEBUG(dbgs() << "Swap vector when first built\n\n");
504 return RelevantFunction;
507 // Add an entry to the swap vector and swap map, and make a
508 // singleton equivalence class for the entry.
509 int PPCVSXSwapRemoval::addSwapEntry(MachineInstr *MI,
510 PPCVSXSwapEntry& SwapEntry) {
511 SwapEntry.VSEMI = MI;
512 SwapEntry.VSEId = SwapVector.size();
513 SwapVector.push_back(SwapEntry);
514 EC->insert(SwapEntry.VSEId);
515 SwapMap[MI] = SwapEntry.VSEId;
516 return SwapEntry.VSEId;
519 // This is used to find the "true" source register for an
520 // XXPERMDI instruction, since MachineCSE does not handle the
521 // "copy-like" operations (Copy and SubregToReg). Returns
522 // the original SrcReg unless it is the target of a copy-like
523 // operation, in which case we chain backwards through all
524 // such operations to the ultimate source register. If a
525 // physical register is encountered, we stop the search and
526 // flag the swap entry indicated by VecIdx (the original
527 // XXPERMDI) as mentioning a physical register.
528 unsigned PPCVSXSwapRemoval::lookThruCopyLike(unsigned SrcReg,
530 MachineInstr *MI = MRI->getVRegDef(SrcReg);
531 if (!MI->isCopyLike())
536 CopySrcReg = MI->getOperand(1).getReg();
538 assert(MI->isSubregToReg() && "bad opcode for lookThruCopyLike");
539 CopySrcReg = MI->getOperand(2).getReg();
542 if (!TargetRegisterInfo::isVirtualRegister(CopySrcReg)) {
543 SwapVector[VecIdx].MentionsPhysVR = 1;
547 return lookThruCopyLike(CopySrcReg, VecIdx);
550 // Generate equivalence classes for related computations (webs) by
551 // def-use relationships of virtual registers. Mention of a physical
552 // register terminates the generation of equivalence classes as this
553 // indicates a use of a parameter, definition of a return value, use
554 // of a value returned from a call, or definition of a parameter to a
555 // call. Computations with physical register mentions are flagged
556 // as such so their containing webs will not be optimized.
557 void PPCVSXSwapRemoval::formWebs() {
559 DEBUG(dbgs() << "\n*** Forming webs for swap removal ***\n\n");
561 for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) {
563 MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
565 DEBUG(dbgs() << "\n" << SwapVector[EntryIdx].VSEId << " ");
568 // It's sufficient to walk vector uses and join them to their unique
569 // definitions. In addition, check full vector register operands
570 // for physical regs. We exclude partial-vector register operands
571 // because we can handle them if copied to a full vector.
572 for (const MachineOperand &MO : MI->operands()) {
576 unsigned Reg = MO.getReg();
577 if (!isVecReg(Reg) && !isScalarVecReg(Reg))
580 if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
581 if (!(MI->isCopy() && isScalarVecReg(Reg)))
582 SwapVector[EntryIdx].MentionsPhysVR = 1;
589 MachineInstr* DefMI = MRI->getVRegDef(Reg);
590 assert(SwapMap.find(DefMI) != SwapMap.end() &&
591 "Inconsistency: def of vector reg not found in swap map!");
592 int DefIdx = SwapMap[DefMI];
593 (void)EC->unionSets(SwapVector[DefIdx].VSEId,
594 SwapVector[EntryIdx].VSEId);
596 DEBUG(dbgs() << format("Unioning %d with %d\n", SwapVector[DefIdx].VSEId,
597 SwapVector[EntryIdx].VSEId));
598 DEBUG(dbgs() << " Def: ");
599 DEBUG(DefMI->dump());
604 // Walk the swap vector entries looking for conditions that prevent their
605 // containing computations from being optimized. When such conditions are
606 // found, mark the representative of the computation's equivalence class
608 void PPCVSXSwapRemoval::recordUnoptimizableWebs() {
610 DEBUG(dbgs() << "\n*** Rejecting webs for swap removal ***\n\n");
612 for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) {
613 int Repr = EC->getLeaderValue(SwapVector[EntryIdx].VSEId);
615 // If representative is already rejected, don't waste further time.
616 if (SwapVector[Repr].WebRejected)
619 // Reject webs containing mentions of physical or partial registers, or
620 // containing operations that we don't know how to handle in a lane-
622 if (SwapVector[EntryIdx].MentionsPhysVR ||
623 SwapVector[EntryIdx].MentionsPartialVR ||
624 !(SwapVector[EntryIdx].IsSwappable || SwapVector[EntryIdx].IsSwap)) {
626 SwapVector[Repr].WebRejected = 1;
629 format("Web %d rejected for physreg, partial reg, or not swap[pable]\n",
631 DEBUG(dbgs() << " in " << EntryIdx << ": ");
632 DEBUG(SwapVector[EntryIdx].VSEMI->dump());
633 DEBUG(dbgs() << "\n");
636 // Reject webs than contain swapping loads that feed something other
637 // than a swap instruction.
638 else if (SwapVector[EntryIdx].IsLoad && SwapVector[EntryIdx].IsSwap) {
639 MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
640 unsigned DefReg = MI->getOperand(0).getReg();
642 // We skip debug instructions in the analysis. (Note that debug
643 // location information is still maintained by this optimization
644 // because it remains on the LXVD2X and STXVD2X instructions after
645 // the XXPERMDIs are removed.)
646 for (MachineInstr &UseMI : MRI->use_nodbg_instructions(DefReg)) {
647 int UseIdx = SwapMap[&UseMI];
649 if (!SwapVector[UseIdx].IsSwap || SwapVector[UseIdx].IsLoad ||
650 SwapVector[UseIdx].IsStore) {
652 SwapVector[Repr].WebRejected = 1;
655 format("Web %d rejected for load not feeding swap\n", Repr));
656 DEBUG(dbgs() << " def " << EntryIdx << ": ");
658 DEBUG(dbgs() << " use " << UseIdx << ": ");
660 DEBUG(dbgs() << "\n");
664 // Reject webs that contain swapping stores that are fed by something
665 // other than a swap instruction.
666 } else if (SwapVector[EntryIdx].IsStore && SwapVector[EntryIdx].IsSwap) {
667 MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
668 unsigned UseReg = MI->getOperand(0).getReg();
669 MachineInstr *DefMI = MRI->getVRegDef(UseReg);
670 int DefIdx = SwapMap[DefMI];
672 if (!SwapVector[DefIdx].IsSwap || SwapVector[DefIdx].IsLoad ||
673 SwapVector[DefIdx].IsStore) {
675 SwapVector[Repr].WebRejected = 1;
678 format("Web %d rejected for store not fed by swap\n", Repr));
679 DEBUG(dbgs() << " def " << DefIdx << ": ");
680 DEBUG(DefMI->dump());
681 DEBUG(dbgs() << " use " << EntryIdx << ": ");
683 DEBUG(dbgs() << "\n");
688 DEBUG(dbgs() << "Swap vector after web analysis:\n\n");
692 // Walk the swap vector entries looking for swaps fed by permuting loads
693 // and swaps that feed permuting stores. If the containing computation
694 // has not been marked rejected, mark each such swap for removal.
695 // (Removal is delayed in case optimization has disturbed the pattern,
696 // such that multiple loads feed the same swap, etc.)
697 void PPCVSXSwapRemoval::markSwapsForRemoval() {
699 DEBUG(dbgs() << "\n*** Marking swaps for removal ***\n\n");
701 for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) {
703 if (SwapVector[EntryIdx].IsLoad && SwapVector[EntryIdx].IsSwap) {
704 int Repr = EC->getLeaderValue(SwapVector[EntryIdx].VSEId);
706 if (!SwapVector[Repr].WebRejected) {
707 MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
708 unsigned DefReg = MI->getOperand(0).getReg();
710 for (MachineInstr &UseMI : MRI->use_nodbg_instructions(DefReg)) {
711 int UseIdx = SwapMap[&UseMI];
712 SwapVector[UseIdx].WillRemove = 1;
714 DEBUG(dbgs() << "Marking swap fed by load for removal: ");
719 } else if (SwapVector[EntryIdx].IsStore && SwapVector[EntryIdx].IsSwap) {
720 int Repr = EC->getLeaderValue(SwapVector[EntryIdx].VSEId);
722 if (!SwapVector[Repr].WebRejected) {
723 MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
724 unsigned UseReg = MI->getOperand(0).getReg();
725 MachineInstr *DefMI = MRI->getVRegDef(UseReg);
726 int DefIdx = SwapMap[DefMI];
727 SwapVector[DefIdx].WillRemove = 1;
729 DEBUG(dbgs() << "Marking swap feeding store for removal: ");
730 DEBUG(DefMI->dump());
733 } else if (SwapVector[EntryIdx].IsSwappable &&
734 SwapVector[EntryIdx].SpecialHandling != 0) {
735 int Repr = EC->getLeaderValue(SwapVector[EntryIdx].VSEId);
737 if (!SwapVector[Repr].WebRejected)
738 handleSpecialSwappables(EntryIdx);
743 // The identified swap entry requires special handling to allow its
744 // containing computation to be optimized. Perform that handling
746 // FIXME: Additional opportunities will be phased in with subsequent
748 void PPCVSXSwapRemoval::handleSpecialSwappables(int EntryIdx) {
749 switch (SwapVector[EntryIdx].SpecialHandling) {
752 assert(false && "Unexpected special handling type");
755 // For splats based on an index into a vector, add N/2 modulo N
756 // to the index, where N is the number of vector elements.
757 case SHValues::SH_SPLAT: {
758 MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
761 DEBUG(dbgs() << "Changing splat: ");
764 switch (MI->getOpcode()) {
766 assert(false && "Unexpected splat opcode");
767 case PPC::VSPLTB: NElts = 16; break;
768 case PPC::VSPLTH: NElts = 8; break;
769 case PPC::VSPLTW: NElts = 4; break;
772 unsigned EltNo = MI->getOperand(1).getImm();
773 EltNo = (EltNo + NElts / 2) % NElts;
774 MI->getOperand(1).setImm(EltNo);
776 DEBUG(dbgs() << " Into: ");
781 // For an XXPERMDI that isn't handled otherwise, we need to
782 // reverse the order of the operands. If the selector operand
783 // has a value of 0 or 3, we need to change it to 3 or 0,
784 // respectively. Otherwise we should leave it alone. (This
785 // is equivalent to reversing the two bits of the selector
786 // operand and complementing the result.)
787 case SHValues::SH_XXPERMDI: {
788 MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
790 DEBUG(dbgs() << "Changing XXPERMDI: ");
793 unsigned Selector = MI->getOperand(3).getImm();
794 if (Selector == 0 || Selector == 3)
795 Selector = 3 - Selector;
796 MI->getOperand(3).setImm(Selector);
798 unsigned Reg1 = MI->getOperand(1).getReg();
799 unsigned Reg2 = MI->getOperand(2).getReg();
800 MI->getOperand(1).setReg(Reg2);
801 MI->getOperand(2).setReg(Reg1);
803 DEBUG(dbgs() << " Into: ");
808 // For a copy from a scalar floating-point register to a vector
809 // register, removing swaps will leave the copied value in the
810 // wrong lane. Insert a swap following the copy to fix this.
811 case SHValues::SH_COPYSCALAR: {
812 MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
814 DEBUG(dbgs() << "Changing SUBREG_TO_REG: ");
817 unsigned DstReg = MI->getOperand(0).getReg();
818 const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
819 unsigned NewVReg = MRI->createVirtualRegister(DstRC);
821 MI->getOperand(0).setReg(NewVReg);
822 DEBUG(dbgs() << " Into: ");
825 MachineBasicBlock::iterator InsertPoint = MI->getNextNode();
827 // Note that an XXPERMDI requires a VSRC, so if the SUBREG_TO_REG
828 // is copying to a VRRC, we need to be careful to avoid a register
829 // assignment problem. In this case we must copy from VRRC to VSRC
830 // prior to the swap, and from VSRC to VRRC following the swap.
831 // Coalescing will usually remove all this mess.
833 if (DstRC == &PPC::VRRCRegClass) {
834 unsigned VSRCTmp1 = MRI->createVirtualRegister(&PPC::VSRCRegClass);
835 unsigned VSRCTmp2 = MRI->createVirtualRegister(&PPC::VSRCRegClass);
837 BuildMI(*MI->getParent(), InsertPoint, MI->getDebugLoc(),
838 TII->get(PPC::COPY), VSRCTmp1)
840 DEBUG(MI->getNextNode()->dump());
842 BuildMI(*MI->getParent(), InsertPoint, MI->getDebugLoc(),
843 TII->get(PPC::XXPERMDI), VSRCTmp2)
847 DEBUG(MI->getNextNode()->getNextNode()->dump());
849 BuildMI(*MI->getParent(), InsertPoint, MI->getDebugLoc(),
850 TII->get(PPC::COPY), DstReg)
852 DEBUG(MI->getNextNode()->getNextNode()->getNextNode()->dump());
856 BuildMI(*MI->getParent(), InsertPoint, MI->getDebugLoc(),
857 TII->get(PPC::XXPERMDI), DstReg)
862 DEBUG(MI->getNextNode()->dump());
869 // Walk the swap vector and replace each entry marked for removal with
871 bool PPCVSXSwapRemoval::removeSwaps() {
873 DEBUG(dbgs() << "\n*** Removing swaps ***\n\n");
875 bool Changed = false;
877 for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) {
878 if (SwapVector[EntryIdx].WillRemove) {
880 MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
881 MachineBasicBlock *MBB = MI->getParent();
882 BuildMI(*MBB, MI, MI->getDebugLoc(),
883 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
884 .addOperand(MI->getOperand(1));
886 DEBUG(dbgs() << format("Replaced %d with copy: ",
887 SwapVector[EntryIdx].VSEId));
890 MI->eraseFromParent();
897 // For debug purposes, dump the contents of the swap vector.
898 void PPCVSXSwapRemoval::dumpSwapVector() {
900 for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) {
902 MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
903 int ID = SwapVector[EntryIdx].VSEId;
905 DEBUG(dbgs() << format("%6d", ID));
906 DEBUG(dbgs() << format("%6d", EC->getLeaderValue(ID)));
907 DEBUG(dbgs() << format(" BB#%3d", MI->getParent()->getNumber()));
908 DEBUG(dbgs() << format(" %14s ", TII->getName(MI->getOpcode())));
910 if (SwapVector[EntryIdx].IsLoad)
911 DEBUG(dbgs() << "load ");
912 if (SwapVector[EntryIdx].IsStore)
913 DEBUG(dbgs() << "store ");
914 if (SwapVector[EntryIdx].IsSwap)
915 DEBUG(dbgs() << "swap ");
916 if (SwapVector[EntryIdx].MentionsPhysVR)
917 DEBUG(dbgs() << "physreg ");
918 if (SwapVector[EntryIdx].MentionsPartialVR)
919 DEBUG(dbgs() << "partialreg ");
921 if (SwapVector[EntryIdx].IsSwappable) {
922 DEBUG(dbgs() << "swappable ");
923 switch(SwapVector[EntryIdx].SpecialHandling) {
925 DEBUG(dbgs() << "special:**unknown**");
930 DEBUG(dbgs() << "special:extract ");
933 DEBUG(dbgs() << "special:insert ");
936 DEBUG(dbgs() << "special:load ");
939 DEBUG(dbgs() << "special:store ");
942 DEBUG(dbgs() << "special:splat ");
945 DEBUG(dbgs() << "special:xxpermdi ");
948 DEBUG(dbgs() << "special:copyscalar ");
953 if (SwapVector[EntryIdx].WebRejected)
954 DEBUG(dbgs() << "rejected ");
955 if (SwapVector[EntryIdx].WillRemove)
956 DEBUG(dbgs() << "remove ");
958 DEBUG(dbgs() << "\n");
960 // For no-asserts builds.
965 DEBUG(dbgs() << "\n");
} // end anonymous namespace
970 INITIALIZE_PASS_BEGIN(PPCVSXSwapRemoval, DEBUG_TYPE,
971 "PowerPC VSX Swap Removal", false, false)
972 INITIALIZE_PASS_END(PPCVSXSwapRemoval, DEBUG_TYPE,
973 "PowerPC VSX Swap Removal", false, false)
975 char PPCVSXSwapRemoval::ID = 0;
977 llvm::createPPCVSXSwapRemovalPass() { return new PPCVSXSwapRemoval(); }