1 //===-- NEONPreAllocPass.cpp - Allocate adjacent NEON registers--*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #define DEBUG_TYPE "neon-prealloc"
12 #include "ARMInstrInfo.h"
13 #include "llvm/CodeGen/MachineInstr.h"
14 #include "llvm/CodeGen/MachineInstrBuilder.h"
15 #include "llvm/CodeGen/MachineRegisterInfo.h"
16 #include "llvm/CodeGen/MachineFunctionPass.h"
// Machine-function pass that pre-assigns fixed, adjacent ARM NEON D-registers
// to the multi-register operands of NEON load/store instructions (the actual
// work is done in PreAllocNEONRegisters / FormsRegSequence below).
// NOTE(review): the embedded original line numbers jump (20, 21, 22, 26, ...),
// so access specifiers, the closing "};", and an anonymous-namespace wrapper
// are missing from this excerpt — restore them before compiling.
20 class NEONPreAllocPass : public MachineFunctionPass {
// Cached per-function target hooks, set in runOnMachineFunction.
21 const TargetInstrInfo *TII;
22 MachineRegisterInfo *MRI;
// Default-construct with the pass's static ID (defined below).
26 NEONPreAllocPass() : MachineFunctionPass(ID) {}
28 virtual bool runOnMachineFunction(MachineFunction &MF);
// Human-readable pass name shown by the pass manager (-debug-pass, etc.).
30 virtual const char *getPassName() const {
31 return "NEON register pre-allocation pass";
// Returns true when the NumRegs register operands of MI starting at
// FirstOpnd already form (or feed) a REG_SEQUENCE, so no fixed
// pre-allocation is needed; see the definition for details.
35 bool FormsRegSequence(MachineInstr *MI,
36 unsigned FirstOpnd, unsigned NumRegs,
37 unsigned Offset, unsigned Stride) const;
// Rewrites qualifying NEON multi-register ops in MBB; returns true if the
// block was modified.
38 bool PreAllocNEONRegisters(MachineBasicBlock &MBB);
// Pass identification: address of ID uniquely identifies this pass.
41 char NEONPreAllocPass::ID = 0;
// isNEONMultiRegOp - Classify Opcode: returns true for NEON structured
// load/store opcodes (VLDn/VSTn and their lane/odd/update variants) whose
// register operands must occupy adjacent physical registers. On a match the
// out-parameters describe the operand layout: FirstOpnd = index of the first
// register operand, NumRegs = how many consecutive operands, and the
// registers must be assigned at Offset with the given Stride.
// NOTE(review): the embedded line numbers jump (46 -> 83, 84 -> 102, ...);
// the switch header, most case groups, the per-case out-parameter
// assignments, and the default/return paths are missing from this excerpt.
44 static bool isNEONMultiRegOp(int Opcode, unsigned &FirstOpnd, unsigned &NumRegs,
45 unsigned &Offset, unsigned &Stride) {
46 // Default to unit stride with no offset.
// VLD2 lane-wise loads into the odd D-registers of Q register pairs.
83 case ARM::VLD2LNq16odd:
84 case ARM::VLD2LNq32odd:
// VLD3 of Q registers with base-register writeback (_UPD).
102 case ARM::VLD3q8_UPD:
103 case ARM::VLD3q16_UPD:
104 case ARM::VLD3q32_UPD:
111 case ARM::VLD3q8odd_UPD:
112 case ARM::VLD3q16odd_UPD:
113 case ARM::VLD3q32odd_UPD:
// VLD3 lane-wise, odd halves of Q registers.
128 case ARM::VLD3LNq16odd:
129 case ARM::VLD3LNq32odd:
// VLD4 of Q registers with writeback, even and odd halves.
147 case ARM::VLD4q8_UPD:
148 case ARM::VLD4q16_UPD:
149 case ARM::VLD4q32_UPD:
156 case ARM::VLD4q8odd_UPD:
157 case ARM::VLD4q16odd_UPD:
158 case ARM::VLD4q32odd_UPD:
// VLD4 lane-wise, odd halves of Q registers.
173 case ARM::VLD4LNq16odd:
174 case ARM::VLD4LNq32odd:
// VST2/VST3/VST4 lane-wise stores from the odd halves of Q registers.
196 case ARM::VST2LNq16odd:
197 case ARM::VST2LNq32odd:
219 case ARM::VST3LNq16odd:
220 case ARM::VST3LNq32odd:
242 case ARM::VST4LNq16odd:
243 case ARM::VST4LNq32odd:
// FormsRegSequence - Check whether the NumRegs virtual-register operands of
// MI starting at FirstOpnd are already tied to a single super-register:
// either (defs) they all feed one REG_SEQUENCE at consecutive sub-register
// indices, or (uses) they are all COPY-extracted from the same Q/QQ/QQQQ
// register. In the use case the operands are rewritten in place to
// sub-register references of that source register, and dead extracting
// copies are deleted. Returns true when MI therefore needs no fixed
// pre-allocation.
// NOTE(review): the embedded line numbers jump throughout (291, 294, 296,
// 306, ... are absent), so the early-return/"continue" statements, several
// closing braces, and the final "return true;" are missing from this excerpt
// — the control flow below must be read with that in mind.
285 NEONPreAllocPass::FormsRegSequence(MachineInstr *MI,
286 unsigned FirstOpnd, unsigned NumRegs,
287 unsigned Offset, unsigned Stride) const {
288 MachineOperand &FMO = MI->getOperand(FirstOpnd);
// The multi-register operands are expected to be plain virtual registers
// with no sub-register index at this point.
289 assert(FMO.isReg() && FMO.getSubReg() == 0 && "unexpected operand");
290 unsigned VirtReg = FMO.getReg();
292 assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
293 "expected a virtual register");
// Track the sub-register index seen for the previous operand so we can
// verify the operands appear at consecutive (stride-spaced) indices.
295 unsigned LastSubIdx = 0;
// --- Case 1: MI defines the registers and they feed one REG_SEQUENCE. ---
297 MachineInstr *RegSeq = 0;
298 for (unsigned R = 0; R < NumRegs; ++R) {
299 const MachineOperand &MO = MI->getOperand(FirstOpnd + R);
300 assert(MO.isReg() && MO.getSubReg() == 0 && "unexpected operand");
301 unsigned VirtReg = MO.getReg();
302 assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
303 "expected a virtual register");
304 // Feeding into a REG_SEQUENCE.
// Each def must have exactly one (non-debug) use, and that use must be a
// REG_SEQUENCE — the same one for every operand.
305 if (!MRI->hasOneNonDBGUse(VirtReg))
307 MachineInstr *UseMI = &*MRI->use_nodbg_begin(VirtReg);
308 if (!UseMI->isRegSequence())
310 if (RegSeq && RegSeq != UseMI)
// REG_SEQUENCE operands come in (reg, subreg-index) pairs after the def,
// hence the *2 addressing; this operand must sit at its strided slot.
312 unsigned OpIdx = 1 + (Offset + R * Stride) * 2;
313 if (UseMI->getOperand(OpIdx).getReg() != VirtReg)
314 llvm_unreachable("Malformed REG_SEQUENCE instruction!");
315 unsigned SubIdx = UseMI->getOperand(OpIdx + 1).getImm();
// Sub-register indices must advance by exactly Stride between operands.
317 if (LastSubIdx != SubIdx-Stride)
320 // Must start from dsub_0 or qsub_0.
321 if (SubIdx != (ARM::dsub_0+Offset) &&
322 SubIdx != (ARM::qsub_0+Offset))
329 // In the case of vld3, etc., make sure the trailing operand of
330 // REG_SEQUENCE is an undef.
// The slot after the last real register (index Offset + 3*Stride) must be
// defined by an IMPLICIT_DEF for odd-count loads padding out the sequence.
332 unsigned OpIdx = 1 + (Offset + 3 * Stride) * 2;
333 const MachineOperand &MO = RegSeq->getOperand(OpIdx);
334 unsigned VirtReg = MO.getReg();
335 MachineInstr *DefMI = MRI->getVRegDef(VirtReg);
336 if (!DefMI || !DefMI->isImplicitDef())
// --- Case 2: MI uses the registers; they must all be copy-extracted from
// one Q/QQ/QQQQ super-register at consecutive sub-register indices. ---
342 unsigned LastSrcReg = 0;
// Remember each operand's sub-register index for the rewrite loop below.
343 SmallVector<unsigned, 4> SubIds;
344 for (unsigned R = 0; R < NumRegs; ++R) {
345 const MachineOperand &MO = MI->getOperand(FirstOpnd + R);
346 assert(MO.isReg() && MO.getSubReg() == 0 && "unexpected operand");
347 unsigned VirtReg = MO.getReg();
348 assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
349 "expected a virtual register");
350 // Extracting from a Q or QQ register.
351 MachineInstr *DefMI = MRI->getVRegDef(VirtReg);
352 if (!DefMI || !DefMI->isCopy() || !DefMI->getOperand(1).getSubReg())
// Follow the copy back to its super-register source.
354 VirtReg = DefMI->getOperand(1).getReg();
// All operands must extract from the same source register.
355 if (LastSrcReg && LastSrcReg != VirtReg)
357 LastSrcReg = VirtReg;
358 const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
// Only Q (128-bit), QQ, and QQQQ register classes qualify as sources.
359 if (RC != ARM::QPRRegisterClass &&
360 RC != ARM::QQPRRegisterClass &&
361 RC != ARM::QQQQPRRegisterClass)
363 unsigned SubIdx = DefMI->getOperand(1).getSubReg();
365 if (LastSubIdx != SubIdx-Stride)
368 // Must start from dsub_0 or qsub_0.
369 if (SubIdx != (ARM::dsub_0+Offset) &&
370 SubIdx != (ARM::qsub_0+Offset))
373 SubIds.push_back(SubIdx);
377 // FIXME: Update the uses of EXTRACT_SUBREG from REG_SEQUENCE is
378 // currently required for correctness. e.g.
379 // %reg1041<def> = REG_SEQUENCE %reg1040<kill>, 5, %reg1035<kill>, 6
380 // %reg1042<def> = EXTRACT_SUBREG %reg1041, 6
381 // %reg1043<def> = EXTRACT_SUBREG %reg1041, 5
382 // VST1q16 %reg1025<kill>, 0, %reg1043<kill>, %reg1042<kill>,
383 // reg1042 and reg1043 should be replaced with reg1041:6 and reg1041:5
385 // We need to change how we model uses of REG_SEQUENCE.
// Rewrite each operand to reference the shared source register directly
// with its sub-register index, bypassing the extracting COPYs.
386 for (unsigned R = 0; R < NumRegs; ++R) {
387 MachineOperand &MO = MI->getOperand(FirstOpnd + R);
388 unsigned OldReg = MO.getReg();
389 MachineInstr *DefMI = MRI->getVRegDef(OldReg);
390 assert(DefMI->isCopy());
391 MO.setReg(LastSrcReg);
392 MO.setSubReg(SubIds[R]);
394 // Delete the EXTRACT_SUBREG if its result is now dead.
395 if (MRI->use_empty(OldReg))
396 DefMI->eraseFromParent();
// PreAllocNEONRegisters - Walk MBB looking for NEON multi-register
// load/store instructions (per isNEONMultiRegOp). Instructions whose
// operands already form a REG_SEQUENCE are left alone; otherwise each
// virtual-register operand is rewritten to a fixed physical D-register
// (D0..D7, spaced by Offset/Stride) with COPY instructions inserted to move
// values in (before MI, for uses) and out (after MI, for live defs).
// Returns true if anything was changed.
// NOTE(review): line numbers jump (410, 412-413, 429-430, 434, 439-444 are
// absent), so the "continue" statements after the two early checks, the
// Modified updates, closing braces, and the final return are missing from
// this excerpt.
402 bool NEONPreAllocPass::PreAllocNEONRegisters(MachineBasicBlock &MBB) {
403 bool Modified = false;
405 MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
406 for (; MBBI != E; ++MBBI) {
407 MachineInstr *MI = &*MBBI;
408 unsigned FirstOpnd, NumRegs, Offset, Stride;
// Skip instructions that are not NEON multi-register ops.
409 if (!isNEONMultiRegOp(MI->getOpcode(), FirstOpnd, NumRegs, Offset, Stride))
// Skip ops whose operands already belong to one super-register.
411 if (FormsRegSequence(MI, FirstOpnd, NumRegs, Offset, Stride))
// Insertion point for the copy-out of defined registers: just after MI.
414 MachineBasicBlock::iterator NextI = llvm::next(MBBI);
415 for (unsigned R = 0; R < NumRegs; ++R) {
416 MachineOperand &MO = MI->getOperand(FirstOpnd + R);
417 assert(MO.isReg() && MO.getSubReg() == 0 && "unexpected operand");
418 unsigned VirtReg = MO.getReg();
419 assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
420 "expected a virtual register");
422 // For now, just assign a fixed set of adjacent registers.
423 // This leaves plenty of room for future improvements.
424 static const unsigned NEONDRegs[] = {
425 ARM::D0, ARM::D1, ARM::D2, ARM::D3,
426 ARM::D4, ARM::D5, ARM::D6, ARM::D7
// Pin this operand to its strided slot in the fixed D-register run.
428 MO.setReg(NEONDRegs[Offset + R * Stride]);
431 // Insert a copy from VirtReg.
// (use operand) physical_reg = COPY VirtReg, placed before MI, preserving
// the kill flag from the original operand.
432 BuildMI(MBB, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY),MO.getReg())
433 .addReg(VirtReg, getKillRegState(MO.isKill()));
435 } else if (MO.isDef() && !MO.isDead()) {
436 // Add a copy to VirtReg.
// (live def operand) VirtReg = COPY physical_reg, placed after MI.
437 BuildMI(MBB, NextI, DebugLoc(), TII->get(TargetOpcode::COPY), VirtReg)
438 .addReg(MO.getReg());
// runOnMachineFunction - Pass entry point: cache the target instruction
// info and the register info for MF, then run PreAllocNEONRegisters over
// every basic block, OR-ing the per-block modification flags together.
// NOTE(review): line numbers jump (449, 452, 455-458 are absent), so the
// loop increment ("++MFI"), closing brace, and "return Modified;" are
// missing from this excerpt.
446 bool NEONPreAllocPass::runOnMachineFunction(MachineFunction &MF) {
447 TII = MF.getTarget().getInstrInfo();
448 MRI = &MF.getRegInfo();
450 bool Modified = false;
451 for (MachineFunction::iterator MFI = MF.begin(), E = MF.end(); MFI != E;
453 MachineBasicBlock &MBB = *MFI;
454 Modified |= PreAllocNEONRegisters(MBB);
460 /// createNEONPreAllocPass - returns an instance of the NEON register
461 /// pre-allocation pass.
// Factory hook used by the ARM target to register this pass in its codegen
// pipeline; caller takes ownership of the returned pass.
// NOTE(review): the function's closing "}" falls past the visible excerpt.
462 FunctionPass *llvm::createNEONPreAllocPass() {
463 return new NEONPreAllocPass();