1 //===-- SICodeEmitter.cpp - SI Code Emitter -------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // The SI code emitter produces machine code that can be executed directly on
13 //===----------------------------------------------------------------------===//
17 #include "AMDGPUUtil.h"
18 #include "AMDILCodeEmitter.h"
19 #include "SIInstrInfo.h"
20 #include "SIMachineFunctionInfo.h"
21 #include "llvm/CodeGen/MachineFunctionPass.h"
22 #include "llvm/CodeGen/MachineInstr.h"
23 #include "llvm/Support/FormattedStream.h"
24 #include "llvm/Target/TargetMachine.h"
// Encoding value placed in a source-operand field to indicate that the real
// operand is a literal constant carried alongside the instruction word.
#define LITERAL_REG 255
// Bit that marks source operand <src_idx> (1-based) as a VGPR rather than an
// SGPR; the 9 * idx stride presumably matches 9-bit-wide source fields.
// The argument is parenthesized so the macro stays correct when invoked with
// an expression such as `i + 1` (the unparenthesized form would expand to
// 9 * i + 1 - 1).
#define VGPR_BIT(src_idx) (1ULL << (9 * (src_idx) - 1))
// Pass that lowers each MachineFunction to raw SI machine code and writes the
// bytes straight to the supplied stream.  It derives from MachineFunctionPass
// so it runs after register allocation, and from AMDILCodeEmitter to provide
// the operand-encoding callbacks referenced by the generated encoder.
class SICodeEmitter : public MachineFunctionPass, public AMDILCodeEmitter {
  formatted_raw_ostream &_OS;  // Destination stream for the emitted bytes.
  const TargetMachine *TM;     // Cached in runOnMachineFunction.
  // Emit the per-function state header (register counts, SPI PS input state).
  void emitState(MachineFunction & MF);
  // Encode one instruction and write it to _OS.
  void emitInstr(MachineInstr &MI);
  // Write the low `bytes` bytes of `value` to _OS, least-significant first.
  void outputBytes(uint64_t value, unsigned bytes);
  // Shared helper for the GPR*AlignEncode hooks: hardware encoding of the
  // register operand OpNo, shifted right by `shift`.
  unsigned GPRAlign(const MachineInstr &MI, unsigned OpNo, unsigned shift)
  SICodeEmitter(formatted_raw_ostream &OS) : MachineFunctionPass(ID),
  const char *getPassName() const { return "SI Code Emitter"; }
  bool runOnMachineFunction(MachineFunction &MF);

  /// getMachineOpValue - Return the encoding for MO
  virtual uint64_t getMachineOpValue(const MachineInstr &MI,
                                     const MachineOperand &MO) const;

  /// GPR4AlignEncode - Encoding for when 4 consecutive registers are used
  virtual unsigned GPR4AlignEncode(const MachineInstr &MI, unsigned OpNo)

  /// GPR2AlignEncode - Encoding for when 2 consecutive registers are used
  virtual unsigned GPR2AlignEncode(const MachineInstr &MI, unsigned OpNo)

  /// i32LiteralEncode - Encode an i32 literal this is used as an operand
  /// for an instruction in place of a register.
  virtual uint64_t i32LiteralEncode(const MachineInstr &MI, unsigned OpNo)

  /// SMRDmemriEncode - Encoding for SMRD indexed loads
  virtual uint32_t SMRDmemriEncode(const MachineInstr &MI, unsigned OpNo)

  /// VOPPostEncode - Post-Encoder method for VOP instructions
  virtual uint64_t VOPPostEncode(const MachineInstr &MI,
                                 uint64_t Value) const;
// Static pass-identification anchor required by MachineFunctionPass.
char SICodeEmitter::ID = 0;

// Factory used by the target to instantiate this pass; the pass manager
// takes ownership of the returned object.
FunctionPass *llvm::createSICodeEmitterPass(formatted_raw_ostream &OS) {
  return new SICodeEmitter(OS);
84 void SICodeEmitter::emitState(MachineFunction & MF)
89 const SIRegisterInfo * RI =
90 static_cast<const SIRegisterInfo*>(TM->getRegisterInfo());
91 SIMachineFunctionInfo * MFI = MF.getInfo<SIMachineFunctionInfo>();
93 for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
95 MachineBasicBlock &MBB = *BB;
96 for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
98 MachineInstr &MI = *I;
99 unsigned numOperands = MI.getNumOperands();
100 for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) {
101 MachineOperand & MO = MI.getOperand(op_idx);
111 if (reg == AMDGPU::VCC) {
115 if (AMDGPU::SReg_32RegClass.contains(reg)) {
118 } else if (AMDGPU::VReg_32RegClass.contains(reg)) {
121 } else if (AMDGPU::SReg_64RegClass.contains(reg)) {
124 } else if (AMDGPU::VReg_64RegClass.contains(reg)) {
127 } else if (AMDGPU::SReg_128RegClass.contains(reg)) {
130 } else if (AMDGPU::VReg_128RegClass.contains(reg)) {
133 } else if (AMDGPU::SReg_256RegClass.contains(reg)) {
137 assert("!Unknown register class");
139 hwReg = RI->getEncodingValue(reg);
140 maxUsed = ((hwReg + 1) * width) - 1;
142 maxSGPR = maxUsed > maxSGPR ? maxUsed : maxSGPR;
144 maxVGPR = maxUsed > maxVGPR ? maxUsed : maxVGPR;
152 outputBytes(maxSGPR + 1, 4);
153 outputBytes(maxVGPR + 1, 4);
154 outputBytes(MFI->spi_ps_input_addr, 4);
// Encode every instruction in MF, skipping the KILL and RETURN pseudo
// markers, then append an explicit S_ENDPGM to terminate the program.
bool SICodeEmitter::runOnMachineFunction(MachineFunction &MF)
  TM = &MF.getTarget();
  const AMDILSubtarget &STM = TM->getSubtarget<AMDILSubtarget>();
  // Optional debug dump controlled by the subtarget.
  if (STM.dumpCode()) {
  for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
    MachineBasicBlock &MBB = *BB;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
      MachineInstr &MI = *I;
      // KILL/RETURN are bookkeeping pseudos with no hardware encoding.
      if (MI.getOpcode() != AMDGPU::KILL && MI.getOpcode() != AMDGPU::RETURN) {
  // Build a free-standing S_ENDPGM (not inserted into any block) so the
  // emitted program always ends with the end-of-program instruction.
  MachineInstr * End = BuildMI(MF, DebugLoc(),
      TM->getInstrInfo()->get(AMDGPU::S_ENDPGM));
// Encode a single instruction via the TableGen-generated encoder and write
// the resulting bytes to the output stream.
void SICodeEmitter::emitInstr(MachineInstr &MI)
  const SIInstrInfo * SII = static_cast<const SIInstrInfo*>(TM->getInstrInfo());

  uint64_t hwInst = getBinaryCodeForInstr(MI);

  // An all-ones low 32 bits is treated as the "no valid encoding" sentinel.
  if ((hwInst & 0xffffffff) == 0xffffffff) {
    fprintf(stderr, "Unsupported Instruction: \n");
  // Instruction size varies by encoding family; ask the target how many
  // bytes of hwInst to emit.
  unsigned bytes = SII->getEncodingBytes(MI);
  outputBytes(hwInst, bytes);
// Return the encoding for operand MO.  Registers map to their hardware
// encoding value; FP immediates are flagged with LITERAL_REG in the low byte
// and carry the raw 32-bit float pattern in the upper 32 bits.
uint64_t SICodeEmitter::getMachineOpValue(const MachineInstr &MI,
                                          const MachineOperand &MO) const
  const SIRegisterInfo * RI =
      static_cast<const SIRegisterInfo*>(TM->getRegisterInfo());

  switch(MO.getType()) {
  case MachineOperand::MO_Register:
    return RI->getEncodingValue(MO.getReg());

  // NOTE(review): the MO_Immediate return is not visible in this excerpt —
  // presumably analogous to the FP case; confirm against the full file.
  case MachineOperand::MO_Immediate:

  case MachineOperand::MO_FPImmediate:
    // XXX: Not all instructions can use inline literals
    // XXX: We should make sure this is a 32-bit constant
    return LITERAL_REG | (MO.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue() << 32);

    // Any other operand kind (MBB, global address, ...) is unimplemented.
    llvm_unreachable("Encoding of this operand type is not supported yet.");
// Return the hardware encoding of register operand OpNo shifted right by
// `shift`, i.e. the register index expressed in units of 2^shift-aligned
// register groups (used for operands that occupy consecutive registers).
unsigned SICodeEmitter::GPRAlign(const MachineInstr &MI, unsigned OpNo,
                                 unsigned shift) const
  const SIRegisterInfo * RI =
      static_cast<const SIRegisterInfo*>(TM->getRegisterInfo());
  unsigned regCode = RI->getEncodingValue(MI.getOperand(OpNo).getReg());
  return regCode >> shift;
// Encoding for operands spanning 4 consecutive registers: the base register
// index divided by 4 (shift of 2).
unsigned SICodeEmitter::GPR4AlignEncode(const MachineInstr &MI,
  return GPRAlign(MI, OpNo, 2);
// Encoding for operands spanning 2 consecutive registers: the base register
// index divided by 2 (shift of 1).
unsigned SICodeEmitter::GPR2AlignEncode(const MachineInstr &MI,
  return GPRAlign(MI, OpNo, 1);
// Encode a 32-bit literal used in place of a register operand: LITERAL_REG in
// the low byte marks the slot, with the immediate in the upper 32 bits.
// NOTE(review): getImm() is signed; shifting a negative value left by 32 is
// suspect — confirm callers only pass non-negative 32-bit values.
uint64_t SICodeEmitter::i32LiteralEncode(const MachineInstr &MI,
  return LITERAL_REG | (MI.getOperand(OpNo).getImm() << 32);
// Field layout of the SMRD memri encoding produced below.
#define SMRD_OFFSET_MASK 0xff
#define SMRD_IMM_SHIFT 8
#define SMRD_SBASE_MASK 0x3f
#define SMRD_SBASE_SHIFT 9

/// SMRDmemriEncode - This function is responsible for encoding the offset
/// and the base ptr for SMRD instructions it should return a bit string in
/// OFFSET = bits{7-0}
/// SBASE = bits{14-9}
uint32_t SICodeEmitter::SMRDmemriEncode(const MachineInstr &MI,
  // The memri operand pair is (base, offset); OpNo names the base register
  // and OpNo + 1 the offset.
  const MachineOperand &OffsetOp = MI.getOperand(OpNo + 1);

  //XXX: Use this function for SMRD loads with register offsets
  assert(OffsetOp.isImm());

    // Pack: offset in bits 7-0, IMM flag in bit 8, 2-register-aligned base
    // in bits 14-9.
    (getMachineOpValue(MI, OffsetOp) & SMRD_OFFSET_MASK)
    | (1 << SMRD_IMM_SHIFT) //XXX If the Offset is a register we shouldn't set this bit
    | ((GPR2AlignEncode(MI, OpNo) & SMRD_SBASE_MASK) << SMRD_SBASE_SHIFT)
/// Set the "VGPR" bit for VOP args that can take either a VGPR or a SGPR.
/// XXX: It would be nice if we could handle this without a PostEncode function.
uint64_t SICodeEmitter::VOPPostEncode(const MachineInstr &MI,
                                      uint64_t Value) const
  const SIInstrInfo * SII = static_cast<const SIInstrInfo*>(TM->getInstrInfo());
  unsigned encodingType = SII->getEncodingType(MI);
  // NOTE(review): vgprBitOffset is presumably assigned per encoding type in
  // code not visible here — confirm every path initializes it before use.
  unsigned vgprBitOffset;

  if (encodingType == SIInstrEncodingType::VOP3) {

  // Add one to skip over the destination reg operand.
  for (unsigned opIdx = 1; opIdx < numSrcOps + 1; opIdx++) {
    // Non-register operands (immediates/literals) never get the VGPR bit.
    if (!MI.getOperand(opIdx).isReg()) {
    unsigned reg = MI.getOperand(opIdx).getReg();
    // Only vector-register sources need the marker bit set.
    if (AMDGPU::VReg_32RegClass.contains(reg)
        || AMDGPU::VReg_64RegClass.contains(reg)) {
      Value |= (VGPR_BIT(opIdx)) << vgprBitOffset;
// Write the low `bytes` bytes of `value` to the output stream,
// least-significant byte first (little-endian).
void SICodeEmitter::outputBytes(uint64_t value, unsigned bytes)
  for (unsigned i = 0; i < bytes; i++) {
    _OS.write((uint8_t) ((value >> (8 * i)) & 0xff));